diff --git a/.dockerignore b/.dockerignore index b6687c95fa7a..51d4ec61a457 100644 --- a/.dockerignore +++ b/.dockerignore @@ -28,3 +28,4 @@ _isaac_sim? docker/.isaac-lab-docker-history # ignore uv environment env_isaaclab +tools/wheel_builder/build/ diff --git a/.gitattributes b/.gitattributes index e3c0ead689d1..99e35795611a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -11,4 +11,7 @@ *.jit filter=lfs diff=lfs merge=lfs -text *.hdf5 filter=lfs diff=lfs merge=lfs -text +source/isaaclab_tasks/test/golden_images/**/*.png filter=lfs diff=lfs merge=lfs -text + *.bat text eol=crlf +*.sh text eol=lf diff --git a/.github/ISSUE_TEMPLATE/proposal.md b/.github/ISSUE_TEMPLATE/proposal.md index c07d7f56dc85..a6dd4081906a 100644 --- a/.github/ISSUE_TEMPLATE/proposal.md +++ b/.github/ISSUE_TEMPLATE/proposal.md @@ -26,8 +26,8 @@ A clear and concise description of any alternative solutions or features you've Describe the versions where you are observing the missing feature in: -- Isaac Lab Version: [e.g. 2.3.2] -- Isaac Sim Version: [e.g. 5.1, this can be obtained by `cat ${ISAACSIM_PATH}/VERSION`] +- Isaac Lab Version: [e.g. 3.0.0] +- Isaac Sim Version: [e.g. 6.0, this can be obtained by `cat ${ISAACSIM_PATH}/VERSION`] ### Additional context diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 6e0582f37379..ff0ac5b8f122 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -17,5 +17,5 @@ For questions that are related to running and understanding Isaac Sim, please po Describe the versions that you are currently using: -- Isaac Lab Version: [e.g. 2.3.2] -- Isaac Sim Version: [e.g. 5.1, this can be obtained by `cat ${ISAACSIM_PATH}/VERSION`] +- Isaac Lab Version: [e.g. 3.0.0] +- Isaac Sim Version: [e.g. 6.0, this can be obtained by `cat ${ISAACSIM_PATH}/VERSION`] diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ee9fa4ebdc5e..c19d35fb7e79 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -47,7 +47,7 @@ To upload images to a PR -- simply drag and drop an image while in edit mode and - [ ] I have made corresponding changes to the documentation - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works -- [ ] I have updated the changelog and the corresponding version in the extension's `config/extension.toml` file +- [ ] I have added a changelog fragment under `source//changelog.d/` for every touched package (do **not** edit `CHANGELOG.rst` or bump `extension.toml` — CI handles that) - [ ] I have added my name to the `CONTRIBUTORS.md` or my name already exists there we don't use heading command for normal velocity command. @configclass @@ -133,7 +141,7 @@ class Ranges: class UniformPoseCommandCfg(CommandTermCfg): """Configuration for uniform pose command generator.""" - class_type: type = UniformPoseCommand + class_type: type["UniformPoseCommand"] | str = "{DIR}.pose_command:UniformPoseCommand" asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" @@ -172,6 +180,12 @@ class Ranges: ranges: Ranges = MISSING """Ranges for the commands.""" + position_success_threshold: float | None = None + """If set, position-error norm below this value (per step) flags the episode as successful. + + The episode-level binary "ever within threshold" is mean-reduced across environments and + logged under ``Metrics/success_rate``. Defaults to ``None`` (success tracking disabled).""" + goal_pose_visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/Command/goal_pose") """The configuration for the goal pose visualization marker. Defaults to FRAME_MARKER_CFG.""" @@ -189,7 +203,7 @@ class Ranges: class UniformPose2dCommandCfg(CommandTermCfg): """Configuration for the uniform 2D-pose command generator.""" - class_type: type = UniformPose2dCommand + class_type: type["UniformPose2dCommand"] | str = "{DIR}.pose_2d_command:UniformPose2dCommand" asset_name: str = MISSING """Name of the asset in the environment for which the commands are generated.""" @@ -219,6 +233,12 @@ class Ranges: ranges: Ranges = MISSING """Distribution ranges for the position commands.""" + position_success_threshold: float | None = None + """If set, XY position-error norm below this value (per step) flags the episode as successful. + + The episode-level binary "ever within threshold" is mean-reduced across environments and + logged under ``Metrics/success_rate``. Defaults to ``None`` (success tracking disabled).""" + goal_pose_visualizer_cfg: VisualizationMarkersCfg = GREEN_ARROW_X_MARKER_CFG.replace( prim_path="/Visuals/Command/pose_goal" ) @@ -232,7 +252,7 @@ class Ranges: class TerrainBasedPose2dCommandCfg(UniformPose2dCommandCfg): """Configuration for the terrain-based position command generator.""" - class_type = TerrainBasedPose2dCommand + class_type: type["TerrainBasedPose2dCommand"] | str = "{DIR}.pose_2d_command:TerrainBasedPose2dCommand" @configclass class Ranges: diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py index a10ee0473e4a..ffd7bdad5e49 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py @@ -60,6 +60,15 @@ def __init__(self, cfg: UniformPose2dCommandCfg, env: ManagerBasedEnv): # -- metrics self.metrics["error_pos"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device) + # -- per-episode sticky success bit (only used when cfg.position_success_threshold is set) + self._track_success = cfg.position_success_threshold is not None + if self._track_success: + self._succeeded = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) + + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "heading"] def __str__(self) -> str: msg = "PositionCommand:\n" @@ -82,8 +91,25 @@ def command(self) -> torch.Tensor: def _update_metrics(self): # logs data - self.metrics["error_pos_2d"] = torch.norm(self.pos_command_w[:, :2] - self.robot.data.root_pos_w[:, :2], dim=1) - self.metrics["error_heading"] = torch.abs(wrap_to_pi(self.heading_command_w - self.robot.data.heading_w)) + self.metrics["error_pos"] = torch.linalg.norm( + self.pos_command_w[:, :2] - self.robot.data.root_pos_w.torch[:, :2], dim=1 + ) + self.metrics["error_heading"] = torch.abs(wrap_to_pi(self.heading_command_w - self.robot.data.heading_w.torch)) + if self._track_success: + self._succeeded |= self.metrics["error_pos"] < self.cfg.position_success_threshold + + def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: + extras = super().reset(env_ids) + if self._track_success: + if env_ids is None: + env_ids = slice(None) + # Write the unified ``Metrics/success_rate`` directly to env extras so it shares + # a TensorBoard card with the same metric from other tasks. + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self._succeeded[env_ids].float().mean().item() + ) + self._succeeded[env_ids] = False + return extras def _resample_command(self, env_ids: Sequence[int]): # obtain env origins for the environments @@ -92,18 +118,20 @@ def _resample_command(self, env_ids: Sequence[int]): r = torch.empty(len(env_ids), device=self.device) self.pos_command_w[env_ids, 0] += r.uniform_(*self.cfg.ranges.pos_x) self.pos_command_w[env_ids, 1] += r.uniform_(*self.cfg.ranges.pos_y) - self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2] + self.pos_command_w[env_ids, 2] += self.robot.data.default_root_pose.torch[env_ids, 2] if self.cfg.simple_heading: # set heading command to point towards target - target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids] + target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w.torch[env_ids] target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0]) flipped_target_direction = wrap_to_pi(target_direction + torch.pi) # compute errors to find the closest direction to the current heading # this is done to avoid the discontinuity at the -pi/pi boundary - curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs() - curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs() + curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w.torch[env_ids]).abs() + curr_to_flipped_target = wrap_to_pi( + flipped_target_direction - self.robot.data.heading_w.torch[env_ids] + ).abs() # set the heading command to the closest direction self.heading_command_w[env_ids] = torch.where( @@ -117,9 +145,9 @@ def _resample_command(self, env_ids: Sequence[int]): def _update_command(self): """Re-target the position command to the current root state.""" - target_vec = self.pos_command_w - self.robot.data.root_pos_w[:, :3] - self.pos_command_b[:] = quat_apply_inverse(yaw_quat(self.robot.data.root_quat_w), target_vec) - self.heading_command_b[:] = wrap_to_pi(self.heading_command_w - self.robot.data.heading_w) + target_vec = self.pos_command_w - self.robot.data.root_pos_w.torch[:, :3] + self.pos_command_b[:] = quat_apply_inverse(yaw_quat(self.robot.data.root_quat_w.torch), target_vec) + self.heading_command_b[:] = wrap_to_pi(self.heading_command_w - self.robot.data.heading_w.torch) def _set_debug_vis_impl(self, debug_vis: bool): # create markers if necessary for the first time @@ -179,18 +207,20 @@ def _resample_command(self, env_ids: Sequence[int]): self.terrain.terrain_levels[env_ids], self.terrain.terrain_types[env_ids], ids ] # offset the position command by the current root height - self.pos_command_w[env_ids, 2] += self.robot.data.default_root_state[env_ids, 2] + self.pos_command_w[env_ids, 2] += self.robot.data.default_root_pose.torch[env_ids, 2] if self.cfg.simple_heading: # set heading command to point towards target - target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w[env_ids] + target_vec = self.pos_command_w[env_ids] - self.robot.data.root_pos_w.torch[env_ids] target_direction = torch.atan2(target_vec[:, 1], target_vec[:, 0]) flipped_target_direction = wrap_to_pi(target_direction + torch.pi) # compute errors to find the closest direction to the current heading # this is done to avoid the discontinuity at the -pi/pi boundary - curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w[env_ids]).abs() - curr_to_flipped_target = wrap_to_pi(flipped_target_direction - self.robot.data.heading_w[env_ids]).abs() + curr_to_target = wrap_to_pi(target_direction - self.robot.data.heading_w.torch[env_ids]).abs() + curr_to_flipped_target = wrap_to_pi( + flipped_target_direction - self.robot.data.heading_w.torch[env_ids] + ).abs() # set the heading command to the closest direction self.heading_command_w[env_ids] = torch.where( diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py index 2c62c4baf4b8..d98a6b5e7f81 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py @@ -15,6 +15,7 @@ from isaaclab.assets import Articulation from isaaclab.managers import CommandTerm from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.leapp import POSE7_ELEMENT_NAMES from isaaclab.utils.math import combine_frame_transforms, compute_pose_error, quat_from_euler_xyz, quat_unique if TYPE_CHECKING: @@ -28,7 +29,7 @@ class UniformPoseCommand(CommandTerm): The command generator generates poses by sampling positions uniformly within specified regions in cartesian space. For orientation, it samples uniformly the euler angles - (roll-pitch-yaw) and converts them into quaternion representation (w, x, y, z). + (roll-pitch-yaw) and converts them into quaternion representation (x, y, z, w). The position and orientation commands are generated in the base frame of the robot, and not the simulation world frame. This means that users need to handle the transformation from the @@ -60,13 +61,22 @@ def __init__(self, cfg: UniformPoseCommandCfg, env: ManagerBasedEnv): self.body_idx = self.robot.find_bodies(cfg.body_name)[0][0] # create buffers - # -- commands: (x, y, z, qw, qx, qy, qz) in root frame + # -- commands: (x, y, z, qx, qy, qz, qw) in root frame self.pose_command_b = torch.zeros(self.num_envs, 7, device=self.device) self.pose_command_b[:, 3] = 1.0 self.pose_command_w = torch.zeros_like(self.pose_command_b) # -- metrics self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) + # -- per-episode sticky success bit (only used when cfg.position_success_threshold is set) + self._track_success = cfg.position_success_threshold is not None + if self._track_success: + self._succeeded = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) + + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or POSE7_ELEMENT_NAMES def __str__(self) -> str: msg = "UniformPoseCommand:\n" @@ -82,7 +92,7 @@ def __str__(self) -> str: def command(self) -> torch.Tensor: """The desired pose command. Shape is (num_envs, 7). - The first three elements correspond to the position, followed by the quaternion orientation in (w, x, y, z). + The first three elements correspond to the position, followed by the quaternion orientation in (x, y, z, w). """ return self.pose_command_b @@ -93,8 +103,8 @@ def command(self) -> torch.Tensor: def _update_metrics(self): # transform command from base frame to simulation world frame self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms( - self.robot.data.root_pos_w, - self.robot.data.root_quat_w, + self.robot.data.root_pos_w.torch, + self.robot.data.root_quat_w.torch, self.pose_command_b[:, :3], self.pose_command_b[:, 3:], ) @@ -102,11 +112,26 @@ def _update_metrics(self): pos_error, rot_error = compute_pose_error( self.pose_command_w[:, :3], self.pose_command_w[:, 3:], - self.robot.data.body_pos_w[:, self.body_idx], - self.robot.data.body_quat_w[:, self.body_idx], + self.robot.data.body_pos_w.torch[:, self.body_idx], + self.robot.data.body_quat_w.torch[:, self.body_idx], ) - self.metrics["position_error"] = torch.norm(pos_error, dim=-1) - self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1) + self.metrics["position_error"] = torch.linalg.norm(pos_error, dim=-1) + self.metrics["orientation_error"] = torch.linalg.norm(rot_error, dim=-1) + if self._track_success: + self._succeeded |= self.metrics["position_error"] < self.cfg.position_success_threshold + + def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: + extras = super().reset(env_ids) + if self._track_success: + if env_ids is None: + env_ids = slice(None) + # Write the unified ``Metrics/success_rate`` directly to env extras so it shares + # a TensorBoard card with the same metric from other tasks. + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self._succeeded[env_ids].float().mean().item() + ) + self._succeeded[env_ids] = False + return extras def _resample_command(self, env_ids: Sequence[int]): # sample new pose targets @@ -152,5 +177,5 @@ def _debug_vis_callback(self, event): # -- goal pose self.goal_pose_visualizer.visualize(self.pose_command_w[:, :3], self.pose_command_w[:, 3:]) # -- current body pose - body_link_pose_w = self.robot.data.body_link_pose_w[:, self.body_idx] + body_link_pose_w = self.robot.data.body_link_pose_w.torch[:, self.body_idx] self.current_pose_visualizer.visualize(body_link_pose_w[:, :3], body_link_pose_w[:, 3:7]) diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py index 38bc076a9591..930f663f65d3 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py @@ -83,9 +83,19 @@ def __init__(self, cfg: UniformVelocityCommandCfg, env: ManagerBasedEnv): self.heading_target = torch.zeros(self.num_envs, device=self.device) self.is_heading_env = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) self.is_standing_env = torch.zeros_like(self.is_heading_env) - # -- metrics + # -- metrics: finalized per-episode means/rates, written at reset() and read by the base class self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device) + self.metrics["success_rate"] = torch.zeros(self.num_envs, device=self.device) + # -- per-episode running sums (cleared at episode reset) + self._error_xy_sum = torch.zeros(self.num_envs, device=self.device) + self._error_yaw_sum = torch.zeros(self.num_envs, device=self.device) + self._step_count = torch.zeros(self.num_envs, device=self.device) + + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/velocity" + self.cfg.element_names = self.cfg.element_names or ["lin_vel_x", "lin_vel_y", "ang_vel_z"] def __str__(self) -> str: """Return a string representation of the command generator.""" @@ -112,16 +122,39 @@ def command(self) -> torch.Tensor: """ def _update_metrics(self): - # time for which the command was executed - max_command_time = self.cfg.resampling_time_range[1] - max_command_step = max_command_time / self._env.step_dt - # logs data - self.metrics["error_vel_xy"] += ( - torch.norm(self.vel_command_b[:, :2] - self.robot.data.root_lin_vel_b[:, :2], dim=-1) / max_command_step - ) - self.metrics["error_vel_yaw"] += ( - torch.abs(self.vel_command_b[:, 2] - self.robot.data.root_ang_vel_b[:, 2]) / max_command_step - ) + # accumulate per-step tracking error sums; the per-episode mean is finalized in + # :meth:`reset` so the value is independent of episode length and + # ``resampling_time_range`` (no magic ``max_command_step`` divisor). + error_xy = torch.linalg.norm(self.vel_command_b[:, :2] - self.robot.data.root_lin_vel_b.torch[:, :2], dim=-1) + error_yaw = torch.abs(self.vel_command_b[:, 2] - self.robot.data.root_ang_vel_b.torch[:, 2]) + self._error_xy_sum += error_xy + self._error_yaw_sum += error_yaw + self._step_count += 1.0 + + def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: + # Finalize the just-ended episode's metrics into ``self.metrics`` BEFORE the base + # class reads them. ``success_rate`` is per-env binary: the *episode-mean* error + # is below both thresholds. Then super().reset() logs and zeros ``self.metrics``; + # we zero the running sums for ``env_ids`` afterwards so the next episode starts clean. + if env_ids is None: + env_ids = slice(None) + denom = self._step_count[env_ids].clamp_min(1.0) + mean_error_xy = self._error_xy_sum[env_ids] / denom + mean_error_yaw = self._error_yaw_sum[env_ids] / denom + self.metrics["error_vel_xy"][env_ids] = mean_error_xy + self.metrics["error_vel_yaw"][env_ids] = mean_error_yaw + self.metrics["success_rate"][env_ids] = ( + (mean_error_xy < self.cfg.vel_xy_success_threshold) & (mean_error_yaw < self.cfg.vel_yaw_success_threshold) + ).float() + extras = super().reset(env_ids) + # Route success_rate to the unified ``Metrics/success_rate`` path (shared TensorBoard + # / wandb card across tasks); pop it from the returned dict so CommandManager does + # not additionally log it under ``Metrics//success_rate``. + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = extras.pop("success_rate") + self._error_xy_sum[env_ids] = 0.0 + self._error_yaw_sum[env_ids] = 0.0 + self._step_count[env_ids] = 0.0 + return extras def _resample_command(self, env_ids: Sequence[int]): # sample velocity commands @@ -151,7 +184,9 @@ def _update_command(self): # resolve indices of heading envs env_ids = self.is_heading_env.nonzero(as_tuple=False).flatten() # compute angular velocity - heading_error = math_utils.wrap_to_pi(self.heading_target[env_ids] - self.robot.data.heading_w[env_ids]) + heading_error = math_utils.wrap_to_pi( + self.heading_target[env_ids] - self.robot.data.heading_w.torch[env_ids] + ) self.vel_command_b[env_ids, 2] = torch.clip( self.cfg.heading_control_stiffness * heading_error, min=self.cfg.ranges.ang_vel_z[0], @@ -187,11 +222,13 @@ def _debug_vis_callback(self, event): return # get marker location # -- base state - base_pos_w = self.robot.data.root_pos_w.clone() + base_pos_w = self.robot.data.root_pos_w.torch.clone() base_pos_w[:, 2] += 0.5 # -- resolve the scales and quaternions vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.command[:, :2]) - vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2]) + vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow( + self.robot.data.root_lin_vel_b.torch[:, :2] + ) # display markers self.goal_vel_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale) self.current_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale) @@ -212,7 +249,7 @@ def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torc zeros = torch.zeros_like(heading_angle) arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle) # convert everything back from base to world frame - base_quat_w = self.robot.data.root_quat_w + base_quat_w = self.robot.data.root_quat_w.torch arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat) return arrow_scale, arrow_quat diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 484121f4e491..9483d2d2d0cd 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -20,23 +20,20 @@ from typing import TYPE_CHECKING, Literal import torch - -import carb -import omni.physics.tensors.impl.api as physx -from isaacsim.core.utils.extensions import enable_extension -from pxr import Gf, Sdf, UsdGeom, Vt +import warp as wp import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.actuators import ImplicitActuator -from isaaclab.assets import Articulation, DeformableObject, RigidObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg -from isaaclab.sim.utils.stage import get_current_stage -from isaaclab.terrains import TerrainImporter -from isaaclab.utils.version import compare_versions, get_isaac_sim_version +from isaaclab.utils.version import compare_versions if TYPE_CHECKING: + from isaaclab_physx.assets import DeformableObject + + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedEnv + from isaaclab.terrains import TerrainImporter # import logger logger = logging.getLogger(__name__) @@ -84,7 +81,7 @@ def randomize_rigid_body_scale( # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - if isinstance(asset, Articulation): + if any(cls.__name__ == "Articulation" for cls in type(asset).__mro__): raise ValueError( "Scaling an articulation randomly is not supported, as it affects joint attributes and can cause" " unexpected behavior. To achieve different scales, we recommend generating separate USD files for" @@ -99,7 +96,7 @@ def randomize_rigid_body_scale( env_ids = env_ids.cpu() # acquire stage - stage = get_current_stage() + stage = env.sim.stage # resolve prim paths for spawning and cloning prim_paths = sim_utils.find_matching_prim_paths(asset.cfg.prim_path) @@ -121,7 +118,9 @@ def randomize_rigid_body_scale( elif not relative_child_path.startswith("/"): relative_child_path = "/" + relative_child_path - # use sdf changeblock for faster processing of USD properties + # use sdf changeblock for faster processing of USD properties (local: pxr only available with Kit) + from pxr import Gf, Sdf, UsdGeom, Vt # noqa: PLC0415 + with Sdf.ChangeBlock(): for i, env_id in enumerate(env_ids): # path to prim to randomize @@ -152,67 +151,50 @@ def randomize_rigid_body_scale( op_order_spec.default = Vt.TokenArray(["xformOp:translate", "xformOp:orient", "xformOp:scale"]) -class randomize_rigid_body_material(ManagerTermBase): - """Randomize the physics materials on all geometries of the asset. - - This function creates a set of physics materials with random static friction, dynamic friction, and restitution - values. The number of materials is specified by ``num_buckets``. The materials are generated by sampling - uniform random values from the given ranges. - - The material properties are then assigned to the geometries of the asset. The assignment is done by - creating a random integer tensor of shape (num_instances, max_num_shapes) where ``num_instances`` - is the number of assets spawned and ``max_num_shapes`` is the maximum number of shapes in the asset (over - all bodies). The integer values are used as indices to select the material properties from the - material buckets. - - If the flag ``make_consistent`` is set to ``True``, the dynamic friction is set to be less than or equal to - the static friction. This obeys the physics constraint on friction values. However, it may not always be - essential for the application. Thus, the flag is set to ``False`` by default. +class _RandomizeRigidBodyMaterialPhysx: + """PhysX backend implementation for material randomization. - .. attention:: - This function uses CPU tensors to assign the material properties. It is recommended to use this function - only during the initialization of the environment. Otherwise, it may lead to a significant performance - overhead. - - .. note:: - PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this - limit, the simulation will crash. Due to this reason, we sample the materials only once during initialization. - Afterwards, these materials are randomly assigned to the geometries of the asset. + Uses the bucket-based approach required by PhysX's 64000 unique material limit. + Materials are pre-sampled into buckets and randomly assigned to shapes. """ - def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): - """Initialize the term. + def __init__( + self, cfg: EventTermCfg, env: ManagerBasedEnv, asset: RigidObject | Articulation, asset_cfg: SceneEntityCfg + ): + from isaaclab.assets import BaseArticulation - Args: - cfg: The configuration of the event term. - env: The environment instance. + # obtain parameters for sampling friction and restitution values + static_friction_range = cfg.params.get("static_friction_range", (1.0, 1.0)) + dynamic_friction_range = cfg.params.get("dynamic_friction_range", (1.0, 1.0)) + restitution_range = cfg.params.get("restitution_range", (0.0, 0.0)) + num_buckets = int(cfg.params.get("num_buckets", 1)) - Raises: - ValueError: If the asset is not a RigidObject or an Articulation. - """ - super().__init__(cfg, env) + # sample material properties from the given ranges + # note: we only sample the materials once during initialization + # afterwards these are randomly assigned to the geometries of the asset + range_list = [static_friction_range, dynamic_friction_range, restitution_range] + ranges = torch.tensor(range_list, device="cpu") + self.material_buckets = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (num_buckets, 3), device="cpu") - # extract the used quantities (to enable type-hinting) - self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] - self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + # ensure dynamic friction is always less than static friction + make_consistent = cfg.params.get("make_consistent", False) + if make_consistent: + self.material_buckets[:, 1] = torch.min(self.material_buckets[:, 0], self.material_buckets[:, 1]) - if not isinstance(self.asset, (RigidObject, Articulation)): - raise ValueError( - f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{self.asset_cfg.name}'" - f" with type: '{type(self.asset)}'." - ) + self.asset = asset + self.asset_cfg = asset_cfg # obtain number of shapes per body (needed for indexing the material properties correctly) # note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes # per body. We use the physics simulation view to obtain the number of shapes per body. - if isinstance(self.asset, Articulation) and self.asset_cfg.body_ids != slice(None): + if isinstance(asset, BaseArticulation) and asset_cfg.body_ids != slice(None): self.num_shapes_per_body = [] - for link_path in self.asset.root_physx_view.link_paths[0]: - link_physx_view = self.asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore + for link_path in asset.root_view.link_paths[0]: + link_physx_view = asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore self.num_shapes_per_body.append(link_physx_view.max_shapes) # ensure the parsing is correct num_shapes = sum(self.num_shapes_per_body) - expected_shapes = self.asset.root_physx_view.max_shapes + expected_shapes = asset.root_view.max_shapes if num_shapes != expected_shapes: raise ValueError( "Randomization term 'randomize_rigid_body_material' failed to parse the number of shapes per body." @@ -222,24 +204,6 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # in this case, we don't need to do special indexing self.num_shapes_per_body = None - # obtain parameters for sampling friction and restitution values - static_friction_range = cfg.params.get("static_friction_range", (1.0, 1.0)) - dynamic_friction_range = cfg.params.get("dynamic_friction_range", (1.0, 1.0)) - restitution_range = cfg.params.get("restitution_range", (0.0, 0.0)) - num_buckets = int(cfg.params.get("num_buckets", 1)) - - # sample material properties from the given ranges - # note: we only sample the materials once during initialization - # afterwards these are randomly assigned to the geometries of the asset - range_list = [static_friction_range, dynamic_friction_range, restitution_range] - ranges = torch.tensor(range_list, device="cpu") - self.material_buckets = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (num_buckets, 3), device="cpu") - - # ensure dynamic friction is always less than static friction - make_consistent = cfg.params.get("make_consistent", False) - if make_consistent: - self.material_buckets[:, 1] = torch.min(self.material_buckets[:, 0], self.material_buckets[:, 1]) - def __call__( self, env: ManagerBasedEnv, @@ -253,17 +217,17 @@ def __call__( ): # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + env_ids = torch.arange(env.scene.num_envs, device="cpu", dtype=torch.int32) else: env_ids = env_ids.cpu() # randomly assign material IDs to the geometries - total_num_shapes = self.asset.root_physx_view.max_shapes + total_num_shapes = self.asset.root_view.max_shapes bucket_ids = torch.randint(0, num_buckets, (len(env_ids), total_num_shapes), device="cpu") material_samples = self.material_buckets[bucket_ids] # retrieve material buffer from the physics simulation - materials = self.asset.root_physx_view.get_material_properties() + materials = wp.to_torch(self.asset.root_view.get_material_properties()) # update material buffer with new samples if self.num_shapes_per_body is not None: @@ -280,7 +244,175 @@ def __call__( materials[env_ids] = material_samples[:] # apply to simulation - self.asset.root_physx_view.set_material_properties(materials, env_ids) + self.asset.root_view.set_material_properties( + wp.from_torch(materials, dtype=wp.float32), wp.from_torch(env_ids, dtype=wp.int32) + ) + + +class _RandomizeRigidBodyMaterialNewton: + """Newton backend implementation for material randomization. + + Newton can assign arbitrary friction/restitution per shape (no bucket limitation). + Samples friction (mu) and restitution continuously from the given ranges. + Newton uses a single friction coefficient (mu), so ``dynamic_friction_range`` + and ``num_buckets`` are ignored. + """ + + def __init__( + self, cfg: EventTermCfg, env: ManagerBasedEnv, asset: RigidObject | Articulation, asset_cfg: SceneEntityCfg + ): + import isaaclab_newton.physics.newton_manager as newton_manager_module # noqa: PLC0415 + from isaaclab_newton.assets import Articulation as NewtonArticulation # noqa: PLC0415 + from newton.solvers import SolverNotifyFlags # noqa: PLC0415 + + self.asset = asset + self.asset_cfg = asset_cfg + self._newton_manager = newton_manager_module.NewtonManager + self._notify_shape_properties = SolverNotifyFlags.SHAPE_PROPERTIES + + # cache friction/restitution ranges for continuous per-shape sampling + self._static_friction_range = cfg.params.get("static_friction_range", (1.0, 1.0)) + self._restitution_range = cfg.params.get("restitution_range", (0.0, 0.0)) + + # get friction/restitution view-level bindings + model = self._newton_manager.get_model() + self._friction_binding = asset._root_view.get_attribute("shape_material_mu", model)[:, 0] # type: ignore + self._restitution_binding = asset._root_view.get_attribute("shape_material_restitution", model)[:, 0] # type: ignore + + # compute shape indices for body-specific randomization + if isinstance(asset, NewtonArticulation) and asset_cfg.body_ids != slice(None): + num_shapes_per_body = asset.num_shapes_per_body + shape_indices_list = [] + for body_id in asset_cfg.body_ids: + start_idx = sum(num_shapes_per_body[:body_id]) + end_idx = start_idx + num_shapes_per_body[body_id] + shape_indices_list.extend(range(start_idx, end_idx)) + self._shape_indices = torch.tensor(shape_indices_list, dtype=torch.long) + else: + self._shape_indices = torch.arange(self._friction_binding.shape[1], dtype=torch.long) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + static_friction_range: tuple[float, float], + dynamic_friction_range: tuple[float, float], + restitution_range: tuple[float, float], + num_buckets: int, + asset_cfg: SceneEntityCfg, + make_consistent: bool = False, + ): + device = env.device + # resolve environment ids + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=device, dtype=torch.int32) + else: + env_ids = env_ids.to(device) + + num_shapes = len(self._shape_indices) + shape_idx = self._shape_indices.to(device) + + # sample friction (mu) and restitution continuously per shape + friction_range = torch.tensor(self._static_friction_range, device=device) + restitution_range_t = torch.tensor(self._restitution_range, device=device) + friction_samples = math_utils.sample_uniform( + friction_range[0], friction_range[1], (len(env_ids), num_shapes), device=device + ) + restitution_samples = math_utils.sample_uniform( + restitution_range_t[0], restitution_range_t[1], (len(env_ids), num_shapes), device=device + ) + + # write only the affected env_ids to the warp binding + friction_view = wp.to_torch(self._friction_binding) + restitution_view = wp.to_torch(self._restitution_binding) + friction_view[env_ids[:, None], shape_idx] = friction_samples + restitution_view[env_ids[:, None], shape_idx] = restitution_samples + + # notify the physics engine + self._newton_manager.add_model_change(self._notify_shape_properties) + + +class randomize_rigid_body_material(ManagerTermBase): + """Randomize the physics materials on all geometries of the asset. + + This function creates a set of physics materials with random static friction, dynamic friction, and restitution + values and assigns them to the geometries of the asset. + + Automatically detects the active physics backend (PhysX or Newton) and delegates to + the appropriate backend-specific implementation: + + - **PhysX**: Uses the 3-tuple material format (static_friction, dynamic_friction, restitution) + with bucket-based assignment (limited to 64000 unique materials). Applied via the PhysX + tensor API (``root_view.set_material_properties``). + - **Newton**: Samples friction (mu) and restitution continuously per shape (no bucket + limitation). Newton uses a single friction coefficient, so ``dynamic_friction_range`` + and ``num_buckets`` are ignored. Applied directly to Newton's view-level bindings. + + If the flag ``make_consistent`` is set to ``True``, the dynamic friction is set to be less than or equal to + the static friction (PhysX only). This obeys the physics constraint on friction values. + + .. attention:: + On PhysX, this function uses CPU tensors to assign the material properties. It is recommended to + use this function only during the initialization of the environment. + + .. note:: + PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this + limit, the simulation will crash. Due to this reason, we sample the materials only once during initialization. + Afterwards, these materials are randomly assigned to the geometries of the asset. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. + + Args: + cfg: The configuration of the event term. + env: The environment instance. + + Raises: + ValueError: If the asset is not a RigidObject or an Articulation. + """ + from isaaclab.assets import BaseArticulation, BaseRigidObject + + super().__init__(cfg, env) + + # extract the used quantities (to enable type-hinting) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{self.asset_cfg.name}'" + f" with type: '{type(self.asset)}'." + ) + + # detect physics backend and instantiate the appropriate implementation + manager_name = env.sim.physics_manager.__name__.lower() + if "newton" in manager_name: + self._impl = _RandomizeRigidBodyMaterialNewton(cfg, env, self.asset, self.asset_cfg) + else: + self._impl = _RandomizeRigidBodyMaterialPhysx(cfg, env, self.asset, self.asset_cfg) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + static_friction_range: tuple[float, float], + dynamic_friction_range: tuple[float, float], + restitution_range: tuple[float, float], + num_buckets: int, + asset_cfg: SceneEntityCfg, + make_consistent: bool = False, + ): + self._impl( + env, + env_ids, + static_friction_range, + dynamic_friction_range, + restitution_range, + num_buckets, + asset_cfg, + make_consistent, + ) class randomize_rigid_body_mass(ManagerTermBase): @@ -336,6 +468,9 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): " physics errors." ) + self.default_mass = None + self.default_inertia = None + def __call__( self, env: ManagerBasedEnv, @@ -347,25 +482,29 @@ def __call__( recompute_inertia: bool = True, min_mass: float = 1e-6, ): + if self.default_mass is None: + self.default_mass = self.asset.data.body_mass.torch.clone() + if self.default_inertia is None: + self.default_inertia = self.asset.data.body_inertia.torch.clone() # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + env_ids = torch.arange(env.scene.num_envs, device=self.asset.device, dtype=torch.int32) else: - env_ids = env_ids.cpu() + env_ids = env_ids.to(self.asset.device) # resolve body indices if self.asset_cfg.body_ids == slice(None): - body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int, device="cpu") + body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int32, device=self.asset.device) else: - body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int, device="cpu") + body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int32, device=self.asset.device) # get the current masses of the bodies (num_assets, num_bodies) - masses = self.asset.root_physx_view.get_masses() + masses = self.asset.data.body_mass.torch.clone() # apply randomization on default values # this is to make sure when calling the function multiple times, the randomization is applied on the # default values and not the previously randomized values - masses[env_ids[:, None], body_ids] = self.asset.data.default_mass[env_ids[:, None], body_ids].clone() + masses[env_ids[:, None], body_ids] = self.default_mass[env_ids[:, None], body_ids].clone() # sample from the given range # note: we modify the masses in-place for all environments @@ -376,166 +515,588 @@ def __call__( masses = torch.clamp(masses, min=min_mass) # ensure masses are positive # set the mass into the physics simulation - self.asset.root_physx_view.set_masses(masses, env_ids) + # note: backends expect partial data of shape (len(env_ids), len(body_ids)) + self.asset.set_masses_index(masses=masses[env_ids[:, None], body_ids], body_ids=body_ids, env_ids=env_ids) # recompute inertia tensors if needed if recompute_inertia: # compute the ratios of the new masses to the initial masses - ratios = masses[env_ids[:, None], body_ids] / self.asset.data.default_mass[env_ids[:, None], body_ids] + ratios = masses[env_ids[:, None], body_ids] / self.default_mass[env_ids[:, None], body_ids] # scale the inertia tensors by the the ratios # since mass randomization is done on default values, we can use the default inertia tensors - inertias = self.asset.root_physx_view.get_inertias() - if isinstance(self.asset, Articulation): - # inertia has shape: (num_envs, num_bodies, 9) for articulation - inertias[env_ids[:, None], body_ids] = ( - self.asset.data.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] - ) - else: - # inertia has shape: (num_envs, 9) for rigid object - inertias[env_ids] = self.asset.data.default_inertia[env_ids] * ratios + inertias = self.asset.data.body_inertia.torch.clone() + # inertia has shape: (num_envs, num_bodies, 9) for all assets + inertias[env_ids[:, None], body_ids] = self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] # set the inertia tensors into the physics simulation - self.asset.root_physx_view.set_inertias(inertias, env_ids) + # note: backends expect partial data of shape (len(env_ids), len(body_ids), 9) + self.asset.set_inertias_index( + inertias=inertias[env_ids[:, None], body_ids], body_ids=body_ids, env_ids=env_ids + ) -def randomize_rigid_body_com( - env: ManagerBasedEnv, - env_ids: torch.Tensor | None, - com_range: dict[str, tuple[float, float]], - asset_cfg: SceneEntityCfg, -): - """Randomize the center of mass (CoM) of rigid bodies by adding a random value sampled from the given ranges. +class randomize_rigid_body_inertia(ManagerTermBase): + """Randomize the inertia tensor of rigid bodies by adding, scaling, or setting values. + + This function modifies body inertia tensors independently of mass. The inertia tensor + is a 3x3 symmetric matrix stored as 9 elements: ``[Ixx, Ixy, Ixz, Iyx, Iyy, Iyz, Izx, Izy, Izz]``. + + Two modes are supported via the :attr:`diagonal_only` parameter: + + - **diagonal_only=True** (default): Only modifies diagonal elements (Ixx, Iyy, Izz at + indices 0, 4, 8). This is useful for adding numerical stability (armature/regularization) + without changing rotational coupling between axes. The diagonal elements represent + resistance to rotation about each principal axis. + + - **diagonal_only=False**: Modifies all 9 elements of the inertia tensor. This can + simulate manufacturing variations or asymmetric mass distributions. Off-diagonal + elements represent coupling between rotations about different axes. .. note:: - This function uses CPU tensors to assign the CoM. It is recommended to use this function - only during the initialization of the environment. + Unlike :class:`randomize_rigid_body_mass` which recomputes inertia based on mass + ratios, this function modifies inertia directly without affecting mass. """ - # extract the used quantities (to enable type-hinting) - asset: Articulation = env.scene[asset_cfg.name] - # resolve environment ids - if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") - else: - env_ids = env_ids.cpu() - # resolve body indices - if asset_cfg.body_ids == slice(None): - body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu") - else: - body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu") + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. - # sample random CoM values - range_list = [com_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] - ranges = torch.tensor(range_list, device="cpu") - rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device="cpu").unsqueeze(1) + Args: + cfg: The configuration of the event term. + env: The environment instance. - # get the current com of the bodies (num_assets, num_bodies) - coms = asset.root_physx_view.get_coms().clone() + Raises: + ValueError: If the operation is not supported. + ValueError: If the lower bound is negative or zero when not allowed for scale operation. + ValueError: If the upper bound is less than the lower bound. + """ + from isaaclab.assets import BaseArticulation, BaseRigidObject, BaseRigidObjectCollection - # Randomize the com in range - coms[env_ids[:, None], body_ids, :3] += rand_samples + super().__init__(cfg, env) - # Set the new coms - asset.root_physx_view.set_coms(coms, env_ids) + # extract the used quantities (to enable type-hinting) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation, BaseRigidObjectCollection)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_inertia' not supported for asset: '{self.asset_cfg.name}'" + f" with type: '{type(self.asset)}'." + ) -def randomize_rigid_body_collider_offsets( - env: ManagerBasedEnv, - env_ids: torch.Tensor | None, - asset_cfg: SceneEntityCfg, - rest_offset_distribution_params: tuple[float, float] | None = None, - contact_offset_distribution_params: tuple[float, float] | None = None, - distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", -): - """Randomize the collider parameters of rigid bodies in an asset by adding, scaling, or setting random values. + # check for valid operation + if cfg.params["operation"] == "scale": + if "inertia_distribution_params" in cfg.params: + _validate_scale_range( + cfg.params["inertia_distribution_params"], "inertia_distribution_params", allow_zero=False + ) + elif cfg.params["operation"] not in ("abs", "add"): + raise ValueError( + f"Randomization term 'randomize_rigid_body_inertia' does not support operation:" + f" '{cfg.params['operation']}'." + ) + + self.default_inertia = None + # cache inertia indices: diagonal (0, 4, 8) for regularization, or all elements + diagonal_only = cfg.params.get("diagonal_only", True) + self._inertia_idx = torch.tensor([0, 4, 8], device=self.asset.device) if diagonal_only else slice(None) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + inertia_distribution_params: tuple[float, float], + operation: Literal["add", "scale", "abs"] = "add", + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + diagonal_only: bool = True, + ): + """Randomize body inertia tensors. + + Args: + env: The environment instance. + env_ids: The environment indices to randomize. If None, all environments are randomized. + asset_cfg: The asset configuration specifying the asset and body names. + inertia_distribution_params: Distribution parameters as a tuple of two floats + ``(low, high)`` for sampling inertia modification values. + operation: The operation to apply. Options: ``"add"``, ``"scale"``, ``"abs"``. + Defaults to ``"add"`` which is typical for regularization/armature. + distribution: The distribution to sample from. Options: ``"uniform"``, + ``"log_uniform"``, ``"gaussian"``. Defaults to ``"uniform"``. + diagonal_only: If True, only modify diagonal elements (Ixx, Iyy, Izz) for + numerical stability. If False, modify all 9 elements. Defaults to True. + """ + # store default inertia on first call for repeatable randomization + if self.default_inertia is None: + self.default_inertia = self.asset.data.body_inertia.torch.clone() + + # resolve environment ids + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=self.asset.device, dtype=torch.int32) + else: + env_ids = env_ids.to(self.asset.device) + + # resolve body indices + if self.asset_cfg.body_ids == slice(None): + body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int32, device=self.asset.device) + else: + body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int32, device=self.asset.device) + + # get default inertias for affected envs/bodies (advanced indexing creates a copy) + # shape: (len(env_ids), len(body_ids), 9) + inertias = self.default_inertia[env_ids[:, None], body_ids] + + # resolve the distribution function + if distribution == "uniform": + dist_fn = math_utils.sample_uniform + elif distribution == "log_uniform": + dist_fn = math_utils.sample_log_uniform + elif distribution == "gaussian": + dist_fn = math_utils.sample_gaussian + else: + raise NotImplementedError( + f"Unknown distribution: '{distribution}' for inertia randomization." + " Please use 'uniform', 'log_uniform', or 'gaussian'." + ) + + # sample random values once per (env, body) - shape: (len(env_ids), len(body_ids)) + random_values = dist_fn(*inertia_distribution_params, (len(env_ids), len(body_ids)), device=self.asset.device) + + # apply the operation with the SAME random value per body + if operation == "add": + inertias[:, :, self._inertia_idx] += random_values[..., None] + elif operation == "scale": + inertias[:, :, self._inertia_idx] *= random_values[..., None] + elif operation == "abs": + inertias[:, :, self._inertia_idx] = random_values[..., None] + + # set the inertia tensors into the physics simulation + self.asset.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids) + + +class randomize_rigid_body_com(ManagerTermBase): + """Randomize the center of mass (CoM) of rigid bodies by adding a random value sampled from the given ranges. + + This class tracks the original CoM values and randomizes from those defaults on each call, + ensuring repeatable randomization across resets. + + Automatically detects the active physics backend: + + - **PhysX**: Passes the full CoM pose (position + quaternion) to ``set_coms_index``. + - **Newton**: Passes position-only (vec3) to ``set_coms_index``. Note that on Newton + (MuJoCo Warp), runtime CoM changes may cause simulation instability because + ``notify_model_changed(BODY_INERTIAL_PROPERTIES)`` does not fully recompute the + mass matrix after ``body_ipos`` changes. Use with caution until this is fixed upstream. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. + + Args: + cfg: The configuration of the event term. + env: The environment instance. + """ + super().__init__(cfg, env) + + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + # detect physics backend + manager_name = env.sim.physics_manager.__name__.lower() + self._is_newton = "newton" in manager_name + + self.default_com = None + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + com_range: dict[str, tuple[float, float]], + asset_cfg: SceneEntityCfg, + ): + # store default CoM on first call for repeatable randomization + if self.default_com is None: + self.default_com = self.asset.data.body_com_pose_b.torch.clone() + + # resolve environment ids + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=self.asset.device) + else: + env_ids = env_ids.to(self.asset.device) + + # resolve body indices + if self.asset_cfg.body_ids == slice(None): + body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int, device=self.asset.device) + else: + body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int, device=self.asset.device) + + # sample random CoM values + range_list = [com_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] + ranges = torch.tensor(range_list, device=self.asset.device) + rand_samples = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=self.asset.device + ).unsqueeze(1) + + # start from defaults and add random offsets + coms = self.default_com.clone() + coms[env_ids[:, None], body_ids, :3] += rand_samples + + # Newton expects position-only (vec3f), PhysX expects the full pose (pos + quat) + # note: pass partial data of shape (len(env_ids), len(body_ids), ...) to match the API + if self._is_newton: + self.asset.set_coms_index(coms=coms[env_ids[:, None], body_ids, :3], body_ids=body_ids, env_ids=env_ids) + else: + self.asset.set_coms_index(coms=coms[env_ids[:, None], body_ids], body_ids=body_ids, env_ids=env_ids) + + +class _RandomizeRigidBodyColliderOffsetsPhysx: + """PhysX backend implementation for collider offset randomization. + + Uses rest offset and contact offset directly via the PhysX tensor API. + """ + + def __init__(self, asset: RigidObject | Articulation): + self.asset = asset + self.default_rest_offsets = wp.to_torch(asset.root_view.get_rest_offsets()).clone() + self.default_contact_offsets = wp.to_torch(asset.root_view.get_contact_offsets()).clone() + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + rest_offset_distribution_params: tuple[float, float] | None = None, + contact_offset_distribution_params: tuple[float, float] | None = None, + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + ): + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device="cpu", dtype=torch.int32) + else: + env_ids = env_ids.to(device="cpu", dtype=torch.int32) + wp_env_ids = wp.from_torch(env_ids, dtype=wp.int32) + + if rest_offset_distribution_params is not None: + rest_offset = self.default_rest_offsets.clone() + rest_offset = _randomize_prop_by_op( + rest_offset, + rest_offset_distribution_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + self.asset.root_view.set_rest_offsets(wp.from_torch(rest_offset), wp_env_ids) + + if contact_offset_distribution_params is not None: + contact_offset = self.default_contact_offsets.clone() + contact_offset = _randomize_prop_by_op( + contact_offset, + contact_offset_distribution_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + self.asset.root_view.set_contact_offsets(wp.from_torch(contact_offset), wp_env_ids) + + +class _RandomizeRigidBodyColliderOffsetsNewton: + """Newton backend implementation for collider offset randomization. + + Maps PhysX concepts to Newton's geometry properties: + + - ``rest_offset`` -> ``shape_margin`` (Newton margin) + - ``contact_offset`` -> ``shape_gap`` (Newton gap = contact_offset - margin) + + See the `Newton collision schema`_ for details. + + .. _Newton collision schema: https://newton-physics.github.io/newton/latest/concepts/collisions.html + """ + + def __init__(self, asset: RigidObject | Articulation): + import isaaclab_newton.physics.newton_manager as newton_manager_module # noqa: PLC0415 + from newton.solvers import SolverNotifyFlags # noqa: PLC0415 + + self.asset = asset + self._newton_manager = newton_manager_module.NewtonManager + self._notify_shape_properties = SolverNotifyFlags.SHAPE_PROPERTIES + + model = self._newton_manager.get_model() + self._sim_bind_shape_margin = asset._root_view.get_attribute("shape_margin", model)[:, 0] # type: ignore + self._sim_bind_shape_gap = asset._root_view.get_attribute("shape_gap", model)[:, 0] # type: ignore + + self.default_margin = wp.to_torch(self._sim_bind_shape_margin).clone() + self.default_gap = wp.to_torch(self._sim_bind_shape_gap).clone() + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + rest_offset_distribution_params: tuple[float, float] | None = None, + contact_offset_distribution_params: tuple[float, float] | None = None, + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + ): + device = env.device + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=device, dtype=torch.int32) + else: + env_ids = env_ids.to(device) + + margin_view = wp.to_torch(self._sim_bind_shape_margin) + + if rest_offset_distribution_params is not None: + margin = self.default_margin.clone() + margin = _randomize_prop_by_op( + margin, + rest_offset_distribution_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + self.default_margin[env_ids] = margin[env_ids] + margin_view[env_ids] = margin[env_ids] + + if contact_offset_distribution_params is not None: + current_margin = self.default_margin + contact_offset = torch.zeros_like(self.default_gap) + contact_offset = _randomize_prop_by_op( + contact_offset, + contact_offset_distribution_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + gap = torch.clamp(contact_offset - current_margin, min=0.0) + self.default_gap[env_ids] = gap[env_ids] + gap_view = wp.to_torch(self._sim_bind_shape_gap) + gap_view[env_ids] = gap[env_ids] + + if rest_offset_distribution_params is not None or contact_offset_distribution_params is not None: + self._newton_manager.add_model_change(self._notify_shape_properties) + + +class randomize_rigid_body_collider_offsets(ManagerTermBase): + """Randomize the collider parameters of rigid bodies by setting random values. This function allows randomizing the collider parameters of the asset, such as rest and contact offsets. - These correspond to the physics engine collider properties that affect the collision checking. + These correspond to the physics engine collider properties that affect collision checking. - The function samples random values from the given distribution parameters and applies the operation to - the collider properties. It then sets the values into the physics simulation. If the distribution parameters - are not provided for a particular property, the function does not modify the property. + Automatically detects the active physics backend (PhysX or Newton) and delegates to + the appropriate backend-specific implementation: - Currently, the distribution parameters are applied as absolute values. + - **PhysX**: Uses rest offset and contact offset directly via the PhysX tensor API + (``root_view.set_rest_offsets`` / ``root_view.set_contact_offsets``). + - **Newton**: Maps PhysX concepts to Newton's geometry properties. PhysX ``rest_offset`` + maps to Newton ``shape_margin``, and PhysX ``contact_offset`` is converted to Newton + ``shape_gap`` via ``gap = contact_offset - margin``. + + The function samples random values from the given distribution parameters and applies them + as absolute values to the collider properties. If the distribution parameters are not + provided for a particular property, the function does not modify it. .. tip:: - This function uses CPU tensors to assign the collision properties. It is recommended to use this function - only during the initialization of the environment. + This function uses CPU tensors (PhysX) or GPU tensors (Newton) to assign the collision + properties. It is recommended to use this function only during the initialization of + the environment. """ - # extract the used quantities (to enable type-hinting) - asset: RigidObject | Articulation = env.scene[asset_cfg.name] - # resolve environment ids - if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. + + Args: + cfg: The configuration of the event term. + env: The environment instance. + + Raises: + ValueError: If the asset is not a RigidObject or an Articulation. + """ + from isaaclab.assets import BaseArticulation, BaseRigidObject + + super().__init__(cfg, env) + + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_collider_offsets' not supported for asset:" + f" '{self.asset_cfg.name}' with type: '{type(self.asset)}'." + ) - # sample collider properties from the given ranges and set into the physics simulation - # -- rest offsets - if rest_offset_distribution_params is not None: - rest_offset = asset.root_physx_view.get_rest_offsets().clone() - rest_offset = _randomize_prop_by_op( - rest_offset, + # detect physics backend and instantiate the appropriate implementation + manager_name = env.sim.physics_manager.__name__.lower() + if "newton" in manager_name: + self._impl = _RandomizeRigidBodyColliderOffsetsNewton(self.asset) + else: + self._impl = _RandomizeRigidBodyColliderOffsetsPhysx(self.asset) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + rest_offset_distribution_params: tuple[float, float] | None = None, + contact_offset_distribution_params: tuple[float, float] | None = None, + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + ): + self._impl( + env, + env_ids, + asset_cfg, rest_offset_distribution_params, - None, - slice(None), - operation="abs", - distribution=distribution, - ) - asset.root_physx_view.set_rest_offsets(rest_offset, env_ids.cpu()) - # -- contact offsets - if contact_offset_distribution_params is not None: - contact_offset = asset.root_physx_view.get_contact_offsets().clone() - contact_offset = _randomize_prop_by_op( - contact_offset, contact_offset_distribution_params, - None, - slice(None), - operation="abs", - distribution=distribution, + distribution, ) - asset.root_physx_view.set_contact_offsets(contact_offset, env_ids.cpu()) -def randomize_physics_scene_gravity( - env: ManagerBasedEnv, - env_ids: torch.Tensor | None, - gravity_distribution_params: tuple[list[float], list[float]], - operation: Literal["add", "scale", "abs"], - distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", -): +class randomize_physics_scene_gravity(ManagerTermBase): """Randomize gravity by adding, scaling, or setting random values. - This function allows randomizing gravity of the physics scene. The function samples random values from the - given distribution parameters and adds, scales, or sets the values into the physics simulation based on the - operation. + Automatically detects the active physics backend (PhysX or Newton) and applies + the appropriate gravity randomization strategy: - The distribution parameters are lists of two elements each, representing the lower and upper bounds of the - distribution for the x, y, and z components of the gravity vector. The function samples random values for each - component independently. + - **PhysX**: samples a single gravity vector and sets it scene-wide via the PhysX + simulation view. All environments share the same gravity. + - **Newton**: samples per-environment gravity vectors and writes them in-place to + the Newton model's per-world gravity array on GPU. - .. attention:: - This function applied the same gravity for all the environments. + The distribution parameters are tuples of two lists with three floats each, + representing the lower and upper bounds for the x, y, and z gravity components [m/s^2]. - .. tip:: - This function uses CPU tensors to assign gravity. + Args: + cfg: The configuration of the event term. + env: The environment instance. """ - # get the current gravity - gravity = torch.tensor(env.sim.cfg.gravity, device="cpu").unsqueeze(0) - dist_param_0 = torch.tensor(gravity_distribution_params[0], device="cpu") - dist_param_1 = torch.tensor(gravity_distribution_params[1], device="cpu") - gravity = _randomize_prop_by_op( - gravity, - (dist_param_0, dist_param_1), - None, - slice(None), - operation=operation, - distribution=distribution, - ) - # unbatch the gravity tensor into a list - gravity = gravity[0].tolist() - # set the gravity into the physics simulation - physics_sim_view: physx.SimulationView = sim_utils.SimulationContext.instance().physics_sim_view - physics_sim_view.set_gravity(carb.Float3(*gravity)) + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + + manager_name = env.sim.physics_manager.__name__.lower() + if "newton" in manager_name: + self._backend = "newton" + self._init_newton(cfg, env) + else: + self._backend = "physx" + self._init_physx(env) + + distribution = cfg.params.get("distribution", "uniform") + if distribution == "uniform": + self._dist_fn = math_utils.sample_uniform + elif distribution == "log_uniform": + self._dist_fn = math_utils.sample_log_uniform + elif distribution == "gaussian": + self._dist_fn = math_utils.sample_gaussian + else: + raise NotImplementedError( + f"Unknown distribution: '{distribution}' for gravity randomization." + " Please use 'uniform', 'log_uniform', or 'gaussian'." + ) + + operation = cfg.params["operation"] + if operation not in ("add", "scale", "abs"): + raise NotImplementedError( + f"Unknown operation: '{operation}' for gravity randomization. Please use 'add', 'scale', or 'abs'." + ) + + gravity_distribution_params = cfg.params["gravity_distribution_params"] + self._dist_param_0 = torch.tensor(gravity_distribution_params[0], device=env.device, dtype=torch.float32) + self._dist_param_1 = torch.tensor(gravity_distribution_params[1], device=env.device, dtype=torch.float32) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + gravity_distribution_params: tuple[list[float], list[float]], + operation: Literal["add", "scale", "abs"], + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + ): + """Randomize gravity for the specified environments. + + Args: + env: The environment instance. + env_ids: The environment IDs to randomize. If None, all environments are randomized. + gravity_distribution_params: Distribution parameters as a tuple of two lists, each + with 3 floats corresponding to (x, y, z) gravity components [m/s^2]. Updated + into pre-allocated tensors each call to support curriculum-driven range changes. + operation: The operation to apply ('add', 'scale', or 'abs'). + distribution: The distribution type (cached at init, param ignored at runtime). + """ + self._dist_param_0[0] = gravity_distribution_params[0][0] + self._dist_param_1[0] = gravity_distribution_params[1][0] + self._dist_param_0[1] = gravity_distribution_params[0][1] + self._dist_param_1[1] = gravity_distribution_params[1][1] + self._dist_param_0[2] = gravity_distribution_params[0][2] + self._dist_param_1[2] = gravity_distribution_params[1][2] + + if self._backend == "newton": + self._call_newton(env, env_ids, operation) + else: + self._call_physx(env, operation) + + def _init_newton(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Cache Newton manager reference and solver notification flag.""" + import isaaclab_newton.physics.newton_manager as newton_manager_module # noqa: PLC0415 + from newton.solvers import SolverNotifyFlags # noqa: PLC0415 + + self._newton_manager = newton_manager_module.NewtonManager + self._notify_model_properties = SolverNotifyFlags.MODEL_PROPERTIES + + def _call_newton( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + operation: str, + ): + """Apply per-environment gravity via Newton's per-world gravity array on GPU.""" + model = self._newton_manager.get_model() + if model is None or model.gravity is None: + raise RuntimeError("Newton model is not initialized. Cannot randomize gravity.") + + gravity = wp.to_torch(model.gravity) + + if env_ids is None: + env_ids = env.scene._ALL_INDICES + if len(env_ids) == 0: + return + + num = len(env_ids) + random_values = self._dist_fn( + self._dist_param_0.unsqueeze(0).expand(num, -1), + self._dist_param_1.unsqueeze(0).expand(num, -1), + (num, 3), + device=env.device, + ) + + if operation == "abs": + gravity[env_ids] = random_values + elif operation == "add": + gravity[env_ids] += random_values + elif operation == "scale": + gravity[env_ids] *= random_values + + self._newton_manager.add_model_change(self._notify_model_properties) + + def _init_physx(self, env: ManagerBasedEnv): + """Cache the ``carb`` module and PhysX simulation view for scene-wide gravity updates.""" + import carb # noqa: PLC0415 + + self._carb = carb + self._physics_sim_view = sim_utils.SimulationContext.instance().physics_sim_view + + def _call_physx(self, env: ManagerBasedEnv, operation: str): + """Sample a single gravity vector and apply it scene-wide via the PhysX simulation view.""" + gravity = torch.tensor(env.sim.cfg.gravity, device="cpu").unsqueeze(0) + gravity = _randomize_prop_by_op( + gravity, + (self._dist_param_0.cpu(), self._dist_param_1.cpu()), + None, + slice(None), + operation=operation, + distribution="uniform", + ) + gravity = gravity[0].tolist() + self._physics_sim_view.set_gravity(self._carb.Float3(*gravity)) class randomize_actuator_gains(ManagerTermBase): @@ -570,6 +1131,18 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + self.default_joint_stiffness = self.asset.data.joint_stiffness.torch.clone() + self.default_joint_damping = self.asset.data.joint_damping.torch.clone() + + # For explicit actuators the sim-level stiffness/damping is zeroed out, so patch + # the defaults with the actual actuator PD gains. + for actuator in self.asset.actuators.values(): + if not isinstance(actuator, ImplicitActuator): + joint_ids = actuator.joint_indices + self.default_joint_stiffness[:, joint_ids] = actuator.stiffness + self.default_joint_damping[:, joint_ids] = actuator.damping + # check for valid operation if cfg.params["operation"] == "scale": if "stiffness_distribution_params" in cfg.params: @@ -630,23 +1203,23 @@ def randomize(data: torch.Tensor, params: tuple[float, float]) -> torch.Tensor: # Randomize stiffness if stiffness_distribution_params is not None: stiffness = actuator.stiffness[env_ids].clone() - stiffness[:, actuator_indices] = self.asset.data.default_joint_stiffness[env_ids][ - :, global_indices - ].clone() + stiffness[:, actuator_indices] = self.default_joint_stiffness[env_ids][:, global_indices].clone() randomize(stiffness, stiffness_distribution_params) actuator.stiffness[env_ids] = stiffness if isinstance(actuator, ImplicitActuator): - self.asset.write_joint_stiffness_to_sim( - stiffness, joint_ids=actuator.joint_indices, env_ids=env_ids + self.asset.write_joint_stiffness_to_sim_index( + stiffness=stiffness, joint_ids=actuator.joint_indices, env_ids=env_ids ) # Randomize damping if damping_distribution_params is not None: damping = actuator.damping[env_ids].clone() - damping[:, actuator_indices] = self.asset.data.default_joint_damping[env_ids][:, global_indices].clone() + damping[:, actuator_indices] = self.default_joint_damping[env_ids][:, global_indices].clone() randomize(damping, damping_distribution_params) actuator.damping[env_ids] = damping if isinstance(actuator, ImplicitActuator): - self.asset.write_joint_damping_to_sim(damping, joint_ids=actuator.joint_indices, env_ids=env_ids) + self.asset.write_joint_damping_to_sim_index( + damping=damping, joint_ids=actuator.joint_indices, env_ids=env_ids + ) class randomize_joint_parameters(ManagerTermBase): @@ -682,7 +1255,22 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] - self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + self.asset: Articulation = env.scene[self.asset_cfg.name] + + # detect physics backend + manager_name = env.sim.physics_manager.__name__.lower() + self._backend = "newton" if "newton" in manager_name else "physx" + + # cache default values (common to both backends) + self.default_joint_friction_coeff = self.asset.data.joint_friction_coeff.torch.clone() + self.default_joint_armature = self.asset.data.joint_armature.torch.clone() + self.default_joint_pos_limits = self.asset.data.joint_pos_limits.torch.clone() + + # cache dynamic/viscous friction (PhysX only - Newton only has static friction) + if self._backend == "physx": + self.default_dynamic_joint_friction_coeff = (self.asset.data.joint_dynamic_friction_coeff.torch).clone() + self.default_viscous_joint_friction_coeff = (self.asset.data.joint_viscous_friction_coeff.torch).clone() + # check for valid operation if cfg.params["operation"] == "scale": if "friction_distribution_params" in cfg.params: @@ -726,7 +1314,7 @@ def __call__( # joint friction coefficient if friction_distribution_params is not None: friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_friction_coeff.clone(), + self.default_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -740,11 +1328,17 @@ def __call__( # Always set static friction (indexed once) static_friction_coeff = friction_coeff[env_ids_for_slice, joint_ids] - # if isaacsim version is lower than 5.0.0 we can set only the static friction coefficient - if get_isaac_sim_version().major >= 5: + if self._backend == "newton": + # Newton only supports static friction coefficient + self.asset.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=static_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + ) + else: # Randomize raw tensors dynamic_friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_dynamic_friction_coeff.clone(), + self.default_dynamic_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -752,7 +1346,7 @@ def __call__( distribution=distribution, ) viscous_friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_viscous_friction_coeff.clone(), + self.default_viscous_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -770,24 +1364,20 @@ def __call__( # Index once at the end dynamic_friction_coeff = dynamic_friction_coeff[env_ids_for_slice, joint_ids] viscous_friction_coeff = viscous_friction_coeff[env_ids_for_slice, joint_ids] - else: - # For versions < 5.0.0, we do not set these values - dynamic_friction_coeff = None - viscous_friction_coeff = None - - # Single write call for all versions - self.asset.write_joint_friction_coefficient_to_sim( - joint_friction_coeff=static_friction_coeff, - joint_dynamic_friction_coeff=dynamic_friction_coeff, - joint_viscous_friction_coeff=viscous_friction_coeff, - joint_ids=joint_ids, - env_ids=env_ids, - ) + + # Single write call for all versions + self.asset.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=static_friction_coeff, + joint_dynamic_friction_coeff=dynamic_friction_coeff, + joint_viscous_friction_coeff=viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + ) # joint armature if armature_distribution_params is not None: armature = _randomize_prop_by_op( - self.asset.data.default_joint_armature.clone(), + self.asset.data.default_joint_armature.torch.clone(), armature_distribution_params, env_ids, joint_ids, @@ -800,7 +1390,7 @@ def __call__( # joint position limits if lower_limit_distribution_params is not None or upper_limit_distribution_params is not None: - joint_pos_limits = self.asset.data.default_joint_pos_limits.clone() + joint_pos_limits = self.default_joint_pos_limits.clone() # -- randomize the lower limits if lower_limit_distribution_params is not None: joint_pos_limits[..., 0] = _randomize_prop_by_op( @@ -830,8 +1420,8 @@ def __call__( " than upper joint limits. Please check the distribution parameters for the joint position limits." ) # set the position limits into the physics simulation - self.asset.write_joint_position_limit_to_sim( - joint_pos_limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=False + self.asset.write_joint_position_limit_to_sim_index( + limits=joint_pos_limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=False ) @@ -911,31 +1501,35 @@ def __call__( # stiffness if stiffness_distribution_params is not None: stiffness = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_stiffness.clone(), + self.asset.data.fixed_tendon_stiffness.torch.clone(), stiffness_distribution_params, env_ids, tendon_ids, operation=operation, distribution=distribution, ) - self.asset.set_fixed_tendon_stiffness(stiffness[env_ids[:, None], tendon_ids], tendon_ids, env_ids) + self.asset.set_fixed_tendon_stiffness_index( + stiffness=stiffness[env_ids[:, None], tendon_ids], fixed_tendon_ids=tendon_ids, env_ids=env_ids + ) # damping if damping_distribution_params is not None: damping = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_damping.clone(), + self.asset.data.fixed_tendon_damping.torch.clone(), damping_distribution_params, env_ids, tendon_ids, operation=operation, distribution=distribution, ) - self.asset.set_fixed_tendon_damping(damping[env_ids[:, None], tendon_ids], tendon_ids, env_ids) + self.asset.set_fixed_tendon_damping_index( + damping=damping[env_ids[:, None], tendon_ids], fixed_tendon_ids=tendon_ids, env_ids=env_ids + ) # limit stiffness if limit_stiffness_distribution_params is not None: limit_stiffness = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_limit_stiffness.clone(), + self.asset.data.fixed_tendon_limit_stiffness.torch.clone(), limit_stiffness_distribution_params, env_ids, tendon_ids, @@ -948,7 +1542,7 @@ def __call__( # position limits if lower_limit_distribution_params is not None or upper_limit_distribution_params is not None: - limit = self.asset.data.default_fixed_tendon_pos_limits.clone() + limit = self.asset.data.fixed_tendon_pos_limits.torch.clone() # -- lower limit if lower_limit_distribution_params is not None: limit[..., 0] = _randomize_prop_by_op( @@ -977,34 +1571,40 @@ def __call__( "Randomization term 'randomize_fixed_tendon_parameters' is setting lower tendon limits that are" " greater than upper tendon limits." ) - self.asset.set_fixed_tendon_position_limit(tendon_limits, tendon_ids, env_ids) + self.asset.set_fixed_tendon_position_limit_index( + limit=tendon_limits, fixed_tendon_ids=tendon_ids, env_ids=env_ids + ) # rest length if rest_length_distribution_params is not None: rest_length = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_rest_length.clone(), + self.asset.data.fixed_tendon_rest_length.torch.clone(), rest_length_distribution_params, env_ids, tendon_ids, operation=operation, distribution=distribution, ) - self.asset.set_fixed_tendon_rest_length(rest_length[env_ids[:, None], tendon_ids], tendon_ids, env_ids) + self.asset.set_fixed_tendon_rest_length_index( + rest_length=rest_length[env_ids[:, None], tendon_ids], fixed_tendon_ids=tendon_ids, env_ids=env_ids + ) # offset if offset_distribution_params is not None: offset = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_offset.clone(), + self.asset.data.fixed_tendon_offset.torch.clone(), offset_distribution_params, env_ids, tendon_ids, operation=operation, distribution=distribution, ) - self.asset.set_fixed_tendon_offset(offset[env_ids[:, None], tendon_ids], tendon_ids, env_ids) + self.asset.set_fixed_tendon_offset_index( + offset=offset[env_ids[:, None], tendon_ids], fixed_tendon_ids=tendon_ids, env_ids=env_ids + ) # write the fixed tendon properties into the simulation - self.asset.write_fixed_tendon_properties_to_sim(tendon_ids, env_ids) + self.asset.write_fixed_tendon_properties_to_sim_index(env_ids=env_ids) def apply_external_force_torque( @@ -1025,7 +1625,9 @@ def apply_external_force_torque( asset: RigidObject | Articulation = env.scene[asset_cfg.name] # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device=asset.device) + env_ids = torch.arange(env.scene.num_envs, device=asset.device, dtype=torch.int32) + else: + env_ids = env_ids.to(dtype=torch.int32) # resolve number of bodies num_bodies = len(asset_cfg.body_ids) if isinstance(asset_cfg.body_ids, list) else asset.num_bodies @@ -1035,7 +1637,7 @@ def apply_external_force_torque( torques = math_utils.sample_uniform(*torque_range, size, asset.device) # set the forces and torques into the buffers # note: these are only applied when you call: `asset.write_data_to_sim()` - asset.permanent_wrench_composer.set_forces_and_torques( + asset.permanent_wrench_composer.set_forces_and_torques_index( forces=forces, torques=torques, body_ids=asset_cfg.body_ids, @@ -1062,13 +1664,13 @@ def push_by_setting_velocity( asset: RigidObject | Articulation = env.scene[asset_cfg.name] # velocities - vel_w = asset.data.root_vel_w[env_ids] + vel_w = asset.data.root_vel_w.torch[env_ids] # sample random velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) vel_w += math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], vel_w.shape, device=asset.device) # set the velocities into the physics simulation - asset.write_root_velocity_to_sim(vel_w, env_ids=env_ids) + asset.write_root_velocity_to_sim_index(root_velocity=vel_w, env_ids=env_ids) def reset_root_state_uniform( @@ -1094,26 +1696,27 @@ def reset_root_state_uniform( # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # get default root state - root_states = asset.data.default_root_state[env_ids].clone() + default_root_pose = asset.data.default_root_pose.torch[env_ids].clone() + default_root_vel = asset.data.default_root_vel.torch[env_ids].clone() # poses range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) - positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + positions = default_root_pose[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) - orientations = math_utils.quat_mul(root_states[:, 3:7], orientations_delta) + orientations = math_utils.quat_mul(default_root_pose[:, 3:7], orientations_delta) # velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) - velocities = root_states[:, 7:13] + rand_samples + velocities = default_root_vel + rand_samples # set into the physics simulation - asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) - asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) + asset.write_root_pose_to_sim_index(root_pose=torch.cat([positions, orientations], dim=-1), env_ids=env_ids) + asset.write_root_velocity_to_sim_index(root_velocity=velocities, env_ids=env_ids) def reset_root_state_with_random_orientation( @@ -1146,14 +1749,15 @@ def reset_root_state_with_random_orientation( # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # get default root state - root_states = asset.data.default_root_state[env_ids].clone() + default_root_pose = asset.data.default_root_pose.torch[env_ids].clone() + default_root_vel = asset.data.default_root_vel.torch[env_ids].clone() # poses range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] ranges = torch.tensor(range_list, device=asset.device) rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device) - positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples + positions = default_root_pose[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples orientations = math_utils.random_orientation(len(env_ids), device=asset.device) # velocities @@ -1161,11 +1765,11 @@ def reset_root_state_with_random_orientation( ranges = torch.tensor(range_list, device=asset.device) rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) - velocities = root_states[:, 7:13] + rand_samples + velocities = default_root_vel + rand_samples # set into the physics simulation - asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) - asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) + asset.write_root_pose_to_sim_index(root_pose=torch.cat([positions, orientations], dim=-1), env_ids=env_ids) + asset.write_root_velocity_to_sim_index(root_velocity=velocities, env_ids=env_ids) def reset_root_state_from_terrain( @@ -1213,7 +1817,7 @@ def reset_root_state_from_terrain( # sample random valid poses ids = torch.randint(0, valid_positions.shape[2], size=(len(env_ids),), device=env.device) positions = valid_positions[terrain.terrain_levels[env_ids], terrain.terrain_types[env_ids], ids] - positions += asset.data.default_root_state[env_ids, :3] + positions += asset.data.default_root_pose.torch[env_ids, :3] # sample random orientations range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["roll", "pitch", "yaw"]] @@ -1228,11 +1832,11 @@ def reset_root_state_from_terrain( ranges = torch.tensor(range_list, device=asset.device) rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device) - velocities = asset.data.default_root_state[env_ids, 7:13] + rand_samples + velocities = asset.data.default_root_vel.torch[env_ids] + rand_samples # set into the physics simulation - asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) - asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) + asset.write_root_pose_to_sim_index(root_pose=torch.cat([positions, orientations], dim=-1), env_ids=env_ids) + asset.write_root_velocity_to_sim_index(root_velocity=velocities, env_ids=env_ids) def reset_joints_by_scale( @@ -1257,22 +1861,23 @@ def reset_joints_by_scale( iter_env_ids = env_ids # get default joint state - joint_pos = asset.data.default_joint_pos[iter_env_ids, asset_cfg.joint_ids].clone() - joint_vel = asset.data.default_joint_vel[iter_env_ids, asset_cfg.joint_ids].clone() + joint_pos = asset.data.default_joint_pos.torch[iter_env_ids, asset_cfg.joint_ids].clone() + joint_vel = asset.data.default_joint_vel.torch[iter_env_ids, asset_cfg.joint_ids].clone() # scale these values randomly joint_pos *= math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device) joint_vel *= math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device) # clamp joint pos to limits - joint_pos_limits = asset.data.soft_joint_pos_limits[iter_env_ids, asset_cfg.joint_ids] + joint_pos_limits = asset.data.soft_joint_pos_limits.torch[iter_env_ids, asset_cfg.joint_ids] joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) # clamp joint vel to limits - joint_vel_limits = asset.data.soft_joint_vel_limits[iter_env_ids, asset_cfg.joint_ids] + joint_vel_limits = asset.data.soft_joint_vel_limits.torch[iter_env_ids, asset_cfg.joint_ids] joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits) # set into the physics simulation - asset.write_joint_state_to_sim(joint_pos, joint_vel, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) + asset.write_joint_position_to_sim_index(position=joint_pos, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) + asset.write_joint_velocity_to_sim_index(velocity=joint_vel, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) def reset_joints_by_offset( @@ -1297,22 +1902,23 @@ def reset_joints_by_offset( iter_env_ids = env_ids # get default joint state - joint_pos = asset.data.default_joint_pos[iter_env_ids, asset_cfg.joint_ids].clone() - joint_vel = asset.data.default_joint_vel[iter_env_ids, asset_cfg.joint_ids].clone() + joint_pos = asset.data.default_joint_pos.torch[iter_env_ids, asset_cfg.joint_ids].clone() + joint_vel = asset.data.default_joint_vel.torch[iter_env_ids, asset_cfg.joint_ids].clone() # bias these values randomly joint_pos += math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device) joint_vel += math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device) # clamp joint pos to limits - joint_pos_limits = asset.data.soft_joint_pos_limits[iter_env_ids, asset_cfg.joint_ids] + joint_pos_limits = asset.data.soft_joint_pos_limits.torch[iter_env_ids, asset_cfg.joint_ids] joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) # clamp joint vel to limits - joint_vel_limits = asset.data.soft_joint_vel_limits[iter_env_ids, asset_cfg.joint_ids] + joint_vel_limits = asset.data.soft_joint_vel_limits.torch[iter_env_ids, asset_cfg.joint_ids] joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits) # set into the physics simulation - asset.write_joint_state_to_sim(joint_pos, joint_vel, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) + asset.write_joint_position_to_sim_index(position=joint_pos, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) + asset.write_joint_velocity_to_sim_index(velocity=joint_vel, joint_ids=asset_cfg.joint_ids, env_ids=env_ids) def reset_nodal_state_uniform( @@ -1337,7 +1943,7 @@ def reset_nodal_state_uniform( # extract the used quantities (to enable type-hinting) asset: DeformableObject = env.scene[asset_cfg.name] # get default root state - nodal_state = asset.data.default_nodal_state_w[env_ids].clone() + nodal_state = asset.data.default_nodal_state_w.torch[env_ids].clone() # position range_list = [position_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] @@ -1368,32 +1974,35 @@ def reset_scene_to_default(env: ManagerBasedEnv, env_ids: torch.Tensor, reset_jo # rigid bodies for rigid_object in env.scene.rigid_objects.values(): # obtain default and deal with the offset for env origins - default_root_state = rigid_object.data.default_root_state[env_ids].clone() - default_root_state[:, 0:3] += env.scene.env_origins[env_ids] + default_root_pose = rigid_object.data.default_root_pose.torch[env_ids].clone() + default_root_vel = rigid_object.data.default_root_vel.torch[env_ids].clone() + default_root_pose[:, :3] += env.scene.env_origins[env_ids] # set into the physics simulation - rigid_object.write_root_pose_to_sim(default_root_state[:, :7], env_ids=env_ids) - rigid_object.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids=env_ids) + rigid_object.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + rigid_object.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) # articulations for articulation_asset in env.scene.articulations.values(): # obtain default and deal with the offset for env origins - default_root_state = articulation_asset.data.default_root_state[env_ids].clone() - default_root_state[:, 0:3] += env.scene.env_origins[env_ids] + default_root_pose = articulation_asset.data.default_root_pose.torch[env_ids].clone() + default_root_vel = articulation_asset.data.default_root_vel.torch[env_ids].clone() + default_root_pose[:, :3] += env.scene.env_origins[env_ids] # set into the physics simulation - articulation_asset.write_root_pose_to_sim(default_root_state[:, :7], env_ids=env_ids) - articulation_asset.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids=env_ids) + articulation_asset.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + articulation_asset.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) # obtain default joint positions - default_joint_pos = articulation_asset.data.default_joint_pos[env_ids].clone() - default_joint_vel = articulation_asset.data.default_joint_vel[env_ids].clone() + default_joint_pos = articulation_asset.data.default_joint_pos.torch[env_ids].clone() + default_joint_vel = articulation_asset.data.default_joint_vel.torch[env_ids].clone() # set into the physics simulation - articulation_asset.write_joint_state_to_sim(default_joint_pos, default_joint_vel, env_ids=env_ids) + articulation_asset.write_joint_position_to_sim_index(position=default_joint_pos, env_ids=env_ids) + articulation_asset.write_joint_velocity_to_sim_index(velocity=default_joint_vel, env_ids=env_ids) # reset joint targets if required if reset_joint_targets: - articulation_asset.set_joint_position_target(default_joint_pos, env_ids=env_ids) - articulation_asset.set_joint_velocity_target(default_joint_vel, env_ids=env_ids) + articulation_asset.set_joint_position_target_index(target=default_joint_pos, env_ids=env_ids) + articulation_asset.set_joint_velocity_target_index(target=default_joint_vel, env_ids=env_ids) # deformable objects for deformable_object in env.scene.deformable_objects.values(): # obtain default and set into the physics simulation - nodal_state = deformable_object.data.default_nodal_state_w[env_ids].clone() + nodal_state = deformable_object.data.default_nodal_state_w.torch[env_ids].clone() deformable_object.write_nodal_state_to_sim(nodal_state, env_ids=env_ids) @@ -1435,11 +2044,12 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): " by setting 'replicate_physics' to False in 'InteractiveSceneCfg'." ) - # enable replicator extension if not already enabled - enable_extension("omni.replicator.core") + # enable replicator extension if not already enabled (local: isaacsim only available with Kit) + from isaacsim.core.experimental.utils.app import enable_extension # noqa: PLC0415 + enable_extension("omni.replicator.core") # we import the module here since we may not always need the replicator - import omni.replicator.core as rep + import omni.replicator.core as rep # noqa: PLC0415 # read parameters from the configuration asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg") @@ -1504,8 +2114,8 @@ def rep_texture_randomization(): with rep.trigger.on_custom_event(event_name=event_name): rep_texture_randomization() else: - # acquire stage - stage = get_current_stage() + # acquire stage from env simulation context + stage = env.sim.stage prims_group = rep.functional.get.prims(path_pattern=prim_path, stage=stage) num_prims = len(prims_group) @@ -1518,9 +2128,18 @@ def rep_texture_randomization(): if prim.IsInstanceable(): prim.SetInstanceable(False) + # Resolve OmniPBR.mdl to an absolute path so that pxr.Ar.GetResolver().Resolve() + # returns a valid path. Kit's omni_usd_resolver intentionally returns "" for builtin + # MDL short-names (OMNI_USD_RESOLVER_MDL_BUILTIN_BYPASS=1), which causes Replicator + # >= 1.13.0 to pass an empty resolved path into UsdMdl.RegistryUtils, raising a + # 'rtx::neuraylib::MdlModuleId' is Invalid error. + import carb.tokens # noqa: PLC0415 + + omni_pbr_mdl = carb.tokens.get_tokens_interface().resolve("${kit}/mdl/core/Base/OmniPBR.mdl") + # TODO: Should we specify the value when creating the material? self.material_prims = rep.functional.create_batch.material( - mdl="OmniPBR.mdl", bind_prims=prims_group, count=num_prims, project_uvw=True + mdl=omni_pbr_mdl, bind_prims=prims_group, count=num_prims, project_uvw=True ) def __call__( @@ -1596,10 +2215,12 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): """ super().__init__(cfg, env) - # enable replicator extension if not already enabled + # enable replicator extension if not already enabled (local: isaacsim only available with Kit) + from isaacsim.core.experimental.utils.app import enable_extension # noqa: PLC0415 + enable_extension("omni.replicator.core") # we import the module here since we may not always need the replicator - import omni.replicator.core as rep + import omni.replicator.core as rep # noqa: PLC0415 # read parameters from the configuration asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg") @@ -1653,7 +2274,7 @@ def rep_color_randomization(): with rep.trigger.on_custom_event(event_name=event_name): rep_color_randomization() else: - stage = get_current_stage() + stage = env.sim.stage prims_group = rep.functional.get.prims(path_pattern=mesh_prim_path, stage=stage) num_prims = len(prims_group) @@ -1665,9 +2286,18 @@ def rep_color_randomization(): if prim.IsInstanceable(): prim.SetInstanceable(False) + # Resolve OmniPBR.mdl to an absolute path so that pxr.Ar.GetResolver().Resolve() + # returns a valid path. Kit's omni_usd_resolver intentionally returns "" for builtin + # MDL short-names (OMNI_USD_RESOLVER_MDL_BUILTIN_BYPASS=1), which causes Replicator + # >= 1.13.0 to pass an empty resolved path into UsdMdl.RegistryUtils, raising a + # 'rtx::neuraylib::MdlModuleId' is Invalid error. + import carb.tokens # noqa: PLC0415 + + omni_pbr_mdl = carb.tokens.get_tokens_interface().resolve("${kit}/mdl/core/Base/OmniPBR.mdl") + # TODO: Should we specify the value when creating the material? self.material_prims = rep.functional.create_batch.material( - mdl="OmniPBR.mdl", bind_prims=prims_group, count=num_prims, project_uvw=True + mdl=omni_pbr_mdl, bind_prims=prims_group, count=num_prims, project_uvw=True ) def __call__( diff --git a/source/isaaclab/isaaclab/envs/mdp/observations.py b/source/isaaclab/isaaclab/envs/mdp/observations.py index 9839e0d70837..8f97da3595dd 100644 --- a/source/isaaclab/isaaclab/envs/mdp/observations.py +++ b/source/isaaclab/isaaclab/envs/mdp/observations.py @@ -16,14 +16,14 @@ import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg from isaaclab.managers.manager_base import ManagerTermBase from isaaclab.managers.manager_term_cfg import ObservationTermCfg -from isaaclab.sensors import Camera, Imu, RayCaster, RayCasterCamera, TiledCamera if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedEnv, ManagerBasedRLEnv + from isaaclab.sensors import Camera, Imu, JointWrenchSensor, Pva, RayCaster, RayCasterCamera from isaaclab.envs.utils.io_descriptors import ( generic_io_descriptor, @@ -45,7 +45,7 @@ def base_pos_z(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg( """Root height in the simulation world frame.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.root_pos_w[:, 2].unsqueeze(-1) + return asset.data.root_pos_w.torch[:, 2].unsqueeze(-1) @generic_io_descriptor( @@ -55,7 +55,7 @@ def base_lin_vel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCf """Root linear velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_lin_vel_b + return asset.data.root_lin_vel_b.torch @generic_io_descriptor( @@ -65,7 +65,7 @@ def base_ang_vel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCf """Root angular velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_ang_vel_b + return asset.data.root_ang_vel_b.torch @generic_io_descriptor( @@ -75,7 +75,7 @@ def projected_gravity(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEnt """Gravity projection on the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.projected_gravity_b + return asset.data.projected_gravity_b.torch @generic_io_descriptor( @@ -85,7 +85,7 @@ def root_pos_w(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg( """Asset root position in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_pos_w - env.scene.env_origins + return asset.data.root_pos_w.torch - env.scene.env_origins @generic_io_descriptor( @@ -94,7 +94,7 @@ def root_pos_w(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg( def root_quat_w( env: ManagerBasedEnv, make_quat_unique: bool = False, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") ) -> torch.Tensor: - """Asset root orientation (w, x, y, z) in the environment frame. + """Asset root orientation (x, y, z, w) in the environment frame. If :attr:`make_quat_unique` is True, then returned quaternion is made unique by ensuring the quaternion has non-negative real component. This is because both ``q`` and ``-q`` represent @@ -103,7 +103,7 @@ def root_quat_w( # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - quat = asset.data.root_quat_w + quat = asset.data.root_quat_w.torch # make the quaternion real-part positive if configured return math_utils.quat_unique(quat) if make_quat_unique else quat @@ -115,7 +115,7 @@ def root_lin_vel_w(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntity """Asset root linear velocity in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_lin_vel_w + return asset.data.root_lin_vel_w.torch @generic_io_descriptor( @@ -125,7 +125,7 @@ def root_ang_vel_w(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntity """Asset root angular velocity in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_ang_vel_w + return asset.data.root_ang_vel_w.torch """ @@ -154,7 +154,7 @@ def body_pose_w( asset: Articulation = env.scene[asset_cfg.name] # access the body poses in world frame - pose = asset.data.body_pose_w[:, asset_cfg.body_ids, :7] + pose = asset.data.body_pose_w.torch[:, asset_cfg.body_ids, :7] if isinstance(asset_cfg.body_ids, (slice, int)): pose = pose.clone() # if slice or int, make a copy to avoid modifying original data pose[..., :3] = pose[..., :3] - env.scene.env_origins.unsqueeze(1) @@ -181,8 +181,8 @@ def body_projected_gravity_b( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - body_quat = asset.data.body_quat_w[:, asset_cfg.body_ids] - gravity_dir = asset.data.GRAVITY_VEC_W.unsqueeze(1) + body_quat = asset.data.body_quat_w.torch[:, asset_cfg.body_ids] + gravity_dir = asset.data.GRAVITY_VEC_W.torch.unsqueeze(1) return math_utils.quat_apply_inverse(body_quat, gravity_dir).view(env.num_envs, -1) @@ -201,7 +201,7 @@ def joint_pos(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg(" """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.joint_pos[:, asset_cfg.joint_ids] + return asset.data.joint_pos.torch[:, asset_cfg.joint_ids] @generic_io_descriptor( @@ -216,7 +216,9 @@ def joint_pos_rel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityC """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids] + return ( + asset.data.joint_pos.torch[:, asset_cfg.joint_ids] - asset.data.default_joint_pos.torch[:, asset_cfg.joint_ids] + ) @generic_io_descriptor(observation_type="JointState", on_inspect=[record_joint_names, record_dtype, record_shape]) @@ -230,9 +232,9 @@ def joint_pos_limit_normalized( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] return math_utils.scale_transform( - asset.data.joint_pos[:, asset_cfg.joint_ids], - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0], - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1], + asset.data.joint_pos.torch[:, asset_cfg.joint_ids], + asset.data.soft_joint_pos_limits.torch[:, asset_cfg.joint_ids, 0], + asset.data.soft_joint_pos_limits.torch[:, asset_cfg.joint_ids, 1], ) @@ -246,7 +248,7 @@ def joint_vel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg(" """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.joint_vel[:, asset_cfg.joint_ids] + return asset.data.joint_vel.torch[:, asset_cfg.joint_ids] @generic_io_descriptor( @@ -261,7 +263,9 @@ def joint_vel_rel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityC """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.joint_vel[:, asset_cfg.joint_ids] - asset.data.default_joint_vel[:, asset_cfg.joint_ids] + return ( + asset.data.joint_vel.torch[:, asset_cfg.joint_ids] - asset.data.default_joint_vel.torch[:, asset_cfg.joint_ids] + ) @generic_io_descriptor( @@ -281,7 +285,7 @@ def joint_effort(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCf """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return asset.data.applied_torque[:, asset_cfg.joint_ids] + return asset.data.applied_torque.torch[:, asset_cfg.joint_ids] """ @@ -297,80 +301,80 @@ def height_scan(env: ManagerBasedEnv, sensor_cfg: SceneEntityCfg, offset: float # extract the used quantities (to enable type-hinting) sensor: RayCaster = env.scene.sensors[sensor_cfg.name] # height scan: height = sensor_height - hit_point_z - offset - return sensor.data.pos_w[:, 2].unsqueeze(1) - sensor.data.ray_hits_w[..., 2] - offset + return sensor.data.pos_w.torch[:, 2].unsqueeze(1) - sensor.data.ray_hits_w.torch[..., 2] - offset -def body_incoming_wrench(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: - """Incoming spatial wrench on bodies of an articulation in the simulation world frame. +def body_incoming_wrench(env: ManagerBasedEnv, sensor_cfg: SceneEntityCfg) -> torch.Tensor: + """Incoming spatial wrench [N, N·m] on bodies of an articulation in the sensor convention. - This is the 6-D wrench (force and torque) applied to the body link by the incoming joint force. + This is the 6-D wrench (force followed by torque) applied to the body link by the incoming joint force. """ # extract the used quantities (to enable type-hinting) - asset: Articulation = env.scene[asset_cfg.name] - # obtain the link incoming forces in world frame - body_incoming_joint_wrench_b = asset.data.body_incoming_joint_wrench_b[:, asset_cfg.body_ids] - return body_incoming_joint_wrench_b.view(env.num_envs, -1) + sensor: JointWrenchSensor = env.scene.sensors[sensor_cfg.name] + sensor_data = sensor.data + force_data = sensor_data.force + torque_data = sensor_data.torque + if force_data is None or torque_data is None: + raise RuntimeError("Joint wrench sensor data is not initialized. Call sim.reset() before reading observations.") + force = force_data.torch[:, sensor_cfg.body_ids] + torque = torque_data.torch[:, sensor_cfg.body_ids] + return torch.cat((force, torque), dim=-1).view(env.num_envs, -1) -def imu_orientation(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("imu")) -> torch.Tensor: - """Imu sensor orientation in the simulation world frame. +def pva_orientation(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("pva")) -> torch.Tensor: + """PVA sensor orientation in the simulation world frame. Args: env: The environment. - asset_cfg: The SceneEntity associated with an IMU sensor. Defaults to SceneEntityCfg("imu"). + asset_cfg: The SceneEntity associated with a PVA sensor. Defaults to SceneEntityCfg("pva"). Returns: - Orientation in the world frame in (w, x, y, z) quaternion form. Shape is (num_envs, 4). + Orientation in the world frame in (x, y, z, w) quaternion form. Shape is (num_envs, 4). """ - # extract the used quantities (to enable type-hinting) - asset: Imu = env.scene[asset_cfg.name] - # return the orientation quaternion - return asset.data.quat_w + asset: Pva = env.scene[asset_cfg.name] + return asset.data.quat_w.torch -def imu_projected_gravity(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("imu")) -> torch.Tensor: - """Imu sensor orientation w.r.t the env.scene.origin. +def pva_projected_gravity(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("pva")) -> torch.Tensor: + """PVA sensor projected gravity in the sensor frame. Args: env: The environment. - asset_cfg: The SceneEntity associated with an Imu sensor. + asset_cfg: The SceneEntity associated with a PVA sensor. Defaults to SceneEntityCfg("pva"). Returns: - Gravity projected on imu_frame, shape of torch.tensor is (num_env,3). + Gravity projected on sensor frame, shape of torch.tensor is (num_env, 3). """ - - asset: Imu = env.scene[asset_cfg.name] - return asset.data.projected_gravity_b + asset: Pva = env.scene[asset_cfg.name] + return asset.data.projected_gravity_b.torch def imu_ang_vel(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("imu")) -> torch.Tensor: - """Imu sensor angular velocity w.r.t. environment origin expressed in the sensor frame. + """IMU sensor angular velocity w.r.t. environment origin expressed in the sensor frame [rad/s]. Args: env: The environment. asset_cfg: The SceneEntity associated with an IMU sensor. Defaults to SceneEntityCfg("imu"). Returns: - The angular velocity (rad/s) in the sensor frame. Shape is (num_envs, 3). + The angular velocity [rad/s] in the sensor frame. Shape is (num_envs, 3). """ - # extract the used quantities (to enable type-hinting) asset: Imu = env.scene[asset_cfg.name] - # return the angular velocity - return asset.data.ang_vel_b + return asset.data.ang_vel_b.torch def imu_lin_acc(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("imu")) -> torch.Tensor: - """Imu sensor linear acceleration w.r.t. the environment origin expressed in sensor frame. + """IMU sensor linear acceleration w.r.t. the environment origin expressed in sensor frame [m/s^2]. Args: env: The environment. asset_cfg: The SceneEntity associated with an IMU sensor. Defaults to SceneEntityCfg("imu"). Returns: - The linear acceleration (m/s^2) in the sensor frame. Shape is (num_envs, 3). + The linear acceleration [m/s^2] in the sensor frame. Shape is (num_envs, 3). """ asset: Imu = env.scene[asset_cfg.name] - return asset.data.lin_acc_b + return asset.data.lin_acc_b.torch def image( @@ -401,7 +405,7 @@ def image( The images produced at the last time-step """ # extract the used quantities (to enable type-hinting) - sensor: TiledCamera | Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] + sensor: Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] # obtain the input image images = sensor.data.output[data_type] diff --git a/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.py b/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.py index cacc5e15c7f5..326cc758f8f8 100644 --- a/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.py +++ b/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.py @@ -4,5 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause """Various recorder terms that can be used in the environment.""" -from .recorders import * -from .recorders_cfg import * +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.pyi b/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.pyi new file mode 100644 index 000000000000..1f14927549b5 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/mdp/recorders/__init__.pyi @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "InitialStateRecorder", + "PostStepProcessedActionsRecorder", + "PostStepStatesRecorder", + "PreStepActionsRecorder", + "PreStepFlatPolicyObservationsRecorder", + "ActionStateRecorderManagerCfg", + "InitialStateRecorderCfg", + "PostStepProcessedActionsRecorderCfg", + "PostStepStatesRecorderCfg", + "PreStepActionsRecorderCfg", + "PreStepFlatPolicyObservationsRecorderCfg", +] + +from .recorders import ( + InitialStateRecorder, + PostStepProcessedActionsRecorder, + PostStepStatesRecorder, + PreStepActionsRecorder, + PreStepFlatPolicyObservationsRecorder, +) +from .recorders_cfg import ( + ActionStateRecorderManagerCfg, + InitialStateRecorderCfg, + PostStepProcessedActionsRecorderCfg, + PostStepStatesRecorderCfg, + PreStepActionsRecorderCfg, + PreStepFlatPolicyObservationsRecorderCfg, +) diff --git a/source/isaaclab/isaaclab/envs/mdp/recorders/recorders_cfg.py b/source/isaaclab/isaaclab/envs/mdp/recorders/recorders_cfg.py index d67350d7f209..473501591053 100644 --- a/source/isaaclab/isaaclab/envs/mdp/recorders/recorders_cfg.py +++ b/source/isaaclab/isaaclab/envs/mdp/recorders/recorders_cfg.py @@ -2,10 +2,19 @@ # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.managers.recorder_manager import RecorderManagerBaseCfg, RecorderTerm, RecorderTermCfg +from typing import TYPE_CHECKING + +from isaaclab.managers.recorder_manager import RecorderManagerBaseCfg, RecorderTermCfg from isaaclab.utils import configclass -from . import recorders +if TYPE_CHECKING: + from .recorders import ( + InitialStateRecorder, + PostStepProcessedActionsRecorder, + PostStepStatesRecorder, + PreStepActionsRecorder, + PreStepFlatPolicyObservationsRecorder, + ) ## # State recorders. @@ -16,35 +25,37 @@ class InitialStateRecorderCfg(RecorderTermCfg): """Configuration for the initial state recorder term.""" - class_type: type[RecorderTerm] = recorders.InitialStateRecorder + class_type: type["InitialStateRecorder"] | str = "{DIR}.recorders:InitialStateRecorder" @configclass class PostStepStatesRecorderCfg(RecorderTermCfg): """Configuration for the step state recorder term.""" - class_type: type[RecorderTerm] = recorders.PostStepStatesRecorder + class_type: type["PostStepStatesRecorder"] | str = "{DIR}.recorders:PostStepStatesRecorder" @configclass class PreStepActionsRecorderCfg(RecorderTermCfg): """Configuration for the step action recorder term.""" - class_type: type[RecorderTerm] = recorders.PreStepActionsRecorder + class_type: type["PreStepActionsRecorder"] | str = "{DIR}.recorders:PreStepActionsRecorder" @configclass class PreStepFlatPolicyObservationsRecorderCfg(RecorderTermCfg): """Configuration for the step policy observation recorder term.""" - class_type: type[RecorderTerm] = recorders.PreStepFlatPolicyObservationsRecorder + class_type: type["PreStepFlatPolicyObservationsRecorder"] | str = ( + "{DIR}.recorders:PreStepFlatPolicyObservationsRecorder" + ) @configclass class PostStepProcessedActionsRecorderCfg(RecorderTermCfg): """Configuration for the post step processed actions recorder term.""" - class_type: type[RecorderTerm] = recorders.PostStepProcessedActionsRecorder + class_type: type["PostStepProcessedActionsRecorder"] | str = "{DIR}.recorders:PostStepProcessedActionsRecorder" ## diff --git a/source/isaaclab/isaaclab/envs/mdp/rewards.py b/source/isaaclab/isaaclab/envs/mdp/rewards.py index 774c37aefa91..4645e4fd805e 100644 --- a/source/isaaclab/isaaclab/envs/mdp/rewards.py +++ b/source/isaaclab/isaaclab/envs/mdp/rewards.py @@ -15,14 +15,14 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg from isaaclab.managers.manager_base import ManagerTermBase from isaaclab.managers.manager_term_cfg import RewardTermCfg -from isaaclab.sensors import ContactSensor, RayCaster if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import ContactSensor, RayCaster """ General. @@ -78,14 +78,14 @@ def lin_vel_z_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntity """Penalize z-axis base linear velocity using L2 squared kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return torch.square(asset.data.root_lin_vel_b[:, 2]) + return torch.square(asset.data.root_lin_vel_b.torch[:, 2]) def ang_vel_xy_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize xy-axis base angular velocity using L2 squared kernel.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return torch.sum(torch.square(asset.data.root_ang_vel_b[:, :2]), dim=1) + return torch.sum(torch.square(asset.data.root_ang_vel_b.torch[:, :2]), dim=1) def flat_orientation_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: @@ -95,7 +95,7 @@ def flat_orientation_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = Scen """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return torch.sum(torch.square(asset.data.projected_gravity_b[:, :2]), dim=1) + return torch.sum(torch.square(asset.data.projected_gravity_b.torch[:, :2]), dim=1) def base_height_l2( @@ -115,18 +115,18 @@ def base_height_l2( if sensor_cfg is not None: sensor: RayCaster = env.scene[sensor_cfg.name] # Adjust the target height using the sensor data - adjusted_target_height = target_height + torch.mean(sensor.data.ray_hits_w[..., 2], dim=1) + adjusted_target_height = target_height + torch.mean(sensor.data.ray_hits_w.torch[..., 2], dim=1) else: # Use the provided target height directly for flat terrain adjusted_target_height = target_height # Compute the L2 squared penalty - return torch.square(asset.data.root_pos_w[:, 2] - adjusted_target_height) + return torch.square(asset.data.root_pos_w.torch[:, 2] - adjusted_target_height) def body_lin_acc_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Penalize the linear acceleration of bodies using L2-kernel.""" asset: Articulation = env.scene[asset_cfg.name] - return torch.sum(torch.norm(asset.data.body_lin_acc_w[:, asset_cfg.body_ids, :], dim=-1), dim=1) + return torch.sum(torch.linalg.norm(asset.data.body_lin_acc_w.torch[:, asset_cfg.body_ids, :], dim=-1), dim=1) """ @@ -143,14 +143,14 @@ def joint_torques_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEn """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.sum(torch.square(asset.data.applied_torque[:, asset_cfg.joint_ids]), dim=1) + return torch.sum(torch.square(asset.data.applied_torque.torch[:, asset_cfg.joint_ids]), dim=1) def joint_vel_l1(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize joint velocities on the articulation using an L1-kernel.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.sum(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1) + return torch.sum(torch.abs(asset.data.joint_vel.torch[:, asset_cfg.joint_ids]), dim=1) def joint_vel_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: @@ -162,7 +162,7 @@ def joint_vel_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntity """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.sum(torch.square(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1) + return torch.sum(torch.square(asset.data.joint_vel.torch[:, asset_cfg.joint_ids]), dim=1) def joint_acc_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: @@ -174,7 +174,7 @@ def joint_acc_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntity """ # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.sum(torch.square(asset.data.joint_acc[:, asset_cfg.joint_ids]), dim=1) + return torch.sum(torch.square(asset.data.joint_acc.torch[:, asset_cfg.joint_ids]), dim=1) def joint_deviation_l1(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: @@ -182,7 +182,9 @@ def joint_deviation_l1(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = Scene # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints - angle = asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.default_joint_pos[:, asset_cfg.joint_ids] + angle = ( + asset.data.joint_pos.torch[:, asset_cfg.joint_ids] - asset.data.default_joint_pos.torch[:, asset_cfg.joint_ids] + ) return torch.sum(torch.abs(angle), dim=1) @@ -195,10 +197,12 @@ def joint_pos_limits(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEn asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints out_of_limits = -( - asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 0] + asset.data.joint_pos.torch[:, asset_cfg.joint_ids] + - asset.data.soft_joint_pos_limits.torch[:, asset_cfg.joint_ids, 0] ).clip(max=0.0) out_of_limits += ( - asset.data.joint_pos[:, asset_cfg.joint_ids] - asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids, 1] + asset.data.joint_pos.torch[:, asset_cfg.joint_ids] + - asset.data.soft_joint_pos_limits.torch[:, asset_cfg.joint_ids, 1] ).clip(min=0.0) return torch.sum(out_of_limits, dim=1) @@ -217,8 +221,8 @@ def joint_vel_limits( asset: Articulation = env.scene[asset_cfg.name] # compute out of limits constraints out_of_limits = ( - torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) - - asset.data.soft_joint_vel_limits[:, asset_cfg.joint_ids] * soft_ratio + torch.abs(asset.data.joint_vel.torch[:, asset_cfg.joint_ids]) + - asset.data.soft_joint_vel_limits.torch[:, asset_cfg.joint_ids] * soft_ratio ) # clip to max error = 1 rad/s per joint to avoid huge penalties out_of_limits = out_of_limits.clip_(min=0.0, max=1.0) @@ -244,7 +248,8 @@ def applied_torque_limits(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = Sc # compute out of limits constraints # TODO: We need to fix this to support implicit joints. out_of_limits = torch.abs( - asset.data.applied_torque[:, asset_cfg.joint_ids] - asset.data.computed_torque[:, asset_cfg.joint_ids] + asset.data.applied_torque.torch[:, asset_cfg.joint_ids] + - asset.data.computed_torque.torch[:, asset_cfg.joint_ids] ) return torch.sum(out_of_limits, dim=1) @@ -269,8 +274,10 @@ def undesired_contacts(env: ManagerBasedRLEnv, threshold: float, sensor_cfg: Sce # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # check if contact force is above threshold - net_contact_forces = contact_sensor.data.net_forces_w_history - is_contact = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold + net_contact_forces = contact_sensor.data.net_forces_w_history.torch + is_contact = ( + torch.max(torch.linalg.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold + ) # sum over contacts for each environment return torch.sum(is_contact, dim=1) @@ -279,7 +286,8 @@ def desired_contacts(env, sensor_cfg: SceneEntityCfg, threshold: float = 1.0) -> """Penalize if none of the desired contacts are present.""" contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] contacts = ( - contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] > threshold + contact_sensor.data.net_forces_w_history.torch[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] + > threshold ) zero_contact = (~contacts).all(dim=1) return 1.0 * zero_contact @@ -289,9 +297,11 @@ def contact_forces(env: ManagerBasedRLEnv, threshold: float, sensor_cfg: SceneEn """Penalize contact forces as the amount of violations of the net contact force.""" # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] - net_contact_forces = contact_sensor.data.net_forces_w_history + net_contact_forces = contact_sensor.data.net_forces_w_history.torch # compute the violation - violation = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] - threshold + violation = ( + torch.max(torch.linalg.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] - threshold + ) # compute the penalty return torch.sum(violation.clip(min=0.0), dim=1) @@ -309,7 +319,7 @@ def track_lin_vel_xy_exp( asset: RigidObject = env.scene[asset_cfg.name] # compute the error lin_vel_error = torch.sum( - torch.square(env.command_manager.get_command(command_name)[:, :2] - asset.data.root_lin_vel_b[:, :2]), + torch.square(env.command_manager.get_command(command_name)[:, :2] - asset.data.root_lin_vel_b.torch[:, :2]), dim=1, ) return torch.exp(-lin_vel_error / std**2) @@ -322,5 +332,7 @@ def track_ang_vel_z_exp( # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] # compute the error - ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_b[:, 2]) + ang_vel_error = torch.square( + env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_b.torch[:, 2] + ) return torch.exp(-ang_vel_error / std**2) diff --git a/source/isaaclab/isaaclab/envs/mdp/terminations.py b/source/isaaclab/isaaclab/envs/mdp/terminations.py index 84c83b2a213e..d717fb0dfcd0 100644 --- a/source/isaaclab/isaaclab/envs/mdp/terminations.py +++ b/source/isaaclab/isaaclab/envs/mdp/terminations.py @@ -15,13 +15,13 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import ContactSensor if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers.command_manager import CommandTerm + from isaaclab.sensors import ContactSensor """ MDP terminations. @@ -57,7 +57,7 @@ def bad_orientation( """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return torch.acos(-asset.data.projected_gravity_b[:, 2]).abs() > limit_angle + return torch.acos(-asset.data.projected_gravity_b.torch[:, 2]).abs() > limit_angle def root_height_below_minimum( @@ -70,7 +70,7 @@ def root_height_below_minimum( """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_pos_w[:, 2] < minimum_height + return asset.data.root_pos_w.torch[:, 2] < minimum_height """ @@ -85,9 +85,9 @@ def joint_pos_out_of_limit(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = S if asset_cfg.joint_ids is None: asset_cfg.joint_ids = slice(None) - limits = asset.data.soft_joint_pos_limits[:, asset_cfg.joint_ids] - out_of_upper_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] > limits[..., 1], dim=1) - out_of_lower_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] < limits[..., 0], dim=1) + limits = asset.data.soft_joint_pos_limits.torch[:, asset_cfg.joint_ids] + out_of_upper_limits = torch.any(asset.data.joint_pos.torch[:, asset_cfg.joint_ids] > limits[..., 1], dim=1) + out_of_lower_limits = torch.any(asset.data.joint_pos.torch[:, asset_cfg.joint_ids] < limits[..., 0], dim=1) return torch.logical_or(out_of_upper_limits, out_of_lower_limits) @@ -104,8 +104,8 @@ def joint_pos_out_of_manual_limit( if asset_cfg.joint_ids is None: asset_cfg.joint_ids = slice(None) # compute any violations - out_of_upper_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] > bounds[1], dim=1) - out_of_lower_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] < bounds[0], dim=1) + out_of_upper_limits = torch.any(asset.data.joint_pos.torch[:, asset_cfg.joint_ids] > bounds[1], dim=1) + out_of_lower_limits = torch.any(asset.data.joint_pos.torch[:, asset_cfg.joint_ids] < bounds[0], dim=1) return torch.logical_or(out_of_upper_limits, out_of_lower_limits) @@ -114,8 +114,10 @@ def joint_vel_out_of_limit(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = S # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute any violations - limits = asset.data.soft_joint_vel_limits - return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > limits[:, asset_cfg.joint_ids], dim=1) + limits = asset.data.soft_joint_vel_limits.torch + return torch.any( + torch.abs(asset.data.joint_vel.torch[:, asset_cfg.joint_ids]) > limits[:, asset_cfg.joint_ids], dim=1 + ) def joint_vel_out_of_manual_limit( @@ -125,7 +127,7 @@ def joint_vel_out_of_manual_limit( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute any violations - return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > max_velocity, dim=1) + return torch.any(torch.abs(asset.data.joint_vel.torch[:, asset_cfg.joint_ids]) > max_velocity, dim=1) def joint_effort_out_of_limit( @@ -141,7 +143,8 @@ def joint_effort_out_of_limit( asset: Articulation = env.scene[asset_cfg.name] # check if any joint effort is out of limit out_of_limits = ~torch.isclose( - asset.data.computed_torque[:, asset_cfg.joint_ids], asset.data.applied_torque[:, asset_cfg.joint_ids] + asset.data.computed_torque.torch[:, asset_cfg.joint_ids], + asset.data.applied_torque.torch[:, asset_cfg.joint_ids], ) return torch.any(out_of_limits, dim=1) @@ -155,8 +158,8 @@ def illegal_contact(env: ManagerBasedRLEnv, threshold: float, sensor_cfg: SceneE """Terminate when the contact force on the sensor exceeds the force threshold.""" # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] - net_contact_forces = contact_sensor.data.net_forces_w_history + net_contact_forces = contact_sensor.data.net_forces_w_history.torch # check if any contact force exceeds the threshold return torch.any( - torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold, dim=1 + torch.max(torch.linalg.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold, dim=1 ) diff --git a/source/isaaclab/isaaclab/envs/mimic_env_cfg.py b/source/isaaclab/isaaclab/envs/mimic_env_cfg.py index c506df7f20bf..a98c34bf5f90 100644 --- a/source/isaaclab/isaaclab/envs/mimic_env_cfg.py +++ b/source/isaaclab/isaaclab/envs/mimic_env_cfg.py @@ -10,6 +10,8 @@ Base MimicEnvCfg object for Isaac Lab Mimic data generation. """ +from __future__ import annotations + import enum from isaaclab.managers.recorder_manager import RecorderManagerBaseCfg diff --git a/source/isaaclab/isaaclab/envs/ui/__init__.py b/source/isaaclab/isaaclab/envs/ui/__init__.py index 93db88399e45..fe5b72528fea 100644 --- a/source/isaaclab/isaaclab/envs/ui/__init__.py +++ b/source/isaaclab/isaaclab/envs/ui/__init__.py @@ -10,7 +10,6 @@ toggling different debug visualization tools, and other user-defined functionalities. """ -from .base_env_window import BaseEnvWindow -from .empty_window import EmptyWindow -from .manager_based_rl_env_window import ManagerBasedRLEnvWindow -from .viewport_camera_controller import ViewportCameraController +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/envs/ui/__init__.pyi b/source/isaaclab/isaaclab/envs/ui/__init__.pyi new file mode 100644 index 000000000000..89daf73b8840 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/ui/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseEnvWindow", + "EmptyWindow", + "ManagerBasedRLEnvWindow", + "ViewportCameraController", +] + +from .base_env_window import BaseEnvWindow +from .empty_window import EmptyWindow +from .manager_based_rl_env_window import ManagerBasedRLEnvWindow +from .viewport_camera_controller import ViewportCameraController diff --git a/source/isaaclab/isaaclab/envs/ui/base_env_window.py b/source/isaaclab/isaaclab/envs/ui/base_env_window.py index 2aafe5e6bba2..36b1c0fab1c9 100644 --- a/source/isaaclab/isaaclab/envs/ui/base_env_window.py +++ b/source/isaaclab/isaaclab/envs/ui/base_env_window.py @@ -11,14 +11,16 @@ from datetime import datetime from typing import TYPE_CHECKING -import isaacsim -import omni.kit.app -import omni.kit.commands -import omni.usd -from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics +from pxr import Sdf, Usd, UsdGeom, UsdPhysics -from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.sim.utils.stage import resolve_paths from isaaclab.ui.widgets import ManagerLiveVisualizer +from isaaclab.utils.version import has_kit + +if has_kit(): + import isaacsim + import omni.kit.commands + if TYPE_CHECKING: import omni.ui @@ -62,7 +64,7 @@ def __init__(self, env: ManagerBasedEnv, window_name: str = "IsaacLab"): ] # get stage handle - self.stage = get_current_stage() + self.stage = env.sim.stage # Listeners for environment selection changes self._ui_listeners: list[ManagerLiveVisualizer] = [] @@ -123,18 +125,8 @@ def _build_sim_frame(self): # create stack for controls self.ui_window_elements["sim_vstack"] = omni.ui.VStack(spacing=5, height=0) with self.ui_window_elements["sim_vstack"]: - # create rendering mode dropdown - render_mode_cfg = { - "label": "Rendering Mode", - "type": "dropdown", - "default_val": self.env.sim.render_mode.value, - "items": [member.name for member in self.env.sim.RenderMode if member.value >= 0], - "tooltip": "Select a rendering mode\n" + self.env.sim.RenderMode.__doc__, - "on_clicked_fn": lambda value: self.env.sim.set_render_mode(self.env.sim.RenderMode[value]), - } - self.ui_window_elements["render_dropdown"] = isaacsim.gui.components.ui_utils.dropdown_builder( - **render_mode_cfg - ) + # create rendering mode dropdown if visualizer supports it + self._build_render_mode_dropdown() # create animation recording box record_animate_cfg = { @@ -149,7 +141,40 @@ def _build_sim_frame(self): **record_animate_cfg ) # disable the button if fabric is not enabled - self.ui_window_elements["record_animation"].enabled = not self.env.sim.is_fabric_enabled() + self.ui_window_elements["record_animation"].enabled = not self.env.sim.get_setting( + "/isaaclab/fabric_enabled" + ) + + def _build_render_mode_dropdown(self): + """Build rendering mode dropdown if a visualizer supports it.""" + # Find first visualizer with render_mode support + viz = None + RenderMode = None + for v in self.env.sim.visualizers: + if hasattr(v, "render_mode") and hasattr(v, "set_render_mode"): + viz = v + # Get RenderMode enum from the visualizer's module + RenderMode = type(v.render_mode) + break + + if viz is None or RenderMode is None: + return + + def on_render_mode_changed(value: str): + if viz is not None and hasattr(viz, "set_render_mode"): + viz.set_render_mode(RenderMode[value]) + + render_mode_cfg = { + "label": "Rendering Mode", + "type": "dropdown", + "default_val": viz.render_mode.value, + "items": [member.name for member in RenderMode if member.value >= 0], + "tooltip": "Select a rendering mode\n" + (RenderMode.__doc__ or ""), + "on_clicked_fn": on_render_mode_changed, + } + self.ui_window_elements["render_dropdown"] = isaacsim.gui.components.ui_utils.dropdown_builder( + **render_mode_cfg + ) def _build_viewer_frame(self): """Build the viewer-related control frame for the UI.""" @@ -329,16 +354,16 @@ def _toggle_recording_animation_fn(self, value: bool): # if prim has articulation then disable it if prim.HasAPI(UsdPhysics.ArticulationRootAPI): prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) - prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI) + prim.RemoveAppliedSchema("PhysxArticulationAPI") # if prim has rigid body then disable it if prim.HasAPI(UsdPhysics.RigidBodyAPI): prim.RemoveAPI(UsdPhysics.RigidBodyAPI) - prim.RemoveAPI(PhysxSchema.PhysxRigidBodyAPI) + prim.RemoveAppliedSchema("PhysxRigidBodyAPI") # if prim is a joint type then disable it if prim.IsA(UsdPhysics.Joint): prim.GetAttribute("physics:jointEnabled").Set(False) - # resolve all paths relative to layer path - omni.usd.resolve_paths(source_layer.identifier, temp_layer.identifier) + # resolve paths so asset references remain valid from the new location + resolve_paths(source_layer.identifier, temp_layer.identifier) # save the stage temp_layer.Save() # print the path to the saved stage diff --git a/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py b/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py index 0ba5368734b0..509b17033397 100644 --- a/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py +++ b/source/isaaclab/isaaclab/envs/ui/viewport_camera_controller.py @@ -13,9 +13,6 @@ import numpy as np import torch -import omni.kit.app -import omni.timeline - from isaaclab.assets.articulation.articulation import Articulation if TYPE_CHECKING: @@ -76,6 +73,8 @@ def __init__(self, env: ManagerBasedEnv | DirectRLEnv, cfg: ViewerCfg): self.update_view_to_world() # subscribe to post update event so that camera view can be updated at each rendering step + import omni.kit.app + app_interface = omni.kit.app.get_app_interface() app_event_stream = app_interface.get_post_update_event_stream() self._viewport_camera_update_handle = app_event_stream.create_subscription_to_pop( @@ -161,8 +160,9 @@ def update_view_to_asset_root(self, asset_name: str): self.cfg.asset_name = asset_name # set origin type to asset_root self.cfg.origin_type = "asset_root" - # update the camera origins - self.viewer_origin = self._env.scene[self.cfg.asset_name].data.root_pos_w[self.cfg.env_index] + # update the camera origins (convert Warp array to torch tensor first, then index) + root_pos = self._env.scene[self.cfg.asset_name].data.root_pos_w.torch + self.viewer_origin = root_pos[self.cfg.env_index] # update the camera view self.update_view_location() @@ -194,8 +194,9 @@ def update_view_to_asset_body(self, asset_name: str, body_name: str): self.cfg.asset_name = asset_name # set origin type to asset_body self.cfg.origin_type = "asset_body" - # update the camera origins - self.viewer_origin = self._env.scene[self.cfg.asset_name].data.body_pos_w[self.cfg.env_index, body_id].view(3) + # update the camera origins (convert Warp array to torch tensor first, then index) + body_pos = self._env.scene[self.cfg.asset_name].data.body_pos_w.torch + self.viewer_origin = body_pos[self.cfg.env_index, body_id].squeeze(0) # update the camera view self.update_view_location() @@ -216,8 +217,17 @@ def update_view_location(self, eye: Sequence[float] | None = None, lookat: Seque cam_eye = viewer_origin + self.default_cam_eye cam_target = viewer_origin + self.default_cam_lookat - # set the camera view - self._env.sim.set_camera_view(eye=cam_eye, target=cam_target) + eye_t = (float(cam_eye[0]), float(cam_eye[1]), float(cam_eye[2])) + target_t = (float(cam_target[0]), float(cam_target[1]), float(cam_target[2])) + self._env.sim.set_camera_view(eye=eye_t, target=target_t) + + # Renderer viewport camera (Isaac RTX / Kit); optional — pure-Newton installs have no isaaclab_physx. + try: + from isaaclab_physx.renderers.kit_viewport_utils import set_kit_renderer_camera_view + + set_kit_renderer_camera_view(eye=cam_eye, target=cam_target, camera_prim_path=self.cfg.cam_prim_path) + except (ImportError, ModuleNotFoundError): + pass """ Private Functions diff --git a/source/isaaclab/isaaclab/envs/utils/io_descriptors.py b/source/isaaclab/isaaclab/envs/utils/io_descriptors.py index 84b8a5cf8a0c..3a9d54f12e5a 100644 --- a/source/isaaclab/isaaclab/envs/utils/io_descriptors.py +++ b/source/isaaclab/isaaclab/envs/utils/io_descriptors.py @@ -319,7 +319,7 @@ def record_joint_pos_offsets(output: torch.Tensor, descriptor: GenericObservatio ids = kwargs["asset_cfg"].joint_ids # Get the offsets of the joints for the first robot in the scene. # This assumes that all robots have the same joint offsets. - descriptor.joint_pos_offsets = asset.data.default_joint_pos[:, ids][0] + descriptor.joint_pos_offsets = asset.data.default_joint_pos.torch[:, ids][0] def record_joint_vel_offsets(output: torch.Tensor, descriptor: GenericObservationIODescriptor, **kwargs): @@ -336,7 +336,7 @@ def record_joint_vel_offsets(output: torch.Tensor, descriptor: GenericObservatio ids = kwargs["asset_cfg"].joint_ids # Get the offsets of the joints for the first robot in the scene. # This assumes that all robots have the same joint offsets. - descriptor.joint_vel_offsets = asset.data.default_joint_vel[:, ids][0] + descriptor.joint_vel_offsets = asset.data.default_joint_vel.torch[:, ids][0] def export_articulations_data(env: ManagerBasedEnv) -> dict[str, dict[str, list[float]]]: @@ -356,25 +356,25 @@ def export_articulations_data(env: ManagerBasedEnv) -> dict[str, dict[str, list[ articulation_joint_data[articulation_name] = {} articulation_joint_data[articulation_name]["joint_names"] = articulation.joint_names articulation_joint_data[articulation_name]["default_joint_pos"] = ( - articulation.data.default_joint_pos[0].detach().cpu().numpy().tolist() + articulation.data.default_joint_pos.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_vel"] = ( - articulation.data.default_joint_vel[0].detach().cpu().numpy().tolist() + articulation.data.default_joint_vel.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_pos_limits"] = ( - articulation.data.default_joint_pos_limits[0].detach().cpu().numpy().tolist() + articulation.data.default_joint_pos_limits.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_damping"] = ( - articulation.data.default_joint_damping[0].detach().cpu().numpy().tolist() + articulation.data.joint_damping.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_stiffness"] = ( - articulation.data.default_joint_stiffness[0].detach().cpu().numpy().tolist() + articulation.data.joint_stiffness.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_friction"] = ( - articulation.data.default_joint_friction[0].detach().cpu().numpy().tolist() + articulation.data.joint_friction_coeff.torch[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_armature"] = ( - articulation.data.default_joint_armature[0].detach().cpu().numpy().tolist() + articulation.data.joint_armature.torch[0].detach().cpu().numpy().tolist() ) return articulation_joint_data diff --git a/source/isaaclab/isaaclab/envs/utils/marl.py b/source/isaaclab/isaaclab/envs/utils/marl.py index 010f6e5e27bd..5901bfbc3184 100644 --- a/source/isaaclab/isaaclab/envs/utils/marl.py +++ b/source/isaaclab/isaaclab/envs/utils/marl.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import math from typing import Any diff --git a/source/isaaclab/isaaclab/envs/utils/recording_hooks.py b/source/isaaclab/isaaclab/envs/utils/recording_hooks.py new file mode 100644 index 000000000000..584b6d73adf5 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/utils/recording_hooks.py @@ -0,0 +1,50 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Hooks that run after visualizers during :meth:`~isaaclab.sim.SimulationContext.render`. + +Lives alongside :mod:`video_recorder` / :mod:`video_recorder_cfg` because both tie into +``--video`` / ``rgb_array`` recording. Keeps :class:`~isaaclab.sim.SimulationContext` free +of imports from ``isaaclab_physx``, ``isaaclab_newton``, and other recording backends. +Each integration is loaded lazily so optional extensions are not required at import time. +""" + +from __future__ import annotations + +from typing import Any + + +def run_recording_hooks_after_visualizers(sim: Any) -> None: + """Run recording-related work after :meth:`~isaaclab.sim.SimulationContext.render` steps visualizers. + + Isaac Sim / RTX follow-up is loaded lazily so minimal installs still work. + Newton GL video is handled by :class:`~isaaclab.envs.utils.video_recorder.VideoRecorder` + (e.g. :class:`~isaaclab_newton.video_recording.newton_gl_perspective_video.NewtonGlPerspectiveVideo`), + not here. + + Args: + sim: Active :class:`~isaaclab.sim.SimulationContext` instance. + """ + _recording_followup_isaac_sim(sim) + + +def _recording_followup_isaac_sim(sim: Any) -> None: + """Isaac Sim: keep RTX / Replicator outputs fresh when recording video without a Kit visualizer. + + When ``--video`` uses ``rgb_array`` / :class:`~gymnasium.wrappers.RecordVideo`, Replicator + render products must see Kit's event loop pumped. :class:`~isaaclab_visualizers.kit.KitVisualizer` + already calls ``omni.kit.app.get_app().update()`` in its ``step()``; if no such visualizer + is active, we pump here (guarded by ``/isaaclab/video/enabled`` and ``is_rendering``). + + Implemented by ``pump_kit_app_for_headless_video_render_if_needed`` in + :mod:`isaaclab_physx.renderers.isaac_rtx_renderer_utils`. + """ + try: + from isaaclab_physx.renderers.isaac_rtx_renderer_utils import ( + pump_kit_app_for_headless_video_render_if_needed, + ) + except ImportError: + return + pump_kit_app_for_headless_video_render_if_needed(sim) diff --git a/source/isaaclab/isaaclab/envs/utils/spaces.py b/source/isaaclab/isaaclab/envs/utils/spaces.py index a21ecd82667c..46dda8b31302 100644 --- a/source/isaaclab/isaaclab/envs/utils/spaces.py +++ b/source/isaaclab/isaaclab/envs/utils/spaces.py @@ -222,8 +222,14 @@ def replace_strings_with_env_cfg_spaces(env_cfg: object) -> object: """ for attr in ["observation_space", "action_space", "state_space"]: if hasattr(env_cfg, attr): - setattr(env_cfg, attr, deserialize_space(getattr(env_cfg, attr))) + val = getattr(env_cfg, attr) + if isinstance(val, str): + setattr(env_cfg, attr, deserialize_space(val)) for attr in ["observation_spaces", "action_spaces"]: if hasattr(env_cfg, attr): - setattr(env_cfg, attr, {k: deserialize_space(v) for k, v in getattr(env_cfg, attr).items()}) + setattr( + env_cfg, + attr, + {k: deserialize_space(v) for k, v in getattr(env_cfg, attr).items() if isinstance(v, str)}, + ) return env_cfg diff --git a/source/isaaclab/isaaclab/envs/utils/video_recorder.py b/source/isaaclab/isaaclab/envs/utils/video_recorder.py new file mode 100644 index 000000000000..3ff6a7e1a0a4 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/utils/video_recorder.py @@ -0,0 +1,247 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Video recorder implementation. + +Backend resolution (``--video`` + ``--visualizer``): + +1. **Active visualizer** - ``"kit"`` uses the Kit camera; ``"newton"`` uses the Newton GL viewer. + ``"viser"`` / ``"rerun"`` have no capture API and fall through to rule 2. +2. **Physics/renderer stack** - + - PhysX or Isaac RTX uses the Kit camera; + - Newton physics or Newton Warp uses the Newton GL viewer. + Kit wins when both signals present. Raises if nothing resolves. + +Set :attr:`~isaaclab.envs.utils.video_recorder_cfg.VideoRecorderCfg.backend_source` to ``"renderer"`` +to ignore active visualizers and record from the physics/renderer stack. + +Camera sync when a visualizer drives the backend: construction copies the visualizer config's +``eye`` / ``lookat`` into the recorder config; each :meth:`~VideoRecorder.render_rgb_array` +call then re-reads the Newton viewer's live ``camera.pos/pitch/yaw``. Kit video uses the +configured ``eye`` / ``lookat`` at construction time. + +See :mod:`video_recorder_cfg` for configuration. +""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Literal + +import numpy as np + +if TYPE_CHECKING: + from isaaclab.scene import InteractiveScene + + from .video_recorder_cfg import VideoRecorderCfg + +logger = logging.getLogger(__name__) + +_VideoBackend = Literal["kit", "newton_gl"] + +# visualizer types that map to a supported video backend. +# viser and rerun are intentionally absent - they have no video-capture API. +_VISUALIZER_TO_VIDEO_BACKEND: dict[str, _VideoBackend] = { + "kit": "kit", + "newton": "newton_gl", +} + + +def _resolve_video_backend( + scene: InteractiveScene, backend_source: str = "visualizer" +) -> tuple[_VideoBackend, str | None]: + """Return ``(backend, matched_visualizer_type)`` for the active scene. + + ``matched_visualizer_type`` is ``"kit"`` / ``"newton"`` when a visualizer drove the + selection, or ``None`` when the physics/renderer preset stack was used instead. + + Args: + scene: The interactive scene that owns the sim context. + backend_source: ``"visualizer"`` to let active visualizers choose the backend, or ``"renderer"`` + to ignore active visualizers and use the physics/renderer stack. + + Raises: + RuntimeError: If no supported backend is detected. + """ + if backend_source not in ("visualizer", "renderer"): + raise ValueError("VideoRecorderCfg.backend_source must be either 'visualizer' or 'renderer'.") + + # Prefer the visualizer backend when --visualizer is active alongside --video. + visualizer_types: list[str] = scene.sim.resolve_visualizer_types() if backend_source == "visualizer" else [] + if visualizer_types: + # kit takes priority when multiple visualizers are active + for preferred in ("kit", "newton"): + if preferred in visualizer_types: + backend = _VISUALIZER_TO_VIDEO_BACKEND[preferred] + logger.debug("[VideoRecorder] Using '%s' backend from active '%s' visualizer.", backend, preferred) + return backend, preferred + # only unsupported visualizer types (viser, rerun) are active. + logger.warning( + "[VideoRecorder] Active visualizer(s) %s do not support video capture; " + "falling back to physics/renderer stack detection.", + visualizer_types, + ) + + # fall back to physics/renderer preset stack detection. + sim = scene.sim + physics_name = sim.physics_manager.__name__.lower() + renderer_types: list[str] = scene._sensor_renderer_types() + + use_kit = "physx" in physics_name or "isaac_rtx" in renderer_types + use_newton_gl = "newton" in physics_name or "newton_warp" in renderer_types + + if use_kit: + return "kit", None + if use_newton_gl: + return "newton_gl", None + raise RuntimeError( + "Video recording (--video) requires a supported backend: " + "PhysX or Isaac RTX renderer (Kit camera), or Newton physics / Newton Warp renderer (GL viewer). " + "No supported backend detected; do not use --video for this setup." + ) + + +def _sync_camera_from_visualizer( + scene: InteractiveScene, + visualizer_type: str, + cfg: VideoRecorderCfg, +) -> None: + """Overwrite ``cfg.eye`` and ``cfg.lookat`` from the active visualizer. + + Args: + scene: The interactive scene that owns the sim context. + visualizer_type: The visualizer type string matched by ``_resolve_video_backend`` + (e.g. ``"kit"`` or ``"newton"``). + cfg: The recorder configuration to update in place. + """ + try: + resolved_cfgs = scene.sim._resolve_visualizer_cfgs() + except Exception as exc: + logger.debug("[VideoRecorder] Could not resolve visualizer cfgs for camera sync: %s", exc) + return + + for vcfg in resolved_cfgs: + if getattr(vcfg, "visualizer_type", None) != visualizer_type: + continue + pos = getattr(vcfg, "eye", None) + tgt = getattr(vcfg, "lookat", None) + if pos is None or tgt is None: + break + cfg.eye = tuple(float(x) for x in pos) + cfg.lookat = tuple(float(x) for x in tgt) + logger.debug( + "[VideoRecorder] Camera synced from '%s' visualizer: position=%s, target=%s.", + visualizer_type, + cfg.eye, + cfg.lookat, + ) + return + + logger.debug( + "[VideoRecorder] Could not find eye/lookat on '%s' visualizer cfg; keeping existing camera values.", + visualizer_type, + ) + + +class VideoRecorder: + """Records perspective video frames from the scene's active renderer. + + Args: + cfg: Recorder configuration. + scene: The interactive scene that owns the sensors. + """ + + def __init__(self, cfg: VideoRecorderCfg, scene: InteractiveScene): + self.cfg = cfg + self._scene = scene + self._backend: _VideoBackend | None = None + self._capture = None + # visualizer type that drove backend selection (or None when using physics/renderer stack). + self._matched_visualizer: str | None = None + # live visualizer instance - looked up lazily on first render_rgb_array() call because + # visualizers are initialised by sim.reset(), which runs after VideoRecorder.__init__. + self._live_visualizer = None + + if cfg.env_render_mode == "rgb_array": + backend_source = getattr(cfg, "backend_source", "visualizer") + self._backend, self._matched_visualizer = _resolve_video_backend(scene, backend_source) + if self._matched_visualizer is not None: + _sync_camera_from_visualizer(scene, self._matched_visualizer, cfg) + if self._backend == "newton_gl": + try: + import pyglet as _pyglet # noqa: F401 - verify pyglet is available + except ImportError as e: + raise ImportError( + "The Newton GL video backend requires 'pyglet'. Install IsaacLab with './isaaclab.sh -i'." + ) from e + from isaaclab_newton.video_recording.newton_gl_perspective_video import ( + create_newton_gl_perspective_video, + ) + from isaaclab_newton.video_recording.newton_gl_perspective_video_cfg import NewtonGlPerspectiveVideoCfg + + ncfg = NewtonGlPerspectiveVideoCfg( + window_width=cfg.window_width, + window_height=cfg.window_height, + eye=cfg.eye, + lookat=cfg.lookat, + ) + self._capture = create_newton_gl_perspective_video(ncfg) + else: + from isaaclab_physx.video_recording.isaacsim_kit_perspective_video import ( + create_isaacsim_kit_perspective_video, + ) + from isaaclab_physx.video_recording.isaacsim_kit_perspective_video_cfg import ( + IsaacsimKitPerspectiveVideoCfg, + ) + + kcfg = IsaacsimKitPerspectiveVideoCfg( + eye=cfg.eye, + lookat=cfg.lookat, + window_width=cfg.window_width, + window_height=cfg.window_height, + ) + self._capture = create_isaacsim_kit_perspective_video(kcfg) + + def _sync_newton_camera(self) -> None: + """Push the Newton visualizer's live camera pose into the capture object. + + Called once per :meth:`render_rgb_array` when a Newton visualizer is active. + The live visualizer instance is resolved lazily (visualizers are initialised by + ``sim.reset()``, which runs after ``VideoRecorder.__init__``). + """ + if self._live_visualizer is None: + for viz in self._scene.sim.visualizers: + if getattr(getattr(viz, "cfg", None), "visualizer_type", None) == "newton": + self._live_visualizer = viz + break + if self._live_visualizer is None: + return + + viewer = getattr(self._live_visualizer, "_viewer", None) + if viewer is None: + return + + import math + + cam = viewer.camera + pos = (float(cam.pos[0]), float(cam.pos[1]), float(cam.pos[2])) + yaw_rad = math.radians(float(cam.yaw)) + pitch_rad = math.radians(float(cam.pitch)) + dx = math.cos(pitch_rad) * math.cos(yaw_rad) + dy = math.cos(pitch_rad) * math.sin(yaw_rad) + dz = math.sin(pitch_rad) + target = (pos[0] + dx, pos[1] + dy, pos[2] + dz) + self._capture.update_camera(pos, target) + + def render_rgb_array(self) -> np.ndarray | None: + """Return an RGB frame for the resolved backend. Fails if backend is unavailable.""" + if self._backend is None or self._capture is None: + return None + if self._matched_visualizer == "newton": + # Newton GL camera state lives in the capture object and must be synced each frame + # to follow interactive viewer movement. + self._sync_newton_camera() + # Kit capture uses the configured eye/lookat applied to the recording camera at construction time. + return self._capture.render_rgb_array() diff --git a/source/isaaclab/isaaclab/envs/utils/video_recorder_cfg.py b/source/isaaclab/isaaclab/envs/utils/video_recorder_cfg.py new file mode 100644 index 000000000000..c0cbb1d9a11d --- /dev/null +++ b/source/isaaclab/isaaclab/envs/utils/video_recorder_cfg.py @@ -0,0 +1,58 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for :class:`~isaaclab.envs.utils.video_recorder.VideoRecorder`. + +Captures a single wide-angle perspective view of the scene. Newton backends use the +Newton GL viewer; Kit backends use the ``/OmniverseKit_Persp`` camera via +``omni.replicator.core``. +""" + +from __future__ import annotations + +from typing import Literal + +from isaaclab.utils import configclass + +from .video_recorder import VideoRecorder + + +@configclass +class VideoRecorderCfg: + """Configuration for :class:`~isaaclab.envs.utils.video_recorder.VideoRecorder`.""" + + class_type: type = VideoRecorder + """Recorder class to instantiate; must accept ``(cfg, scene)``.""" + + env_render_mode: str | None = None + """Gym render mode forwarded from the environment constructor (``"rgb_array"`` when ``--video`` is active). + + Set automatically by the environment base classes; do not set manually. + """ + + eye: tuple[float, float, float] = (7.5, 7.5, 7.5) + """Perspective camera position in world space (metres). + + Direct RL / MARL and manager-based RL environments overwrite this from + :attr:`~isaaclab.envs.common.ViewerCfg.eye` before recording so ``--video`` matches the + task viewport for both Kit (PhysX / Isaac RTX) and Newton GL (Newton / OVRTX / etc.). + """ + + lookat: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Perspective camera look-at target in world space (metres). Set from ``ViewerCfg.lookat`` at env init.""" + + backend_source: Literal["visualizer", "renderer"] = "visualizer" + """Source used to resolve the video capture backend. + + ``"visualizer"`` records from the active Kit or Newton visualizer when one is enabled, and falls back to the + physics/renderer stack otherwise. ``"renderer"`` ignores active visualizers and records from the backend implied by + the physics/renderer stack. + """ + + window_width: int = 1280 + """Width in pixels of the recorded frame.""" + + window_height: int = 720 + """Height in pixels of the recorded frame.""" diff --git a/source/isaaclab/isaaclab/managers/__init__.py b/source/isaaclab/isaaclab/managers/__init__.py index 4a8266801b47..27043f480c44 100644 --- a/source/isaaclab/isaaclab/managers/__init__.py +++ b/source/isaaclab/isaaclab/managers/__init__.py @@ -10,25 +10,6 @@ designed to be modular and can be easily extended to support new functionality. """ -from .action_manager import ActionManager, ActionTerm -from .command_manager import CommandManager, CommandTerm -from .curriculum_manager import CurriculumManager -from .event_manager import EventManager -from .manager_base import ManagerBase, ManagerTermBase -from .manager_term_cfg import ( - ActionTermCfg, - CommandTermCfg, - CurriculumTermCfg, - EventTermCfg, - ManagerTermBaseCfg, - ObservationGroupCfg, - ObservationTermCfg, - RecorderTermCfg, - RewardTermCfg, - TerminationTermCfg, -) -from .observation_manager import ObservationManager -from .recorder_manager import DatasetExportMode, RecorderManager, RecorderManagerBaseCfg, RecorderTerm -from .reward_manager import RewardManager -from .scene_entity_cfg import SceneEntityCfg -from .termination_manager import TerminationManager +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/managers/__init__.pyi b/source/isaaclab/isaaclab/managers/__init__.pyi new file mode 100644 index 000000000000..3d2f7a22f3ca --- /dev/null +++ b/source/isaaclab/isaaclab/managers/__init__.pyi @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ActionManager", + "ActionTerm", + "CommandManager", + "CommandTerm", + "CurriculumManager", + "EventManager", + "ManagerBase", + "ManagerTermBase", + "ActionTermCfg", + "CommandTermCfg", + "CurriculumTermCfg", + "EventTermCfg", + "ManagerTermBaseCfg", + "ObservationGroupCfg", + "ObservationTermCfg", + "RecorderTermCfg", + "RewardTermCfg", + "TerminationTermCfg", + "ObservationManager", + "DatasetExportMode", + "RecorderManager", + "RecorderManagerBaseCfg", + "RecorderTerm", + "RewardManager", + "SceneEntityCfg", + "TerminationManager", +] + +from .action_manager import ActionManager, ActionTerm +from .command_manager import CommandManager, CommandTerm +from .curriculum_manager import CurriculumManager +from .event_manager import EventManager +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import ( + ActionTermCfg, + CommandTermCfg, + CurriculumTermCfg, + EventTermCfg, + ManagerTermBaseCfg, + ObservationGroupCfg, + ObservationTermCfg, + RecorderTermCfg, + RewardTermCfg, + TerminationTermCfg, +) +from .observation_manager import ObservationManager +from .recorder_manager import ( + DatasetExportMode, + RecorderManager, + RecorderManagerBaseCfg, + RecorderTerm, +) +from .reward_manager import RewardManager +from .scene_entity_cfg import SceneEntityCfg +from .termination_manager import TerminationManager diff --git a/source/isaaclab/isaaclab/managers/action_manager.py b/source/isaaclab/isaaclab/managers/action_manager.py index 6d138451f998..d711596e5f5a 100644 --- a/source/isaaclab/isaaclab/managers/action_manager.py +++ b/source/isaaclab/isaaclab/managers/action_manager.py @@ -9,7 +9,6 @@ import inspect import re -import weakref from abc import abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any @@ -17,8 +16,6 @@ import torch from prettytable import PrettyTable -import omni.kit.app - from isaaclab.envs.utils.io_descriptors import GenericActionIODescriptor from .manager_base import ManagerBase, ManagerTermBase @@ -63,9 +60,11 @@ def __init__(self, cfg: ActionTermCfg, env: ManagerBasedEnv): def __del__(self): """Unsubscribe from the callbacks.""" - if self._debug_vis_handle: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None + env = getattr(self, "_env", None) + sim = getattr(env, "sim", None) + registry = getattr(sim, "vis_marker_registry", None) + if registry is not None: + registry.clear_debug_vis_callback(self) """ Properties. @@ -132,15 +131,10 @@ def set_debug_vis(self, debug_vis: bool) -> bool: if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: - app_interface = omni.kit.app.get_app_interface() - self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( - lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) - ) + self._debug_vis_handle = self._env.sim.vis_marker_registry.add_debug_vis_callback(self) else: # remove the subscriber if it exists - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None + self._env.sim.vis_marker_registry.clear_debug_vis_callback(self) # return success return True diff --git a/source/isaaclab/isaaclab/managers/command_manager.py b/source/isaaclab/isaaclab/managers/command_manager.py index 0fe66ff6b963..9493fc81fbcf 100644 --- a/source/isaaclab/isaaclab/managers/command_manager.py +++ b/source/isaaclab/isaaclab/managers/command_manager.py @@ -8,7 +8,6 @@ from __future__ import annotations import inspect -import weakref from abc import abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING @@ -16,8 +15,6 @@ import torch from prettytable import PrettyTable -import omni.kit.app - from .manager_base import ManagerBase, ManagerTermBase from .manager_term_cfg import CommandTermCfg @@ -62,9 +59,11 @@ def __init__(self, cfg: CommandTermCfg, env: ManagerBasedRLEnv): def __del__(self): """Unsubscribe from the callbacks.""" - if self._debug_vis_handle: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None + env = getattr(self, "_env", None) + sim = getattr(env, "sim", None) + registry = getattr(sim, "vis_marker_registry", None) + if registry is not None: + registry.clear_debug_vis_callback(self) """ Properties @@ -104,17 +103,11 @@ def set_debug_vis(self, debug_vis: bool) -> bool: self._set_debug_vis_impl(debug_vis) # toggle debug visualization handles if debug_vis: - # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: - app_interface = omni.kit.app.get_app_interface() - self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( - lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) - ) + self._debug_vis_handle = self._env.sim.vis_marker_registry.add_debug_vis_callback(self) else: # remove the subscriber if it exists - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None + self._env.sim.vis_marker_registry.clear_debug_vis_callback(self) # return success return True diff --git a/source/isaaclab/isaaclab/managers/event_manager.py b/source/isaaclab/isaaclab/managers/event_manager.py index 8f92d6859c1e..4a56875c80df 100644 --- a/source/isaaclab/isaaclab/managers/event_manager.py +++ b/source/isaaclab/isaaclab/managers/event_manager.py @@ -191,6 +191,13 @@ def apply( logger.warning(f"Event mode '{mode}' is not defined. Skipping event.") return + # ensure class-based terms are resolved before applying + # the timeline PLAY callback may not have fired yet, so we resolve synchronously + # note: skip for "prestartup" mode as those terms are handled in _prepare_terms + # and scene entities don't exist yet + if mode != "prestartup" and not self._is_scene_entities_resolved: + self._resolve_terms_callback(None) + # check if mode is interval and dt is not provided if mode == "interval" and dt is None: raise ValueError(f"Event mode '{mode}' requires the time-step of the environment.") @@ -205,6 +212,12 @@ def apply( # iterate over all the event terms for index, term_cfg in enumerate(self._mode_term_cfgs[mode]): + # initialize class-based terms if not already initialized (for non-prestartup modes) + if inspect.isclass(term_cfg.func): + logger.info( + f"Initializing term '{self._mode_term_names[mode][index]}' with class '{term_cfg.func.__name__}'." + ) + term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env) if mode == "interval": # extract time left for this term time_left = self._interval_term_time_left[index] diff --git a/source/isaaclab/isaaclab/managers/manager_base.py b/source/isaaclab/isaaclab/managers/manager_base.py index 158c713abfa3..405157a1548b 100644 --- a/source/isaaclab/isaaclab/managers/manager_base.py +++ b/source/isaaclab/isaaclab/managers/manager_base.py @@ -7,15 +7,13 @@ import copy import inspect -import logging import weakref from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any -import omni.timeline - import isaaclab.utils.string as string_utils +from isaaclab.physics import PhysicsEvent, PhysicsManager from isaaclab.utils import class_to_dict, string_to_callable from .manager_term_cfg import ManagerTermBaseCfg @@ -24,9 +22,6 @@ if TYPE_CHECKING: from isaaclab.envs import ManagerBasedEnv -# import logger -logger = logging.getLogger(__name__) - class ManagerTermBase(ABC): """Base class for manager terms. @@ -156,17 +151,19 @@ def __init__(self, cfg: object, env: ManagerBasedEnv): # if the simulation is not playing, we use callbacks to trigger the resolution of the scene # entities configuration. this is needed for cases where the manager is created after the # simulation, but before the simulation is playing. - # FIXME: Once Isaac Sim supports storing this information as USD schema, we can remove this - # callback and resolve the scene entities directly inside `_prepare_terms`. if not self._env.sim.is_playing(): # note: Use weakref on all callbacks to ensure that this object can be deleted when its destructor - # is called - # The order is set to 20 to allow asset/sensor initialization to complete before the scene entities - # are resolved. Those have the order 10. - timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() - self._resolve_terms_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.PLAY), - lambda event, obj=weakref.proxy(self): obj._resolve_terms_callback(event), + # is called. The order is set to 20 to allow asset/sensor initialization to complete before the + # scene entities are resolved. Those have the order 10. + + physics_mgr_cls = self._env.sim.physics_manager + obj_ref = weakref.proxy(self) + + self._resolve_terms_handle = physics_mgr_cls.register_callback( + lambda payload: PhysicsManager.safe_callback_invoke( + obj_ref._resolve_terms_callback, None, physics_manager=physics_mgr_cls + ), + PhysicsEvent.PHYSICS_READY, order=20, ) else: @@ -178,8 +175,8 @@ def __init__(self, cfg: object, env: ManagerBasedEnv): def __del__(self): """Delete the manager.""" - if self._resolve_terms_handle: - self._resolve_terms_handle.unsubscribe() + if self._resolve_terms_handle is not None: + self._resolve_terms_handle.deregister() self._resolve_terms_handle = None """ @@ -395,24 +392,26 @@ def _process_term_cfg_at_play(self, term_name: str, term_cfg: ManagerTermBaseCfg term_cfg: The term configuration. """ for key, value in term_cfg.params.items(): - if isinstance(value, SceneEntityCfg): - # load the entity - try: - value.resolve(self._env.scene) - except ValueError as e: - raise ValueError(f"Error while parsing '{term_name}:{key}'. {e}") - # log the entity for checking later - msg = f"[{term_cfg.__class__.__name__}:{term_name}] Found entity '{value.name}'." - if value.joint_ids is not None: - msg += f"\n\tJoint names: {value.joint_names} [{value.joint_ids}]" - if value.body_ids is not None: - msg += f"\n\tBody names: {value.body_names} [{value.body_ids}]" - # print the information - logger.info(msg) - # store the entity - term_cfg.params[key] = value - - # initialize the term if it is a class + self._resolve_param_value(term_name, key, value) + + # resolve string func references then initialize class-based terms + if isinstance(term_cfg.func, str): + term_cfg.func = string_to_callable(term_cfg.func) if inspect.isclass(term_cfg.func): - logger.info(f"Initializing term '{term_name}' with class '{term_cfg.func.__name__}'.") term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env) + + def _resolve_param_value(self, term_name: str, key: str | int, value: Any): + """Recursively resolve a single param value (SceneEntityCfg, nested term cfgs, dicts, lists).""" + if isinstance(value, SceneEntityCfg): + try: + value.resolve(self._env.scene) + except ValueError as e: + raise ValueError(f"Error while parsing '{term_name}:{key}'. {e}") + elif isinstance(value, ManagerTermBaseCfg): + self._process_term_cfg_at_play(f"{term_name}.{key}", value) + elif isinstance(value, dict): + for sub_key, sub_value in value.items(): + self._resolve_param_value(f"{term_name}.{key}", sub_key, sub_value) + elif isinstance(value, (list, tuple)): + for i, item in enumerate(value): + self._resolve_param_value(f"{term_name}.{key}", i, item) diff --git a/source/isaaclab/isaaclab/managers/manager_term_cfg.py b/source/isaaclab/isaaclab/managers/manager_term_cfg.py index de7c23aa220b..06f2516324b5 100644 --- a/source/isaaclab/isaaclab/managers/manager_term_cfg.py +++ b/source/isaaclab/isaaclab/managers/manager_term_cfg.py @@ -118,6 +118,11 @@ class CommandTermCfg: debug_vis: bool = False """Whether to visualize debug information. Defaults to False.""" + cmd_kind: str | None = None + """Type hint for the command for deployment.""" + element_names: list[str] | list[list[str]] | None = None + """Element names for the command for deployment.""" + ## # Curriculum manager. diff --git a/source/isaaclab/isaaclab/managers/observation_manager.py b/source/isaaclab/isaaclab/managers/observation_manager.py index a1bde0266f4b..7299ff954467 100644 --- a/source/isaaclab/isaaclab/managers/observation_manager.py +++ b/source/isaaclab/isaaclab/managers/observation_manager.py @@ -512,6 +512,7 @@ def _prepare_terms(self): # read common config for the group self._group_obs_concatenate[group_name] = group_cfg.concatenate_terms + # to account for the batch dimension self._group_obs_concatenate_dim[group_name] = ( group_cfg.concatenate_dim + 1 if group_cfg.concatenate_dim >= 0 else group_cfg.concatenate_dim ) @@ -550,7 +551,7 @@ def _prepare_terms(self): if group_cfg.history_length is not None: term_cfg.history_length = group_cfg.history_length term_cfg.flatten_history_dim = group_cfg.flatten_history_dim - # add term config to list to list + # add term config to list self._group_obs_term_names[group_name].append(term_name) self._group_obs_term_cfgs[group_name].append(term_cfg) @@ -580,15 +581,15 @@ def _prepare_terms(self): for mod_cfg in term_cfg.modifiers: # check if class modifier and initialize with observation size when adding if isinstance(mod_cfg, modifiers.ModifierCfg): - # to list of modifiers + # to list of modifiers - instantiate class-based modifiers if inspect.isclass(mod_cfg.func): - if not issubclass(mod_cfg.func, modifiers.ModifierBase): + mod_cfg.func = mod_cfg.func(cfg=mod_cfg, data_dim=obs_dims, device=self._env.device) + # verify the instance is the correct type + if not isinstance(mod_cfg.func, modifiers.ModifierBase): raise TypeError( f"Modifier function '{mod_cfg.func}' for observation term '{term_name}'" - f" is not a subclass of 'ModifierBase'. Received: '{type(mod_cfg.func)}'." + f" is not an instance of 'ModifierBase'. Received: '{type(mod_cfg.func)}'." ) - mod_cfg.func = mod_cfg.func(cfg=mod_cfg, data_dim=obs_dims, device=self._env.device) - # add to list of class modifiers self._group_obs_class_instances.append(mod_cfg.func) else: @@ -604,6 +605,9 @@ def _prepare_terms(self): f" Received: {mod_cfg.func}" ) + # TODO(jichuanh): improvement can be made in two ways: + # 1. modifier specific check can be done in the modifier class + # 2. general param vs function matching check can be a common utility # check if term's arguments are matched by params term_params = list(mod_cfg.params.keys()) args = inspect.signature(mod_cfg.func).parameters @@ -622,16 +626,15 @@ def _prepare_terms(self): # prepare noise model classes if term_cfg.noise is not None and isinstance(term_cfg.noise, noise.NoiseModelCfg): - noise_model_cls = term_cfg.noise.class_type - if not issubclass(noise_model_cls, noise.NoiseModel): - raise TypeError( - f"Class type for observation term '{term_name}' NoiseModelCfg" - f" is not a subclass of 'NoiseModel'. Received: '{type(noise_model_cls)}'." - ) - # initialize func to be the noise model class instance - term_cfg.noise.func = noise_model_cls( + term_cfg.noise.func = term_cfg.noise.class_type( term_cfg.noise, num_envs=self._env.num_envs, device=self._env.device ) + # verify the instance is the correct type + if not isinstance(term_cfg.noise.func, noise.NoiseModel): + raise TypeError( + f"Noise model for observation term '{term_name}' is not an instance of 'NoiseModel'." + f" Received: '{type(term_cfg.noise.func)}'." + ) self._group_obs_class_instances.append(term_cfg.noise.func) # create history buffers and calculate history term dimensions @@ -643,7 +646,9 @@ def _prepare_terms(self): old_dims.insert(1, term_cfg.history_length) obs_dims = tuple(old_dims) if term_cfg.flatten_history_dim: - obs_dims = (obs_dims[0], np.prod(obs_dims[1:])) + # Cast to ``int`` so the dim is a plain Python int rather than ``np.int64``; + # otherwise the tuple would render as ``(np.int64(N),)`` in __str__. + obs_dims = (obs_dims[0], int(np.prod(obs_dims[1:]))) self._group_obs_term_dim[group_name].append(obs_dims[1:]) diff --git a/source/isaaclab/isaaclab/managers/recorder_manager.py b/source/isaaclab/isaaclab/managers/recorder_manager.py index 51a3f3e83518..2e6ebf0b2446 100644 --- a/source/isaaclab/isaaclab/managers/recorder_manager.py +++ b/source/isaaclab/isaaclab/managers/recorder_manager.py @@ -12,6 +12,7 @@ from typing import TYPE_CHECKING import torch +import warp as wp from prettytable import PrettyTable from isaaclab.utils import configclass @@ -54,6 +55,9 @@ class RecorderManagerBaseCfg: export_in_close: bool = False """Whether to export episodes in the close call.""" + dataset_compression: bool = True + """Enable dataset compression.""" + class RecorderTerm(ManagerTermBase): """Base class for recorder terms. @@ -329,14 +333,19 @@ def add_to_episodes(self, key: str, value: torch.Tensor | dict, env_ids: Sequenc if isinstance(value, dict): for sub_key, sub_value in value.items(): + if isinstance(sub_value, wp.array): + sub_value = wp.to_torch(sub_value) self.add_to_episodes(f"{key}/{sub_key}", sub_value, env_ids) return + if isinstance(value, wp.array): + value = wp.to_torch(value) + value = value.clone() # Clone once for all envs for value_index, env_id in enumerate(env_ids): if env_id not in self._episodes: self._episodes[env_id] = EpisodeData() self._episodes[env_id].env_id = env_id - self._episodes[env_id].add(key, value[value_index]) + self._episodes[env_id].add(key, value[value_index], clone=False) def set_success_to_episodes(self, env_ids: Sequence[int] | None, success_values: torch.Tensor): """Sets the task success values to the episodes for the given environment ids. @@ -508,7 +517,9 @@ def export_episodes(self, env_ids: Sequence[int] | None = None, demo_ids: Sequen if target_dataset_file_handler is not None: # Use corresponding demo_id if provided, otherwise None current_demo_id = demo_ids[i] if demo_ids is not None else None - target_dataset_file_handler.write_episode(self._episodes[env_id], current_demo_id) + target_dataset_file_handler.write_episode( + self._episodes[env_id], current_demo_id, self.cfg.dataset_compression + ) need_to_flush = True # Update episode count if episode_succeeded: @@ -562,6 +573,7 @@ def _prepare_terms(self): "dataset_export_mode", "export_in_record_pre_reset", "export_in_close", + "dataset_compression", ]: continue # check if term config is None diff --git a/source/isaaclab/isaaclab/managers/scene_entity_cfg.py b/source/isaaclab/isaaclab/managers/scene_entity_cfg.py index f3b01f7eee0d..2fe118c2da26 100644 --- a/source/isaaclab/isaaclab/managers/scene_entity_cfg.py +++ b/source/isaaclab/isaaclab/managers/scene_entity_cfg.py @@ -229,12 +229,15 @@ def _resolve_body_names(self, scene: InteractiveScene): if self.body_names is not None or self.body_ids != slice(None): entity: RigidObject = scene[self.name] # -- if both are not their default values, check if they are valid + # use find_sensors/num_sensors for ContactSensor, find_bodies/num_bodies for others + _find_fn = entity.find_sensors if hasattr(entity, "find_sensors") else entity.find_bodies + _num_bodies = entity.num_sensors if hasattr(entity, "num_sensors") else entity.num_bodies if self.body_names is not None and self.body_ids != slice(None): if isinstance(self.body_names, str): self.body_names = [self.body_names] if isinstance(self.body_ids, int): self.body_ids = [self.body_ids] - body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order) + body_ids, _ = _find_fn(self.body_names, preserve_order=self.preserve_order) body_names = [entity.body_names[i] for i in self.body_ids] if body_ids != self.body_ids or body_names != self.body_names: raise ValueError( @@ -247,10 +250,10 @@ def _resolve_body_names(self, scene: InteractiveScene): elif self.body_names is not None: if isinstance(self.body_names, str): self.body_names = [self.body_names] - self.body_ids, _ = entity.find_bodies(self.body_names, preserve_order=self.preserve_order) + self.body_ids, _ = _find_fn(self.body_names, preserve_order=self.preserve_order) # performance optimization (slice offers faster indexing than list of indices) # only all bodies in the entity order are selected - if len(self.body_ids) == entity.num_bodies and self.body_names == entity.body_names: + if len(self.body_ids) == _num_bodies and self.body_names == entity.body_names: self.body_ids = slice(None) # -- from body indices to body names elif self.body_ids != slice(None): diff --git a/source/isaaclab/isaaclab/markers/__init__.py b/source/isaaclab/isaaclab/markers/__init__.py index eb7b69761009..57eb4c791005 100644 --- a/source/isaaclab/isaaclab/markers/__init__.py +++ b/source/isaaclab/isaaclab/markers/__init__.py @@ -21,5 +21,6 @@ """ -from .config import * # noqa: F401, F403 -from .visualization_markers import VisualizationMarkers, VisualizationMarkersCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/markers/__init__.pyi b/source/isaaclab/isaaclab/markers/__init__.pyi new file mode 100644 index 000000000000..38ee70109177 --- /dev/null +++ b/source/isaaclab/isaaclab/markers/__init__.pyi @@ -0,0 +1,36 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "VisualizationMarkers", + "VisualizationMarkersCfg", + "RAY_CASTER_MARKER_CFG", + "CONTACT_SENSOR_MARKER_CFG", + "DEFORMABLE_TARGET_MARKER_CFG", + "VISUO_TACTILE_SENSOR_MARKER_CFG", + "FRAME_MARKER_CFG", + "RED_ARROW_X_MARKER_CFG", + "BLUE_ARROW_X_MARKER_CFG", + "GREEN_ARROW_X_MARKER_CFG", + "CUBOID_MARKER_CFG", + "SPHERE_MARKER_CFG", + "POSITION_GOAL_MARKER_CFG", +] + +from .visualization_markers import VisualizationMarkers +from .visualization_markers_cfg import VisualizationMarkersCfg +from .config import ( + RAY_CASTER_MARKER_CFG, + CONTACT_SENSOR_MARKER_CFG, + DEFORMABLE_TARGET_MARKER_CFG, + VISUO_TACTILE_SENSOR_MARKER_CFG, + FRAME_MARKER_CFG, + RED_ARROW_X_MARKER_CFG, + BLUE_ARROW_X_MARKER_CFG, + GREEN_ARROW_X_MARKER_CFG, + CUBOID_MARKER_CFG, + SPHERE_MARKER_CFG, + POSITION_GOAL_MARKER_CFG, +) diff --git a/source/isaaclab/isaaclab/markers/config/__init__.py b/source/isaaclab/isaaclab/markers/config/__init__.py index 54d30051259a..54713d65935d 100644 --- a/source/isaaclab/isaaclab/markers/config/__init__.py +++ b/source/isaaclab/isaaclab/markers/config/__init__.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause import isaaclab.sim as sim_utils -from isaaclab.markers.visualization_markers import VisualizationMarkersCfg +from isaaclab.markers.visualization_markers_cfg import VisualizationMarkersCfg from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR ## diff --git a/source/isaaclab/isaaclab/markers/vis_marker_registry.py b/source/isaaclab/isaaclab/markers/vis_marker_registry.py new file mode 100644 index 000000000000..a50d26713971 --- /dev/null +++ b/source/isaaclab/isaaclab/markers/vis_marker_registry.py @@ -0,0 +1,66 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Registry for visualization marker state.""" + +from __future__ import annotations + +import weakref +from collections.abc import Callable +from typing import Any + + +class VisMarkerRegistry: + """Tracks visualization marker callbacks and active marker groups.""" + + def __init__(self): + self._callbacks: dict[str, Callable[[Any], None]] = {} + self._groups: dict[str, Any] = {} + + def add_callback(self, name: str, callback: Callable[[Any], None]) -> str: + """Register a callback invoked before marker-capable visualizers step each render tick.""" + self._callbacks[name] = callback + return name + + def add_debug_vis_callback(self, owner: Any) -> str: + """Register an owner's debug visualization callback. + + Args: + owner: Object implementing ``_debug_vis_callback(event)``. + + Returns: + Callback identifier that can be passed to :meth:`remove_callback`. + """ + callback_id = f"visualization_marker:{type(owner).__name__}:{id(owner)}" + owner_ref = weakref.proxy(owner) + return self.add_callback(callback_id, lambda event: owner_ref._debug_vis_callback(event)) + + def clear_debug_vis_callback(self, owner: Any) -> None: + """Clear an owner's registered debug visualization callback, if any.""" + callback_id = getattr(owner, "_debug_vis_handle", None) + if callback_id is not None: + self.remove_callback(callback_id) + owner._debug_vis_handle = None + + def remove_callback(self, callback_id: str) -> None: + """Remove a visualization marker callback if it exists.""" + self._callbacks.pop(callback_id, None) + + def dispatch_callbacks(self, event: Any = None) -> None: + """Invoke all registered visualization marker callbacks.""" + for callback in list(self._callbacks.values()): + callback(event) + + def set_group(self, group_id: str, state: Any) -> None: + """Set or replace one visualization marker group state.""" + self._groups[group_id] = state + + def remove_group(self, group_id: str) -> None: + """Remove one visualization marker group state if present.""" + self._groups.pop(group_id, None) + + def get_groups(self) -> dict[str, Any]: + """Return all active visualization marker groups.""" + return self._groups diff --git a/source/isaaclab/isaaclab/markers/visualization_markers.py b/source/isaaclab/isaaclab/markers/visualization_markers.py index bd27009c0ec3..2e418bbecade 100644 --- a/source/isaaclab/isaaclab/markers/visualization_markers.py +++ b/source/isaaclab/isaaclab/markers/visualization_markers.py @@ -3,92 +3,64 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""A class to coordinate groups of visual markers (such as spheres, frames or arrows) -using `UsdGeom.PointInstancer`_ class. +"""Backend-agnostic facade for coordinating groups of visual markers. -The class :class:`VisualizationMarkers` is used to create a group of visual markers and -visualize them in the viewport. The markers are represented as :class:`UsdGeom.PointInstancer` prims -in the USD stage. The markers are created as prototypes in the :class:`UsdGeom.PointInstancer` prim -and are instanced in the :class:`UsdGeom.PointInstancer` prim. The markers can be visualized by -passing the indices of the marker prototypes and their translations, orientations and scales. -The marker prototypes can be configured with the :class:`VisualizationMarkersCfg` class. - -.. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html +The :class:`VisualizationMarkers` class is used to create a group of visual +markers and visualize them through the active visualizer backends. The marker +prototypes are configured with :class:`VisualizationMarkersCfg`, and individual +marker instances can be updated by passing prototype indices and their +translations, orientations, and scales. """ -# needed to import for allowing type-hinting: np.ndarray | torch.Tensor | None from __future__ import annotations import logging -from dataclasses import MISSING import numpy as np import torch -import omni.physx.scripts.utils as physx_utils -from pxr import Gf, PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, Vt - import isaaclab.sim as sim_utils -from isaaclab.sim.spawners import SpawnerCfg -from isaaclab.utils.configclass import configclass -from isaaclab.utils.math import convert_quat - -# import logger -logger = logging.getLogger(__name__) - - -@configclass -class VisualizationMarkersCfg: - """A class to configure a :class:`VisualizationMarkers`.""" - prim_path: str = MISSING - """The prim path where the :class:`UsdGeom.PointInstancer` will be created.""" +from .visualization_markers_cfg import VisualizationMarkersCfg - markers: dict[str, SpawnerCfg] = MISSING - """The dictionary of marker configurations. - - The key is the name of the marker, and the value is the configuration of the marker. - The key is used to identify the marker in the class. - """ +logger = logging.getLogger(__name__) class VisualizationMarkers: - """A class to coordinate groups of visual markers (loaded from USD). - - This class allows visualization of different UI markers in the scene, such as points and frames. - The class wraps around the `UsdGeom.PointInstancer`_ for efficient handling of objects - in the stage via instancing the created marker prototype prims. - - A marker prototype prim is a reusable template prim used for defining variations of objects - in the scene. For example, a sphere prim can be used as a marker prototype prim to create - multiple sphere prims in the scene at different locations. Thus, prototype prims are useful - for creating multiple instances of the same prim in the scene. - - The class parses the configuration to create different the marker prototypes into the stage. Each marker - prototype prim is created as a child of the :class:`UsdGeom.PointInstancer` prim. The prim path for the - marker prim is resolved using the key of the marker in the :attr:`VisualizationMarkersCfg.markers` - dictionary. The marker prototypes are created using the :meth:`isaaclab.sim.utils.prims.create_prim` - function, and then instanced using :class:`UsdGeom.PointInstancer` prim to allow creating multiple - instances of the marker prims. - - Switching between different marker prototypes is possible by calling the :meth:`visualize` method with - the prototype indices corresponding to the marker prototype. The prototype indices are based on the order - in the :attr:`VisualizationMarkersCfg.markers` dictionary. For example, if the dictionary has two markers, - "marker1" and "marker2", then their prototype indices are 0 and 1 respectively. The prototype indices - can be passed as a list or array of integers. + """Coordinate groups of visual markers across active visualizer backends. + + This class allows visualization of different UI markers in the scene, such + as points, frames, arrows, and shapes. Marker prototypes are reusable + templates that define variations of objects to visualize. For example, a + sphere marker prototype can be used to create many sphere marker instances + at different locations. + + The class parses the configuration to create the marker prototypes in each + active backend. The marker prototype name comes from the key in the + :attr:`VisualizationMarkersCfg.markers` dictionary, and prototype indices + are based on the dictionary order. For example, if the dictionary has two + markers, ``"marker1"`` and ``"marker2"``, their prototype indices are 0 + and 1 respectively. These indices can be passed to :meth:`visualize` as a + list or array of integers. + + Switching between marker prototypes is possible by calling + :meth:`visualize` with the corresponding prototype indices. The marker + transforms are updated only for the arguments that are provided; omitted + translations, orientations, scales, or marker indices are left unchanged + when supported by the active backend. Usage: - The following snippet shows how to create 24 sphere markers with a radius of 1.0 at random translations - within the range [-1.0, 1.0]. The first 12 markers will be colored red and the rest will be colored green. + The following snippet creates 24 sphere markers at random translations. + The first 12 markers use the first prototype and the rest use the + second prototype. .. code-block:: python + import numpy as np + import isaaclab.sim as sim_utils - from isaaclab.markers import VisualizationMarkersCfg, VisualizationMarkers + from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg - # Create the markers configuration - # This creates two marker prototypes, "marker1" and "marker2" which are spheres with a radius of 1.0. - # The color of "marker1" is red and the color of "marker2" is green. cfg = VisualizationMarkersCfg( prim_path="/World/Visuals/testMarkers", markers={ @@ -96,48 +68,35 @@ class VisualizationMarkers: radius=1.0, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), ), - "marker2": VisualizationMarkersCfg.SphereCfg( + "marker2": sim_utils.SphereCfg( radius=1.0, visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)), ), }, ) - # Create the markers instance - # This will create a UsdGeom.PointInstancer prim at the given path along with the marker prototypes. - marker = VisualizationMarkers(cfg) - # Set position of the marker - # -- randomly sample translations between -1.0 and 1.0 + marker = VisualizationMarkers(cfg) marker_translations = np.random.uniform(-1.0, 1.0, (24, 3)) - # -- this will create 24 markers at the given translations - # note: the markers will all be `marker1` since the marker indices are not given + + # This creates 24 markers using the first prototype because marker + # indices are not given. marker.visualize(translations=marker_translations) - # alter the markers based on their prototypes indices - # first 12 markers will be marker1 and the rest will be marker2 - # 0 -> marker1, 1 -> marker2 + # 0 -> marker1, 1 -> marker2. Since translations are omitted here, + # only the marker prototypes are changed. marker_indices = [0] * 12 + [1] * 12 - # this will change the marker prototypes at the given indices - # note: the translations of the markers will not be changed from the previous call - # since the translations are not given. marker.visualize(marker_indices=marker_indices) - # alter the markers based on their prototypes indices and translations + # Update both marker prototypes and translations. marker.visualize(marker_indices=marker_indices, translations=marker_translations) - .. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html - + The public API intentionally remains the historical marker API: + :meth:`set_visibility`, :meth:`is_visible`, and :meth:`visualize`. Backend + details are delegated to Kit and Newton marker implementations. """ def __init__(self, cfg: VisualizationMarkersCfg): - """Initialize the class. - - When the class is initialized, the :class:`UsdGeom.PointInstancer` is created into the stage - and the marker prims are registered into it. - - .. note:: - If a prim already exists at the given path, the function will find the next free path - and create the :class:`UsdGeom.PointInstancer` prim there. + """Initialize visualization marker backends from the active simulation context. Args: cfg: The configuration for the markers. @@ -145,28 +104,18 @@ def __init__(self, cfg: VisualizationMarkersCfg): Raises: ValueError: When no markers are provided in the :obj:`cfg`. """ - # get next free path for the prim - prim_path = sim_utils.get_next_free_prim_path(cfg.prim_path) - # create a new prim - self.stage = sim_utils.get_current_stage() - self._instancer_manager = UsdGeom.PointInstancer.Define(self.stage, prim_path) - # store inputs - self.prim_path = prim_path + if len(cfg.markers) == 0: + raise ValueError(f"The `cfg.markers` cannot be empty. Received: {cfg.markers}") + self.cfg = cfg - # check if any markers is provided - if len(self.cfg.markers) == 0: - raise ValueError(f"The `cfg.markers` cannot be empty. Received: {self.cfg.markers}") - - # create a child prim for the marker - self._add_markers_prototypes(self.cfg.markers) - # Note: We need to do this the first time to initialize the instancer. - # Otherwise, the instancer will not be "created" and the function `GetInstanceIndices()` will fail. - self._instancer_manager.GetProtoIndicesAttr().Set(list(range(self.num_prototypes))) - self._instancer_manager.GetPositionsAttr().Set([Gf.Vec3f(0.0)] * self.num_prototypes) - self._count = self.num_prototypes + self.prim_path = cfg.prim_path + self._count = len(cfg.markers) + self._is_visible = True + self._backends: list[object] = [] + self._ensure_backends_initialized() def __str__(self) -> str: - """Return: A string representation of the class.""" + """Return a string representation of the marker group.""" msg = f"VisualizationMarkers(prim_path={self.prim_path})" msg += f"\n\tCount: {self.count}" msg += f"\n\tNumber of prototypes: {self.num_prototypes}" @@ -175,10 +124,6 @@ def __str__(self) -> str: msg += f"\n\t\t[Index: {index}]: {name}: {marker.to_dict()}" return msg - """ - Properties. - """ - @property def num_prototypes(self) -> int: """The number of marker prototypes available.""" @@ -187,35 +132,20 @@ def num_prototypes(self) -> int: @property def count(self) -> int: """The total number of marker instances.""" - # TODO: Update this when the USD API is available (Isaac Sim 2023.1) - # return self._instancer_manager.GetInstanceCount() return self._count - """ - Operations. - """ - def set_visibility(self, visible: bool): - """Sets the visibility of the markers. - - The method does this through the USD API. - - Args: - visible: flag to set the visibility. - """ - imageable = UsdGeom.Imageable(self._instancer_manager) - if visible: - imageable.MakeVisible() - else: - imageable.MakeInvisible() + """Set marker visibility for all initialized backends.""" + self._is_visible = visible + self._ensure_backends_initialized() + for backend in self._backends: + backend.set_visibility(visible) def is_visible(self) -> bool: - """Checks the visibility of the markers. - - Returns: - True if the markers are visible, False otherwise. - """ - return self._instancer_manager.GetVisibilityAttr().Get() != UsdGeom.Tokens.invisible + """Return whether the marker group is visible.""" + if self._backends: + return any(backend.is_visible() for backend in self._backends) + return self._is_visible def visualize( self, @@ -224,187 +154,158 @@ def visualize( scales: np.ndarray | torch.Tensor | None = None, marker_indices: list[int] | np.ndarray | torch.Tensor | None = None, ): - """Update markers in the viewport. + """Update markers in all initialized visualizer backends. .. note:: - If the prim `PointInstancer` is hidden in the stage, the function will simply return - without updating the markers. This helps in unnecessary computation when the markers - are not visible. - - Whenever updating the markers, the input arrays must have the same number of elements - in the first dimension. If the number of elements is different, the `UsdGeom.PointInstancer` - will raise an error complaining about the mismatch. - - Additionally, the function supports dynamic update of the markers. This means that the - number of markers can change between calls. For example, if you have 24 points that you - want to visualize, you can pass 24 translations, orientations, and scales. If you want to - visualize only 12 points, you can pass 12 translations, orientations, and scales. The - function will automatically update the number of markers in the scene. - - The function will also update the marker prototypes based on their prototype indices. For instance, - if you have two marker prototypes, and you pass the following marker indices: [0, 1, 0, 1], the function - will update the first and third markers with the first prototype, and the second and fourth markers - with the second prototype. This is useful when you want to visualize different markers in the same - scene. The list of marker indices must have the same number of elements as the translations, orientations, - or scales. If the number of elements is different, the function will raise an error. + If the markers are hidden, the function returns without updating + backend marker state. This avoids unnecessary work while debug + visualization is disabled. - .. caution:: - This function will update all the markers instanced from the prototypes. That means - if you have 24 markers, you will need to pass 24 translations, orientations, and scales. + Whenever updating the markers, the input arrays must have the same + number of elements in the first dimension. Backends generally require + all per-marker arrays to describe the same number of marker instances. + + The function supports dynamic updates of the marker count. For example, + if you have 24 points to visualize, you can pass 24 translations, + orientations, and scales. If you later want to visualize only 12 + points, you can pass arrays with 12 rows and the backends will update + the number of marker instances. - If you want to update only a subset of the markers, you will need to handle the indices - yourself and pass the complete arrays to this function. + The function also updates marker prototypes based on prototype indices. + For instance, if there are two marker prototypes and you pass marker + indices ``[0, 1, 0, 1]``, the first and third markers use the first + prototype and the second and fourth markers use the second prototype. + + .. caution:: + This function updates all markers instanced from the prototypes. If + you want to update only a subset of markers, handle the indexing + externally and pass complete arrays to this function. Args: - translations: Translations w.r.t. parent prim frame. Shape is (M, 3). - Defaults to None, which means left unchanged. - orientations: Quaternion orientations (w, x, y, z) w.r.t. parent prim frame. Shape is (M, 4). - Defaults to None, which means left unchanged. - scales: Scale applied before any rotation is applied. Shape is (M, 3). - Defaults to None, which means left unchanged. - marker_indices: Decides which marker prototype to visualize. Shape is (M). - Defaults to None, which means left unchanged provided that the total number of markers - is the same as the previous call. If the number of markers is different, the function - will update the number of markers in the scene. + translations: Translations w.r.t. parent prim frame. Shape is + (M, 3). Defaults to None, which means left unchanged. + orientations: Quaternion orientations (x, y, z, w) w.r.t. parent + prim frame. Shape is (M, 4). Defaults to None, which means left + unchanged. + scales: Scale applied before any rotation is applied. Shape is + (M, 3). Defaults to None, which means left unchanged. + marker_indices: Decides which marker prototype to visualize. Shape + is (M). Defaults to None, which means left unchanged provided + that the total number of markers is the same as the previous + call. If the number of markers is different, the function will + update the number of markers. Raises: ValueError: When input arrays do not follow the expected shapes. ValueError: When the function is called with all None arguments. """ - # check if it is visible (if not then let's not waste time) + self._ensure_backends_initialized() + # If markers are hidden, do not spend time normalizing or dispatching + # marker state to the active backends. if not self.is_visible(): return - # check if we have any markers to visualize - num_markers = 0 - # resolve inputs - # -- position - if translations is not None: - if isinstance(translations, torch.Tensor): - translations = translations.detach().cpu().numpy() - # check that shape is correct - if translations.shape[1] != 3 or len(translations.shape) != 2: - raise ValueError(f"Expected `translations` to have shape (M, 3). Received: {translations.shape}.") - # apply translations - self._instancer_manager.GetPositionsAttr().Set(Vt.Vec3fArray.FromNumpy(translations)) - # update number of markers - num_markers = translations.shape[0] - # -- orientation - if orientations is not None: - if isinstance(orientations, torch.Tensor): - orientations = orientations.detach().cpu().numpy() - # check that shape is correct - if orientations.shape[1] != 4 or len(orientations.shape) != 2: - raise ValueError(f"Expected `orientations` to have shape (M, 4). Received: {orientations.shape}.") - # roll orientations from (w, x, y, z) to (x, y, z, w) - # internally USD expects (x, y, z, w) - orientations = convert_quat(orientations, to="xyzw") - # apply orientations - self._instancer_manager.GetOrientationsAttr().Set(Vt.QuathArray.FromNumpy(orientations)) - # update number of markers - num_markers = orientations.shape[0] - # -- scales - if scales is not None: - if isinstance(scales, torch.Tensor): - scales = scales.detach().cpu().numpy() - # check that shape is correct - if scales.shape[1] != 3 or len(scales.shape) != 2: - raise ValueError(f"Expected `scales` to have shape (M, 3). Received: {scales.shape}.") - # apply scales - self._instancer_manager.GetScalesAttr().Set(Vt.Vec3fArray.FromNumpy(scales)) - # update number of markers - num_markers = scales.shape[0] - # -- status - if marker_indices is not None or num_markers != self._count: - # apply marker indices - if marker_indices is not None: - if isinstance(marker_indices, torch.Tensor): - marker_indices = marker_indices.detach().cpu().numpy() - elif isinstance(marker_indices, list): - marker_indices = np.array(marker_indices) - # check that shape is correct - if len(marker_indices.shape) != 1: - raise ValueError(f"Expected `marker_indices` to have shape (M,). Received: {marker_indices.shape}.") - # apply proto indices - self._instancer_manager.GetProtoIndicesAttr().Set(Vt.IntArray.FromNumpy(marker_indices)) - # update number of markers - num_markers = marker_indices.shape[0] - else: - # check that number of markers is not zero - if num_markers == 0: - raise ValueError("Number of markers cannot be zero! Hint: The function was called with no inputs?") - # set all markers to be the first prototype - self._instancer_manager.GetProtoIndicesAttr().Set([0] * num_markers) - # set number of markers - self._count = num_markers - """ - Helper functions. - """ - - def _add_markers_prototypes(self, markers_cfg: dict[str, sim_utils.SpawnerCfg]): - """Adds markers prototypes to the scene and sets the markers instancer to use them.""" - # add markers based on config - for name, cfg in markers_cfg.items(): - # resolve prim path - marker_prim_path = f"{self.prim_path}/{name}" - # create a child prim for the marker - marker_prim = cfg.func(prim_path=marker_prim_path, cfg=cfg) - # make the asset uninstanceable (in case it is) - # point instancer defines its own prototypes so if an asset is already instanced, this doesn't work. - self._process_prototype_prim(marker_prim) - # add child reference to point instancer - self._instancer_manager.GetPrototypesRel().AddTarget(marker_prim_path) - # check that we loaded all the prototypes - prototypes = self._instancer_manager.GetPrototypesRel().GetTargets() - if len(prototypes) != len(markers_cfg): - raise RuntimeError( - f"Failed to load all the prototypes. Expected: {len(markers_cfg)}. Received: {len(prototypes)}." - ) - - def _process_prototype_prim(self, prim: Usd.Prim): - """Process a prim and its descendants to make them suitable for defining prototypes. - - Point instancer defines its own prototypes so if an asset is already instanced, this doesn't work. - This function checks if the prim at the specified prim path and its descendants are instanced. - If so, it makes the respective prim uninstanceable by disabling instancing on the prim. + norm_translations = self._to_tensor(translations, expected_width=3, name="translations") + norm_orientations = self._to_tensor(orientations, expected_width=4, name="orientations") + norm_scales = self._to_tensor(scales, expected_width=3, name="scales") + norm_marker_indices = self._to_index_tensor(marker_indices) + target_device = self._resolve_target_device( + norm_translations, norm_orientations, norm_scales, norm_marker_indices + ) + if norm_translations is not None: + norm_translations = norm_translations.to(device=target_device) + if norm_orientations is not None: + norm_orientations = norm_orientations.to(device=target_device) + if norm_scales is not None: + norm_scales = norm_scales.to(device=target_device) + if norm_marker_indices is not None: + norm_marker_indices = norm_marker_indices.to(device=target_device) - Additionally, it makes the prim invisible to secondary rays. This is useful when we do not want - to see the marker prims on camera images. + num_markers = 0 + for value in (norm_translations, norm_orientations, norm_scales, norm_marker_indices): + if value is not None: + num_markers = value.shape[0] + + if norm_marker_indices is None and num_markers != 0 and num_markers != self._count: + norm_marker_indices = torch.zeros(num_markers, dtype=torch.int32, device=target_device) + elif norm_marker_indices is None and num_markers == 0: + if all(value is None for value in (norm_translations, norm_orientations, norm_scales)): + raise ValueError("Number of markers cannot be zero! Hint: The function was called with no inputs?") + num_markers = self._count + + for backend in self._backends: + backend.visualize(norm_translations, norm_orientations, norm_scales, norm_marker_indices) + + if num_markers != 0: + self._count = num_markers + + def __del__(self): + for backend in getattr(self, "_backends", []): + if hasattr(backend, "close"): + backend.close() + + def _ensure_backends_initialized(self) -> None: + sim = sim_utils.SimulationContext.instance() + if sim is None: + self._ensure_kit_backend() + return - Args: - prim: The prim to check. - """ - # check if prim is valid - if not prim.IsValid(): - raise ValueError(f"Prim at path '{prim.GetPrimAtPath()}' is not valid.") - # iterate over all prims under prim-path - all_prims = [prim] - while len(all_prims) > 0: - # get current prim - child_prim = all_prims.pop(0) - # check if it is physics body -> if so, remove it - if child_prim.HasAPI(UsdPhysics.ArticulationRootAPI): - child_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) - child_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI) - if child_prim.HasAPI(UsdPhysics.RigidBodyAPI): - child_prim.RemoveAPI(UsdPhysics.RigidBodyAPI) - child_prim.RemoveAPI(PhysxSchema.PhysxRigidBodyAPI) - if child_prim.IsA(UsdPhysics.Joint): - child_prim.GetAttribute("physics:jointEnabled").Set(False) - # check if prim is instanced -> if so, make it uninstanceable - if child_prim.IsInstance(): - child_prim.SetInstanceable(False) - # check if prim is a mesh -> if so, make it invisible to secondary rays - if child_prim.IsA(UsdGeom.Gprim): - # invisible to secondary rays such as depth images - sim_utils.change_prim_property( - prop_path=f"{child_prim.GetPrimPath().pathString}.primvars:invisibleToSecondaryRays", - value=True, - stage=prim.GetStage(), - type_to_create_if_not_exist=Sdf.ValueTypeNames.Bool, - ) - # add children to list - all_prims += child_prim.GetChildren() - - # remove any physics on the markers because they are only for visualization! - physx_utils.removeRigidBodySubtree(prim) + if any(viz.supports_markers() and viz.pumps_app_update() and viz.cfg.enable_markers for viz in sim.visualizers): + self._ensure_kit_backend() + if any( + viz.supports_markers() and not viz.pumps_app_update() and viz.cfg.enable_markers for viz in sim.visualizers + ): + self._ensure_newton_backend() + + def _ensure_kit_backend(self) -> None: + """Create the Kit marker backend if it is not already active.""" + from isaaclab_visualizers.kit.kit_visualization_markers import KitVisualizationMarkers + + if not any(isinstance(backend, KitVisualizationMarkers) for backend in self._backends): + self._backends.append(KitVisualizationMarkers(self.cfg, visible=self._is_visible)) + + def _ensure_newton_backend(self) -> None: + """Create the Newton-family marker backend if it is not already active.""" + from isaaclab_visualizers.newton.newton_visualization_markers import NewtonVisualizationMarkers + + if not any(isinstance(backend, NewtonVisualizationMarkers) for backend in self._backends): + self._backends.append(NewtonVisualizationMarkers(self.cfg, visible=self._is_visible)) + + def _resolve_target_device(self, *values: torch.Tensor | None) -> torch.device: + for value in values: + if value is not None: + return value.device + for backend in self._backends: + if hasattr(backend, "infer_device"): + return backend.infer_device() + return torch.device("cpu") + + @staticmethod + def _to_tensor( + value: np.ndarray | torch.Tensor | None, + expected_width: int, + name: str, + ) -> torch.Tensor | None: + if value is None: + return None + if isinstance(value, np.ndarray): + tensor = torch.from_numpy(value) + else: + tensor = value.detach() + if tensor.ndim != 2 or tensor.shape[1] != expected_width: + raise ValueError(f"Expected `{name}` to have shape (M, {expected_width}). Received: {tuple(tensor.shape)}.") + return tensor.to(dtype=torch.float32) + + @staticmethod + def _to_index_tensor(value: list[int] | np.ndarray | torch.Tensor | None) -> torch.Tensor | None: + if value is None: + return None + if isinstance(value, list): + tensor = torch.tensor(value) + elif isinstance(value, np.ndarray): + tensor = torch.from_numpy(value) + else: + tensor = value.detach() + if tensor.ndim != 1: + raise ValueError(f"Expected `marker_indices` to have shape (M,). Received: {tuple(tensor.shape)}.") + return tensor.to(dtype=torch.int32) diff --git a/source/isaaclab/isaaclab/markers/visualization_markers_cfg.py b/source/isaaclab/isaaclab/markers/visualization_markers_cfg.py new file mode 100644 index 000000000000..b7cc2002ccf0 --- /dev/null +++ b/source/isaaclab/isaaclab/markers/visualization_markers_cfg.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for visualization markers.""" + +from __future__ import annotations + +from dataclasses import MISSING + +from isaaclab.sim.spawners import SpawnerCfg +from isaaclab.utils.configclass import configclass + + +@configclass +class VisualizationMarkersCfg: + """A class to configure a :class:`VisualizationMarkers`.""" + + prim_path: str = MISSING + """The prim path where the :class:`UsdGeom.PointInstancer` will be created.""" + + markers: dict[str, SpawnerCfg] = MISSING + """The dictionary of marker configurations. + + The key is the name of the marker, and the value is the configuration of the marker. + The key is used to identify the marker in the class. + """ diff --git a/source/isaaclab/isaaclab/physics/__init__.py b/source/isaaclab/isaaclab/physics/__init__.py new file mode 100644 index 000000000000..7254081c805a --- /dev/null +++ b/source/isaaclab/isaaclab/physics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Implementation backends for simulation interfaces.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/physics/__init__.pyi b/source/isaaclab/isaaclab/physics/__init__.pyi new file mode 100644 index 000000000000..14d0d4d61739 --- /dev/null +++ b/source/isaaclab/isaaclab/physics/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "CallbackHandle", + "PhysicsEvent", + "PhysicsManager", + "PhysicsCfg", + "BaseSceneDataProvider", + "SceneDataProvider", +] + +from .base_scene_data_provider import BaseSceneDataProvider +from .physics_manager import CallbackHandle, PhysicsEvent, PhysicsManager +from .physics_manager_cfg import PhysicsCfg +from .scene_data_provider import SceneDataProvider diff --git a/source/isaaclab/isaaclab/physics/base_scene_data_provider.py b/source/isaaclab/isaaclab/physics/base_scene_data_provider.py new file mode 100644 index 000000000000..9760a71d25ba --- /dev/null +++ b/source/isaaclab/isaaclab/physics/base_scene_data_provider.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Scene data provider interface for visualizers and renderers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any + + +class BaseSceneDataProvider(ABC): + """Backend-agnostic scene data provider interface.""" + + @abstractmethod + def update(self) -> None: + """Refresh any cached scene data (full model/state).""" + raise NotImplementedError + + @abstractmethod + def get_newton_model(self) -> Any | None: + """Return Newton model handle when available.""" + raise NotImplementedError + + @abstractmethod + def get_newton_state(self) -> Any | None: + """Return Newton state handle when available (full state).""" + raise NotImplementedError + + @abstractmethod + def get_usd_stage(self) -> Any | None: + """Return USD stage handle when available.""" + raise NotImplementedError + + @abstractmethod + def get_metadata(self) -> dict[str, Any]: + """Return backend metadata (num_envs, gravity, etc.).""" + raise NotImplementedError + + @abstractmethod + def get_transforms(self) -> dict[str, Any] | None: + """Return body transforms, if supported.""" + raise NotImplementedError + + @abstractmethod + def get_velocities(self) -> dict[str, Any] | None: + """Return body velocities, if supported.""" + raise NotImplementedError + + @abstractmethod + def get_contacts(self) -> dict[str, Any] | None: + """Return contacts, if supported.""" + raise NotImplementedError + + @abstractmethod + def get_camera_transforms(self) -> dict[str, Any] | None: + """Return per-camera, per-env transforms, if supported.""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/physics/physics_manager.py b/source/isaaclab/isaaclab/physics/physics_manager.py new file mode 100644 index 000000000000..7a4cdfe84403 --- /dev/null +++ b/source/isaaclab/isaaclab/physics/physics_manager.py @@ -0,0 +1,372 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for physics managers with unified callback system.""" + +from __future__ import annotations + +import logging +import weakref +from abc import ABC, abstractmethod +from collections.abc import Callable +from enum import Enum +from typing import TYPE_CHECKING, Any, ClassVar + +if TYPE_CHECKING: + from isaaclab.sim.simulation_context import SimulationContext + +logger = logging.getLogger(__name__) + + +class PhysicsEvent(Enum): + """Physics simulation lifecycle events. + + These are general events that apply across all physics backends. + Backend-specific events (e.g., PhysX step events, timeline events) are handled + by the respective manager classes via their own event enums (e.g., IsaacEvents). + + Lifecycle order: MODEL_INIT -> PHYSICS_READY -> STOP + """ + + MODEL_INIT = "model_init" + """Physics model is being constructed. + Fired during scene building, before simulation can run. Use this to register + physics representations (rigid bodies, joints, constraints) with the solver. + """ + + PHYSICS_READY = "physics_ready" + """Physics is initialized and queryable. + Fired after all physics data structures are created and the simulation is + ready to step. Assets can now read initial state (positions, velocities). + """ + + STOP = "stop" + """Simulation is stopping.""" + + +class CallbackHandle: + """Handle for a registered callback, allowing deregistration.""" + + def __init__(self, callback_id: int, manager: type[PhysicsManager]): + self._id = callback_id + self._manager = manager + + @property + def id(self) -> int: + return self._id + + def deregister(self) -> None: + """Remove this callback from the manager.""" + self._manager.deregister_callback(self._id) + + +class PhysicsManager(ABC): + """Abstract base class for physics simulation managers. + + Physics managers handle the lifecycle of a physics simulation backend, + including initialization, stepping, and cleanup. + + This base class provides: + - Unified callback management system + - Common state variables (_sim, _cfg, _device) + - Default accessor implementations + + Lifecycle: initialize() -> reset() -> step() (repeated) -> close() + """ + + _sim: ClassVar[SimulationContext | None] = None + _cfg: ClassVar[Any] = None + _device: ClassVar[str] = "cuda:0" + _sim_time: ClassVar[float] = 0.0 + _callbacks: ClassVar[dict[int, tuple[Any, Callable, int, str | None, Any]]] = {} + _callback_id: ClassVar[int] = 0 + + @classmethod + def register_callback( + cls, + callback: Callable[[Any], None], + event: PhysicsEvent, + order: int = 0, + name: str | None = None, + wrap_weak_ref: bool = True, + ) -> CallbackHandle: + """Register a callback for a physics event. + + Args: + callback: The callback function. Receives event payload as argument. + event: The event to listen for. + order: Priority order (lower = earlier). Default 0. + name: Optional name for debugging. + wrap_weak_ref: If True, wrap bound methods with weak references + to prevent preventing garbage collection. Default True. + + Returns: + CallbackHandle that can be used to deregister the callback. + + Example: + >>> def on_physics_ready(payload): + ... print("Physics is ready!") + >>> handle = PhysxManager.register_callback(on_physics_ready, PhysicsEvent.PHYSICS_READY) + >>> # Later, to remove: + >>> handle.deregister() + """ + cid = cls._callback_id + cls._callback_id += 1 + + if wrap_weak_ref: + callback = cls._wrap_weak_ref(callback) + + subscription = cls._subscribe_to_event(cid, callback, event, order, name) + + cls._callbacks[cid] = (event, callback, order, name, subscription) + return CallbackHandle(cid, cls) + + @classmethod + def deregister_callback(cls, callback_id: int | CallbackHandle) -> None: + """Remove a registered callback. + + Args: + callback_id: The ID or CallbackHandle returned by register_callback(). + """ + cid = callback_id.id if isinstance(callback_id, CallbackHandle) else callback_id + if cid not in cls._callbacks: + return + + event, callback, order, name, subscription = cls._callbacks.pop(cid) + cls._unsubscribe_from_event(cid, event, subscription) + + @classmethod + def dispatch_event(cls, event: PhysicsEvent, payload: Any = None) -> None: + """Dispatch an event to all registered callbacks. + + This is the default implementation using simple callback lists. + Subclasses may override or extend with platform-specific dispatch. + + Args: + event: The event to dispatch. + payload: Optional data to pass to callbacks. + """ + matching = [(cid, cb, order) for cid, (ev, cb, order, name, sub) in cls._callbacks.items() if ev == event] + matching.sort(key=lambda x: x[2]) + + for _, callback, _ in matching: + callback(payload) + + @classmethod + def clear_callbacks(cls) -> None: + """Remove all registered callbacks.""" + for cid in list(cls._callbacks.keys()): + cls.deregister_callback(cid) + cls._callbacks.clear() + cls._callback_id = 0 + + @classmethod + def _wrap_weak_ref(cls, callback: Callable) -> Callable: + """Wrap bound methods with weak references to prevent leaks. + + Args: + callback: The callback to wrap. + + Returns: + Wrapped callback if it's a bound method, otherwise original. + """ + if hasattr(callback, "__self__"): + obj_ref = weakref.proxy(callback.__self__) + method_name = callback.__name__ + + def weak_callback(payload: Any) -> Any: + return getattr(obj_ref, method_name)(payload) + + return weak_callback + return callback + + @classmethod + def _subscribe_to_event( + cls, + callback_id: int, + callback: Callable, + event: PhysicsEvent, + order: int, + name: str | None, + ) -> Any: + """Subscribe to a platform-specific event. + + Override in subclasses to integrate with platform event systems + (e.g., Omniverse event bus, timeline events). + + Args: + callback_id: Unique ID for this callback. + callback: The callback function. + event: The event to subscribe to. + order: Priority order. + name: Optional name. + + Returns: + Platform-specific subscription object (stored for cleanup). + """ + return None + + @classmethod + def _unsubscribe_from_event( + cls, + callback_id: int, + event: PhysicsEvent, + subscription: Any, + ) -> None: + """Unsubscribe from a platform-specific event. + + Override in subclasses to clean up platform subscriptions. + + Args: + callback_id: The callback ID being removed. + event: The event that was subscribed to. + subscription: The subscription object from _subscribe_to_event(). + """ + pass + + @classmethod + @abstractmethod + def initialize(cls, sim_context: SimulationContext) -> None: + """Initialize the physics manager with simulation context. + + Subclasses should call super().initialize() first, then do backend-specific setup. + + Args: + sim_context: Parent simulation context. + """ + # Set on PhysicsManager explicitly so PhysicsManager.get_*() works + # regardless of which subclass is active (Python class vars are per-class) + PhysicsManager._sim = sim_context + PhysicsManager._cfg = sim_context.cfg.physics + PhysicsManager._device = sim_context.cfg.device + PhysicsManager._sim_time = 0.0 + + @classmethod + @abstractmethod + def reset(cls, soft: bool = False) -> None: + """Reset physics simulation. + + Args: + soft: If True, skip full reinitialization. + """ + pass + + @classmethod + @abstractmethod + def forward(cls) -> None: + """Update kinematics without stepping physics (for rendering).""" + pass + + @classmethod + @abstractmethod + def step(cls) -> None: + """Step physics simulation by one timestep (physics only, no rendering).""" + pass + + @classmethod + def pre_render(cls) -> None: + """Sync deferred physics state to the rendering backend. + + Called by :meth:`~isaaclab.sim.SimulationContext.render` before cameras + and visualizers read scene data. The default implementation is a no-op. + Backends that defer transform writes (e.g. Newton's dirty-flag pattern) + should override this to flush pending updates. + """ + pass + + @classmethod + def after_visualizers_render(cls) -> None: + """Hook after visualizers have stepped during :meth:`~isaaclab.sim.SimulationContext.render`. + + Use for physics-backend sync (e.g. fabric) if needed. Recording pipelines (Kit/RTX, + Newton GL video, etc.) run from :mod:`isaaclab.envs.utils.recording_hooks` so they are not + tied to a specific physics manager. Default is a no-op. + """ + pass + + @classmethod + def close(cls) -> None: + """Clean up physics resources. + + Subclasses should call super().close() after backend-specific cleanup. + """ + cls.dispatch_event(PhysicsEvent.STOP) # notify listeners before cleanup + cls.clear_callbacks() + # Reset on PhysicsManager explicitly (matches initialize()) + PhysicsManager._sim = None + PhysicsManager._cfg = None + PhysicsManager._sim_time = 0.0 + + @classmethod + def get_physics_dt(cls) -> float: + """Get the physics timestep in seconds.""" + return PhysicsManager._sim.cfg.dt if PhysicsManager._sim else 1.0 / 60.0 + + @classmethod + def get_device(cls) -> str: + """Get the physics simulation device.""" + return PhysicsManager._device + + @classmethod + def get_simulation_time(cls) -> float: + """Get the current simulation time in seconds.""" + return PhysicsManager._sim_time + + @classmethod + def get_physics_sim_view(cls) -> Any: + """Get the physics simulation view. Override in subclasses.""" + return None + + @classmethod + def play(cls) -> None: + """Start or resume physics simulation. Default is no-op.""" + pass + + @classmethod + def pause(cls) -> None: + """Pause physics simulation. Default is no-op.""" + pass + + @classmethod + def stop(cls) -> None: + """Stop physics simulation. Default is no-op.""" + pass + + @classmethod + def wait_for_playing(cls) -> None: + """Block until the timeline is playing. Default is no-op.""" + pass + + @classmethod + def get_backend(cls) -> str: + """Get the tensor backend being used ("numpy" or "torch").""" + return "torch" if "cuda" in PhysicsManager._device else "numpy" + + @staticmethod + def safe_callback_invoke(fn: Callable, *args, physics_manager: type[PhysicsManager] | None = None) -> None: + """Invoke a callback, catching exceptions that would be swallowed by external event buses. + + Ignores ``ReferenceError`` (from garbage-collected weakref proxies). All other + exceptions are forwarded to *physics_manager*.``store_callback_exception`` when + available (see note below), or re-raised immediately otherwise. + + Note (Octi): + The carb event bus used by PhysX/Omniverse silently swallows exceptions raised + inside callbacks. ``PhysxManager`` works around this by storing the exception + and re-raising it after event dispatch completes (in ``reset()`` / ``step()``). + Backends that dispatch events directly (e.g. Newton) don't need this — exceptions + propagate normally — so ``store_callback_exception`` is not called for them. + This is a known wart; a cleaner solution is actively being explored. + """ + try: + fn(*args) + except ReferenceError: + pass + except Exception as e: + store_fn = getattr(physics_manager, "store_callback_exception", None) + if callable(store_fn): + store_fn(e) + else: + raise diff --git a/source/isaaclab/isaaclab/physics/physics_manager_cfg.py b/source/isaaclab/isaaclab/physics/physics_manager_cfg.py new file mode 100644 index 000000000000..0ff2348b591a --- /dev/null +++ b/source/isaaclab/isaaclab/physics/physics_manager_cfg.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base configuration for physics managers.""" + +from __future__ import annotations + +from dataclasses import MISSING +from typing import TYPE_CHECKING, Any + +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from .physics_manager import PhysicsManager + + +@configclass +class PhysicsCfg: + """Abstract base configuration for physics managers. + + This base class contains physics backend-specific parameters. + Subclasses should override the class_type to return the appropriate + physics manager class. + + Shared simulation parameters (dt, gravity, physics_prim_path, physics_material) + are read directly from :class:`SimulationCfg` by the physics manager. + """ + + class_type: type[PhysicsManager] | Any = MISSING + """The physics manager class to use. Must be set by subclasses.""" diff --git a/source/isaaclab/isaaclab/physics/scene_data_provider.py b/source/isaaclab/isaaclab/physics/scene_data_provider.py new file mode 100644 index 000000000000..0095a413158d --- /dev/null +++ b/source/isaaclab/isaaclab/physics/scene_data_provider.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory for creating scene data provider instances.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_scene_data_provider import BaseSceneDataProvider + +if TYPE_CHECKING: + from isaaclab.sim import SimulationContext + + +class SceneDataProvider(FactoryBase, BaseSceneDataProvider): + """Factory for creating scene data provider instances.""" + + _backend_class_names = {"physx": "PhysxSceneDataProvider", "newton": "NewtonSceneDataProvider"} + + @classmethod + def _get_backend(cls, stage, simulation_context: SimulationContext, *args, **kwargs) -> str: + manager_name = simulation_context.physics_manager.__name__.lower() + if "newton" in manager_name: + return "newton" + if "physx" in manager_name: + return "physx" + raise ValueError(f"Unknown physics manager: {manager_name}") + + @classmethod + def _get_module_name(cls, backend: str) -> str: + return f"isaaclab_{backend}.scene_data_providers" + + def __new__(cls, stage, simulation_context: SimulationContext, *args, **kwargs) -> BaseSceneDataProvider: + """Create a new scene data provider based on the active physics backend.""" + result = super().__new__(cls, stage, simulation_context, *args, **kwargs) + if not isinstance(result, BaseSceneDataProvider): + name = type(result).__name__ + raise TypeError(f"Backend scene data provider {name!r} must inherit from BaseSceneDataProvider.") + return result diff --git a/source/isaaclab/isaaclab/physics/scene_data_requirements.py b/source/isaaclab/isaaclab/physics/scene_data_requirements.py new file mode 100644 index 000000000000..f67aaa2bb6d9 --- /dev/null +++ b/source/isaaclab/isaaclab/physics/scene_data_requirements.py @@ -0,0 +1,128 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Central requirement resolution for scene-data consumers. + +This module is intentionally type-based (not config-import based) so requirement +checks stay robust even when optional backend packages are not installed. +""" + +from __future__ import annotations + +from collections.abc import Iterable +from dataclasses import dataclass + + +@dataclass(frozen=True) +class SceneDataRequirement: + """Capabilities required from a scene data provider.""" + + requires_newton_model: bool = False + requires_usd_stage: bool = False + + +_VISUALIZER_REQUIREMENTS: dict[str, SceneDataRequirement] = { + "kit": SceneDataRequirement(requires_usd_stage=True), + "newton": SceneDataRequirement(requires_newton_model=True), + "rerun": SceneDataRequirement(requires_newton_model=True), + "viser": SceneDataRequirement(requires_newton_model=True), +} + +_RENDERER_REQUIREMENTS: dict[str, SceneDataRequirement] = { + "isaac_rtx": SceneDataRequirement(requires_usd_stage=True), + "newton_warp": SceneDataRequirement(requires_newton_model=True), + "ovrtx": SceneDataRequirement(requires_newton_model=True, requires_usd_stage=True), +} + + +def supported_visualizer_types() -> tuple[str, ...]: + """Return supported visualizer type names in sorted order. + + Returns: + Sorted tuple of supported visualizer type names. + """ + return tuple(sorted(_VISUALIZER_REQUIREMENTS)) + + +def supported_renderer_types() -> tuple[str, ...]: + """Return supported renderer type names in sorted order. + + Returns: + Sorted tuple of supported renderer type names. + """ + return tuple(sorted(_RENDERER_REQUIREMENTS)) + + +def requirement_for_visualizer_type(visualizer_type: str) -> SceneDataRequirement: + """Resolve scene-data requirements for one visualizer type. + + Args: + visualizer_type: Visualizer type name. + + Returns: + Requirement object for the given visualizer type. + + Raises: + ValueError: If ``visualizer_type`` is unknown. + """ + requirement = _VISUALIZER_REQUIREMENTS.get(visualizer_type) + if requirement is None: + supported = ", ".join(repr(v) for v in supported_visualizer_types()) + raise ValueError(f"Unknown visualizer type {visualizer_type!r}. Supported types: {supported}.") + return requirement + + +def requirement_for_renderer_type(renderer_type: str) -> SceneDataRequirement: + """Resolve scene-data requirements for one renderer type. + + Args: + renderer_type: Renderer type name. + + Returns: + Requirement object for the given renderer type. + + Raises: + ValueError: If ``renderer_type`` is unknown. + """ + requirement = _RENDERER_REQUIREMENTS.get(renderer_type) + if requirement is None: + supported = ", ".join(repr(v) for v in supported_renderer_types()) + raise ValueError(f"Unknown renderer type {renderer_type!r}. Supported types: {supported}.") + return requirement + + +def aggregate_requirements(requirements: Iterable[SceneDataRequirement]) -> SceneDataRequirement: + """Combine a sequence of requirements using logical OR. + + Args: + requirements: Requirement objects to combine. + + Returns: + Combined requirement object. + """ + requires_newton_model = False + requires_usd_stage = False + for requirement in requirements: + requires_newton_model |= requirement.requires_newton_model + requires_usd_stage |= requirement.requires_usd_stage + return SceneDataRequirement(requires_newton_model=requires_newton_model, requires_usd_stage=requires_usd_stage) + + +def resolve_scene_data_requirements( + visualizer_types: Iterable[str], + renderer_types: Iterable[str] = (), +) -> SceneDataRequirement: + """Resolve combined scene-data requirements from visualizer and renderer types. + + Args: + visualizer_types: Visualizer type names to resolve. + renderer_types: Renderer type names to resolve. + + Returns: + Combined requirement object. + """ + requirements = [requirement_for_visualizer_type(viz_type) for viz_type in visualizer_types] + requirements.extend(requirement_for_renderer_type(renderer_type) for renderer_type in renderer_types) + return aggregate_requirements(requirements) diff --git a/source/isaaclab/isaaclab/renderers/__init__.py b/source/isaaclab/isaaclab/renderers/__init__.py new file mode 100644 index 000000000000..a60aa0496794 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for renderer configurations and implementations. + +This sub-package contains configuration classes and implementations for +different renderer backends that can be used with Isaac Lab. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/renderers/__init__.pyi b/source/isaaclab/isaaclab/renderers/__init__.pyi new file mode 100644 index 000000000000..ae408c03ae7d --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/__init__.pyi @@ -0,0 +1,21 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseRenderer", + "CameraRenderSpec", + "RenderBufferKind", + "RenderBufferSpec", + "Renderer", + "RendererCfg", + "RenderContext", +] + +from .base_renderer import BaseRenderer +from .camera_render_spec import CameraRenderSpec +from .output_contract import RenderBufferKind, RenderBufferSpec +from .renderer import Renderer +from .renderer_cfg import RendererCfg +from .render_context import RenderContext diff --git a/source/isaaclab/isaaclab/renderers/base_renderer.py b/source/isaaclab/isaaclab/renderers/base_renderer.py new file mode 100644 index 000000000000..2fc498eae8e3 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/base_renderer.py @@ -0,0 +1,122 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Abstract base class for renderer implementations.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +from .camera_render_spec import CameraRenderSpec +from .output_contract import RenderBufferKind, RenderBufferSpec + +if TYPE_CHECKING: + import torch + + from isaaclab.sensors.camera.camera_data import CameraData + + +class BaseRenderer(ABC): + """Abstract base class for renderer implementations.""" + + @abstractmethod + def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: + """Per-output layout (channels + dtype) this renderer can produce. + + Outputs absent from the mapping are not produced by this backend. + + Returns: + Mapping from supported :class:`RenderBufferKind` to its :class:`RenderBufferSpec`. + """ + pass + + @abstractmethod + def prepare_stage(self, stage: Any, num_envs: int) -> None: + """Prepare the stage for rendering before :meth:`create_render_data` is called. + + Some renderers need to export or preprocess the USD stage before + creating render data. This method is called after the renderer is + instantiated and before :meth:`create_render_data`. + + Args: + stage: USD stage to prepare, or None if not applicable. + num_envs: Number of environments. + """ + pass + + @abstractmethod + def create_render_data(self, spec: CameraRenderSpec) -> Any: + """Create render data for the given camera :class:`CameraRenderSpec`. + + Args: + spec: Immutable description of the tiled camera (paths, config, device). + + Returns: + Renderer-specific data for subsequent :meth:`render` / :meth:`read_output` calls. + """ + pass + + @abstractmethod + def set_outputs(self, render_data: Any, output_data: dict[str, torch.Tensor]) -> None: + """Store reference to output buffers for writing during render. + + Args: + render_data: The render data object from :meth:`create_render_data`. + output_data: Dictionary mapping output names (e.g. ``"rgb"``, ``"depth"``) + to pre-allocated tensors where rendered data will be written. + """ + pass + + @abstractmethod + def update_transforms(self) -> None: + """Update scene transforms before rendering. + + Called to sync physics/asset state into the renderer's scene representation. + """ + pass + + @abstractmethod + def update_camera( + self, render_data: Any, positions: torch.Tensor, orientations: torch.Tensor, intrinsics: torch.Tensor + ) -> None: + """Update camera poses and intrinsics for the next render. + + Args: + render_data: The render data object from :meth:`create_render_data`. + positions: Camera positions in world frame, shape ``(N, 3)``. + orientations: Camera orientations as quaternions (x, y, z, w), shape ``(N, 4)``. + intrinsics: Camera intrinsic matrices, shape ``(N, 3, 3)``. + """ + pass + + @abstractmethod + def render(self, render_data: Any) -> None: + """Perform rendering and write to output buffers. + + Args: + render_data: The render data object from :meth:`create_render_data`. + """ + pass + + @abstractmethod + def read_output(self, render_data: Any, camera_data: CameraData) -> None: + """Read rendered outputs from the renderer into the camera data container. + + Args: + render_data: The render data object from :meth:`create_render_data`. + camera_data: The :class:`~isaaclab.sensors.camera.camera_data.CameraData` + instance to populate. + """ + pass + + @abstractmethod + def cleanup(self, render_data: Any) -> None: + """Release renderer resources associated with the given render data. + + Args: + render_data: The render data object to clean up, or ``None``. + """ + pass diff --git a/source/isaaclab/isaaclab/renderers/camera_render_spec.py b/source/isaaclab/isaaclab/renderers/camera_render_spec.py new file mode 100644 index 000000000000..526b90121713 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/camera_render_spec.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Immutable description of a tiled camera passed to render backends.""" + +from __future__ import annotations + +from dataclasses import dataclass + +from isaaclab.sensors.camera.camera_cfg import CameraCfg + + +@dataclass(frozen=True) +class CameraRenderSpec: + """Stable inputs for :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.create_render_data`. + + Backends use this instead of holding a reference to the :class:`~isaaclab.sensors.camera.Camera` + sensor instance, avoiding circular dependencies between sensors and render data. + + Args: + cfg: Camera configuration (data types, resolution, filters, etc.). + device: Torch device string (e.g. ``"cuda:0"``) used by GPU annotators and Warp. + num_instances: Number of tiled camera instances (environments). + camera_prim_paths: Absolute USD paths for each environment's camera prim. + view_count: Number of camera prims (must match ``len(camera_prim_paths)``). + camera_path_relative_to_env_0: Camera prim path with ``/World/envs/env_0/`` prefix + stripped; required by OVRTX. Empty string if the first camera is not under env 0. + """ + + cfg: CameraCfg + device: str + num_instances: int + camera_prim_paths: tuple[str, ...] + view_count: int + camera_path_relative_to_env_0: str diff --git a/source/isaaclab/isaaclab/renderers/output_contract.py b/source/isaaclab/isaaclab/renderers/output_contract.py new file mode 100644 index 000000000000..bfa3fff41d8b --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/output_contract.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Renderer→consumer output contract types. + +Leaf module shared by :class:`~isaaclab.renderers.BaseRenderer` and +:class:`~isaaclab.sensors.camera.CameraData` to avoid a direct dependency +between them. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from enum import StrEnum +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + import torch + + +class RenderBufferKind(StrEnum): + """Canonical names for the per-pixel render buffer kinds a renderer can publish. + + String values match the vocabulary used by + :attr:`isaaclab.sensors.camera.CameraCfg.data_types`. + """ + + RGB = "rgb" + RGBA = "rgba" + ALBEDO = "albedo" + DEPTH = "depth" + DISTANCE_TO_IMAGE_PLANE = "distance_to_image_plane" + DISTANCE_TO_CAMERA = "distance_to_camera" + NORMALS = "normals" + MOTION_VECTORS = "motion_vectors" + SEMANTIC_SEGMENTATION = "semantic_segmentation" + INSTANCE_SEGMENTATION_FAST = "instance_segmentation_fast" + INSTANCE_ID_SEGMENTATION_FAST = "instance_id_segmentation_fast" + SIMPLE_SHADING_CONSTANT_DIFFUSE = "simple_shading_constant_diffuse" + SIMPLE_SHADING_DIFFUSE_MDL = "simple_shading_diffuse_mdl" + SIMPLE_SHADING_FULL_MDL = "simple_shading_full_mdl" + + +@dataclass(frozen=True) +class RenderBufferSpec: + """Per-pixel layout (channels + dtype) for one render buffer kind.""" + + channels: int + """Number of per-pixel channels (last dimension of the allocated tensor).""" + + dtype: torch.dtype + """Torch dtype the renderer writes for this render buffer kind.""" diff --git a/source/isaaclab/isaaclab/renderers/render_context.py b/source/isaaclab/isaaclab/renderers/render_context.py new file mode 100644 index 000000000000..1c1a45a19454 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/render_context.py @@ -0,0 +1,129 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Simulation-scoped renderers for camera sensors.""" + +from __future__ import annotations + +import logging +from typing import Any, cast + +from isaaclab.sensors.camera.camera_data import CameraData + +from .base_renderer import BaseRenderer +from .renderer import Renderer +from .renderer_cfg import RendererCfg + +logger = logging.getLogger(__name__) + + +class RenderContext: + """Holds :class:`BaseRenderer` instances for all :class:`Camera` sensors in a simulation. + + A camera reuses a backend when a prior camera registered a config equal under ``==`` (value + equality) and the same concrete ``RendererCfg`` subclass. A distinct ``RendererCfg`` that + maps to a different implementation (e.g. Isaac RTX vs Newton) produces another backend; each + has :meth:`BaseRenderer.prepare_stage` run before use. + + :meth:`update_transforms` is invoked at most once per :meth:`get_physics_step_count` for the + context; + """ + + __slots__ = ( + "_renderer_entries", + "_prepared_renderer_ids", + "_prepared_num_envs", + "_last_transforms_step", + ) + + def __init__(self) -> None: + self._renderer_entries: list[tuple[RendererCfg, BaseRenderer]] = [] + self._prepared_renderer_ids: set[int] = set() + self._prepared_num_envs: int | None = None + self._last_transforms_step: int | None = None + + def get_renderer(self, cfg: RendererCfg) -> BaseRenderer: + """Return a backend for this configuration, reusing a matching instance if present. + + Lookups use ``==`` and concrete ``RendererCfg`` type, so :func:`hash` is not used (configs + are typically not hashable). + + Args: + cfg: Renderer configuration from the initializing camera. + + Returns: + A shared or newly created renderer backend. + """ + for stored_cfg, r in self._renderer_entries: + if type(stored_cfg) is type(cfg) and stored_cfg == cfg: + return r + new_renderer = cast(BaseRenderer, Renderer(cfg)) # type: ignore[misc] + self._renderer_entries.append((cfg, new_renderer)) + logger.info( + "Created new renderer for simulation: %s", + type(new_renderer).__name__, + ) + return new_renderer + + def ensure_prepare_stage(self, stage: Any, num_envs: int) -> None: + """Call :meth:`BaseRenderer.prepare_stage` for each registered backend (once per backend). + + If a new backend is added after the first :meth:`prepare_stage` call, this method ensures + that new backend is prepared for the same ``stage`` and ``num_envs`` when the camera + that owns it is initialized. + + Args: + stage: USD stage passed to each backend. + num_envs: Environment count. + + Raises: + RuntimeError: If :meth:`get_renderer` was never called, or ``num_envs`` disagrees with + a value already used for a prepared backend in this context. + """ + if not self._renderer_entries: + raise RuntimeError("get_renderer must be called at least once before ensure_prepare_stage.") + if self._prepared_num_envs is not None and self._prepared_num_envs != num_envs: + raise RuntimeError( + "RenderContext prepare_stage was used with a different num_envs " + f"({self._prepared_num_envs} vs {num_envs})." + ) + for _cfg, renderer in self._renderer_entries: + rid = id(renderer) + if rid not in self._prepared_renderer_ids: + renderer.prepare_stage(stage, num_envs) + self._prepared_renderer_ids.add(rid) + if self._prepared_num_envs is None: + self._prepared_num_envs = num_envs + + def update_transforms(self, physics_step_count: int) -> None: + """Call :meth:`BaseRenderer.update_transforms` on all backends (at most once per step).""" + if not self._renderer_entries: + return + if self._last_transforms_step == physics_step_count: + return + for _cfg, renderer in self._renderer_entries: + renderer.update_transforms() + self._last_transforms_step = physics_step_count + + def render_into_camera( + self, + renderer: BaseRenderer, + render_data: Any, + camera_data: CameraData, + physics_step_count: int, + ) -> None: + """Sync scene transforms, render, and read outputs into ``camera_data``.""" + self.update_transforms(physics_step_count) + renderer.render(render_data) + renderer.read_output(render_data, camera_data) + + def reset_stage_prepare_flag(self) -> None: + """Allow :meth:`ensure_prepare_stage` to run ``prepare_stage`` again (e.g. a new USD stage).""" + self._prepared_renderer_ids.clear() + self._prepared_num_envs = None + + def reset_transform_cadence(self) -> None: + """Clear per-step transform dedupe (e.g. a long pause with no physics).""" + self._last_transforms_step = None diff --git a/source/isaaclab/isaaclab/renderers/renderer.py b/source/isaaclab/isaaclab/renderers/renderer.py new file mode 100644 index 000000000000..3a39e19201fb --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/renderer.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory for creating renderer instances.""" + +from __future__ import annotations + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_renderer import BaseRenderer +from .renderer_cfg import RendererCfg + +# This is mapping of where backends live in the isaaclab_ package. +_RENDERER_TYPE_TO_BACKEND = {"isaac_rtx": "physx", "newton_warp": "newton", "ovrtx": "ov"} + + +class Renderer(FactoryBase, BaseRenderer): + """Factory for creating renderer instances.""" + + _backend_class_names = { + "physx": "IsaacRtxRenderer", + "newton": "NewtonWarpRenderer", + "ov": "OVRTXRenderer", + } + + @classmethod + def _get_backend(cls, cfg: RendererCfg, *args, **kwargs) -> str: + return _RENDERER_TYPE_TO_BACKEND.get(cfg.renderer_type, "physx") + + def __new__(cls, cfg: RendererCfg, *args, **kwargs) -> BaseRenderer: + """Create a new instance of a renderer based on the backend.""" + # The `FactoryBase` __new__ method will handle the logic and return + # an instance of the correct backend-specific renderer class. + result = super().__new__(cls, cfg, *args, **kwargs) + if not isinstance(result, BaseRenderer): + name = type(result).__name__ + bases = type(result).__bases__ + raise TypeError( + f"Backend renderer {name!r} must inherit from BaseRenderer, but it inherits from {bases!r}." + ) + return result diff --git a/source/isaaclab/isaaclab/renderers/renderer_cfg.py b/source/isaaclab/isaaclab/renderers/renderer_cfg.py new file mode 100644 index 000000000000..334fa3f070ae --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/renderer_cfg.py @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base configuration for renderers.""" + +from isaaclab.utils import configclass + + +@configclass +class RendererCfg: + """Configuration for a renderer.""" + + renderer_type: str = "default" diff --git a/source/isaaclab/isaaclab/scene/__init__.py b/source/isaaclab/isaaclab/scene/__init__.py index 4b614ea4bced..3c05d53e1517 100644 --- a/source/isaaclab/isaaclab/scene/__init__.py +++ b/source/isaaclab/isaaclab/scene/__init__.py @@ -25,5 +25,6 @@ :mod:`isaaclab.managers` sub-package for more details. """ -from .interactive_scene import InteractiveScene -from .interactive_scene_cfg import InteractiveSceneCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/scene/__init__.pyi b/source/isaaclab/isaaclab/scene/__init__.pyi new file mode 100644 index 000000000000..9a6543cc28ec --- /dev/null +++ b/source/isaaclab/isaaclab/scene/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "InteractiveScene", + "InteractiveSceneCfg", +] + +from .interactive_scene import InteractiveScene +from .interactive_scene_cfg import InteractiveSceneCfg diff --git a/source/isaaclab/isaaclab/scene/interactive_scene.py b/source/isaaclab/isaaclab/scene/interactive_scene.py index 46e5895687aa..ce744fe4bffe 100644 --- a/source/isaaclab/isaaclab/scene/interactive_scene.py +++ b/source/isaaclab/isaaclab/scene/interactive_scene.py @@ -3,36 +3,37 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import logging from collections.abc import Sequence -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from isaaclab_physx.assets import DeformableObject, SurfaceGripper import torch +import warp as wp -import carb -from isaacsim.core.cloner import GridCloner -from pxr import PhysxSchema +from pxr import Sdf import isaaclab.sim as sim_utils +from isaaclab import cloner from isaaclab.assets import ( Articulation, ArticulationCfg, AssetBaseCfg, - DeformableObject, - DeformableObjectCfg, RigidObject, RigidObjectCfg, RigidObjectCollection, RigidObjectCollectionCfg, - SurfaceGripper, - SurfaceGripperCfg, ) +from isaaclab.physics.scene_data_requirements import aggregate_requirements, resolve_scene_data_requirements from isaaclab.sensors import ContactSensorCfg, FrameTransformerCfg, SensorBase, SensorBaseCfg from isaaclab.sim import SimulationContext from isaaclab.sim.utils.stage import get_current_stage, get_current_stage_id -from isaaclab.sim.views import XformPrimView +from isaaclab.sim.views import FrameView from isaaclab.terrains import TerrainImporter, TerrainImporterCfg -from isaaclab.utils.version import get_isaac_sim_version # Note: This is a temporary import for the VisuoTactileSensorCfg class. # It will be removed once the VisuoTactileSensor class is added to the core Isaac Lab framework. @@ -137,81 +138,75 @@ def __init__(self, cfg: InteractiveSceneCfg): self.sim = SimulationContext.instance() self.stage = get_current_stage() self.stage_id = get_current_stage_id() + self.physics_backend = self.sim.physics_manager.__name__.lower() + requested_viz_types = set(self.sim.resolve_visualizer_types()) + if self.physics_backend.startswith("ovphysx"): + from isaaclab_ovphysx.cloner import ovphysx_replicate + + physics_clone_fn = ovphysx_replicate + elif self.physics_backend.startswith("physx"): + from isaaclab_physx.cloner import physx_replicate + + physics_clone_fn = physx_replicate + elif self.physics_backend.startswith("newton"): + from isaaclab_newton.cloner import newton_physics_replicate + + physics_clone_fn = newton_physics_replicate + else: + raise ValueError(f"Unsupported physics backend: {self.physics_backend}") # physics scene path self._physics_scene_path = None # prepare cloner for environment replication - self.cloner = GridCloner(spacing=self.cfg.env_spacing, stage=self.stage) - self.cloner.define_base_env(self.env_ns) - self.env_prim_paths = self.cloner.generate_paths(f"{self.env_ns}/env", self.cfg.num_envs) + self.env_prim_paths = [f"{self.env_ns}/env_{i}" for i in range(self.cfg.num_envs)] + + self.cloner_cfg = cloner.TemplateCloneCfg( + clone_regex=self.env_regex_ns, + clone_in_fabric=self.cfg.clone_in_fabric, + device=self.device, + physics_clone_fn=physics_clone_fn, + # For ovphysx: env_1..N are created by physx.clone() in the physics + # runtime after add_usd(). USD replication of the asset hierarchy + # to env_1..N is skipped — only env_0 needs physics prims in the USD. + clone_usd=not self.physics_backend.startswith("ovphysx"), + ) + # create source prim self.stage.DefinePrim(self.env_prim_paths[0], "Xform") + self.stage.DefinePrim(self.cloner_cfg.template_root, "Xform") + self.env_fmt = self.env_regex_ns.replace(".*", "{}") # allocate env indices self._ALL_INDICES = torch.arange(self.cfg.num_envs, dtype=torch.long, device=self.device) - # when replicate_physics=False, we assume heterogeneous environments and clone the xforms first. - # this triggers per-object level cloning in the spawner. - if not self.cfg.replicate_physics: - # check version of Isaac Sim to determine whether clone_in_fabric is valid - if get_isaac_sim_version().major < 5: - # clone the env xform - env_origins = self.cloner.clone( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - replicate_physics=False, - copy_from_source=True, - enable_env_ids=( - self.cfg.filter_collisions if self.device != "cpu" else False - ), # this won't do anything because we are not replicating physics - ) - else: - # clone the env xform - env_origins = self.cloner.clone( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - replicate_physics=False, - copy_from_source=True, - enable_env_ids=( - self.cfg.filter_collisions if self.device != "cpu" else False - ), # this won't do anything because we are not replicating physics - clone_in_fabric=self.cfg.clone_in_fabric, - ) - self._default_env_origins = torch.tensor(env_origins, device=self.device, dtype=torch.float32) - else: - # otherwise, environment origins will be initialized during cloning at the end of environment creation - self._default_env_origins = None + self._default_env_origins, _ = cloner.grid_transforms(self.num_envs, self.cfg.env_spacing, device=self.device) + # copy empty prim of env_0 to env_1, env_2, ..., env_{num_envs-1} with correct location. + # Suspend Fabric's USD notice listener: scene-init is followed by ``SimulationContext.reset``, + # which does the Fabric resync naturally — re-enabling here would just trigger a redundant batch. + # Note: ``restore=False`` means the listener stays disabled past this ``with`` block — through + # ``_add_entities_from_cfg`` and ``clone_environments`` below — until ``SimulationContext.reset`` + # re-enables it. The nested suspension inside ``clone_environments`` becomes a no-op as a result. + with cloner.disabled_fabric_change_notifies(self.stage, restore=False): + cloner.usd_replicate( + self.stage, + [self.env_fmt.format(0)], + [self.env_fmt], + self._ALL_INDICES, + positions=self._default_env_origins, + ) self._global_prim_paths = list() - if self._is_scene_setup_from_cfg(): - # add entities from config + has_scene_cfg_entities = self._is_scene_setup_from_cfg() + if has_scene_cfg_entities: self._add_entities_from_cfg() - # clone environments on a global scope if environment is homogeneous - if self.cfg.replicate_physics: - self.clone_environments(copy_from_source=False) - # replicate physics if we have more than one environment - # this is done to make scene initialization faster at play time - if self.cfg.replicate_physics and self.cfg.num_envs > 1: - # check version of Isaac Sim to determine whether clone_in_fabric is valid - if get_isaac_sim_version().major < 5: - self.cloner.replicate_physics( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - base_env_path=self.env_ns, - root_path=self.env_regex_ns.replace(".*", ""), - enable_env_ids=self.cfg.filter_collisions if self.device != "cpu" else False, - ) - else: - self.cloner.replicate_physics( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - base_env_path=self.env_ns, - root_path=self.env_regex_ns.replace(".*", ""), - enable_env_ids=self.cfg.filter_collisions if self.device != "cpu" else False, - clone_in_fabric=self.cfg.clone_in_fabric, - ) - # since env_ids is only applicable when replicating physics, we have to fallback to the previous method - # to filter collisions if replicate_physics is not enabled - # additionally, env_ids is only supported in GPU simulation - if (not self.cfg.replicate_physics and self.cfg.filter_collisions) or self.device == "cpu": + # Aggregate scene-data requirements from declared visualizers and constructed sensors, + # then publish to ``SimulationContext`` so downstream providers (constructed later by + # :meth:`SimulationContext.initialize_visualizers`) see the full picture in one read. + self._aggregate_scene_data_requirements(requested_viz_types) + + if has_scene_cfg_entities: + self.clone_environments(copy_from_source=(not self.cfg.replicate_physics)) + # Collision filtering is PhysX-specific (PhysxSchema.PhysxSceneAPI) + # Intentionally matches both physx and ovphysx (both are PhysX-based) + if self.cfg.filter_collisions and "physx" in self.physics_backend: self.filter_collisions(self._global_prim_paths) def clone_environments(self, copy_from_source: bool = False): @@ -222,57 +217,76 @@ def clone_environments(self, copy_from_source: bool = False): If True, clones are independent copies of the source prim and won't reflect its changes (start-up time may increase). Defaults to False. """ - # check if user spawned different assets in individual environments - # this flag will be None if no multi asset is spawned - carb_settings_iface = carb.settings.get_settings() - has_multi_assets = carb_settings_iface.get("/isaaclab/spawn/multi_assets") - if has_multi_assets and self.cfg.replicate_physics: - logger.warning( - "Varying assets might have been spawned under different environments." - " However, the replicate physics flag is enabled in the 'InteractiveScene' configuration." - " This may adversely affect PhysX parsing. We recommend disabling this property." - ) + # PhysX-only: set env id bit count for replicated physics. Newton handles env separation in its own API. + # Intentionally matches both physx and ovphysx (both are PhysX-based) + if self.cfg.replicate_physics and "physx" in self.physics_backend: + prim = self.stage.GetPrimAtPath("/physicsScene") + prim.CreateAttribute("physxScene:envIdInBoundsBitCount", Sdf.ValueTypeNames.Int).Set(4) + + # Suspend Fabric's USD notice listener around bulk authoring (re-entrant with the inner + # call inside :func:`clone_from_template`). ``restore=False`` because the downstream + # ``SimulationContext.reset`` does the Fabric resync — re-enabling here would batch-resync + # everything we just authored, which is slower than the unsuppressed baseline. + with cloner.disabled_fabric_change_notifies(self.stage, restore=False): + if self._is_scene_setup_from_cfg(): + self.cloner_cfg.clone_physics = not copy_from_source + plans = cloner.clone_from_template( + self.stage, num_clones=self.num_envs, template_clone_cfg=self.cloner_cfg + ) + else: + mapping = torch.ones((1, self.num_envs), device=self.device, dtype=torch.bool) + replicate_args = ( + [self.env_fmt.format(0)], + [self.env_fmt], + self._ALL_INDICES, + mapping, + ) - # check version of Isaac Sim to determine whether clone_in_fabric is valid - if get_isaac_sim_version().major < 5: - # clone the environment - env_origins = self.cloner.clone( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - replicate_physics=self.cfg.replicate_physics, - copy_from_source=copy_from_source, - enable_env_ids=( - self.cfg.filter_collisions if self.device != "cpu" else False - ), # this automatically filters collisions between environments - ) - else: - # clone the environment - env_origins = self.cloner.clone( - source_prim_path=self.env_prim_paths[0], - prim_paths=self.env_prim_paths, - replicate_physics=self.cfg.replicate_physics, - copy_from_source=copy_from_source, - enable_env_ids=( - self.cfg.filter_collisions if self.device != "cpu" else False - ), # this automatically filters collisions between environments - clone_in_fabric=self.cfg.clone_in_fabric, - ) + if not copy_from_source and self.cloner_cfg.physics_clone_fn is not None: + self.cloner_cfg.physics_clone_fn( + self.stage, *replicate_args, positions=self._default_env_origins, device=self.cloner_cfg.device + ) + if self.cloner_cfg.clone_usd: + cloner.usd_replicate(self.stage, *replicate_args, positions=self._default_env_origins) + # Synthesize a single trivial ClonePlan so consumers (scene data providers, + # pointcloud samplers, etc.) get a uniform interface regardless of whether + # the scene was authored via prototypes or by hand under env_0. + plans = { + self.env_fmt: cloner.ClonePlan( + dest_template=self.env_fmt, + prototype_paths=[self.env_fmt.format(0)], + clone_mask=mapping, + ) + } - # since env_ids is only applicable when replicating physics, we have to fallback to the previous method - # to filter collisions if replicate_physics is not enabled - # additionally, env_ids is only supported in GPU simulation - if (not self.cfg.replicate_physics and self.cfg.filter_collisions) or self.device == "cpu": - # if scene is specified through cfg, this is already taken care of - if not self._is_scene_setup_from_cfg(): - logger.warning( - "Collision filtering can only be automatically enabled when replicate_physics=True and using GPU" - " simulation. Please call scene.filter_collisions(global_prim_paths) to filter collisions across" - " environments." - ) + # Publish to ``SimulationContext`` (the canonical owner). The :attr:`clone_plans` + # property below forwards reads back through ``sim.get_clone_plans()`` so consumers + # holding a scene reference still see the published plans without a duplicate cache. + self.sim.set_clone_plans(plans) + + def _aggregate_scene_data_requirements(self, visualizer_types=()) -> None: + """Aggregate scene-data requirements from visualizers and sensor renderers. - # in case of heterogeneous cloning, the env origins is specified at init - if self._default_env_origins is None: - self._default_env_origins = torch.tensor(env_origins, device=self.device, dtype=torch.float32) + Runs once after :meth:`_add_entities_from_cfg` so all sensors are constructed and + their renderer types are visible. Pushes the merged :class:`SceneDataRequirement` to + :class:`SimulationContext` for later consumption by the scene data provider. + """ + discovered_req = resolve_scene_data_requirements( + visualizer_types=visualizer_types, + renderer_types=self._sensor_renderer_types(), + ) + current_req = self.sim.get_scene_data_requirements() + requirements = aggregate_requirements((current_req, discovered_req)) + if requirements != current_req: + self.sim.update_scene_data_requirements(requirements) + + def _sensor_renderer_types(self) -> list[str]: + """Return renderer type names used by scene sensors (skipping any without a renderer cfg).""" + return [ + getattr(rcfg, "renderer_type", "default") + for s in self._sensors.values() + if (rcfg := getattr(getattr(s, "cfg", None), "renderer_cfg", None)) is not None + ] def filter_collisions(self, global_prim_paths: list[str] | None = None): """Filter environments collisions. @@ -300,7 +314,8 @@ def filter_collisions(self, global_prim_paths: list[str] | None = None): self._global_prim_paths += global_prim_paths # filter collisions within each environment instance - self.cloner.filter_collisions( + cloner.filter_collisions( + self.stage, self.physics_scene_path, "/World/collisions", self.env_prim_paths, @@ -326,7 +341,7 @@ def physics_scene_path(self) -> str: """The path to the USD Physics Scene.""" if self._physics_scene_path is None: for prim in self.stage.Traverse(): - if prim.HasAPI(PhysxSchema.PhysxSceneAPI): + if "PhysxSceneAPI" in prim.GetAppliedSchemas(): self._physics_scene_path = prim.GetPrimPath().pathString logger.info(f"Physics scene prim path: {self._physics_scene_path}") break @@ -412,11 +427,23 @@ def surface_grippers(self) -> dict[str, SurfaceGripper]: return self._surface_grippers @property - def extras(self) -> dict[str, XformPrimView]: + def clone_plans(self) -> dict[str, cloner.ClonePlan]: + """Per-group clone plans produced by :meth:`clone_environments`. + + Forwards to :meth:`SimulationContext.get_clone_plans`, which is the canonical owner. + Keyed by each group's destination path template + (e.g. ``"/World/envs/env_{}/Object"``); the value records the prototype prim paths + and the per-env prototype assignment mask. Empty until :meth:`clone_environments` + runs, and (for the cfg path) empty when the scene cfg has no template prototypes. + """ + return self.sim.get_clone_plans() + + @property + def extras(self) -> dict[str, FrameView]: """A dictionary of miscellaneous simulation objects that neither inherit from assets nor sensors. The keys are the names of the miscellaneous objects, and the values are the - :class:`~isaaclab.sim.views.XformPrimView` instances of the corresponding prims. + :class:`~isaaclab.sim.views.FrameView` instances of the corresponding prims. As an example, lights or other props in the scene that do not have any attributes or properties that you want to alter at runtime can be added to this dictionary. @@ -482,6 +509,11 @@ def update(self, dt: float) -> None: Args: dt: The amount of time passed from last :meth:`update` call. """ + # Scene-wide renderer transform sync once per step when all sensors update, + # so per-camera fetches do not own this concern (deduped inside RenderContext). + if not self.cfg.lazy_sensor_update: + self.sim.render_context.update_transforms(self.sim.get_physics_step_count()) + # -- assets for articulation in self._articulations.values(): articulation.update(dt) @@ -523,38 +555,39 @@ def reset_to( for asset_name, articulation in self._articulations.items(): asset_state = state["articulation"][asset_name] # root state - root_pose = asset_state["root_pose"].clone() + root_pose = asset_state["root_pose"].clone().to(self.device) if is_relative: root_pose[:, :3] += self.env_origins[env_ids] - root_velocity = asset_state["root_velocity"].clone() - articulation.write_root_pose_to_sim(root_pose, env_ids=env_ids) - articulation.write_root_velocity_to_sim(root_velocity, env_ids=env_ids) + root_velocity = asset_state["root_velocity"].clone().to(self.device) + articulation.write_root_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + articulation.write_root_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) # joint state - joint_position = asset_state["joint_position"].clone() - joint_velocity = asset_state["joint_velocity"].clone() - articulation.write_joint_state_to_sim(joint_position, joint_velocity, env_ids=env_ids) + joint_position = asset_state["joint_position"].clone().to(self.device) + joint_velocity = asset_state["joint_velocity"].clone().to(self.device) + articulation.write_joint_position_to_sim_index(position=joint_position, env_ids=env_ids) + articulation.write_joint_velocity_to_sim_index(velocity=joint_velocity, env_ids=env_ids) # FIXME: This is not generic as it assumes PD control over the joints. # This assumption does not hold for effort controlled joints. - articulation.set_joint_position_target(joint_position, env_ids=env_ids) - articulation.set_joint_velocity_target(joint_velocity, env_ids=env_ids) + articulation.set_joint_position_target_index(target=joint_position, env_ids=env_ids) + articulation.set_joint_velocity_target_index(target=joint_velocity, env_ids=env_ids) # deformable objects for asset_name, deformable_object in self._deformable_objects.items(): asset_state = state["deformable_object"][asset_name] - nodal_position = asset_state["nodal_position"].clone() + nodal_position = asset_state["nodal_position"].clone().to(self.device) if is_relative: nodal_position[:, :3] += self.env_origins[env_ids] - nodal_velocity = asset_state["nodal_velocity"].clone() + nodal_velocity = asset_state["nodal_velocity"].clone().to(self.device) deformable_object.write_nodal_pos_to_sim(nodal_position, env_ids=env_ids) deformable_object.write_nodal_velocity_to_sim(nodal_velocity, env_ids=env_ids) # rigid objects for asset_name, rigid_object in self._rigid_objects.items(): asset_state = state["rigid_object"][asset_name] - root_pose = asset_state["root_pose"].clone() + root_pose = asset_state["root_pose"].clone().to(self.device) if is_relative: root_pose[:, :3] += self.env_origins[env_ids] - root_velocity = asset_state["root_velocity"].clone() - rigid_object.write_root_pose_to_sim(root_pose, env_ids=env_ids) - rigid_object.write_root_velocity_to_sim(root_velocity, env_ids=env_ids) + root_velocity = asset_state["root_velocity"].clone().to(self.device) + rigid_object.write_root_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + rigid_object.write_root_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) # surface grippers for asset_name, surface_gripper in self._surface_grippers.items(): asset_state = state["gripper"][asset_name] @@ -620,35 +653,35 @@ def get_state(self, is_relative: bool = False) -> dict[str, dict[str, dict[str, state["articulation"] = dict() for asset_name, articulation in self._articulations.items(): asset_state = dict() - asset_state["root_pose"] = articulation.data.root_pose_w.clone() + asset_state["root_pose"] = articulation.data.root_pose_w.torch.clone() if is_relative: asset_state["root_pose"][:, :3] -= self.env_origins - asset_state["root_velocity"] = articulation.data.root_vel_w.clone() - asset_state["joint_position"] = articulation.data.joint_pos.clone() - asset_state["joint_velocity"] = articulation.data.joint_vel.clone() + asset_state["root_velocity"] = articulation.data.root_vel_w.torch.clone() + asset_state["joint_position"] = articulation.data.joint_pos.torch.clone() + asset_state["joint_velocity"] = articulation.data.joint_vel.torch.clone() state["articulation"][asset_name] = asset_state # deformable objects state["deformable_object"] = dict() for asset_name, deformable_object in self._deformable_objects.items(): asset_state = dict() - asset_state["nodal_position"] = deformable_object.data.nodal_pos_w.clone() + asset_state["nodal_position"] = deformable_object.data.nodal_pos_w.torch.clone() if is_relative: asset_state["nodal_position"][:, :3] -= self.env_origins - asset_state["nodal_velocity"] = deformable_object.data.nodal_vel_w.clone() + asset_state["nodal_velocity"] = deformable_object.data.nodal_vel_w.torch.clone() state["deformable_object"][asset_name] = asset_state # rigid objects state["rigid_object"] = dict() for asset_name, rigid_object in self._rigid_objects.items(): asset_state = dict() - asset_state["root_pose"] = rigid_object.data.root_pose_w.clone() + asset_state["root_pose"] = rigid_object.data.root_pose_w.torch.clone() if is_relative: asset_state["root_pose"][:, :3] -= self.env_origins - asset_state["root_velocity"] = rigid_object.data.root_vel_w.clone() + asset_state["root_velocity"] = rigid_object.data.root_vel_w.torch.clone() state["rigid_object"][asset_name] = asset_state # surface grippers state["gripper"] = dict() for asset_name, gripper in self._surface_grippers.items(): - state["gripper"][asset_name] = gripper.state.clone() + state["gripper"][asset_name] = wp.to_torch(gripper.state).clone() return state """ @@ -721,19 +754,48 @@ def _is_scene_setup_from_cfg(self) -> bool: for asset_name, asset_cfg in self.cfg.__dict__.items() ) - def _add_entities_from_cfg(self): + def _add_entities_from_cfg(self): # noqa: C901 """Add scene entities from the config.""" + from isaaclab_physx.assets import DeformableObjectCfg, SurfaceGripperCfg # noqa: PLC0415 + # store paths that are in global collision filter self._global_prim_paths = list() - # parse the entire scene config and resolve regex - for asset_name, asset_cfg in self.cfg.__dict__.items(): - # skip keywords - # note: easier than writing a list of keywords: [num_envs, env_spacing, lazy_sensor_update] - if asset_name in InteractiveSceneCfg.__dataclass_fields__ or asset_cfg is None: - continue - # resolve regex + # Process non-sensor entities before sensors so that asset prims exist in the template + # when sensors (e.g. cameras attached to robot links) need to spawn under them. + all_items = [ + (k, v) + for k, v in self.cfg.__dict__.items() + if k not in InteractiveSceneCfg.__dataclass_fields__ and v is not None + ] + ordered_items = [(k, v) for k, v in all_items if not isinstance(v, SensorBaseCfg)] + [ + (k, v) for k, v in all_items if isinstance(v, SensorBaseCfg) + ] + + for asset_name, asset_cfg in ordered_items: + # Resolve old-style preset wrappers: configclass with a ``presets`` dict and a ``'default'`` key. + # These are multi-backend selector objects (e.g. VelocityEnvContactSensorCfg) that hold several + # alternative asset configs in a dict and are not themselves asset configs. + if hasattr(asset_cfg, "presets") and isinstance(asset_cfg.presets, dict) and "default" in asset_cfg.presets: + asset_cfg = asset_cfg.presets["default"] + setattr(self.cfg, asset_name, asset_cfg) + # resolve prim_path with env regex if hasattr(asset_cfg, "prim_path"): asset_cfg.prim_path = asset_cfg.prim_path.format(ENV_REGEX_NS=self.env_regex_ns) + # set spawn_path on spawner if cloning is needed + if hasattr(asset_cfg, "spawn") and asset_cfg.spawn is not None: + if hasattr(asset_cfg, "prim_path") and self.env_ns in asset_cfg.prim_path: + template_base = asset_cfg.prim_path.replace(self.env_regex_ns, self.cloner_cfg.template_root) + proto_id = self.cloner_cfg.template_prototype_identifier + if isinstance(asset_cfg, SensorBaseCfg): + # Sensor may be nested under a proto_asset_N prim (e.g. a camera on a robot + # link). Search for the actual template location so spawning succeeds even + # though the parent asset lives at template_root//proto_asset_0/... + asset_cfg.spawn.spawn_path = self._resolve_sensor_template_spawn_path(template_base, proto_id) + else: + asset_cfg.spawn.spawn_path = f"{template_base}/{proto_id}_.*" + else: + # No cloning - spawn directly at prim_path + asset_cfg.spawn.spawn_path = asset_cfg.prim_path # create asset if isinstance(asset_cfg, TerrainImporterCfg): # terrains are special entities since they define environment origins @@ -749,6 +811,16 @@ def _add_entities_from_cfg(self): elif isinstance(asset_cfg, RigidObjectCollectionCfg): for rigid_object_cfg in asset_cfg.rigid_objects.values(): rigid_object_cfg.prim_path = rigid_object_cfg.prim_path.format(ENV_REGEX_NS=self.env_regex_ns) + # set spawn_path on spawner if cloning is needed + if hasattr(rigid_object_cfg, "spawn") and rigid_object_cfg.spawn is not None: + if self.env_ns in rigid_object_cfg.prim_path: + spawn_tmpl = rigid_object_cfg.prim_path.replace( + self.env_regex_ns, self.cloner_cfg.template_root + ) + proto_id = self.cloner_cfg.template_prototype_identifier + rigid_object_cfg.spawn.spawn_path = f"{spawn_tmpl}/{proto_id}_.*" + else: + rigid_object_cfg.spawn.spawn_path = rigid_object_cfg.prim_path self._rigid_object_collections[asset_name] = asset_cfg.class_type(asset_cfg) for rigid_object_cfg in asset_cfg.rigid_objects.values(): if hasattr(rigid_object_cfg, "collision_group") and rigid_object_cfg.collision_group == -1: @@ -766,10 +838,17 @@ def _add_entities_from_cfg(self): updated_target_frames.append(target_frame) asset_cfg.target_frames = updated_target_frames elif isinstance(asset_cfg, ContactSensorCfg): - updated_filter_prim_paths_expr = [] - for filter_prim_path in asset_cfg.filter_prim_paths_expr: - updated_filter_prim_paths_expr.append(filter_prim_path.format(ENV_REGEX_NS=self.env_regex_ns)) - asset_cfg.filter_prim_paths_expr = updated_filter_prim_paths_expr + asset_cfg.filter_prim_paths_expr = [ + p.format(ENV_REGEX_NS=self.env_regex_ns) for p in asset_cfg.filter_prim_paths_expr + ] + if hasattr(asset_cfg, "sensor_shape_prim_expr") and asset_cfg.sensor_shape_prim_expr: + asset_cfg.sensor_shape_prim_expr = [ + p.format(ENV_REGEX_NS=self.env_regex_ns) for p in asset_cfg.sensor_shape_prim_expr + ] + if hasattr(asset_cfg, "filter_shape_prim_expr") and asset_cfg.filter_shape_prim_expr: + asset_cfg.filter_shape_prim_expr = [ + p.format(ENV_REGEX_NS=self.env_regex_ns) for p in asset_cfg.filter_shape_prim_expr + ] elif isinstance(asset_cfg, VisuoTactileSensorCfg): if hasattr(asset_cfg, "camera_cfg") and asset_cfg.camera_cfg is not None: asset_cfg.camera_cfg.prim_path = asset_cfg.camera_cfg.prim_path.format( @@ -788,17 +867,54 @@ def _add_entities_from_cfg(self): # manually spawn asset if asset_cfg.spawn is not None: asset_cfg.spawn.func( - asset_cfg.prim_path, + asset_cfg.spawn.spawn_path, asset_cfg.spawn, translation=asset_cfg.init_state.pos, orientation=asset_cfg.init_state.rot, ) # store xform prim view corresponding to this asset # all prims in the scene are Xform prims (i.e. have a transform component) - self._extras[asset_name] = XformPrimView(asset_cfg.prim_path, device=self.device, stage=self.stage) + self._extras[asset_name] = FrameView(asset_cfg.prim_path, device=self.device, stage=self.stage) else: raise ValueError(f"Unknown asset config type for {asset_name}: {asset_cfg}") + # store global collision paths if hasattr(asset_cfg, "collision_group") and asset_cfg.collision_group == -1: asset_paths = sim_utils.find_matching_prim_paths(asset_cfg.prim_path) self._global_prim_paths += asset_paths + + def _resolve_sensor_template_spawn_path(self, template_base: str, proto_id: str) -> str: + """Resolve the actual template spawn path for a sensor nested under a proto_asset prim. + + Sensors parented to robot links live inside ``proto_asset_0`` rather than directly under + the template root. For example, a wrist camera at + ``/World/template/Robot/panda_hand/wrist_cam`` is actually spawned at + ``/World/template/Robot/proto_asset_0/panda_hand/wrist_cam``. + + This method inserts a ``proto_id_.*`` wildcard one level below the template root and + searches for the concrete parent prim so the camera spawner can find it. + + Args: + template_base: Template path derived by replacing the env regex with the template root. + Example: ``/World/template/Robot/panda_hand/wrist_cam``. + proto_id: Prototype identifier prefix (e.g. ``proto_asset``). + + Returns: + Concrete spawn path (e.g. ``/World/template/Robot/proto_asset_0/panda_hand/wrist_cam``) + if the parent is found, otherwise ``template_base/proto_id_.*`` as a fallback. + """ + template_root = self.cloner_cfg.template_root + # rel = e.g. "Robot/panda_hand/wrist_cam" + rel = template_base[len(template_root) + 1 :] + # asset = "Robot", remainder = "panda_hand/wrist_cam" + asset, _, remainder = rel.partition("/") + if not remainder: + return f"{template_base}/{proto_id}_.*" + + # parent = "panda_hand", leaf = "wrist_cam" + parent, _, leaf = remainder.rpartition("/") + search = ( + f"{template_root}/{asset}/{proto_id}_.*/{parent}" if parent else f"{template_root}/{asset}/{proto_id}_.*" + ) + found = sim_utils.find_matching_prim_paths(search) + return f"{found[0]}/{leaf}" if found else f"{template_base}/{proto_id}_.*" diff --git a/source/isaaclab/isaaclab/sensors/__init__.py b/source/isaaclab/isaaclab/sensors/__init__.py index f0a5719e5056..717fc4a7163c 100644 --- a/source/isaaclab/isaaclab/sensors/__init__.py +++ b/source/isaaclab/isaaclab/sensors/__init__.py @@ -26,19 +26,17 @@ +---------------------+---------------------------+---------------------------------------------------------------+ | Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists | +---------------------+---------------------------+---------------------------------------------------------------+ -| Ray Caster | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | +| Ray Caster | /World/robot/base/raycast | ``spawn`` creates an Xform leaf; else the leaf must exist | +---------------------+---------------------------+---------------------------------------------------------------+ | Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | +---------------------+---------------------------+---------------------------------------------------------------+ | Imu | /World/robot/base | Leaf exists and is a physics body (Rigid Body) | +---------------------+---------------------------+---------------------------------------------------------------+ +| Pva | /World/robot/base | Leaf exists and is a physics body (Rigid Body) | ++---------------------+---------------------------+---------------------------------------------------------------+ """ -from .camera import * # noqa: F401, F403 -from .contact_sensor import * # noqa: F401, F403 -from .frame_transformer import * # noqa: F401 -from .imu import * # noqa: F401, F403 -from .ray_caster import * # noqa: F401, F403 -from .sensor_base import SensorBase # noqa: F401 -from .sensor_base_cfg import SensorBaseCfg # noqa: F401 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/__init__.pyi b/source/isaaclab/isaaclab/sensors/__init__.pyi new file mode 100644 index 000000000000..1bd092de46e6 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/__init__.pyi @@ -0,0 +1,112 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "SensorBase", + "SensorBaseCfg", + "Camera", + "CameraCfg", + "CameraData", + "RenderBufferKind", + "RenderBufferSpec", + "TiledCamera", + "TiledCameraCfg", + "transform_points", + "create_pointcloud_from_depth", + "create_pointcloud_from_rgbd", + "save_images_to_file", + "BaseContactSensor", + "BaseContactSensorData", + "ContactSensor", + "ContactSensorCfg", + "ContactSensorData", + "BaseFrameTransformer", + "BaseFrameTransformerData", + "FrameTransformer", + "FrameTransformerCfg", + "OffsetCfg", + "FrameTransformerData", + "BaseImu", + "BaseImuData", + "Imu", + "ImuCfg", + "ImuData", + "BaseJointWrenchSensor", + "BaseJointWrenchSensorData", + "JointWrenchSensor", + "JointWrenchSensorCfg", + "JointWrenchSensorData", + "BasePva", + "BasePvaData", + "Pva", + "PvaCfg", + "PvaData", + "MultiMeshRayCaster", + "MultiMeshRayCasterCamera", + "MultiMeshRayCasterCameraCfg", + "MultiMeshRayCasterCameraData", + "MultiMeshRayCasterCfg", + "MultiMeshRayCasterData", + "RayCaster", + "RayCasterCamera", + "RayCasterCameraCfg", + "RayCasterCfg", + "RayCasterData", + "patterns", +] + +from .sensor_base import SensorBase +from .sensor_base_cfg import SensorBaseCfg +from .camera import ( + Camera, + CameraCfg, + CameraData, + RenderBufferKind, + RenderBufferSpec, + TiledCamera, + TiledCameraCfg, + transform_points, + create_pointcloud_from_depth, + create_pointcloud_from_rgbd, + save_images_to_file, +) +from .contact_sensor import ( + BaseContactSensor, + BaseContactSensorData, + ContactSensor, + ContactSensorCfg, + ContactSensorData, +) +from .frame_transformer import ( + BaseFrameTransformer, + BaseFrameTransformerData, + FrameTransformer, + FrameTransformerCfg, + OffsetCfg, + FrameTransformerData, +) +from .imu import BaseImu, BaseImuData, Imu, ImuCfg, ImuData +from .joint_wrench import ( + BaseJointWrenchSensor, + BaseJointWrenchSensorData, + JointWrenchSensor, + JointWrenchSensorCfg, + JointWrenchSensorData, +) +from .pva import BasePva, BasePvaData, Pva, PvaCfg, PvaData +from .ray_caster import ( + MultiMeshRayCaster, + MultiMeshRayCasterCamera, + MultiMeshRayCasterCameraCfg, + MultiMeshRayCasterCameraData, + MultiMeshRayCasterCfg, + MultiMeshRayCasterData, + RayCaster, + RayCasterCamera, + RayCasterCameraCfg, + RayCasterCfg, + RayCasterData, + patterns, +) diff --git a/source/isaaclab/isaaclab/sensors/camera/__init__.py b/source/isaaclab/isaaclab/sensors/camera/__init__.py index f2318067b586..8c863ac2d227 100644 --- a/source/isaaclab/isaaclab/sensors/camera/__init__.py +++ b/source/isaaclab/isaaclab/sensors/camera/__init__.py @@ -5,9 +5,6 @@ """Sub-module for camera wrapper around USD camera prim.""" -from .camera import Camera -from .camera_cfg import CameraCfg -from .camera_data import CameraData -from .tiled_camera import TiledCamera -from .tiled_camera_cfg import TiledCameraCfg -from .utils import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/camera/__init__.pyi b/source/isaaclab/isaaclab/sensors/camera/__init__.pyi new file mode 100644 index 000000000000..cac009dea0d4 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/camera/__init__.pyi @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Camera", + "CameraCfg", + "CameraData", + "RenderBufferKind", + "RenderBufferSpec", + "TiledCamera", + "TiledCameraCfg", + "transform_points", + "create_pointcloud_from_depth", + "create_pointcloud_from_rgbd", + "save_images_to_file", +] + +from .camera import Camera +from .camera_cfg import CameraCfg +from .camera_data import CameraData, RenderBufferKind, RenderBufferSpec +from .tiled_camera import TiledCamera +from .tiled_camera_cfg import TiledCameraCfg +from .utils import ( + transform_points, + create_pointcloud_from_depth, + create_pointcloud_from_rgbd, + save_images_to_file, +) diff --git a/source/isaaclab/isaaclab/sensors/camera/camera.py b/source/isaaclab/isaaclab/sensors/camera/camera.py index 26fd0713c715..be52668dbd6c 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera.py @@ -5,34 +5,31 @@ from __future__ import annotations -import json import logging -import re from collections.abc import Sequence -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Literal import numpy as np import torch -from packaging import version +import warp as wp -import carb -import omni.usd -from pxr import Sdf, UsdGeom +from pxr import UsdGeom import isaaclab.sim as sim_utils import isaaclab.utils.sensors as sensor_utils -from isaaclab.sim.views import XformPrimView +from isaaclab.app.settings_manager import get_settings_manager +from isaaclab.renderers import BaseRenderer +from isaaclab.renderers.camera_render_spec import CameraRenderSpec +from isaaclab.sim.views import FrameView from isaaclab.utils import to_camel_case -from isaaclab.utils.array import convert_to_torch from isaaclab.utils.math import ( convert_camera_frame_orientation_convention, create_rotation_matrix_from_view, quat_from_matrix, ) -from isaaclab.utils.version import get_isaac_sim_version from ..sensor_base import SensorBase -from .camera_data import CameraData +from .camera_data import CameraData, RenderBufferKind if TYPE_CHECKING: from .camera_cfg import CameraCfg @@ -51,9 +48,14 @@ class Camera(SensorBase): - ``"rgb"``: A 3-channel rendered color image. - ``"rgba"``: A 4-channel rendered color image with alpha channel. + - ``"albedo"``: A 4-channel fast diffuse-albedo only path for color image. + Note that this path will achieve the best performance when used alone or with depth only. - ``"distance_to_camera"``: An image containing the distance to camera optical center. - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - ``"depth"``: The same as ``"distance_to_image_plane"``. + - ``"simple_shading_constant_diffuse"``: Simple shading (constant diffuse) RGB approximation. + - ``"simple_shading_diffuse_mdl"``: Simple shading (diffuse MDL) RGB approximation. + - ``"simple_shading_full_mdl"``: Simple shading (full MDL) RGB approximation. - ``"normals"``: An image containing the local surface normal vectors at each pixel. - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - ``"semantic_segmentation"``: The semantic segmentation data. @@ -102,74 +104,44 @@ def __init__(self, cfg: CameraCfg): RuntimeError: If no camera prim is found at the given path. ValueError: If the provided data types are not supported by the camera. """ - # check if sensor path is valid - # note: currently we do not handle environment indices if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Sensor_[1,2]". - sensor_path = cfg.prim_path.split("/")[-1] - sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None - if sensor_path_is_regex: - raise RuntimeError( - f"Invalid prim path for the camera sensor: {self.cfg.prim_path}." - "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." - ) # perform check on supported data types self._check_supported_data_types(cfg) # initialize base class super().__init__(cfg) - # toggle rendering of rtx sensors as True - # this flag is read by SimulationContext to determine if rtx sensors should be rendered - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/isaaclab/render/rtx_sensors", True) - - # spawn the asset - if self.cfg.spawn is not None: - # compute the rotation offset - rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0) - rot_offset = convert_camera_frame_orientation_convention( - rot, origin=self.cfg.offset.convention, target="opengl" - ) - rot_offset = rot_offset.squeeze(0).cpu().numpy() - # ensure vertical aperture is set, otherwise replace with default for squared pixels - if self.cfg.spawn.vertical_aperture is None: - self.cfg.spawn.vertical_aperture = self.cfg.spawn.horizontal_aperture * self.cfg.height / self.cfg.width - # spawn the asset - self.cfg.spawn.func( - self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset - ) - # check that spawn was successful - matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) - if len(matching_prims) == 0: - raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") + # TODO(follow-up PR): move this flag flip out of Camera. The cleanest path is + # an apply_pre_reset_settings() hook on RendererCfg (default no-op) that + # IsaacRtxRendererCfg overrides to flip /isaaclab/render/rtx_sensors. The + # flag must be set pre-sim.reset() because SimulationContext.is_rendering + # and several env classes read it before the renderer's __init__ runs. + if self.cfg.renderer_cfg.renderer_type == "isaac_rtx": + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", True) + + # Compute camera orientation (convention conversion) and spawn + rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0) + rot_offset = convert_camera_frame_orientation_convention( + rot, origin=self.cfg.offset.convention, target="opengl" + ) + rot_offset = rot_offset.squeeze(0).cpu().numpy() + if self.cfg.spawn is not None and self.cfg.spawn.vertical_aperture is None: + self.cfg.spawn.vertical_aperture = self.cfg.spawn.horizontal_aperture * self.cfg.height / self.cfg.width + self._resolve_and_spawn("camera", translation=self.cfg.offset.pos, orientation=rot_offset) # UsdGeom Camera prim for the sensor self._sensor_prims: list[UsdGeom.Camera] = list() - # Create empty variables for storing output data - self._data = CameraData() - - # HACK: We need to disable instancing for semantic_segmentation and instance_segmentation_fast to work - # checks for Isaac Sim v4.5 as this issue exists there - if get_isaac_sim_version() == version.parse("4.5"): - if "semantic_segmentation" in self.cfg.data_types or "instance_segmentation_fast" in self.cfg.data_types: - logger.warning( - "Isaac Sim 4.5 introduced a bug in Camera and TiledCamera when outputting instance and semantic" - " segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets" - " will be disabled in the current workflow and may lead to longer load times and increased memory" - " usage." - ) - with Sdf.ChangeBlock(): - for prim in self.stage.Traverse(): - prim.SetInstanceable(False) + # Allocated in :meth:`_create_buffers` once the renderer's output contract is known. + self._data: CameraData | None = None + # Renderer and render data — assigned in _initialize_impl. + self._renderer: BaseRenderer | None = None + self._render_data = None def __del__(self): - """Unsubscribes from callbacks and detach from the replicator registry.""" + """Unsubscribes from callbacks and cleans up renderer resources.""" # unsubscribe callbacks super().__del__() - # delete from replicator registry - for _, annotators in self._rep_registry.items(): - for annotator, render_product_path in zip(annotators, self._render_product_paths): - annotator.detach([render_product_path]) - annotator = None + # cleanup render resources (renderer may be None if never initialized) + if self._renderer is not None: + self._renderer.cleanup(self._render_data) def __str__(self) -> str: """Returns: A string containing information about the instance.""" @@ -177,10 +149,6 @@ def __str__(self) -> str: return ( f"Camera @ '{self.cfg.prim_path}': \n" f"\tdata types : {list(self.data.output.keys())} \n" - f"\tsemantic filter : {self.cfg.semantic_filter}\n" - f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" - f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" - f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" f"\tupdate period (s): {self.cfg.update_period}\n" f"\tshape : {self.image_shape}\n" f"\tnumber of sensors : {self._view.count}" @@ -206,14 +174,6 @@ def frame(self) -> torch.tensor: """Frame number when the measurement took place.""" return self._frame - @property - def render_product_paths(self) -> list[str]: - """The path of the render products for the cameras. - - This can be used via replicator interfaces to attach to writes or external annotator registry. - """ - return self._render_product_paths - @property def image_shape(self) -> tuple[int, int]: """A tuple containing (height, width) of the camera sensor.""" @@ -272,11 +232,11 @@ def set_intrinsic_matrices( param_name = to_camel_case(param_name, to="CC") # get attribute from the class param_attr = getattr(sensor_prim, f"Get{param_name}Attr") - # set value - # note: We have to do it this way because the camera might be on a different - # layer (default cameras are on session layer), and this is the simplest - # way to set the property on the right layer. - omni.usd.set_prop_val(param_attr(), param_value) + # convert numpy scalar to Python float for USD compatibility (NumPy 2.0+) + if isinstance(param_value, np.floating): + param_value = float(param_value) + # set value using pure USD API + param_attr().Set(param_value) # update the internal buffers self._update_intrinsic_matrices(env_ids) @@ -306,7 +266,7 @@ def set_world_poses( Args: positions: The cartesian coordinates (in meters). Shape is (N, 3). Defaults to None, in which case the camera position in not changed. - orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). + orientations: The quaternion orientation in (x, y, z, w). Shape is (N, 4). Defaults to None, in which case the camera orientation in not changed. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. convention: The convention in which the poses are fed. Defaults to "ros". @@ -330,8 +290,16 @@ def set_world_poses( elif not isinstance(orientations, torch.Tensor): orientations = torch.tensor(orientations, device=self._device) orientations = convert_camera_frame_orientation_convention(orientations, origin=convention, target="opengl") - # set the pose - self._view.set_world_poses(positions, orientations, env_ids) + # convert torch tensors to warp arrays for the view + pos_wp = wp.from_torch(positions.contiguous()) if positions is not None else None + ori_wp = wp.from_torch(orientations.contiguous()) if orientations is not None else None + if env_ids is not None: + if not isinstance(env_ids, torch.Tensor): + env_ids = torch.tensor(env_ids, dtype=torch.int32, device=self._device) + idx_wp = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) + else: + idx_wp = None + self._view.set_world_poses(pos_wp, ori_wp, idx_wp) def set_world_poses_from_view( self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None @@ -354,22 +322,26 @@ def set_world_poses_from_view( up_axis = UsdGeom.GetStageUpAxis(self.stage) # set camera poses using the view orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, up_axis, device=self._device)) - self._view.set_world_poses(eyes, orientations, env_ids) + if not isinstance(env_ids, torch.Tensor): + env_ids = torch.tensor(env_ids, dtype=torch.int32, device=self._device) + idx_wp = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) + self._view.set_world_poses(wp.from_torch(eyes.contiguous()), wp.from_torch(orientations.contiguous()), idx_wp) """ Operations """ - def reset(self, env_ids: Sequence[int] | None = None): + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): if not self._is_initialized: raise RuntimeError( "Camera could not be initialized. Please ensure --enable_cameras is used to enable rendering." ) # reset the timestamps - super().reset(env_ids) - # resolve None - # note: cannot do smart indexing here since we do a for loop over data. - if env_ids is None: + super().reset(env_ids, env_mask) + # resolve to indices for torch indexing + if env_ids is None and env_mask is not None: + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + elif env_ids is None: env_ids = self._ALL_INDICES # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. @@ -384,29 +356,33 @@ def reset(self, env_ids: Sequence[int] | None = None): def _initialize_impl(self): """Initializes the sensor handles and internal buffers. - This function creates handles and registers the provided data types with the replicator registry to - be able to access the data from the sensor. It also initializes the internal buffers to store the data. + This function obtains the simulation-scoped :class:`~isaaclab.renderers.base_renderer.BaseRenderer` + from :attr:`~isaaclab.sim.simulation_context.SimulationContext.render_context` using the configured + :attr:`~isaaclab.sensors.camera.CameraCfg.renderer_cfg` and delegates all render-product + and annotator management to it. It also initializes the internal buffers to store the data. Raises: RuntimeError: If the number of camera prims in the view does not match the number of environments. - RuntimeError: If replicator was not found. + RuntimeError: Propagated from the renderer constructor when the active backend's + runtime requirements are not satisfied (e.g. the RTX backend requires the + simulation app to be launched with ``--enable_cameras``). """ - carb_settings_iface = carb.settings.get_settings() - if not carb_settings_iface.get("/isaaclab/cameras_enabled"): - raise RuntimeError( - "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" - " rendering." - ) - - import omni.replicator.core as rep - from omni.syntheticdata.scripts.SyntheticData import SyntheticData - # Initialize parent class super()._initialize_impl() - # Create a view for the sensor with Fabric enabled for fast pose queries, otherwise position will be stale. - self._view = XformPrimView( - self.cfg.prim_path, device=self._device, stage=self.stage, sync_usd_on_fabric_write=True - ) + + sim_ctx = sim_utils.SimulationContext.instance() + if sim_ctx is None: + raise RuntimeError("SimulationContext is not initialized.") + self._renderer = sim_ctx.render_context.get_renderer(self.cfg.renderer_cfg) + logger.info("Using renderer: %s", type(self._renderer).__name__) + + # Stage preprocessing must happen before creating the view because the view keeps + # references to prims located in the stage. + sim_ctx.render_context.ensure_prepare_stage(self.stage, self._num_envs) + + # Create a view for the sensor with Fabric enabled for fast pose queries. + # TODO: remove sync_usd_on_fabric_write=True once the GPU Fabric sync bug is fixed. + self._view = FrameView(self.cfg.prim_path, device=self._device, stage=self.stage, sync_usd_on_fabric_write=True) # Check that sizes are correct if self._view.count != self._num_envs: raise RuntimeError( @@ -419,10 +395,6 @@ def _initialize_impl(self): # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - # Attach the sensor data types to render node - self._render_product_paths: list[str] = list() - self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types} - # Convert all encapsulated prims to Camera for cam_prim in self._view.prims: # Obtain the prim path @@ -431,106 +403,51 @@ def _initialize_impl(self): if not cam_prim.IsA(UsdGeom.Camera): raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") # Add to list - sensor_prim = UsdGeom.Camera(cam_prim) - self._sensor_prims.append(sensor_prim) - - # Get render product - # From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path - render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height)) - if not isinstance(render_prod_path, str): - render_prod_path = render_prod_path.path - self._render_product_paths.append(render_prod_path) - - # Check if semantic types or semantic filter predicate is provided - if isinstance(self.cfg.semantic_filter, list): - semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*" - elif isinstance(self.cfg.semantic_filter, str): - semantic_filter_predicate = self.cfg.semantic_filter - else: - raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.") - # set the semantic filter predicate - # copied from rep.scripts.writes_default.basic_writer.py - SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate) - - # Iterate over each data type and create annotator - # TODO: This will move out of the loop once Replicator supports multiple render products within a single - # annotator, i.e.: rep_annotator.attach(self._render_product_paths) - for name in self.cfg.data_types: - # note: we are verbose here to make it easier to understand the code. - # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. - # if colorize is false, the data is returned as a uint32 image with ids as values. - if name == "semantic_segmentation": - init_params = { - "colorize": self.cfg.colorize_semantic_segmentation, - "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), - } - elif name == "instance_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_segmentation} - elif name == "instance_id_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} - else: - init_params = None + self._sensor_prims.append(UsdGeom.Camera(cam_prim)) - # Resolve device name - if "cuda" in self._device: - device_name = self._device.split(":")[0] - else: - device_name = "cpu" - - # Map special cases to their corresponding annotator names - special_cases = {"rgba": "rgb", "depth": "distance_to_image_plane"} - # Get the annotator name, falling back to the original name if not a special case - annotator_name = special_cases.get(name, name) - # Create the annotator node - rep_annotator = rep.AnnotatorRegistry.get_annotator(annotator_name, init_params, device=device_name) - - # attach annotator to render product - rep_annotator.attach(render_prod_path) - # add to registry - self._rep_registry[name].append(rep_annotator) + # View needs to exist before creating render data + cam_paths = tuple(cam_prim.GetPath().pathString for cam_prim in self._view.prims) + env_0_prefix = "/World/envs/env_0/" + rel_under_env0 = ( + cam_paths[0].removeprefix(env_0_prefix) if cam_paths and cam_paths[0].startswith(env_0_prefix) else "" + ) + device_str = self._device if isinstance(self._device, str) else str(self._device) + render_spec = CameraRenderSpec( + cfg=self.cfg, + device=device_str, + num_instances=self.num_instances, + camera_prim_paths=cam_paths, + view_count=self._view.count, + camera_path_relative_to_env_0=rel_under_env0, + ) + self._render_data = self._renderer.create_render_data(render_spec) - # Create internal buffers + # Create internal buffers (includes intrinsic matrix and pose init) self._create_buffers() - self._update_intrinsic_matrices(self._ALL_INDICES) - def _update_buffers_impl(self, env_ids: Sequence[int]): + def _update_buffers_impl(self, env_mask: wp.array): + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return # Increment frame count self._frame[env_ids] += 1 - # -- pose + # update latest camera pose if requested if self.cfg.update_latest_camera_pose: self._update_poses(env_ids) - # -- read the data from annotator registry - # check if buffer is called for the first time. If so then, allocate the memory - if len(self._data.output) == 0: - # this is the first time buffer is called - # it allocates memory for all the sensors - self._create_annotator_data() + + sim_ctx = sim_utils.SimulationContext.instance() + renderer = self._renderer + assert renderer is not None + if sim_ctx is not None: + sim_ctx.render_context.render_into_camera( + renderer, + self._render_data, + self._data, + sim_ctx.get_physics_step_count(), + ) else: - # iterate over all the data types - for name, annotators in self._rep_registry.items(): - # iterate over all the annotators - for index in env_ids: - # get the output - output = annotators[index].get_data() - # process the output - data, info = self._process_annotator_output(name, output) - # add data to output - self._data.output[name][index] = data - # add info to output - self._data.info[index][name] = info - # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, - # the replicator depth clipping is applied w.r.t. to the image plane which may result in values - # larger than the clipping range in the output. We apply an additional clipping to ensure values - # are within the clipping range for all the annotators. - if name == "distance_to_camera": - self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf - # apply defined clipping behavior - if ( - name == "distance_to_camera" or name == "distance_to_image_plane" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[name][torch.isinf(self._data.output[name])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) + renderer.render(self._render_data) + renderer.read_output(self._render_data, self._data) """ Private Helpers @@ -558,26 +475,47 @@ def _check_supported_data_types(self, cfg: CameraCfg): def _create_buffers(self): """Create buffers for storing data.""" - # create the data object - # -- pose of the cameras + specs = self._renderer.supported_output_types() + # Split requested names into known/unsupported; warn once for any the renderer can't produce. + known: list[str] = [] + unsupported: list[str] = [] + for name in self.cfg.data_types: + try: + if RenderBufferKind(name) in specs: + known.append(name) + else: + unsupported.append(name) + except ValueError: + unsupported.append(name) + if unsupported: + logger.warning( + "Renderer %s does not support the following requested data types and will not produce them: %s", + type(self._renderer).__name__, + unsupported, + ) + self._data = CameraData.allocate( + data_types=known, + height=self.cfg.height, + width=self.cfg.width, + num_views=self._view.count, + device=self._device, + supported_specs=specs, + ) + # Camera-frame state (pose / intrinsics) is owned by the camera, not + # the renderer: populate it on the freshly constructed ``CameraData``. + self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) + self._update_intrinsic_matrices(self._ALL_INDICES) self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) - # -- intrinsic matrix - self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) - self._data.image_shape = self.image_shape - # -- output data - # lazy allocation of data dictionary - # since the size of the output data is not known in advance, we leave it as None - # the memory will be allocated when the buffer() function is called for the first time. - self._data.output = {} - self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)] + self._update_poses(self._ALL_INDICES) + self._renderer.set_outputs(self._render_data, self._data.output) def _update_intrinsic_matrices(self, env_ids: Sequence[int]): """Compute camera's matrix of intrinsic parameters. Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. - Note: + .. note:: The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. The coordinates of points on the image plane are in the homogeneous representation. """ @@ -611,104 +549,26 @@ def _update_poses(self, env_ids: Sequence[int]): we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis. Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z). + A tuple of the position (in meters) and quaternion (x, y, z, w). """ # check camera prim exists if len(self._sensor_prims) == 0: raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.") - # get the poses from the view - poses, quat = self._view.get_world_poses(env_ids) - self._data.pos_w[env_ids] = poses + # get the poses from the view (returns ProxyArray, use .torch for tensor access) + if env_ids is not None and not isinstance(env_ids, torch.Tensor): + env_ids = torch.tensor(env_ids, dtype=torch.int32, device=self._device) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) if env_ids is not None else None + pos_w, quat_w = self._view.get_world_poses(indices) + self._data.pos_w[env_ids] = pos_w.torch self._data.quat_w_world[env_ids] = convert_camera_frame_orientation_convention( - quat, origin="opengl", target="world" + quat_w.torch, origin="opengl", target="world" ) - - def _create_annotator_data(self): - """Create the buffers to store the annotator data. - - We create a buffer for each annotator and store the data in a dictionary. Since the data - shape is not known beforehand, we create a list of buffers and concatenate them later. - - This is an expensive operation and should be called only once. - """ - # add data from the annotators - for name, annotators in self._rep_registry.items(): - # create a list to store the data for each annotator - data_all_cameras = list() - # iterate over all the annotators - for index in self._ALL_INDICES: - # get the output - output = annotators[index].get_data() - # process the output - data, info = self._process_annotator_output(name, output) - # append the data - data_all_cameras.append(data) - # store the info - self._data.info[index][name] = info - # concatenate the data along the batch dimension - self._data.output[name] = torch.stack(data_all_cameras, dim=0) - # NOTE: `distance_to_camera` and `distance_to_image_plane` are not both clipped to the maximum defined - # in the clipping range. The clipping is applied only to `distance_to_image_plane` and then both - # outputs are only clipped where the values in `distance_to_image_plane` exceed the threshold. To - # have a unified behavior between all cameras, we clip both outputs to the maximum value defined. - if name == "distance_to_camera": - self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf - # clip the data if needed - if ( - name == "distance_to_camera" or name == "distance_to_image_plane" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[name][torch.isinf(self._data.output[name])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) - - def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: - """Process the annotator output. - - This function is called after the data has been collected from all the cameras. - """ - # extract info and data from the output - if isinstance(output, dict): - data = output["data"] - info = output["info"] - else: - data = output - info = None - # convert data into torch tensor - data = convert_to_torch(data, device=self.device) - - # process data for different segmentation types - # Note: Replicator returns raw buffers of dtype int32 for segmentation types - # so we need to convert them to uint8 4 channel images for colorized types - height, width = self.image_shape - if name == "semantic_segmentation": - if self.cfg.colorize_semantic_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - elif name == "instance_segmentation_fast": - if self.cfg.colorize_instance_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - elif name == "instance_id_segmentation_fast": - if self.cfg.colorize_instance_id_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - # make sure buffer dimensions are consistent as (H, W, C) - elif name == "distance_to_camera" or name == "distance_to_image_plane" or name == "depth": - data = data.view(height, width, 1) - # we only return the RGB channels from the RGBA output if rgb is required - # normals return (x, y, z) in first 3 channels, 4th channel is unused - elif name == "rgb" or name == "normals": - data = data[..., :3] - # motion vectors return (x, y) in first 2 channels, 3rd and 4th channels are unused - elif name == "motion_vectors": - data = data[..., :2] - - # return the data and info - return data, info + # notify renderer of updated poses (guarded in case called before initialization completes) + if self._render_data is not None: + self._renderer.update_camera( + self._render_data, self._data.pos_w, self._data.quat_w_world, self._data.intrinsic_matrices + ) """ Internal simulation callbacks. @@ -716,6 +576,10 @@ def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tenso def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" + if self._renderer is not None and self._render_data is not None: + self._renderer.cleanup(self._render_data) + self._render_data = None + self._renderer = None # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them diff --git a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py index 8fd9f307d180..5ee6cf30b6f6 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py @@ -3,14 +3,34 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import MISSING -from typing import Literal +from __future__ import annotations +import warnings +from dataclasses import MISSING, field +from typing import TYPE_CHECKING, Literal + +from isaaclab_physx.renderers import IsaacRtxRendererCfg + +from isaaclab.renderers import RendererCfg from isaaclab.sim import FisheyeCameraCfg, PinholeCameraCfg from isaaclab.utils import configclass from ..sensor_base_cfg import SensorBaseCfg -from .camera import Camera + +if TYPE_CHECKING: + from .camera import Camera + +# Default values for the RTX-flavored fields kept on :class:`CameraCfg` for +# backward compatibility. These mirror the defaults on +# :class:`~isaaclab_physx.renderers.IsaacRtxRendererCfg`. +_DEPRECATED_RENDERER_FIELD_DEFAULTS: dict = { + "semantic_filter": "*:*", + "colorize_semantic_segmentation": True, + "colorize_instance_id_segmentation": True, + "colorize_instance_segmentation": True, + "semantic_segmentation_mapping": {}, + "depth_clipping_behavior": "none", +} @configclass @@ -24,8 +44,8 @@ class OffsetCfg: pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". @@ -37,12 +57,12 @@ class OffsetCfg: """ - class_type: type = Camera + class_type: type[Camera] | str = "{DIR}.camera:Camera" offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. - Note: + .. note:: The parent frame is the frame the sensor attaches to. For example, the parent frame of a camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. """ @@ -60,6 +80,11 @@ class OffsetCfg: - ``"max"``: Values are clipped to the maximum value. - ``"zero"``: Values are clipped to zero. - ``"none``: No clipping is applied. Values will be returned as ``inf``. + + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.depth_clipping_behavior` + on :attr:`renderer_cfg` instead. """ data_types: list[str] = ["rgb"] @@ -78,7 +103,7 @@ class OffsetCfg: """Whether to update the latest camera pose when fetching the camera's data. Defaults to False. If True, the latest camera pose is updated in the camera's data which will slow down performance - due to the use of :class:`XformPrimView`. + due to the use of :class:`FrameView`. If False, the pose of the camera during initialization is returned. """ @@ -101,6 +126,11 @@ class OffsetCfg: For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering + + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.semantic_filter` on + :attr:`renderer_cfg` instead. """ colorize_semantic_segmentation: bool = True @@ -108,6 +138,11 @@ class OffsetCfg: If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_semantic_segmentation` + on :attr:`renderer_cfg` instead. """ colorize_instance_id_segmentation: bool = True @@ -115,6 +150,11 @@ class OffsetCfg: If True, instance id segmentation is converted to an image where instance IDs are mapped to colors. and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_instance_id_segmentation` + on :attr:`renderer_cfg` instead. """ colorize_instance_segmentation: bool = True @@ -122,6 +162,11 @@ class OffsetCfg: If True, instance segmentation is converted to an image where instance IDs are mapped to colors. and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_instance_segmentation` + on :attr:`renderer_cfg` instead. """ semantic_segmentation_mapping: dict = {} @@ -140,4 +185,35 @@ class OffsetCfg: "class:robot": (61, 178, 255, 255), } + .. deprecated:: 4.6.22 + This field is RTX-specific. Set + :attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.semantic_segmentation_mapping` + on :attr:`renderer_cfg` instead. """ + + renderer_cfg: RendererCfg = field(default_factory=IsaacRtxRendererCfg) + """Renderer configuration for camera sensor.""" + + def __post_init__(self): + """Forward deprecated RTX-flavored fields onto :attr:`renderer_cfg`. + + Each deprecated field set to a non-default value emits a + :class:`DeprecationWarning` and is copied onto ``self.renderer_cfg`` + when that cfg defines the same-named field. + """ + # Forwarded by name: any same-named field on ``renderer_cfg`` will receive the value. + for field_name, default in _DEPRECATED_RENDERER_FIELD_DEFAULTS.items(): + value = getattr(self, field_name) + if value == default: + continue + warnings.warn( + f"CameraCfg.{field_name} is deprecated and will be removed in a future release." + f" Set this field on CameraCfg.renderer_cfg instead.", + DeprecationWarning, + stacklevel=2, + ) + if hasattr(self.renderer_cfg, field_name): + setattr(self.renderer_cfg, field_name, value) + # Reset to default so re-runs of ``__post_init__`` (via ``SensorBase.__init__``'s + # ``cfg.copy()``) don't re-forward and clobber a user-set ``renderer_cfg`` field. + setattr(self, field_name, default) diff --git a/source/isaaclab/isaaclab/sensors/camera/camera_data.py b/source/isaaclab/isaaclab/sensors/camera/camera_data.py index ec3288b04e92..6ec5484531b2 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera_data.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera_data.py @@ -3,13 +3,19 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from dataclasses import dataclass from typing import Any import torch +# Re-exported as part of the public isaaclab.sensors.camera API +from isaaclab.renderers.output_contract import RenderBufferKind, RenderBufferSpec from isaaclab.utils.math import convert_camera_frame_orientation_convention +__all__ = ["CameraData", "RenderBufferKind", "RenderBufferSpec"] + @dataclass class CameraData: @@ -26,7 +32,7 @@ class CameraData: """ quat_w_world: torch.Tensor = None - """Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame + """Quaternion orientation `(x, y, z, w)` of the sensor origin in world frame, following the world coordinate frame .. note:: World frame convention follows the camera aligned with forward axis +X and up axis +Z. @@ -56,7 +62,7 @@ class CameraData: .. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output """ - info: list[dict[str, Any]] = None + info: dict[str, Any] = None """The retrieved sensor info with sensor types as key. This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths. @@ -64,13 +70,87 @@ class CameraData: types, the info is empty. """ + @classmethod + def allocate( + cls, + data_types: list[str], + height: int, + width: int, + num_views: int, + device: torch.device | str, + supported_specs: dict[RenderBufferKind, RenderBufferSpec], + ) -> CameraData: + """Build a :class:`CameraData` with output buffers pre-allocated. + + Allocates one ``(num_views, height, width, channels)`` tensor per kind + in the intersection of ``data_types`` and ``supported_specs``, using + the channels and dtype from each :class:`RenderBufferSpec`. + + Args: + data_types: Requested output names (typically :attr:`CameraCfg.data_types`). + Every name must be a member of :class:`RenderBufferKind`. + height: Image height in pixels. + width: Image width in pixels. + num_views: Number of camera views (batch dimension). + device: Torch device on which to allocate the buffers. + supported_specs: Per-buffer layout the active renderer can produce, + keyed by :class:`RenderBufferKind`. Names absent from this mapping + are not allocated. + + Returns: + A new :class:`CameraData` with :attr:`image_shape`, :attr:`output`, + and :attr:`info` populated; all other fields at their defaults. + + Raises: + ValueError: If ``data_types`` contains names that are not members of + :class:`RenderBufferKind`. + """ + requested: set[RenderBufferKind] = set() + unknown: list[str] = [] + for name in data_types: + try: + requested.add(RenderBufferKind(name)) + except ValueError: + unknown.append(name) + if unknown: + raise ValueError(f"Unknown RenderBufferKind name(s): {unknown}. Expected members of RenderBufferKind.") + # rgb is exposed as a view into rgba when the renderer publishes both, + # so requesting either one allocates the shared rgba buffer. + rgb_alias = ( + RenderBufferKind.RGBA in supported_specs + and RenderBufferKind.RGB in supported_specs + and (RenderBufferKind.RGB in requested or RenderBufferKind.RGBA in requested) + ) + if rgb_alias: + requested.update({RenderBufferKind.RGB, RenderBufferKind.RGBA}) + + buffers: dict[str, torch.Tensor] = {} + for name, spec in supported_specs.items(): + if name not in requested: + continue + if rgb_alias and name == RenderBufferKind.RGB: + continue + buffers[str(name)] = torch.zeros( + (num_views, height, width, spec.channels), + dtype=spec.dtype, + device=device, + ).contiguous() + if rgb_alias: + buffers[str(RenderBufferKind.RGB)] = buffers[str(RenderBufferKind.RGBA)][..., :3] + + return cls( + image_shape=(height, width), + output=buffers, + info={name: None for name in buffers}, + ) + ## # Additional Frame orientation conventions ## @property def quat_w_ros(self) -> torch.Tensor: - """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention. + """Quaternion orientation `(x, y, z, w)` of the sensor origin in the world frame, following ROS convention. .. note:: ROS convention follows the camera aligned with forward axis +Z and up axis -Y. @@ -81,7 +161,7 @@ def quat_w_ros(self) -> torch.Tensor: @property def quat_w_opengl(self) -> torch.Tensor: - """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following + """Quaternion orientation `(x, y, z, w)` of the sensor origin in the world frame, following Opengl / USD Camera convention. .. note:: diff --git a/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py b/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py index 4b3676158e75..c34cb4088834 100644 --- a/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/tiled_camera.py @@ -3,423 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause -from __future__ import annotations - -import json -import math -from collections.abc import Sequence -from typing import TYPE_CHECKING, Any +"""Deprecated module. Use :class:`~isaaclab.sensors.camera.Camera` instead. -import numpy as np -import torch -import warp as wp +.. deprecated:: 4.6.0 + :class:`TiledCamera` is deprecated. :class:`~isaaclab.sensors.camera.Camera` now includes + TiledCamera's vectorized rendering optimizations via the same :class:`~isaaclab.renderers.Renderer` + abstraction. Use :class:`~isaaclab.sensors.camera.Camera` with + :class:`~isaaclab.sensors.camera.CameraCfg` (or :class:`~isaaclab.sensors.camera.TiledCameraCfg`) + directly. +""" -import carb -from pxr import UsdGeom +from __future__ import annotations -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.warp.kernels import reshape_tiled_image +import warnings -from ..sensor_base import SensorBase from .camera import Camera - -if TYPE_CHECKING: - from .tiled_camera_cfg import TiledCameraCfg +from .tiled_camera_cfg import TiledCameraCfg class TiledCamera(Camera): - r"""The tiled rendering based camera sensor for acquiring the same data as the Camera class. - - This class inherits from the :class:`Camera` class but uses the tiled-rendering API to acquire - the visual data. Tiled-rendering concatenates the rendered images from multiple cameras into a single image. - This allows for rendering multiple cameras in parallel and is useful for rendering large scenes with multiple - cameras efficiently. - - The following sensor types are supported: - - - ``"rgb"``: A 3-channel rendered color image. - - ``"rgba"``: A 4-channel rendered color image with alpha channel. - - ``"distance_to_camera"``: An image containing the distance to camera optical center. - - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - - ``"depth"``: Alias for ``"distance_to_image_plane"``. - - ``"normals"``: An image containing the local surface normal vectors at each pixel. - - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - - ``"semantic_segmentation"``: The semantic segmentation data. - - ``"instance_segmentation_fast"``: The instance segmentation data. - - ``"instance_id_segmentation_fast"``: The instance id segmentation data. - - .. note:: - Currently the following sensor types are not supported in a "view" format: - - - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. - - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. - - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). - - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). - - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_3d"``: The 3D view space bounding box data. - - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. - - .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output - .. _USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html - - .. versionadded:: v1.0.0 - - This feature is available starting from Isaac Sim 4.2. Before this version, the tiled rendering APIs - were not available. + """Deprecated alias for :class:`Camera`. + .. deprecated:: 4.6.0 + Use :class:`Camera` directly — it now includes TiledCamera's vectorized rendering + optimizations via the same Renderer abstraction. """ - cfg: TiledCameraCfg - """The configuration parameters.""" - def __init__(self, cfg: TiledCameraCfg): - """Initializes the tiled camera sensor. - - Args: - cfg: The configuration parameters. - - Raises: - RuntimeError: If no camera prim is found at the given path. - ValueError: If the provided data types are not supported by the camera. - """ - super().__init__(cfg) - - def __del__(self): - """Unsubscribes from callbacks and detach from the replicator registry.""" - # unsubscribe from callbacks - SensorBase.__del__(self) - # detach from the replicator registry - for annotator in self._annotators.values(): - annotator.detach(self.render_product_paths) - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - # message for class - return ( - f"Tiled Camera @ '{self.cfg.prim_path}': \n" - f"\tdata types : {list(self.data.output.keys())} \n" - f"\tsemantic filter : {self.cfg.semantic_filter}\n" - f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" - f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" - f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" - f"\tupdate period (s): {self.cfg.update_period}\n" - f"\tshape : {self.image_shape}\n" - f"\tnumber of sensors : {self._view.count}" + warnings.warn( + "TiledCamera is deprecated and will be removed in a future release. " + "Use Camera directly — it now includes TiledCamera's vectorized rendering " + "optimizations via the same Renderer abstraction.", + DeprecationWarning, + stacklevel=2, ) - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - if not self._is_initialized: - raise RuntimeError( - "TiledCamera could not be initialized. Please ensure --enable_cameras is used to enable rendering." - ) - # reset the timestamps - SensorBase.reset(self, env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset the frame count - self._frame[env_ids] = 0 - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - This function creates handles and registers the provided data types with the replicator registry to - be able to access the data from the sensor. It also initializes the internal buffers to store the data. - - Raises: - RuntimeError: If the number of camera prims in the view does not match the number of environments. - RuntimeError: If replicator was not found. - """ - carb_settings_iface = carb.settings.get_settings() - if not carb_settings_iface.get("/isaaclab/cameras_enabled"): - raise RuntimeError( - "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" - " rendering." - ) - - import omni.replicator.core as rep - - # Initialize parent class - SensorBase._initialize_impl(self) - # Create a view for the sensor - self._view = XformPrimView(self.cfg.prim_path, device=self._device, stage=self.stage) - # Check that sizes are correct - if self._view.count != self._num_envs: - raise RuntimeError( - f"Number of camera prims in the view ({self._view.count}) does not match" - f" the number of environments ({self._num_envs})." - ) - - # Create all env_ids buffer - self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer - self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - - # Convert all encapsulated prims to Camera - cam_prim_paths = [] - for cam_prim in self._view.prims: - # Get camera prim - cam_prim_path = cam_prim.GetPath().pathString - # Check if prim is a camera - if not cam_prim.IsA(UsdGeom.Camera): - raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") - # Add to list - self._sensor_prims.append(UsdGeom.Camera(cam_prim)) - cam_prim_paths.append(cam_prim_path) - - # Create replicator tiled render product - rp = rep.create.render_product_tiled(cameras=cam_prim_paths, tile_resolution=(self.cfg.width, self.cfg.height)) - self._render_product_paths = [rp.path] - - # Define the annotators based on requested data types - self._annotators = dict() - for annotator_type in self.cfg.data_types: - if annotator_type == "rgba" or annotator_type == "rgb": - annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=self.device, do_array_copy=False) - self._annotators["rgba"] = annotator - elif annotator_type == "depth" or annotator_type == "distance_to_image_plane": - # keep depth for backwards compatibility - annotator = rep.AnnotatorRegistry.get_annotator( - "distance_to_image_plane", device=self.device, do_array_copy=False - ) - self._annotators[annotator_type] = annotator - # note: we are verbose here to make it easier to understand the code. - # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. - # if colorize is false, the data is returned as a uint32 image with ids as values. - else: - init_params = None - if annotator_type == "semantic_segmentation": - init_params = { - "colorize": self.cfg.colorize_semantic_segmentation, - "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), - } - elif annotator_type == "instance_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_segmentation} - elif annotator_type == "instance_id_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} - - annotator = rep.AnnotatorRegistry.get_annotator( - annotator_type, init_params, device=self.device, do_array_copy=False - ) - self._annotators[annotator_type] = annotator - - # Attach the annotator to the render product - for annotator in self._annotators.values(): - annotator.attach(self._render_product_paths) - - # Create internal buffers - self._create_buffers() - - def _update_buffers_impl(self, env_ids: Sequence[int]): - # Increment frame count - self._frame[env_ids] += 1 - - # update latest camera pose - if self.cfg.update_latest_camera_pose: - self._update_poses(env_ids) - - # Extract the flattened image buffer - for data_type, annotator in self._annotators.items(): - # check whether returned data is a dict (used for segmentation) - output = annotator.get_data() - if isinstance(output, dict): - tiled_data_buffer = output["data"] - self._data.info[data_type] = output["info"] - else: - tiled_data_buffer = output - - # convert data buffer to warp array - if isinstance(tiled_data_buffer, np.ndarray): - # Let warp infer the dtype from numpy array instead of hardcoding uint8 - # Different annotators return different dtypes: RGB(uint8), depth(float32), segmentation(uint32) - tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device) - else: - tiled_data_buffer = tiled_data_buffer.to(device=self.device) - - # process data for different segmentation types - # Note: Replicator returns raw buffers of dtype uint32 for segmentation types - # so we need to convert them to uint8 4 channel images for colorized types - if ( - (data_type == "semantic_segmentation" and self.cfg.colorize_semantic_segmentation) - or (data_type == "instance_segmentation_fast" and self.cfg.colorize_instance_segmentation) - or (data_type == "instance_id_segmentation_fast" and self.cfg.colorize_instance_id_segmentation) - ): - tiled_data_buffer = wp.array( - ptr=tiled_data_buffer.ptr, shape=(*tiled_data_buffer.shape, 4), dtype=wp.uint8, device=self.device - ) - - # For motion vectors, we only require the first two channels of the tiled buffer - # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/2003) - if data_type == "motion_vectors": - tiled_data_buffer = tiled_data_buffer[:, :, :2].contiguous() - - # For normals, we only require the first three channels of the tiled buffer - # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/4239) - if data_type == "normals": - tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() - - wp.launch( - kernel=reshape_tiled_image, - dim=(self._view.count, self.cfg.height, self.cfg.width), - inputs=[ - tiled_data_buffer.flatten(), - wp.from_torch(self._data.output[data_type]), # zero-copy alias - *list(self._data.output[data_type].shape[1:]), # height, width, num_channels - self._tiling_grid_shape()[0], # num_tiles_x - ], - device=self.device, - ) - - # alias rgb as first 3 channels of rgba - if data_type == "rgba" and "rgb" in self.cfg.data_types: - self._data.output["rgb"] = self._data.output["rgba"][..., :3] - - # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, - # the replicator depth clipping is applied w.r.t. to the image plane which may result in values - # larger than the clipping range in the output. We apply an additional clipping to ensure values - # are within the clipping range for all the annotators. - if data_type == "distance_to_camera": - self._data.output[data_type][self._data.output[data_type] > self.cfg.spawn.clipping_range[1]] = ( - torch.inf - ) - # apply defined clipping behavior - if ( - data_type == "distance_to_camera" or data_type == "distance_to_image_plane" or data_type == "depth" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[data_type][torch.isinf(self._data.output[data_type])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) - - """ - Private Helpers - """ - - def _check_supported_data_types(self, cfg: TiledCameraCfg): - """Checks if the data types are supported by the ray-caster camera.""" - # check if there is any intersection in unsupported types - # reason: these use np structured data types which we can't yet convert to torch tensor - common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES - if common_elements: - # provide alternative fast counterparts - fast_common_elements = [] - for item in common_elements: - if "instance_segmentation" in item or "instance_id_segmentation" in item: - fast_common_elements.append(item + "_fast") - # raise error - raise ValueError( - f"TiledCamera class does not support the following sensor types: {common_elements}." - "\n\tThis is because these sensor types output numpy structured data types which" - "can't be converted to torch tensors easily." - "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." - f"\n\t\tFast counterparts: {fast_common_elements}" - ) - - def _create_buffers(self): - """Create buffers for storing data.""" - # create the data object - # -- pose of the cameras - self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) - self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) - self._update_poses(self._ALL_INDICES) - # -- intrinsic matrix - self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) - self._update_intrinsic_matrices(self._ALL_INDICES) - self._data.image_shape = self.image_shape - # -- output data - data_dict = dict() - if "rgba" in self.cfg.data_types or "rgb" in self.cfg.data_types: - data_dict["rgba"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - if "rgb" in self.cfg.data_types: - # RGB is the first 3 channels of RGBA - data_dict["rgb"] = data_dict["rgba"][..., :3] - if "distance_to_image_plane" in self.cfg.data_types: - data_dict["distance_to_image_plane"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "depth" in self.cfg.data_types: - data_dict["depth"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "distance_to_camera" in self.cfg.data_types: - data_dict["distance_to_camera"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "normals" in self.cfg.data_types: - data_dict["normals"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.float32 - ).contiguous() - if "motion_vectors" in self.cfg.data_types: - data_dict["motion_vectors"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 2), device=self.device, dtype=torch.float32 - ).contiguous() - if "semantic_segmentation" in self.cfg.data_types: - if self.cfg.colorize_semantic_segmentation: - data_dict["semantic_segmentation"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["semantic_segmentation"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - if "instance_segmentation_fast" in self.cfg.data_types: - if self.cfg.colorize_instance_segmentation: - data_dict["instance_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["instance_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - if "instance_id_segmentation_fast" in self.cfg.data_types: - if self.cfg.colorize_instance_id_segmentation: - data_dict["instance_id_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["instance_id_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - - self._data.output = data_dict - self._data.info = dict() - - def _tiled_image_shape(self) -> tuple[int, int]: - """Returns a tuple containing the dimension of the tiled image.""" - cols, rows = self._tiling_grid_shape() - return (self.cfg.width * cols, self.cfg.height * rows) - - def _tiling_grid_shape(self) -> tuple[int, int]: - """Returns a tuple containing the tiling grid dimension.""" - cols = math.ceil(math.sqrt(self._view.count)) - rows = math.ceil(self._view.count / cols) - return (cols, rows) - - def _create_annotator_data(self): - # we do not need to create annotator data for the tiled camera sensor - raise RuntimeError("This function should not be called for the tiled camera sensor.") - - def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: - # we do not need to process annotator output for the tiled camera sensor - raise RuntimeError("This function should not be called for the tiled camera sensor.") - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._view = None + super().__init__(cfg) diff --git a/source/isaaclab/isaaclab/sensors/camera/tiled_camera_cfg.py b/source/isaaclab/isaaclab/sensors/camera/tiled_camera_cfg.py index 2241a0648fd2..d35ff285ff13 100644 --- a/source/isaaclab/isaaclab/sensors/camera/tiled_camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/camera/tiled_camera_cfg.py @@ -3,14 +3,33 @@ # # SPDX-License-Identifier: BSD-3-Clause +import warnings +from typing import TYPE_CHECKING + from isaaclab.utils import configclass from .camera_cfg import CameraCfg -from .tiled_camera import TiledCamera + +if TYPE_CHECKING: + from .tiled_camera import TiledCamera @configclass class TiledCameraCfg(CameraCfg): - """Configuration for a tiled rendering-based camera sensor.""" + """Configuration for a tiled rendering-based camera sensor. + + .. deprecated:: 4.6.0 + :class:`TiledCameraCfg` is deprecated. Use :class:`CameraCfg` directly — + :class:`~isaaclab.sensors.camera.Camera` now includes TiledCamera's vectorized + rendering optimizations via the same renderer abstraction. + """ + + class_type: type["TiledCamera"] | str = "{DIR}.tiled_camera:TiledCamera" - class_type: type = TiledCamera + def __post_init__(self): + warnings.warn( + "TiledCameraCfg is deprecated. Use CameraCfg directly — " + "Camera now includes TiledCamera's vectorized rendering optimizations.", + DeprecationWarning, + stacklevel=2, + ) diff --git a/source/isaaclab/isaaclab/sensors/camera/utils.py b/source/isaaclab/isaaclab/sensors/camera/utils.py index f9db81551b4f..f7e6920217a1 100644 --- a/source/isaaclab/isaaclab/sensors/camera/utils.py +++ b/source/isaaclab/isaaclab/sensors/camera/utils.py @@ -41,7 +41,7 @@ def transform_points( Args: points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in source frame. position: The position of source frame in target frame. Defaults to None. - orientation: The orientation (w, x, y, z) of source frame in target frame. + orientation: The orientation (x, y, z, w) of source frame in target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. @@ -107,7 +107,7 @@ def create_pointcloud_from_depth( keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points correspond to pixels with depth values 0.0 or NaN. Defaults to False. position: The position of the camera in a target frame. Defaults to None. - orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None. + orientation: The orientation (x, y, z, w) of the camera in a target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, i.e. takes the device that matches the depth image. @@ -183,7 +183,7 @@ def create_pointcloud_from_rgbd( rgb: Color for generated point cloud. Defaults to None. normalize_rgb: Whether to normalize input rgb. Defaults to False. position: The position of the camera in a target frame. Defaults to None. - orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None. + orientation: The orientation `(x, y, z, w)` of the camera in a target frame. Defaults to None. device: The device for torch where the computation should be executed. Defaults to None, in which case it takes the device that matches the depth image. num_channels: Number of channels in RGB pointcloud. Defaults to 3. diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py index 94b402d41a37..c0700077e6df 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py @@ -5,6 +5,6 @@ """Sub-module for rigid contact sensor.""" -from .contact_sensor import ContactSensor -from .contact_sensor_cfg import ContactSensorCfg -from .contact_sensor_data import ContactSensorData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.pyi b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.pyi new file mode 100644 index 000000000000..e961de0dd1cb --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseContactSensor", + "BaseContactSensorData", + "ContactSensor", + "ContactSensorCfg", + "ContactSensorData", +] + +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData +from .contact_sensor import ContactSensor +from .contact_sensor_cfg import ContactSensorCfg +from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py new file mode 100644 index 000000000000..fb2b4a59b168 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py @@ -0,0 +1,240 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warnings +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp + +import isaaclab.utils.string as string_utils +from isaaclab.utils.warp import ProxyArray + +from ..sensor_base import SensorBase +from .base_contact_sensor_data import BaseContactSensorData + +if TYPE_CHECKING: + from .contact_sensor_cfg import ContactSensorCfg + + +class BaseContactSensor(SensorBase): + """A contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the physics engine's contact reporting API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + + The PhysX backend only supports one-to-many filtered contact reporting: a single sensor + body filtered against multiple partners. For many-to-many, create separate sensors per + body. The Newton backend supports many-to-many natively. + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + # check that config is valid + if cfg.history_length < 0: + raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}") + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self.num_sensors}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + @abstractmethod + def num_instances(self) -> int | None: + """Number of instances of the sensor.""" + raise NotImplementedError(f"Num instances is not implemented for {self.__class__.__name__}.") + + @property + @abstractmethod + def data(self) -> BaseContactSensorData: + """Data from the sensor.""" + raise NotImplementedError(f"Data is not implemented for {self.__class__.__name__}.") + + @property + @abstractmethod + def num_sensors(self) -> int: + """Number of sensors per environment.""" + raise NotImplementedError(f"Num sensors is not implemented for {self.__class__.__name__}.") + + @property + @abstractmethod + def body_names(self) -> list[str] | None: + """Ordered names of shapes or bodies with contact sensors attached.""" + raise NotImplementedError(f"Body names is not implemented for {self.__class__.__name__}.") + + @property + @abstractmethod + def contact_view(self) -> None: + """View for the contact forces captured. + + .. note:: + None if there is no view associated with the sensor. + """ + raise NotImplementedError(f"Contact view is not implemented for {self.__class__.__name__}.") + + """ + Operations + """ + + @abstractmethod + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array(dtype=wp.bool) | None = None): + """Resets the sensor. + + Args: + env_ids: The indices of the environments to reset. Defaults to None: all the environments are reset. + env_mask: The masks of the environments to reset. Defaults to None: all the environments are reset. + """ + # reset the timers and counters + super().reset(env_ids, env_mask) + + def find_sensors(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find sensors in the contact sensor based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the sensor indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + @abstractmethod + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if bodies that have established contact within the last :attr:`dt` seconds. + + This function checks if the bodies have established contact within the last :attr:`dt` seconds + by comparing the current contact time with the given time period. If the contact time is less + than the given time period, then the bodies are considered to be in contact. + + .. note:: + The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other + words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true + if the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contact was established. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have established contact within the last + :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the + number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + raise NotImplementedError(f"Compute first contact is not implemented for {self.__class__.__name__}.") + + @abstractmethod + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if bodies that have broken contact within the last :attr:`dt` seconds. + + This function checks if the bodies have broken contact within the last :attr:`dt` seconds + by comparing the current air time with the given time period. If the air time is less + than the given time period, then the bodies are considered to not be in contact. + + .. note:: + It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, + :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if + the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contract is broken. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + raise NotImplementedError(f"Compute first air is not implemented for {self.__class__.__name__}.") + + """ + Implementation. + """ + + @abstractmethod + def _initialize_impl(self): + super()._initialize_impl() + + @abstractmethod + def _create_buffers(self): + """Creates the buffers for the sensor data.""" + raise NotImplementedError(f"Create buffers is not implemented for {self.__class__.__name__}.") + + @abstractmethod + def _update_buffers_impl(self, env_mask: wp.array | None): + """Fills the buffers of the sensor data. + + Args: + env_mask: Mask of the environments to update. None: update all environments. + """ + raise NotImplementedError(f"Update buffers is not implemented for {self.__class__.__name__}.") + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + # TODO: invalidate NewtonManager if necessary + + @property + def num_bodies(self) -> int: + """Deprecated property. Please use `num_sensors` instead.""" + warnings.warn( + "The `num_bodies` property will be deprecated in a future release. Please use `num_sensors` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.num_sensors + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Deprecated method. Please use `find_sensors` instead.""" + warnings.warn( + "The `find_bodies` method will be deprecated in a future release. Please use `find_sensors` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.find_sensors(name_keys, preserve_order) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py new file mode 100644 index 000000000000..2f5f69ef55db --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -0,0 +1,185 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for contact sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +from isaaclab.utils.leapp import ( + POSE7_ELEMENT_NAMES, + QUAT_XYZW_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + InputKindEnum, + leapp_tensor_semantics, +) +from isaaclab.utils.warp import ProxyArray + + +class BaseContactSensorData(ABC): + """Data container for the contact reporting sensor. + + This base class defines the interface for contact sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) + def pose_w(self) -> ProxyArray | None: + """Pose of the sensor origin in world frame. + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) + def pos_w(self) -> ProxyArray | None: + """Position of the sensor origin in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, 3). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_XYZW_ELEMENT_NAMES) + def quat_w(self) -> ProxyArray | None: + """Orientation of the sensor origin in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.quatf. In torch this resolves to + (num_instances, num_sensors, 4). The orientation is provided in (x, y, z, w) format. + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def net_forces_w(self) -> ProxyArray | None: + """The net normal contact forces in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def net_forces_w_history(self) -> ProxyArray | None: + """History of net normal contact forces. + + Shape is (num_instances, history_length, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, history_length, num_sensors, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def force_matrix_w(self) -> ProxyArray | None: + """Normal contact forces filtered between sensor and filtered bodies. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def force_matrix_w_history(self) -> ProxyArray | None: + """History of filtered contact forces. + + Shape is (num_instances, history_length, num_sensors, num_filter_shapes), dtype = wp.vec3f. + In torch this resolves to (num_instances, history_length, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) + def contact_pos_w(self) -> ProxyArray | None: + """Average position of contact points. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.track_contact_points` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def friction_forces_w(self) -> ProxyArray | None: + """Sum of friction forces. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.track_friction_forces` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics() + def last_air_time(self) -> ProxyArray | None: + """Time spent in air before last contact. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics() + def current_air_time(self) -> ProxyArray | None: + """Time spent in air since last detach. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics() + def last_contact_time(self) -> ProxyArray | None: + """Time spent in contact before last detach. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics() + def current_contact_time(self) -> ProxyArray | None: + """Time spent in contact since last contact. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py index 2a85a2661f6a..29e2ff58ed60 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py @@ -3,537 +3,30 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Ignore optional memory usage warning globally -# pyright: reportOptionalSubscript=false - from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch - -import carb -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import convert_quat +from isaaclab.utils.backend_utils import FactoryBase -from ..sensor_base import SensorBase -from .contact_sensor_data import ContactSensorData +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData if TYPE_CHECKING: - from .contact_sensor_cfg import ContactSensorCfg - - -class ContactSensor(SensorBase): - """A contact reporting sensor. - - The contact sensor reports the normal contact forces on a rigid body in the world frame. - It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. - - To enable the contact reporter on a rigid body, please make sure to enable the - :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your - asset spawner configuration. This will enable the contact reporter on all the rigid bodies - in the asset. - - The sensor can be configured to report the contact forces on a set of bodies with a given - filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful - when you want to report the contact forces between the sensor bodies and a specific set of - bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. - Please check the documentation on `RigidContact`_ for more details. - - The reporting of the filtered contact forces is only possible as one-to-many. This means that only one - sensor body in an environment can be filtered against multiple bodies in that environment. If you need to - filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor - body. - - As an example, suppose you want to report the contact forces for all the feet of a robot against an object - exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and - :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` - respectively will not work. Instead, you need to create a separate sensor for each foot and filter - it against the object. - - .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html - .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView - """ - - cfg: ContactSensorCfg - """The configuration parameters.""" - - def __init__(self, cfg: ContactSensorCfg): - """Initializes the contact sensor object. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - - # Enable contact processing - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/disableContactProcessing", False) - - # Create empty variables for storing output data - self._data: ContactSensorData = ContactSensorData() - # initialize self._body_physx_view for running in extension mode - self._body_physx_view = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Contact sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self.body_physx_view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of bodies : {self.num_bodies}\n" - f"\tbody names : {self.body_names}\n" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self.body_physx_view.count - - @property - def data(self) -> ContactSensorData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Number of bodies with contact sensors attached.""" - return self._num_bodies - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies with contact sensors attached.""" - prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def body_physx_view(self) -> physx.RigidBodyView: - """View for the rigid bodies captured (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._body_physx_view - - @property - def contact_physx_view(self) -> physx.RigidContactView: - """Contact reporter view for the bodies (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._contact_physx_view - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.net_forces_w[env_ids] = 0.0 - self._data.net_forces_w_history[env_ids] = 0.0 - # reset force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - self._data.force_matrix_w[env_ids] = 0.0 - self._data.force_matrix_w_history[env_ids] = 0.0 - # reset the current air time - if self.cfg.track_air_time: - self._data.current_air_time[env_ids] = 0.0 - self._data.last_air_time[env_ids] = 0.0 - self._data.current_contact_time[env_ids] = 0.0 - self._data.last_contact_time[env_ids] = 0.0 - # reset contact positions - if self.cfg.track_contact_points: - self._data.contact_pos_w[env_ids, :] = torch.nan - # reset friction forces - if self.cfg.track_friction_forces: - self._data.friction_forces_w[env_ids, :] = 0.0 - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have established contact within the last :attr:`dt` seconds. - - This function checks if the bodies have established contact within the last :attr:`dt` seconds - by comparing the current contact time with the given time period. If the contact time is less - than the given time period, then the bodies are considered to be in contact. - - Note: - The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other - words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true - if the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contact was established. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have established contact within the last - :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the - number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the bodies are in contact - currently_in_contact = self.data.current_contact_time > 0.0 - less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) - return currently_in_contact * less_than_dt_in_contact - - def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have broken contact within the last :attr:`dt` seconds. - - This function checks if the bodies have broken contact within the last :attr:`dt` seconds - by comparing the current air time with the given time period. If the air time is less - than the given time period, then the bodies are considered to not be in contact. - - Note: - It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, - :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if - the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contract is broken. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the sensor is configured to track contact time - currently_detached = self.data.current_air_time > 0.0 - less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) - return currently_detached * less_than_dt_detached - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check that only rigid bodies are selected - leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] - template_prim_path = self._parent_prims[0].GetPath().pathString - body_names = list() - for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): - # check if prim has contact reporter API - if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - prim_path = prim.GetPath().pathString - body_names.append(prim_path.rsplit("/", 1)[-1]) - # check that there is at least one body with contact reporter API - if not body_names: - raise RuntimeError( - f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." - "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." - ) - - # construct regex expression for the body names - body_names_regex = r"(" + "|".join(body_names) + r")" - body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" - # convert regex expressions to glob expressions for PhysX - body_names_glob = body_names_regex.replace(".*", "*") - filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] - - # create a rigid prim view for the sensor - self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) - self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( - body_names_glob, - filter_patterns=filter_prim_paths_glob, - max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, - ) - # resolve the true count of bodies - self._num_bodies = self.body_physx_view.count // self._num_envs - # check that contact reporter succeeded - if self._num_bodies != len(body_names): - raise RuntimeError( - "Failed to initialize contact reporter for specified bodies." - f"\n\tInput prim path : {self.cfg.prim_path}" - f"\n\tResolved prim paths: {body_names_regex}" - ) - - # prepare data buffers - self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - # optional buffers - # -- history of net forces - if self.cfg.history_length > 0: - self._data.net_forces_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device - ) - else: - self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) - # -- pose of sensor origins - if self.cfg.track_pose: - self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) - - # check if filter paths are valid - if self.cfg.track_contact_points or self.cfg.track_friction_forces: - if len(self.cfg.filter_prim_paths_expr) == 0: - raise ValueError( - "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - if self.cfg.max_contact_data_count_per_prim < 1: - raise ValueError( - f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " - "Please set it to a value greater than 0 to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - - # -- position of contact points - if self.cfg.track_contact_points: - self._data.contact_pos_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - torch.nan, - device=self._device, - ) - # -- friction forces at contact points - if self.cfg.track_friction_forces: - self._data.friction_forces_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - 0.0, - device=self._device, - ) - # -- air/contact time between contacts - if self.cfg.track_air_time: - self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) - # force matrix history: (num_envs, history_length, num_bodies, num_filter_shapes, 3) - if len(self.cfg.filter_prim_paths_expr) != 0: - num_filters = self.contact_physx_view.filter_count - self._data.force_matrix_w = torch.zeros( - self._num_envs, self._num_bodies, num_filters, 3, device=self._device - ) - if self.cfg.history_length > 0: - self._data.force_matrix_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, num_filters, 3, device=self._device - ) - else: - self._data.force_matrix_w_history = self._data.force_matrix_w.unsqueeze(1) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - - # obtain the contact forces - # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). - # This isn't the most efficient way to do this, but it's the easiest to implement. - net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) - self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] - # update contact force history - if self.cfg.history_length > 0: - self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) - self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] - - # obtain the contact force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) - num_filters = self.contact_physx_view.filter_count - # acquire and shape the force matrix - force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) - force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) - self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] - if self.cfg.history_length > 0: - self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) - self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] - - # obtain the pose of the sensor origin - if self.cfg.track_pose: - pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] - pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") - self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) - - # obtain contact points - if self.cfg.track_contact_points: - _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) - ) - self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( - buffer_contact_points, buffer_count, buffer_start_indices - )[env_ids] - - # obtain friction forces - if self.cfg.track_friction_forces: - friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( - dt=self._sim_physics_dt - ) - self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( - friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 - )[env_ids] - - # obtain the air time - if self.cfg.track_air_time: - # -- time elapsed since last update - # since this function is called every frame, we can use the difference to get the elapsed time - elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] - # -- check contact state of bodies - is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold - is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact - is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact - # -- update the last contact time if body has just become in contact - self._data.last_air_time[env_ids] = torch.where( - is_first_contact, - self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_air_time[env_ids], - ) - # -- increment time for bodies that are not in contact - self._data.current_air_time[env_ids] = torch.where( - ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - # -- update the last contact time if body has just detached - self._data.last_contact_time[env_ids] = torch.where( - is_first_detached, - self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_contact_time[env_ids], - ) - # -- increment time for bodies that are in contact - self._data.current_contact_time[env_ids] = torch.where( - is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - - def _unpack_contact_buffer_data( - self, - contact_data: torch.Tensor, - buffer_count: torch.Tensor, - buffer_start_indices: torch.Tensor, - avg: bool = True, - default: float = float("nan"), - ) -> torch.Tensor: - """ - Unpacks and aggregates contact data for each (env, body, filter) group. - - This function vectorizes the following nested loop: - - for i in range(self._num_bodies * self._num_envs): - for j in range(self.contact_physx_view.filter_count): - start_index_ij = buffer_start_indices[i, j] - count_ij = buffer_count[i, j] - self._contact_position_aggregate_buffer[i, j, :] = torch.mean( - contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 - ) - - For more details, see the `RigidContactView.get_contact_data() documentation `_. - - Args: - contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). - buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). - buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). - avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. - default: Default value to use for groups with zero contacts. Defaults to NaN. - - Returns: - Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). - """ - counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) - n_rows, total = counts.numel(), int(counts.sum()) - agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) - if total > 0: - row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) - - block_starts = counts.cumsum(0) - counts - deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) - flat_idx = starts[row_ids] + deltas - - pts = contact_data.index_select(0, flat_idx) - agg = agg.zero_().index_add_(0, row_ids, pts) - agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg - agg[counts == 0] = default - - return agg.view(self._num_envs * self.num_bodies, -1, 3).view( - self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 - ) + from isaaclab_newton.sensors.contact_sensor import ContactSensor as NewtonContactSensor + from isaaclab_newton.sensors.contact_sensor import ContactSensorData as NewtonContactSensorData + from isaaclab_physx.sensors.contact_sensor import ContactSensor as PhysXContactSensor + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "contact_visualizer"): - self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.contact_visualizer.set_visibility(True) - else: - if hasattr(self, "contact_visualizer"): - self.contact_visualizer.set_visibility(False) - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self.body_physx_view is None: - return - # marker indices - # 0: contact, 1: no contact - net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) - marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) - # check if prim is visualized - if self.cfg.track_pose: - frame_origins: torch.Tensor = self._data.pos_w - else: - pose = self.body_physx_view.get_transforms() - frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] - # visualize - self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) +class ContactSensor(FactoryBase, BaseContactSensor): + """Factory for creating contact sensor instances.""" - """ - Internal simulation callbacks. - """ + data: BaseContactSensorData | PhysXContactSensorData | NewtonContactSensorData - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._body_physx_view = None - self._contact_physx_view = None + def __new__(cls, *args, **kwargs) -> BaseContactSensor | PhysXContactSensor | NewtonContactSensor: + """Create a new instance of a contact sensor based on the backend.""" + # The `FactoryBase` __new__ method will handle the logic and return + # an instance of the correct backend-specific contact sensor class, + # which is guaranteed to be a subclass of `BaseContactSensor` by convention. + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_cfg.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_cfg.py index c811a7ca63d1..e68117745237 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_cfg.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_cfg.py @@ -3,19 +3,30 @@ # # SPDX-License-Identifier: BSD-3-Clause +from typing import TYPE_CHECKING + from isaaclab.markers import VisualizationMarkersCfg from isaaclab.markers.config import CONTACT_SENSOR_MARKER_CFG from isaaclab.utils import configclass from ..sensor_base_cfg import SensorBaseCfg -from .contact_sensor import ContactSensor + +if TYPE_CHECKING: + from .contact_sensor import ContactSensor @configclass class ContactSensorCfg(SensorBaseCfg): - """Configuration for the contact sensor.""" + """Configuration for the contact sensor. + + Sensing bodies are selected via :attr:`SensorBaseCfg.prim_path`. Filter bodies for + per-partner force reporting are selected via :attr:`filter_prim_paths_expr`. + + Only body-level sensing and filtering are supported. For shape-level granularity, + see ``NewtonContactSensorCfg`` in ``isaaclab_newton``. + """ - class_type: type = ContactSensor + class_type: type["ContactSensor"] | str = "{DIR}.contact_sensor:ContactSensor" track_pose: bool = False """Whether to track the pose of the sensor's origin. Defaults to False.""" @@ -26,8 +37,8 @@ class ContactSensorCfg(SensorBaseCfg): track_friction_forces: bool = False """Whether to track the friction forces at the contact points. Defaults to False.""" - max_contact_data_count_per_prim: int = 4 - """The maximum number of contacts across all batches of the sensor to keep track of. Default is 4. + max_contact_data_count_per_prim: int | None = None + """The maximum number of contacts across all batches of the sensor to keep track of. Default is 4, where supported. This parameter sets the total maximum counts of the simulation across all bodies and environments. The total number of contacts allowed is max_contact_data_count_per_prim*num_envs*num_sensor_bodies. @@ -36,39 +47,43 @@ class ContactSensorCfg(SensorBaseCfg): If the environment is very contact rich it is suggested to increase this parameter to avoid out of bounds memory errors and loss of contact data leading to inaccurate measurements. - - """ + """ track_air_time: bool = False """Whether to track the air/contact time of the bodies (time between contacts). Defaults to False.""" - force_threshold: float = 1.0 + force_threshold: float | None = None """The threshold on the norm of the contact force that determines whether two bodies are in collision or not. + Defaults to None, in which case the sensor backend chooses an appropriate value. This value is only used for tracking the mode duration (the time in contact or in air), if :attr:`track_air_time` is True. """ - filter_prim_paths_expr: list[str] = list() - """The list of primitive paths (or expressions) to filter contacts with. Defaults to an empty list, in which case - no filtering is applied. + history_length: int = 0 + """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only + the current data is stored (no history).""" + + filter_prim_paths_expr: list[str] = [] + """List of body prim path expressions to filter contacts against. Defaults to empty, + meaning contacts with all bodies are aggregated into the net force. + + If provided, a per-partner force matrix (:attr:`ContactSensorData.force_matrix_w`) is + reported in addition to the net force. Each expression is matched against body prim paths + in the scene. - The contact sensor allows reporting contacts between the primitive specified with :attr:`prim_path` and - other primitives in the scene. For instance, in a scene containing a robot, a ground plane and an object, - you can obtain individual contact reports of the base of the robot with the ground plane and the object. + For shape-level filtering, see ``NewtonContactSensorCfg`` in ``isaaclab_newton``. .. note:: - The expression in the list can contain the environment namespace regex ``{ENV_REGEX_NS}`` which - will be replaced with the environment namespace. + Expressions can contain the environment namespace regex ``{ENV_REGEX_NS}``, which + is replaced with the environment namespace. - Example: ``{ENV_REGEX_NS}/Object`` will be replaced with ``/World/envs/env_.*/Object``. + Example: ``{ENV_REGEX_NS}/Object`` becomes ``/World/envs/env_.*/Object``. .. attention:: - The reporting of filtered contacts only works when the sensor primitive :attr:`prim_path` corresponds to a - single primitive in that environment. If the sensor primitive corresponds to multiple primitives, the - filtering will not work as expected. Please check :class:`~isaaclab.sensors.contact_sensor.ContactSensor` - for more details. - If track_contact_points is true, then filter_prim_paths_expr cannot be an empty list! + Filtered contact reporting only works when :attr:`SensorBaseCfg.prim_path` matches a + single primitive per environment. For many-to-many filtering, see + ``NewtonContactSensorCfg`` in ``isaaclab_newton``. """ visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor") diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index fd6c15ebe960..0ca31f9e40ac 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -3,151 +3,24 @@ # # SPDX-License-Identifier: BSD-3-Clause -# needed to import for allowing type-hinting: torch.Tensor | None -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ContactSensorData: - """Data container for the contact reporting sensor.""" - - pos_w: torch.Tensor | None = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - - """ - - contact_pos_w: torch.Tensor | None = None - """Average of the positions of contact points between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_contact_points` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_contact_points` is True, a ValueError will be raised if: - - * If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * If the :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - will not be calculated. - - """ - - friction_forces_w: torch.Tensor | None = None - """Sum of the friction forces between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_friction_forces` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_friction_forces` is True, a ValueError will be raised if: - - * The :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * The :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - - """ +"""Re-exports the base contact sensor data class for backwards compatibility.""" - quat_w: torch.Tensor | None = None - """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. - - Shape is (N, 4), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - """ - - net_forces_w: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - net_forces_w_history: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length - and B is the number of bodies in each sensor. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - force_matrix_w: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - force_matrix_w_history: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, T, B, M, 3), where N is the number of sensors, T is the configured history length, - B is number of bodies in each sensor and M is the number of filtered bodies. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - last_air_time: torch.Tensor | None = None - """Time spent (in s) in the air before the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - current_air_time: torch.Tensor | None = None - """Time spent (in s) in the air since the last detach. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. +from __future__ import annotations - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ +from typing import TYPE_CHECKING - last_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact before the last detach. +from isaaclab.utils.backend_utils import FactoryBase - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. +from .base_contact_sensor_data import BaseContactSensorData - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ +if TYPE_CHECKING: + from isaaclab_newton.sensors.contact_sensor.contact_sensor_data import ContactSensorData as NewtonContactSensorData + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData - current_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact since the last contact. - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. +class ContactSensorData(FactoryBase, BaseContactSensorData): + """Factory for creating contact sensor data instances.""" - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ + def __new__(cls, *args, **kwargs) -> BaseContactSensorData | PhysXContactSensorData | NewtonContactSensorData: + """Create a new instance of a contact sensor data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py index d5db853e8cc2..7ee00d2772eb 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py @@ -5,6 +5,6 @@ """Sub-module for frame transformer sensor.""" -from .frame_transformer import FrameTransformer -from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg -from .frame_transformer_data import FrameTransformerData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.pyi b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.pyi new file mode 100644 index 000000000000..24ae4bbe53e1 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.pyi @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseFrameTransformer", + "BaseFrameTransformerData", + "FrameTransformer", + "FrameTransformerCfg", + "OffsetCfg", + "FrameTransformerData", +] + +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData +from .frame_transformer import FrameTransformer +from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg +from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py new file mode 100644 index 000000000000..53770029c4fe --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_frame_transformer_data import BaseFrameTransformerData + +if TYPE_CHECKING: + from .frame_transformer_cfg import FrameTransformerCfg + + +class BaseFrameTransformer(SensorBase): + """A sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. + + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseFrameTransformerData: + raise NotImplementedError + + @property + @abstractmethod + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. + + .. deprecated:: + Use ``len(data.target_frame_names)`` instead. This property will be removed in a future release. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + .. deprecated:: + Use ``data.target_frame_names`` instead. This property will be removed in a future release. + """ + raise NotImplementedError + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + @abstractmethod + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_mask: wp.array): + raise NotImplementedError + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py new file mode 100644 index 000000000000..286af6e84ea3 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -0,0 +1,137 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for frame transformer sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +from isaaclab.utils.leapp import ( + POSE7_ELEMENT_NAMES, + QUAT_XYZW_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + InputKindEnum, + leapp_tensor_semantics, + target_frame_pose_resolver, + target_frame_quat_resolver, + target_frame_xyz_resolver, +) +from isaaclab.utils.warp import ProxyArray + + +class BaseFrameTransformerData(ABC): + """Data container for the frame transformer sensor. + + This base class defines the interface for frame transformer sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def target_frame_names(self) -> list[str]: + """Target frame names (order matches data ordering). + + Resolved from :attr:`FrameTransformerCfg.FrameCfg.name`. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=target_frame_pose_resolver) + def target_pose_source(self) -> ProxyArray | None: + """Pose of the target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.transformf. In torch this resolves to + (num_instances, num_target_frames, 7). The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=target_frame_xyz_resolver) + def target_pos_source(self) -> ProxyArray: + """Position of the target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_target_frames, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=target_frame_quat_resolver) + def target_quat_source(self) -> ProxyArray: + """Orientation of the target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.quatf. In torch this resolves to + (num_instances, num_target_frames, 4). The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=target_frame_pose_resolver) + def target_pose_w(self) -> ProxyArray | None: + """Pose of the target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.transformf. In torch this resolves to + (num_instances, num_target_frames, 7). The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=target_frame_xyz_resolver) + def target_pos_w(self) -> ProxyArray: + """Position of the target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_target_frames, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=target_frame_quat_resolver) + def target_quat_w(self) -> ProxyArray: + """Orientation of the target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.quatf. In torch this resolves to + (num_instances, num_target_frames, 4). The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) + def source_pose_w(self) -> ProxyArray | None: + """Pose of the source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) + def source_pos_w(self) -> ProxyArray: + """Position of the source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_XYZW_ELEMENT_NAMES) + def source_quat_w(self) -> ProxyArray: + """Orientation of the source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py index ed83392b3aa7..c44f991ff218 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py @@ -5,556 +5,24 @@ from __future__ import annotations -import logging -import re -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import ( - combine_frame_transforms, - convert_quat, - is_identity_pose, - normalize, - quat_from_angle_axis, - subtract_frame_transforms, -) - -from ..sensor_base import SensorBase -from .frame_transformer_data import FrameTransformerData +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData if TYPE_CHECKING: - from .frame_transformer_cfg import FrameTransformerCfg - -# import logger -logger = logging.getLogger(__name__) - - -class FrameTransformer(SensorBase): - """A sensor for reporting frame transforms. - - This class provides an interface for reporting the transform of one or more frames (target frames) - with respect to another frame (source frame). The source frame is specified by the user as a prim path - (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of - prim paths (:attr:`FrameTransformerCfg.target_frames`). - - The source frame and target frames are assumed to be rigid bodies. The transform of the target frames - with respect to the source frame is computed by first extracting the transform of the source frame - and target frames from the physics engine and then computing the relative transform between the two. - - Additionally, the user can specify an offset for the source frame and each target frame. This is useful - for specifying the transform of the desired frame with respect to the body's center of mass, for instance. - - A common example of using this sensor is to track the position and orientation of the end effector of a - robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the - manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is - typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the - manipulator. - - """ - - cfg: FrameTransformerCfg - """The configuration parameters.""" - - def __init__(self, cfg: FrameTransformerCfg): - """Initializes the frame transformer object. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data: FrameTransformerData = FrameTransformerData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"FrameTransformer @ '{self.cfg.prim_path}': \n" - f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" - f"\tnumber of envs: {self._num_envs}\n" - f"\tsource body frame: {self._source_frame_body_name}\n" - f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> FrameTransformerData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Returns the number of target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`len(data.target_frame_names)` to access the number of target frames. - """ - return len(self._target_frame_body_names) - - @property - def body_names(self) -> list[str]: - """Returns the names of the target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`data.target_frame_names` to access the target frame names. - """ - return self._target_frame_body_names - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = ... - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - - # resolve source frame offset - source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) - source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) - # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is - # not the identity quaternion for efficiency in _update_buffer_impl - self._apply_source_frame_offset = True - # Handle source frame offsets - if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): - logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") - self._apply_source_frame_offset = False - else: - logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") - # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) - self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) - self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) - - # Keep track of mapping from the rigid body name to the desired frames and prim path, - # as there may be multiple frames based upon the same body name and we don't want to - # create unnecessary views. - body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} - # The offsets associated with each target frame - target_offsets: dict[str, dict[str, torch.Tensor]] = {} - # The frames whose offsets are not identity - non_identity_offset_frames: list[str] = [] - - # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the - # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl - self._apply_target_frame_offset = False - - # Need to keep track of whether the source frame is also a target frame - self._source_is_also_target_frame = False - - # Collect all target frames, their associated body prim paths and their offsets so that we can extract - # the prim, check that it has the appropriate rigid body API in a single loop. - # First element is None because user can't specify source frame name - frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] - frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] - # First element is None because source frame offset is handled separately - frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] - frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) - for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): - # Find correct prim - matching_prims = sim_utils.find_matching_prims(prim_path) - if len(matching_prims) == 0: - raise ValueError( - f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." - " No matching prims were found." - ) - for prim in matching_prims: - # Get the prim path of the matching prim - matching_prim_path = prim.GetPath().pathString - # Check if it is a rigid prim - if not prim.HasAPI(UsdPhysics.RigidBodyAPI): - raise ValueError( - f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" - " rigid body. The class only supports transformations between rigid bodies." - ) - - # Get the name of the body: use relative prim path for unique identification - body_name = self._get_relative_body_path(matching_prim_path) - # Use leaf name of prim path if frame name isn't specified by user - frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] - - # Keep track of which frames are associated with which bodies - if body_name in body_names_to_frames: - body_names_to_frames[body_name]["frames"].add(frame_name) - - # This is a corner case where the source frame is also a target frame - if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": - self._source_is_also_target_frame = True - - else: - # Store the first matching prim path and the type of frame - body_names_to_frames[body_name] = { - "frames": {frame_name}, - "prim_path": matching_prim_path, - "type": frame_type, - } - - if offset is not None: - offset_pos = torch.tensor(offset.pos, device=self.device) - offset_quat = torch.tensor(offset.rot, device=self.device) - # Check if we need to apply offsets (optimized code path in _update_buffer_impl) - if not is_identity_pose(offset_pos, offset_quat): - non_identity_offset_frames.append(frame_name) - self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} - - if not self._apply_target_frame_offset: - logger.info( - f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" - f" are identity: {frames[1:]}" - ) - else: - logger.info( - f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" - f" {non_identity_offset_frames}" - ) - - # The names of bodies that RigidPrim will be tracking to later extract transforms from - tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] - tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] - - body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # Create a prim view for all frames and initialize it - # order of transforms coming out of view will be source frame followed by target frame(s) - self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) - - # Determine the order in which regex evaluated body names so we can later index into frame transforms - # by frame name correctly - all_prim_paths = self._frame_physx_view.prim_paths - - if "env_" in all_prim_paths[0]: - - def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: - """Separates the environment number and prim_path from the item. - - Args: - item: The item to extract the environment number from. Assumes item is of the form - `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. - Returns: - The environment number and the prim_path. - """ - match = re.search(r"env_(\d+)(.*)", item) - return (int(match.group(1)), match.group(2)) - - # Find the indices that would reorganize output to be per environment. - # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base - # to come before env_1/Robot/foot so we need to use custom key function - self._per_env_indices = [ - index - for index, _ in sorted( - list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) - ) - ] - - # Only need 0th env as the names and their ordering are the same across environments - sorted_prim_paths = [ - all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] - ] - - else: - # If no environment is present, then the order of the body names is the same as the order of the - # prim paths sorted alphabetically - self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] - sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] - - # -- target frames: use relative prim path for unique identification - self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] - - # -- source frame: use relative prim path for unique identification - self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) - source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) - - # Only remove source frame from tracked bodies if it is not also a target frame - if not self._source_is_also_target_frame: - self._target_frame_body_names.remove(self._source_frame_body_name) - - # Determine indices into all tracked body frames for both source and target frames - all_ids = torch.arange(self._num_envs * len(tracked_body_names)) - self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index - - # If source frame is also a target frame, then the target frame body ids are the same as - # the source frame body ids - if self._source_is_also_target_frame: - self._target_frame_body_ids = all_ids - else: - self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] - - # The name of each of the target frame(s) - either user specified or defaulted to the body name - self._target_frame_names: list[str] = [] - # The position and rotation components of target frame offsets - target_frame_offset_pos = [] - target_frame_offset_quat = [] - # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed - # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations - # when updating sensor in _update_buffers_impl - duplicate_frame_indices = [] - - # Go through each body name and determine the number of duplicates we need for that frame - # and extract the offsets. This is all done to handle the case where multiple frames - # reference the same body, but have different names and/or offsets - for i, body_name in enumerate(self._target_frame_body_names): - for frame in body_names_to_frames[body_name]["frames"]: - # Only need to handle target frames here as source frame is handled separately - if frame in target_offsets: - target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_quat.append(target_offsets[frame]["quat"]) - self._target_frame_names.append(frame) - duplicate_frame_indices.append(i) - - # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes - # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is a optimization to make _update_buffer_impl more efficient - duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - if self._source_is_also_target_frame: - num_target_body_frames = len(tracked_body_names) - else: - num_target_body_frames = len(tracked_body_names) - 1 - - self._duplicate_frame_indices = torch.cat( - [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] - ) - - # Target frame offsets are only applied if at least one of the offsets are non-identity - if self._apply_target_frame_offset: - # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) - self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) - - # fill the data buffer - self._data.target_frame_names = self._target_frame_names - self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) - self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) - self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) - self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) - self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = ... - - # Extract transforms from view - shape is: - # (the total number of source and target body frames being tracked * self._num_envs, 7) - transforms = self._frame_physx_view.get_transforms() - - # Reorder the transforms to be per environment as is expected of SensorData - transforms = transforms[self._per_env_indices] - - # Convert quaternions as PhysX uses xyzw form - transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") - - # Process source frame transform - source_frames = transforms[self._source_frame_body_ids] - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_source_frame_offset: - source_pos_w, source_quat_w = combine_frame_transforms( - source_frames[:, :3], - source_frames[:, 3:], - self._source_frame_offset_pos, - self._source_frame_offset_quat, - ) - else: - source_pos_w = source_frames[:, :3] - source_quat_w = source_frames[:, 3:] - - # Process target frame transforms - target_frames = transforms[self._target_frame_body_ids] - duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] - duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] - - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_target_frame_offset: - target_pos_w, target_quat_w = combine_frame_transforms( - duplicated_target_frame_pos_w, - duplicated_target_frame_quat_w, - self._target_frame_offset_pos, - self._target_frame_offset_quat, - ) - else: - target_pos_w = duplicated_target_frame_pos_w - target_quat_w = duplicated_target_frame_quat_w - - # Compute the transform of the target frame with respect to the source frame - total_num_frames = len(self._target_frame_names) - target_pos_source, target_quat_source = subtract_frame_transforms( - source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), - source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), - target_pos_w, - target_quat_w, - ) - - # Update buffers - # note: The frame names / ordering don't change so no need to update them after initialization - self._data.source_pos_w[:] = source_pos_w.view(-1, 3) - self._data.source_quat_w[:] = source_quat_w.view(-1, 4) - self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) - self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) - self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) - self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - if not hasattr(self, "frame_visualizer"): - self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - - # set their visibility to true - self.frame_visualizer.set_visibility(True) - else: - if hasattr(self, "frame_visualizer"): - self.frame_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # Get the all frames pose - frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) - frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) - - # Get the all connecting lines between frames pose - lines_pos, lines_quat, lines_length = self._get_connecting_lines( - start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), - end_pos=self._data.target_pos_w.view(-1, 3), - ) - - # Initialize default (identity) scales and marker indices for all markers (frames + lines) - marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) - marker_indices = torch.zeros(marker_scales.size(0)) - - # Set the z-scale of line markers to represent their actual length - marker_scales[-lines_length.size(0) :, -1] = lines_length - - # Assign marker config index 1 to line markers - marker_indices[-lines_length.size(0) :] = 1 - - # Update the frame and the connecting line visualizer - self.frame_visualizer.visualize( - translations=torch.cat((frames_pos, lines_pos), dim=0), - orientations=torch.cat((frames_quat, lines_quat), dim=0), - scales=marker_scales, - marker_indices=marker_indices, - ) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._frame_physx_view = None - - """ - Internal helpers. - """ - - def _get_connecting_lines( - self, start_pos: torch.Tensor, end_pos: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Draws connecting lines between frames. - - Given start and end points, this function computes the positions (mid-point), orientations, - and lengths of the connecting lines. - - Args: - start_pos: The start positions of the connecting lines. Shape is (N, 3). - end_pos: The end positions of the connecting lines. Shape is (N, 3). - - Returns: - A tuple containing: - - The positions of each connecting line. Shape is (N, 3). - - The orientations of each connecting line in quaternion. Shape is (N, 4). - - The lengths of each connecting line. Shape is (N,). - """ - direction = end_pos - start_pos - lengths = torch.norm(direction, dim=-1) - positions = (start_pos + end_pos) / 2 - - # Get default direction (along z-axis) - default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) - - # Normalize direction vector - direction_norm = normalize(direction) - - # Calculate rotation from default direction to target direction - rotation_axis = torch.linalg.cross(default_direction, direction_norm) - rotation_axis_norm = torch.norm(rotation_axis, dim=-1) - - # Handle case where vectors are parallel - mask = rotation_axis_norm > 1e-6 - rotation_axis = torch.where( - mask.unsqueeze(-1), - normalize(rotation_axis), - torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), - ) - - # Calculate rotation angle - cos_angle = torch.sum(default_direction * direction_norm, dim=-1) - cos_angle = torch.clamp(cos_angle, -1.0, 1.0) - angle = torch.acos(cos_angle) - orientations = quat_from_angle_axis(angle, rotation_axis) - - return positions, orientations, lengths - - @staticmethod - def _get_relative_body_path(prim_path: str) -> str: - """Extract a normalized body path from a prim path. + from isaaclab_newton.sensors.frame_transformer import FrameTransformer as NewtonFrameTransformer + from isaaclab_physx.sensors.frame_transformer import FrameTransformer as PhysXFrameTransformer + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData - Removes the environment instance segment `/envs/env_/` to normalize paths - across multiple environments, while preserving the `/envs/` prefix to - distinguish environment-scoped paths from non-environment paths. - Examples: - - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' - - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' - - '/World/Robot' -> '/World/Robot' - - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' +class FrameTransformer(FactoryBase, BaseFrameTransformer): + """Factory for creating frame transformer instances.""" - Args: - prim_path: The full prim path. + data: BaseFrameTransformerData | PhysXFrameTransformerData - Returns: - The prim path with `/envs/env_/` removed, preserving `/envs/`. - """ - pattern = re.compile(r"/envs/env_[^/]+/") - return pattern.sub("/envs/", prim_path) + def __new__(cls, *args, **kwargs) -> BaseFrameTransformer | NewtonFrameTransformer | PhysXFrameTransformer: + """Create a new instance of a frame transformer based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py index ca9b528aa1d9..b41c7d6d5f0d 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py @@ -3,13 +3,18 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from dataclasses import MISSING +from typing import TYPE_CHECKING from isaaclab.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg from isaaclab.utils import configclass from ..sensor_base_cfg import SensorBaseCfg -from .frame_transformer import FrameTransformer + +if TYPE_CHECKING: + from .frame_transformer import FrameTransformer @configclass @@ -18,8 +23,8 @@ class OffsetCfg: pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" @configclass @@ -52,7 +57,7 @@ class FrameCfg: offset: OffsetCfg = OffsetCfg() """The pose offset from the parent prim frame.""" - class_type: type = FrameTransformer + class_type: type[FrameTransformer] | str = "{DIR}.frame_transformer:FrameTransformer" prim_path: str = MISSING """The prim path of the body to transform from (source frame).""" @@ -71,6 +76,6 @@ class FrameCfg: visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index 942ddbd5144b..f6b28faea395 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -3,55 +3,28 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import dataclass +"""Re-exports the base frame transformer data class for backwards compatibility.""" -import torch +from __future__ import annotations +from typing import TYPE_CHECKING -@dataclass -class FrameTransformerData: - """Data container for the frame transformer sensor.""" +from isaaclab.utils.backend_utils import FactoryBase - target_frame_names: list[str] = None - """Target frame names (this denotes the order in which that frame data is ordered). +from .base_frame_transformer_data import BaseFrameTransformerData - The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. - This does not necessarily follow the order in which the frames are defined in the config due to - the regex matching. - """ +if TYPE_CHECKING: + from isaaclab_newton.sensors.frame_transformer.frame_transformer_data import ( + FrameTransformerData as NewtonFrameTransformerData, + ) + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData - target_pos_source: torch.Tensor = None - """Position of the target frame(s) relative to source frame. - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ +class FrameTransformerData(FactoryBase, BaseFrameTransformerData): + """Factory for creating frame transformer data instances.""" - target_quat_source: torch.Tensor = None - """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - target_pos_w: torch.Tensor = None - """Position of the target frame(s) after offset (in world frame). - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_w: torch.Tensor = None - """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - source_pos_w: torch.Tensor = None - """Position of the source frame after offset (in world frame). - - Shape is (N, 3), where N is the number of environments. - """ - - source_quat_w: torch.Tensor = None - """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, 4), where N is the number of environments. - """ + def __new__( + cls, *args, **kwargs + ) -> BaseFrameTransformerData | NewtonFrameTransformerData | PhysXFrameTransformerData: + """Create a new instance of a frame transformer data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/__init__.py b/source/isaaclab/isaaclab/sensors/imu/__init__.py index 7501e41cf49d..6236162c439f 100644 --- a/source/isaaclab/isaaclab/sensors/imu/__init__.py +++ b/source/isaaclab/isaaclab/sensors/imu/__init__.py @@ -7,6 +7,6 @@ Imu Sensor """ -from .imu import Imu -from .imu_cfg import ImuCfg -from .imu_data import ImuData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/imu/__init__.pyi b/source/isaaclab/isaaclab/sensors/imu/__init__.pyi new file mode 100644 index 000000000000..e002281f8b22 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseImu", + "BaseImuData", + "Imu", + "ImuCfg", + "ImuData", +] + +from .base_imu import BaseImu +from .base_imu_data import BaseImuData +from .imu import Imu +from .imu_cfg import ImuCfg +from .imu_data import ImuData diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu.py b/source/isaaclab/isaaclab/sensors/imu/base_imu.py new file mode 100644 index 000000000000..2bf99b9a5e16 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from typing import TYPE_CHECKING + +import warp as wp + +from ..sensor_base import SensorBase +from .base_imu_data import BaseImuData + +if TYPE_CHECKING: + from .imu_cfg import ImuCfg + + +class BaseImu(SensorBase): + """The Inertial Measurement Unit (IMU) sensor. + + This sensor models a real IMU that measures angular velocity (gyroscope) and + proper linear acceleration (accelerometer) in the sensor's body frame. Unlike the + PVA sensor, it does not provide pose, linear velocity, angular acceleration, + or projected gravity. + + The sensor can be attached to any prim path with a rigid ancestor in its tree. + If the provided path is not a rigid body, the closest rigid-body ancestor is used + for simulation queries. The fixed transform from that ancestor to the target prim + is computed once during initialization and composed with the configured sensor offset. + + .. note:: + + The accuracy of the acceleration readings depends on the physics backend and timestep. + For sufficient accuracy, we recommend keeping the timestep at least 200 Hz. + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the IMU sensor. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseImuData: + raise NotImplementedError + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + @abstractmethod + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_mask: wp.array): + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py new file mode 100644 index 000000000000..039d5dd60f64 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -0,0 +1,50 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for IMU sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +from isaaclab.utils.leapp import ( + XYZ_ELEMENT_NAMES, + InputKindEnum, + leapp_tensor_semantics, +) +from isaaclab.utils.warp import ProxyArray + + +class BaseImuData(ABC): + """Data container for the IMU sensor. + + This base class defines the interface for IMU sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + + Unlike the PVA sensor, the IMU only provides the two physical quantities that a + real inertial measurement unit measures: angular velocity and linear acceleration. + """ + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) + def ang_vel_b(self) -> ProxyArray: + """IMU frame angular velocity relative to the world expressed in IMU frame [rad/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES) + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration (proper) in the IMU frame [m/s^2]. + + Zero in freefall, +g upward at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/imu.py b/source/isaaclab/isaaclab/sensors/imu/imu.py index e092b39502ee..4951867fb0b0 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu.py @@ -5,295 +5,25 @@ from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.markers import VisualizationMarkers - -from ..sensor_base import SensorBase -from .imu_data import ImuData +from .base_imu import BaseImu +from .base_imu_data import BaseImuData if TYPE_CHECKING: - from .imu_cfg import ImuCfg - - -class Imu(SensorBase): - """The Inertia Measurement Unit (IMU) sensor. - - The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame - linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular - accelerations/velocities. - - If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. - The fixed transform from that ancestor to the target prim is computed once during initialization and - composed with the configured sensor offset. - - .. note:: - - We are computing the accelerations using numerical differentiation from the velocities. Consequently, the - IMU sensor accuracy depends on the chosen phsyx timestep. For a sufficient accuracy, we recommend to keep the - timestep at least as 200Hz. - - .. note:: - - The user can configure the sensor offset in the configuration file. The offset is applied relative to the - rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform - from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. - The offset is defined as a position vector and a quaternion rotation, which - are applied in the order: position, then rotation. The position is applied as a translation - in the body frame of the rigid source prim, and the rotation is applied as a rotation - in the body frame of the rigid source prim. - - """ - - cfg: ImuCfg - """The configuration parameters.""" - - def __init__(self, cfg: ImuCfg): - """Initializes the Imu sensor. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data = ImuData() - - # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) - self._rigid_parent_expr: str | None = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Imu sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of sensors : {self._view.count}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> ImuData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_instances(self) -> int: - return self._view.count - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.pos_w[env_ids] = 0.0 - self._data.quat_w[env_ids] = 0.0 - self._data.quat_w[env_ids, 0] = 1.0 - self._data.projected_gravity_b[env_ids] = 0.0 - self._data.projected_gravity_b[env_ids, 2] = -1.0 - self._data.lin_vel_b[env_ids] = 0.0 - self._data.ang_vel_b[env_ids] = 0.0 - self._data.lin_acc_b[env_ids] = 0.0 - self._data.ang_acc_b[env_ids] = 0.0 - self._prev_lin_vel_w[env_ids] = 0.0 - self._prev_ang_vel_w[env_ids] = 0.0 - - def update(self, dt: float, force_recompute: bool = False): - # save timestamp - self._dt = dt - # execute updating - super().update(dt, force_recompute) - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - - If the target prim path is a rigid body, build the view directly on it. - - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor - to the target prim, and build the view on the ancestor expression. - """ - # Initialize parent class - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check if the prim at path is a rigid prim - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") - - # Find the first matching ancestor prim that implements rigid body API - ancestor_prim = sim_utils.get_first_matching_ancestor_prim( - prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) - ) - if ancestor_prim is None: - raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") - # Convert ancestor prim path to expression - if ancestor_prim == prim: - self._rigid_parent_expr = self.cfg.prim_path - fixed_pos_b, fixed_quat_b = None, None - else: - # Convert ancestor prim path to expression - relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString - self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") - # Resolve the relative pose between the target prim and the ancestor prim - fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) - - # Create the rigid body view on the ancestor - self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) - - # Get world gravity - gravity = self._physics_sim_view.get_gravity() - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) - - # Create internal buffers - self._initialize_buffers_impl() - - # Compose the configured offset with the fixed ancestor->target transform (done once) - # new_offset = fixed * cfg.offset - # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg - if fixed_pos_b is not None and fixed_quat_b is not None: - # Broadcast fixed transform across instances - fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) - fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) - - cfg_p = self._offset_pos_b.clone() - cfg_q = self._offset_quat_b.clone() - - composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) - composed_q = math_utils.quat_mul(fixed_q, cfg_q) - - self._offset_pos_b = composed_p - self._offset_quat_b = composed_q - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - # world pose of the rigid source (ancestor) from the PhysX view - pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = quat_w.roll(1, dims=-1) - - # sensor pose in world: apply composed offset - self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) - self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) - - # COM of rigid source (body frame) - com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] - - # Velocities at rigid source COM - lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) - - # If sensor offset or COM != link origin, account for angular velocity contribution - lin_vel_w += torch.linalg.cross( - ang_vel_w, math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 - ) - - # numerical derivative (world frame) - lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] - ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt - - # batch rotate world->body using current sensor orientation - dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) - dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( - 5, dim=0 - ) - # store the velocities. - self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] - self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] - # store the accelerations - self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] - self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] - # store projected gravity - self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] - - self._prev_lin_vel_w[env_ids] = lin_vel_w - self._prev_ang_vel_w[env_ids] = ang_vel_w + from isaaclab_newton.sensors.imu import Imu as NewtonImu + from isaaclab_newton.sensors.imu import ImuData as NewtonImuData + from isaaclab_physx.sensors.imu import Imu as PhysXImu + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData - def _initialize_buffers_impl(self): - """Create buffers for storing data.""" - # data buffers - self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) - self._data.quat_w[:, 0] = 1.0 - self._data.projected_gravity_b = torch.zeros(self._view.count, 3, device=self._device) - self._data.lin_vel_b = torch.zeros_like(self._data.pos_w) - self._data.ang_vel_b = torch.zeros_like(self._data.pos_w) - self._data.lin_acc_b = torch.zeros_like(self._data.pos_w) - self._data.ang_acc_b = torch.zeros_like(self._data.pos_w) - self._prev_lin_vel_w = torch.zeros_like(self._data.pos_w) - self._prev_ang_vel_w = torch.zeros_like(self._data.pos_w) - # store sensor offset (applied relative to rigid source). - # This may be composed later with a fixed ancestor->target transform. - self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) - # set gravity bias - self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( - self._view.count, 1 - ) +class Imu(FactoryBase, BaseImu): + """Factory for creating IMU sensor instances.""" - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.acceleration_visualizer.set_visibility(True) - else: - if hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer.set_visibility(False) + data: BaseImuData | PhysXImuData | NewtonImuData - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self._view is None: - return - # get marker location - # -- base state - base_pos_w = self._data.pos_w.clone() - base_pos_w[:, 2] += 0.5 - # -- resolve the scales - default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale - arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # arrow-direction - quat_opengl = math_utils.quat_from_matrix( - math_utils.create_rotation_matrix_from_view( - self._data.pos_w, - self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), - up_axis=up_axis, - device=self._device, - ) - ) - quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") - # display markers - self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) + def __new__(cls, *args, **kwargs) -> BaseImu | PhysXImu | NewtonImu: + """Create a new instance of an IMU sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_cfg.py b/source/isaaclab/isaaclab/sensors/imu/imu_cfg.py index 06aeca5fa95b..a095c7f27642 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_cfg.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_cfg.py @@ -5,42 +5,37 @@ from __future__ import annotations -from isaaclab.markers import VisualizationMarkersCfg -from isaaclab.markers.config import RED_ARROW_X_MARKER_CFG +from typing import TYPE_CHECKING + from isaaclab.utils import configclass from ..sensor_base_cfg import SensorBaseCfg -from .imu import Imu + +if TYPE_CHECKING: + from .imu import Imu @configclass class ImuCfg(SensorBaseCfg): - """Configuration for an Inertial Measurement Unit (IMU) sensor.""" + """Configuration for an Inertial Measurement Unit (IMU) sensor. - class_type: type = Imu + This configures a sensor that provides the two physical quantities measured by a + real IMU: angular velocity (gyroscope) and linear acceleration (accelerometer). + For a richer sensor that also provides pose, velocity, and angular acceleration, + see :class:`~isaaclab.sensors.PvaCfg`. + """ + + class_type: type[Imu] | str = "{DIR}.imu:Imu" @configclass class OffsetCfg: """The offset pose of the sensor's frame from the sensor's parent frame.""" pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + """Translation w.r.t. the parent frame [m]. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" - - visualizer_cfg: VisualizationMarkersCfg = RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Command/velocity_goal") - """The configuration object for the visualization markers. Defaults to RED_ARROW_X_MARKER_CFG. - - This attribute is only used when debug visualization is enabled. - """ - gravity_bias: tuple[float, float, float] = (0.0, 0.0, 9.81) - """The linear acceleration bias applied to the linear acceleration in the world frame (x,y,z). - - Imu sensors typically output a positive gravity acceleration in opposition to the direction of gravity. This - config parameter allows users to subtract that bias if set to (0.,0.,0.). By default this is set to (0.0,0.0,9.81) - which results in a positive acceleration reading in the world Z. - """ diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index dd06e09a8b79..f23f2a3be6ca 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -3,55 +3,24 @@ # # SPDX-License-Identifier: BSD-3-Clause -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ImuData: - """Data container for the Imu sensor.""" - - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. +"""Factory class for IMU data.""" - Shape is (N, 3), where ``N`` is the number of environments. - """ - - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion ``(w, x, y, z)`` in world frame. - - Shape is (N, 4), where ``N`` is the number of environments. - """ - - projected_gravity_b: torch.Tensor = None - """Gravity direction unit vector projected on the imu frame. - - Shape is (N,3), where ``N`` is the number of environments. - """ - - lin_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. +from __future__ import annotations - Shape is (N, 3), where ``N`` is the number of environments. - """ +from typing import TYPE_CHECKING - ang_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. +from isaaclab.utils.backend_utils import FactoryBase - Shape is (N, 3), where ``N`` is the number of environments. - """ +from .base_imu_data import BaseImuData - lin_acc_b: torch.Tensor = None - """IMU frame linear acceleration relative to the world expressed in IMU frame. +if TYPE_CHECKING: + from isaaclab_newton.sensors.imu import ImuData as NewtonImuData + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData - Shape is (N, 3), where ``N`` is the number of environments. - """ - ang_acc_b: torch.Tensor = None - """IMU frame angular acceleration relative to the world expressed in IMU frame. +class ImuData(FactoryBase, BaseImuData): + """Factory for creating IMU data instances.""" - Shape is (N, 3), where ``N`` is the number of environments. - """ + def __new__(cls, *args, **kwargs) -> BaseImuData | PhysXImuData | NewtonImuData: + """Create a new instance of IMU data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.py b/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.py new file mode 100644 index 000000000000..aae021e8e98d --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Joint Wrench Sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.pyi b/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.pyi new file mode 100644 index 000000000000..97980db01c57 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseJointWrenchSensor", + "BaseJointWrenchSensorData", + "JointWrenchSensor", + "JointWrenchSensorCfg", + "JointWrenchSensorData", +] + +from .base_joint_wrench_sensor import BaseJointWrenchSensor +from .base_joint_wrench_sensor_data import BaseJointWrenchSensorData +from .joint_wrench_sensor import JointWrenchSensor +from .joint_wrench_sensor_cfg import JointWrenchSensorCfg +from .joint_wrench_sensor_data import JointWrenchSensorData diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor.py b/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor.py new file mode 100644 index 000000000000..5ed4e0382bbe --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor.py @@ -0,0 +1,101 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_joint_wrench_sensor_data import BaseJointWrenchSensorData + +if TYPE_CHECKING: + from .joint_wrench_sensor_cfg import JointWrenchSensorCfg + + +class BaseJointWrenchSensor(SensorBase): + """The joint reaction wrench sensor. + + Reports incoming joint wrenches for the bodies selected by the backend as + split force [N] / torque [N·m] pairs expressed in the + ``INCOMING_JOINT_FRAME`` convention (child-side joint frame, child-side + joint anchor reference point). Backends convert from their native + representation to this convention internally. Use :attr:`body_names` or + :meth:`find_bodies` to map entries to articulation bodies. + """ + + cfg: JointWrenchSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the joint wrench sensor.""" + + def __init__(self, cfg: JointWrenchSensorCfg): + """Initialize the joint wrench sensor. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseJointWrenchSensorData: + """The sensor data container, populated after simulation initialization.""" + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of the bodies whose incoming joint wrench is reported.""" + raise NotImplementedError + + @property + def num_bodies(self) -> int: + """Number of bodies whose incoming joint wrench is reported.""" + return len(self.body_names) + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find reported bodies based on name keys. + + Args: + name_keys: A regular expression or list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + The matching body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + @abstractmethod + def _initialize_impl(self) -> None: + """Initialize the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to + initialize the common sensor infrastructure from + :class:`~isaaclab.sensors.SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_mask: wp.array) -> None: + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor_data.py b/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor_data.py new file mode 100644 index 000000000000..282021a36784 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/base_joint_wrench_sensor_data.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for joint-wrench sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +from isaaclab.utils.warp import ProxyArray + + +class BaseJointWrenchSensorData(ABC): + """Data container for the joint reaction wrench sensor.""" + + @property + @abstractmethod + def force(self) -> ProxyArray | None: + """Linear component of the joint reaction wrench [N]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + raise NotImplementedError + + @property + @abstractmethod + def torque(self) -> ProxyArray | None: + """Angular component of the joint reaction wrench [N·m]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor.py b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor.py new file mode 100644 index 000000000000..40103b20e9f1 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_joint_wrench_sensor import BaseJointWrenchSensor +from .base_joint_wrench_sensor_data import BaseJointWrenchSensorData + +if TYPE_CHECKING: + from isaaclab_newton.sensors.joint_wrench import JointWrenchSensor as NewtonJointWrenchSensor + from isaaclab_newton.sensors.joint_wrench import JointWrenchSensorData as NewtonJointWrenchSensorData + from isaaclab_physx.sensors.joint_wrench import JointWrenchSensor as PhysXJointWrenchSensor + from isaaclab_physx.sensors.joint_wrench import JointWrenchSensorData as PhysXJointWrenchSensorData + + +class JointWrenchSensor(FactoryBase, BaseJointWrenchSensor): + """Factory for creating joint-wrench sensor instances.""" + + data: BaseJointWrenchSensorData | PhysXJointWrenchSensorData | NewtonJointWrenchSensorData + + def __new__(cls, *args, **kwargs) -> BaseJointWrenchSensor | PhysXJointWrenchSensor | NewtonJointWrenchSensor: + """Create a new instance of a joint-wrench sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_cfg.py b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_cfg.py new file mode 100644 index 000000000000..4bdb0ab74f06 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_cfg.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg + +if TYPE_CHECKING: + from .joint_wrench_sensor import JointWrenchSensor + + +@configclass +class JointWrenchSensorCfg(SensorBaseCfg): + """Configuration for a joint reaction wrench sensor.""" + + class_type: type[JointWrenchSensor] | str = "{DIR}.joint_wrench_sensor:JointWrenchSensor" + + convention: Literal["incoming_joint_frame"] = "incoming_joint_frame" + """Coordinate convention for the reported wrench. Defaults to ``"incoming_joint_frame"``. + + - ``"incoming_joint_frame"`` — child-side joint frame, child-side joint anchor as reference point. + Matches what a real 6-axis F/T sensor mounted at the joint would measure. Backends convert + their native solver outputs to this convention. + """ diff --git a/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_data.py b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_data.py new file mode 100644 index 000000000000..5872640d143c --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/joint_wrench/joint_wrench_sensor_data.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory class for joint-wrench sensor data.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_joint_wrench_sensor_data import BaseJointWrenchSensorData + +if TYPE_CHECKING: + from isaaclab_newton.sensors.joint_wrench import JointWrenchSensorData as NewtonJointWrenchSensorData + from isaaclab_physx.sensors.joint_wrench import JointWrenchSensorData as PhysXJointWrenchSensorData + + +class JointWrenchSensorData(FactoryBase, BaseJointWrenchSensorData): + """Factory for creating joint-wrench sensor data instances.""" + + def __new__( + cls, *args, **kwargs + ) -> BaseJointWrenchSensorData | PhysXJointWrenchSensorData | NewtonJointWrenchSensorData: + """Create a new instance of joint-wrench sensor data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/kernels.py b/source/isaaclab/isaaclab/sensors/kernels.py new file mode 100644 index 000000000000..db6570323fec --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/kernels.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def update_timestamp_kernel( + is_outdated: wp.array(dtype=wp.bool), + timestamp: wp.array(dtype=wp.float32), + timestamp_last_update: wp.array(dtype=wp.float32), + dt: wp.float32, + update_period: wp.float32, +): + """Updates timestamp and marks environments as outdated if update period elapsed. + + Args: + is_outdated: Boolean array indicating which envs need update. + timestamp: Current timestamp per env. + timestamp_last_update: Last update timestamp per env. + dt: Simulation time step (scalar). + update_period: Period after which sensor should be updated. + """ + env = wp.tid() + new_timestamp = timestamp[env] + dt + timestamp[env] = new_timestamp + if new_timestamp - timestamp_last_update[env] + 1e-6 >= update_period: + is_outdated[env] = True + + +@wp.kernel +def update_outdated_envs_kernel( + is_outdated: wp.array(dtype=wp.bool), + timestamp: wp.array(dtype=wp.float32), + timestamp_last_update: wp.array(dtype=wp.float32), +): + """Updates timestamp and clears outdated flag for outdated environments. + + Args: + is_outdated: Boolean array indicating which envs need update. Will be set to False. + timestamp: Current timestamp per env. + timestamp_last_update: Last update timestamp per env. Will be set to current timestamp. + """ + env = wp.tid() + if is_outdated[env]: + timestamp_last_update[env] = timestamp[env] + is_outdated[env] = False + + +@wp.kernel +def reset_envs_kernel( + reset_mask: wp.array(dtype=wp.bool), + is_outdated: wp.array(dtype=wp.bool), + timestamp: wp.array(dtype=wp.float32), + timestamp_last_update: wp.array(dtype=wp.float32), +): + """Resets the current and last update timestamps and marks environments as outdated for those being reset. + + Args: + reset_mask: Boolean array indicating which envs to reset. + is_outdated: Boolean array indicating which envs need update. Will be set to True for reset envs. + timestamp: Current timestamp per env. Will be set to 0.0 for reset envs. + timestamp_last_update: Last update timestamp per env. Will be set to 0.0 for reset envs. + """ + + env = wp.tid() + if not reset_mask[env]: + return + + # Reset the timestamp for the sensors + timestamp[env] = 0.0 + + timestamp_last_update[env] = 0.0 + # Set all reset sensors to outdated so that they are updated when data is called the next time. + is_outdated[env] = True diff --git a/source/isaaclab/isaaclab/sensors/pva/__init__.py b/source/isaaclab/isaaclab/sensors/pva/__init__.py new file mode 100644 index 000000000000..281802f26a76 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +PVA Sensor +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/pva/__init__.pyi b/source/isaaclab/isaaclab/sensors/pva/__init__.pyi new file mode 100644 index 000000000000..7a8e519a6f7e --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BasePva", + "BasePvaData", + "Pva", + "PvaCfg", + "PvaData", +] + +from .base_pva import BasePva +from .base_pva_data import BasePvaData +from .pva import Pva +from .pva_cfg import PvaCfg +from .pva_data import PvaData diff --git a/source/isaaclab/isaaclab/sensors/pva/base_pva.py b/source/isaaclab/isaaclab/sensors/pva/base_pva.py new file mode 100644 index 000000000000..1c14ee454c65 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/base_pva.py @@ -0,0 +1,88 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from typing import TYPE_CHECKING + +import warp as wp + +from ..sensor_base import SensorBase +from .base_pva_data import BasePvaData + +if TYPE_CHECKING: + from .pva_cfg import PvaCfg + + +class BasePva(SensorBase): + """The Pose Velocity Acceleration (PVA) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces world-frame + pose, body-frame velocities, body-frame coordinate accelerations, and projected gravity. This differs + from the :class:`~isaaclab.sensors.imu.BaseImu` sensor, which reports proper acceleration. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + Depending on the backend, accelerations may be computed via numerical differentiation of velocities + or read directly from the solver. For numerical backends, accuracy depends on the physics timestep; + we recommend at least 200 Hz. + + .. note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation. The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. + + """ + + cfg: PvaCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the PVA sensor.""" + + def __init__(self, cfg: PvaCfg): + """Initializes the PVA sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BasePvaData: + raise NotImplementedError + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + @abstractmethod + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_mask: wp.array): + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py b/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py new file mode 100644 index 000000000000..07fcab62b0b6 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py @@ -0,0 +1,111 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for PVA sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +from isaaclab.utils.leapp import ( + POSE7_ELEMENT_NAMES, + QUAT_XYZW_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + InputKindEnum, + leapp_tensor_semantics, +) +from isaaclab.utils.warp import ProxyArray + + +class BasePvaData(ABC): + """Data container for the PVA sensor. + + This base class defines the interface for PVA sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) + def pose_w(self) -> ProxyArray | None: + """Pose of the sensor origin in world frame [m, unitless]. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) + def pos_w(self) -> ProxyArray: + """Position of the sensor origin in world frame [m]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_XYZW_ELEMENT_NAMES) + def quat_w(self) -> ProxyArray: + """Orientation of the sensor origin in world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) + def projected_gravity_b(self) -> ProxyArray: + """Gravity direction unit vector projected on the PVA frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) + def lin_vel_b(self) -> ProxyArray: + """PVA frame linear velocity relative to the world expressed in PVA frame [m/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) + def ang_vel_b(self) -> ProxyArray: + """PVA frame angular velocity relative to the world expressed in PVA frame [rad/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES) + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration (coordinate) in the PVA frame [m/s^2]. + + Equal to -g in freefall, zero at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES) + def ang_acc_b(self) -> ProxyArray: + """PVA frame angular acceleration relative to the world expressed in PVA frame [rad/s^2]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/pva/pva.py b/source/isaaclab/isaaclab/sensors/pva/pva.py new file mode 100644 index 000000000000..e65e5d9bdb8b --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/pva.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_pva import BasePva +from .base_pva_data import BasePvaData + +if TYPE_CHECKING: + from isaaclab_newton.sensors.pva import Pva as NewtonPva + from isaaclab_newton.sensors.pva import PvaData as NewtonPvaData + from isaaclab_physx.sensors.pva import Pva as PhysXPva + from isaaclab_physx.sensors.pva import PvaData as PhysXPvaData + + +class Pva(FactoryBase, BasePva): + """Factory for creating PVA sensor instances.""" + + data: BasePvaData | NewtonPvaData | PhysXPvaData + + def __new__(cls, *args, **kwargs) -> BasePva | NewtonPva | PhysXPva: + """Create a new instance of a PVA sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/pva/pva_cfg.py b/source/isaaclab/isaaclab/sensors/pva/pva_cfg.py new file mode 100644 index 000000000000..38f3d627608a --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/pva_cfg.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.markers import VisualizationMarkersCfg +from isaaclab.markers.config import RED_ARROW_X_MARKER_CFG +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg + +if TYPE_CHECKING: + from .pva import Pva + + +@configclass +class PvaCfg(SensorBaseCfg): + """Configuration for a Pose Velocity Acceleration (PVA) sensor.""" + + class_type: type[Pva] | str = "{DIR}.pva:Pva" + + @configclass + class OffsetCfg: + """The offset pose of the sensor's frame from the sensor's parent frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame [m]. Defaults to (0.0, 0.0, 0.0).""" + + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" + + offset: OffsetCfg = OffsetCfg() + """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" + + visualizer_cfg: VisualizationMarkersCfg = RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Command/velocity_goal") + """The configuration object for the visualization markers. Defaults to RED_ARROW_X_MARKER_CFG. + + This attribute is only used when debug visualization is enabled. + """ diff --git a/source/isaaclab/isaaclab/sensors/pva/pva_data.py b/source/isaaclab/isaaclab/sensors/pva/pva_data.py new file mode 100644 index 000000000000..e1a614772a18 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/pva/pva_data.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Re-exports the base PVA data class.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_pva_data import BasePvaData + +if TYPE_CHECKING: + from isaaclab_newton.sensors.pva import PvaData as NewtonPvaData + from isaaclab_physx.sensors.pva import PvaData as PhysXPvaData + + +class PvaData(FactoryBase, BasePvaData): + """Factory for creating PVA data instances.""" + + def __new__(cls, *args, **kwargs) -> BasePvaData | NewtonPvaData | PhysXPvaData: + """Create a new instance of PVA data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/__init__.py b/source/isaaclab/isaaclab/sensors/ray_caster/__init__.py index 06f479ed2ee8..6c7a98bb9e14 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/__init__.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/__init__.py @@ -16,14 +16,7 @@ """ from . import patterns -from .multi_mesh_ray_caster import MultiMeshRayCaster -from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera -from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg -from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData -from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg -from .multi_mesh_ray_caster_data import MultiMeshRayCasterData -from .ray_caster import RayCaster -from .ray_caster_camera import RayCasterCamera -from .ray_caster_camera_cfg import RayCasterCameraCfg -from .ray_caster_cfg import RayCasterCfg -from .ray_caster_data import RayCasterData + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/__init__.pyi b/source/isaaclab/isaaclab/sensors/ray_caster/__init__.pyi new file mode 100644 index 000000000000..44b76de2fa20 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/ray_caster/__init__.pyi @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MultiMeshRayCaster", + "MultiMeshRayCasterCamera", + "MultiMeshRayCasterCameraCfg", + "MultiMeshRayCasterCameraData", + "MultiMeshRayCasterCfg", + "MultiMeshRayCasterData", + "RayCaster", + "RayCasterCamera", + "RayCasterCameraCfg", + "RayCasterCfg", + "RayCasterData", + "patterns", +] + +from .multi_mesh_ray_caster import MultiMeshRayCaster +from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera +from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg +from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData +from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg +from .multi_mesh_ray_caster_data import MultiMeshRayCasterData +from .ray_caster import RayCaster +from .ray_caster_camera import RayCasterCamera +from .ray_caster_camera_cfg import RayCasterCameraCfg +from .ray_caster_cfg import RayCasterCfg +from .ray_caster_data import RayCasterData +from . import patterns diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/kernels.py b/source/isaaclab/isaaclab/sensors/ray_caster/kernels.py new file mode 100644 index 000000000000..98c54ea7141d --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/ray_caster/kernels.py @@ -0,0 +1,261 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for the ray caster sensor.""" + +import warp as wp + +ALIGNMENT_WORLD = wp.constant(0) +ALIGNMENT_YAW = wp.constant(1) +ALIGNMENT_BASE = wp.constant(2) + +# Upper-bound ray-cast distance [m] used by camera classes. The actual depth-clipping is applied +# as a post-process step per data type, so the kernel is always given a large budget. +CAMERA_RAYCAST_MAX_DIST: float = 1e6 + + +@wp.func +def quat_yaw_only( + # input + q: wp.quatf, +) -> wp.quatf: + """Extract the yaw-only quaternion from a general quaternion. + + Equivalent to :func:`isaaclab.utils.math.yaw_quat`: extracts the yaw angle via + ``atan2(2*(qw*qz + qx*qy), 1 - 2*(qy^2 + qz^2))`` and returns a pure-yaw quaternion + ``(0, 0, sin(yaw/2), cos(yaw/2))``. This is correct for all orientations, including + those with non-zero roll and pitch. + """ + qx = q[0] + qy = q[1] + qz = q[2] + qw = q[3] + yaw = wp.atan2(2.0 * (qw * qz + qx * qy), 1.0 - 2.0 * (qy * qy + qz * qz)) + half_yaw = yaw * 0.5 + return wp.quatf(0.0, 0.0, wp.sin(half_yaw), wp.cos(half_yaw)) + + +@wp.kernel(enable_backward=False) +def update_ray_caster_kernel( + # input + transforms: wp.array(dtype=wp.transformf), + env_mask: wp.array(dtype=wp.bool), + offset_pos: wp.array(dtype=wp.vec3f), + offset_quat: wp.array(dtype=wp.quatf), + drift: wp.array(dtype=wp.vec3f), + ray_cast_drift: wp.array(dtype=wp.vec3f), + ray_starts_local: wp.array2d(dtype=wp.vec3f), + ray_directions_local: wp.array2d(dtype=wp.vec3f), + alignment_mode: int, + # output + pos_w: wp.array(dtype=wp.vec3f), + quat_w: wp.array(dtype=wp.quatf), + ray_starts_w: wp.array2d(dtype=wp.vec3f), + ray_directions_w: wp.array2d(dtype=wp.vec3f), +): + """Compute sensor world poses and transform rays into world frame. + + Combines the PhysX view transform with the sensor offset, applies drift, + and transforms local ray starts/directions according to the alignment mode. + + Launch with dim=(num_envs, num_rays). + + Args: + transforms: World transforms from PhysX view. Shape is (num_envs,). + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + offset_pos: Per-env position offset [m] from view to sensor. Shape is (num_envs,). + offset_quat: Per-env quaternion offset from view to sensor. Shape is (num_envs,). + drift: Per-env position drift [m]. Shape is (num_envs,). + ray_cast_drift: Per-env ray cast drift [m]. Shape is (num_envs,). + After rotation by the alignment quaternion, only the x and y components + are applied to the ray start position; the z component of the sensor + position is preserved. + ray_starts_local: Per-env local ray start positions [m]. Shape is (num_envs, num_rays). + ray_directions_local: Per-env local ray directions (unit vectors). Shape is (num_envs, num_rays). + alignment_mode: 0=world, 1=yaw, 2=base. + pos_w: Output sensor position in world frame [m]. Shape is (num_envs,). + quat_w: Output sensor orientation in world frame. Shape is (num_envs,). + ray_starts_w: Output world-frame ray starts [m]. Shape is (num_envs, num_rays). + ray_directions_w: Output world-frame ray directions (unit vectors). Shape is (num_envs, num_rays). + """ + env_id, ray_id = wp.tid() + if not env_mask[env_id]: + return + + t = transforms[env_id] + view_pos = wp.transform_get_translation(t) + view_quat = wp.transform_get_rotation(t) + + # combine_frame_transforms: q02 = q01 * q12, t02 = t01 + quat_rotate(q01, t12) + combined_quat = view_quat * offset_quat[env_id] + combined_pos = view_pos + wp.quat_rotate(view_quat, offset_pos[env_id]) + + combined_pos = combined_pos + drift[env_id] + + if ray_id == 0: + pos_w[env_id] = combined_pos + quat_w[env_id] = combined_quat + + local_start = ray_starts_local[env_id, ray_id] + local_dir = ray_directions_local[env_id, ray_id] + rcd = ray_cast_drift[env_id] + + if alignment_mode == ALIGNMENT_WORLD: + pos_drifted = wp.vec3f(combined_pos[0] + rcd[0], combined_pos[1] + rcd[1], combined_pos[2]) + ray_starts_w[env_id, ray_id] = local_start + pos_drifted + ray_directions_w[env_id, ray_id] = local_dir + elif alignment_mode == ALIGNMENT_YAW: + yaw_q = quat_yaw_only(combined_quat) + rot_drift = wp.quat_rotate(yaw_q, rcd) + pos_drifted = wp.vec3f(combined_pos[0] + rot_drift[0], combined_pos[1] + rot_drift[1], combined_pos[2]) + ray_starts_w[env_id, ray_id] = wp.quat_rotate(yaw_q, local_start) + pos_drifted + # Ray DIRECTIONS are intentionally NOT rotated in yaw mode: the sensor's ray pattern + # (e.g. straight-down (0,0,-1) for a height scanner) stays fixed in world frame. + # Only ray STARTS are rotated by the yaw-only quaternion so the scan footprint + # follows the body heading without tilting when the body pitches or rolls. + ray_directions_w[env_id, ray_id] = local_dir + else: + rot_drift = wp.quat_rotate(combined_quat, rcd) + pos_drifted = wp.vec3f(combined_pos[0] + rot_drift[0], combined_pos[1] + rot_drift[1], combined_pos[2]) + ray_starts_w[env_id, ray_id] = wp.quat_rotate(combined_quat, local_start) + pos_drifted + ray_directions_w[env_id, ray_id] = wp.quat_rotate(combined_quat, local_dir) + + +@wp.kernel(enable_backward=False) +def fill_vec3_inf_kernel( + # input + env_mask: wp.array(dtype=wp.bool), + inf_val: wp.float32, + # output + data: wp.array2d(dtype=wp.vec3f), +): + """Fill a 2D vec3f array with a given value for masked environments. + + Launch with dim=(num_envs, num_rays). + + Args: + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + inf_val: Value to fill with (typically inf). + data: Array to fill. Shape is (num_envs, num_rays). + """ + env, ray = wp.tid() + if not env_mask[env]: + return + data[env, ray] = wp.vec3f(inf_val, inf_val, inf_val) + + +@wp.kernel(enable_backward=False) +def apply_z_drift_kernel( + # input + env_mask: wp.array(dtype=wp.bool), + ray_cast_drift: wp.array(dtype=wp.vec3f), + # output + ray_hits: wp.array2d(dtype=wp.vec3f), +): + """Apply vertical (z) drift to ray hit positions for masked environments. + + Launch with dim=(num_envs, num_rays). + + Args: + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + ray_cast_drift: Per-env drift vector [m]; only z-component is used. Shape is (num_envs,). + ray_hits: Ray hit positions to modify in-place. Shape is (num_envs, num_rays). + """ + env, ray = wp.tid() + if not env_mask[env]: + return + hit = ray_hits[env, ray] + ray_hits[env, ray] = wp.vec3f(hit[0], hit[1], hit[2] + ray_cast_drift[env][2]) + + +@wp.kernel(enable_backward=False) +def fill_float2d_masked_kernel( + # input + env_mask: wp.array(dtype=wp.bool), + val: wp.float32, + # output + data: wp.array2d(dtype=wp.float32), +): + """Fill a 2D float32 array with a given value for masked environments. + + Launch with dim=(num_envs, num_rays). + + Args: + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + val: Value to fill with. + data: Array to fill. Shape is (num_envs, num_rays). + """ + env, ray = wp.tid() + if not env_mask[env]: + return + data[env, ray] = val + + +@wp.kernel(enable_backward=False) +def compute_distance_to_image_plane_masked_kernel( + # input + env_mask: wp.array(dtype=wp.bool), + quat_w: wp.array(dtype=wp.quatf), + ray_distance: wp.array2d(dtype=wp.float32), + ray_directions_w: wp.array2d(dtype=wp.vec3f), + # output + distance_to_image_plane: wp.array2d(dtype=wp.float32), +): + """Compute distance-to-image-plane from ray depth and direction for masked environments. + + The distance to the image plane is the signed projection of the hit displacement + (``ray_distance * ray_direction_w``) onto the camera forward axis (+X in world convention). + This equals the x-component of the hit vector in the camera frame. + + Launch with dim=(num_envs, num_rays). + + Args: + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + quat_w: Camera orientation in world frame (x, y, z, w). Shape is (num_envs,). + ray_distance: Per-ray hit distances [m]. Shape is (num_envs, num_rays). + Contains inf for missed rays. + ray_directions_w: World-frame unit ray directions. Shape is (num_envs, num_rays). + distance_to_image_plane: Output distance-to-image-plane [m]. Shape is (num_envs, num_rays). + """ + env, ray = wp.tid() + if not env_mask[env]: + return + + depth = ray_distance[env, ray] + dir_w = ray_directions_w[env, ray] + # displacement vector in world frame + disp_w = wp.vec3f(depth * dir_w[0], depth * dir_w[1], depth * dir_w[2]) + # rotate into camera frame (quat_rotate_inv applies q^-1 * v * q) + disp_cam = wp.quat_rotate_inv(quat_w[env], disp_w) + # x-component is the forward (depth) axis of the camera in world convention + distance_to_image_plane[env, ray] = disp_cam[0] + + +@wp.kernel(enable_backward=False) +def apply_depth_clipping_masked_kernel( + # input + env_mask: wp.array(dtype=wp.bool), + max_dist: wp.float32, + fill_val: wp.float32, + # output + depth: wp.array2d(dtype=wp.float32), +): + """Clip depth values in-place, replacing values above max_dist or NaN with fill_val. + + Launch with dim=(num_envs, num_rays). + + Args: + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + max_dist: Maximum depth threshold [m]. + fill_val: Replacement value [m] written for depths exceeding max_dist or NaN. + Pass ``max_dist`` for "max" clipping or ``0.0`` for "zero" clipping. + depth: Depth buffer to clip in-place. Shape is (num_envs, num_rays). + """ + env, ray = wp.tid() + if not env_mask[env]: + return + val = depth[env, ray] + if val > max_dist or wp.isnan(val): + depth[env, ray] = fill_val diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster.py index 39be0d7ca0d8..f9d0493a0f61 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster.py @@ -7,7 +7,6 @@ import logging import re -from collections.abc import Sequence from typing import TYPE_CHECKING, ClassVar import numpy as np @@ -15,22 +14,20 @@ import trimesh import warp as wp -import omni.physics.tensors.impl.api as physx - import isaaclab.sim as sim_utils -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.math import matrix_from_quat, quat_mul +from isaaclab.sim.views import BaseFrameView, FrameView +from isaaclab.utils.math import matrix_from_quat from isaaclab.utils.mesh import PRIMITIVE_MESH_TYPES, create_trimesh_from_geom_mesh, create_trimesh_from_geom_shape -from isaaclab.utils.warp import convert_to_warp_mesh, raycast_dynamic_meshes +from isaaclab.utils.warp import convert_to_warp_mesh +from isaaclab.utils.warp import kernels as warp_kernels +from .kernels import fill_float2d_masked_kernel, fill_vec3_inf_kernel from .multi_mesh_ray_caster_data import MultiMeshRayCasterData -from .ray_cast_utils import obtain_world_pose_from_view from .ray_caster import RayCaster if TYPE_CHECKING: from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg -# import logger logger = logging.getLogger(__name__) @@ -42,8 +39,8 @@ class MultiMeshRayCaster(RayCaster): a set of meshes with a given ray pattern. The meshes are parsed from the list of primitive paths provided in the configuration. These are then - converted to warp meshes and stored in the :attr:`meshes` list. The ray-caster then ray-casts against - these warp meshes using the ray pattern provided in the configuration. + converted to warp meshes and stored in the :attr:`meshes` dictionary. The ray-caster then ray-casts + against these warp meshes using the ray pattern provided in the configuration. Compared to the default RayCaster, the MultiMeshRayCaster provides additional functionality and flexibility as an extension of the default RayCaster with the following enhancements: @@ -54,6 +51,15 @@ class MultiMeshRayCaster(RayCaster): (e.g., robot links, articulated bodies, or dynamic obstacles). - Memory-efficient caching : Avoids redundant memory usage by reusing mesh data across environments. + .. warning:: + **Known limitation (multi-mesh closest-hit resolution):** When two meshes produce a + hit at the exact same distance for a given ray, the ``atomic_min`` + equality-check + pattern in the raycasting kernel is not fully thread-safe. The hit *position* is always + correct, but auxiliary outputs (normals, face IDs, mesh IDs) may originate from + different meshes for the affected ray. This requires an exact floating-point tie and is + rare in practice. See `warp#1058 `_ for + upstream progress on a thread-safe ``atomic_min`` return value. + Example usage to raycast against the visual meshes of a robot (e.g. ANYmal): .. code-block:: python @@ -77,9 +83,7 @@ class MultiMeshRayCaster(RayCaster): cfg: MultiMeshRayCasterCfg """The configuration parameters.""" - mesh_offsets: dict[str, tuple[torch.Tensor, torch.Tensor]] = {} - - mesh_views: ClassVar[dict[str, XformPrimView | physx.ArticulationView | physx.RigidBodyView]] = {} + mesh_views: ClassVar[dict[str, BaseFrameView]] = {} """A dictionary to store mesh views for raycasting, shared across all instances. The keys correspond to the prim path for the mesh views, and values are the corresponding view objects. @@ -91,33 +95,24 @@ def __init__(self, cfg: MultiMeshRayCasterCfg): Args: cfg: The configuration parameters. """ - # Initialize base class super().__init__(cfg) - # Create empty variables for storing output data self._num_meshes_per_env: dict[str, int] = {} - """Keeps track of the number of meshes per env for each ray_cast target. - Since we allow regex indexing (e.g. env_*/object_*) they can differ - """ self._raycast_targets_cfg: list[MultiMeshRayCasterCfg.RaycastTargetCfg] = [] for target in self.cfg.mesh_prim_paths: - # Legacy support for string targets. Treat them as global targets. if isinstance(target, str): self._raycast_targets_cfg.append(cfg.RaycastTargetCfg(prim_expr=target, track_mesh_transforms=False)) else: self._raycast_targets_cfg.append(target) - # Resolve regex namespace if set for cfg in self._raycast_targets_cfg: cfg.prim_expr = cfg.prim_expr.format(ENV_REGEX_NS="/World/envs/env_.*") - # overwrite the data class self._data = MultiMeshRayCasterData() def __str__(self) -> str: """Returns: A string containing information about the instance.""" - return ( f"Ray-caster @ '{self.cfg.prim_path}': \n" f"\tview type : {self._view.__class__}\n" @@ -134,9 +129,7 @@ def __str__(self) -> str: @property def data(self) -> MultiMeshRayCasterData: - # update sensors if needed self._update_outdated_buffers() - # return the data return self._data """ @@ -164,9 +157,8 @@ def _initialize_warp_meshes(self): """ multi_mesh_ids: dict[str, list[list[int]]] = {} for target_cfg in self._raycast_targets_cfg: - # target prim path to ray cast against target_prim_path = target_cfg.prim_expr - # # check if mesh already casted into warp mesh and skip if so. + # check if mesh already casted into warp mesh and skip if so. if target_prim_path in multi_mesh_ids: logger.warning( f"Mesh at target prim path '{target_prim_path}' already exists in the mesh cache. Duplicate entries" @@ -174,32 +166,29 @@ def _initialize_warp_meshes(self): ) continue - # find all matching prim paths to provided expression of the target target_prims = sim_utils.find_matching_prims(target_prim_path) if len(target_prims) == 0: raise RuntimeError(f"Failed to find a prim at path expression: {target_prim_path}") - # If only one prim is found, treat it as a global prim. - # Either it's a single global object (e.g. ground) or we are only using one env. is_global_prim = len(target_prims) == 1 loaded_vertices: list[np.ndarray | None] = [] wp_mesh_ids = [] for target_prim in target_prims: - # Reuse previously parsed shared mesh instance if possible. if target_cfg.is_shared and len(wp_mesh_ids) > 0: # Verify if this mesh has already been registered in an earlier environment. # Note, this check may fail, if the prim path is not following the env_.* pattern # Which (worst case) leads to parsing the mesh and skipping registering it at a later stage - curr_prim_base_path = re.sub(r"env_\d+", "env_0", str(target_prim.GetPath())) # - if curr_prim_base_path in MultiMeshRayCaster.meshes: - MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = MultiMeshRayCaster.meshes[ - curr_prim_base_path - ] - # Reuse mesh imported by another ray-cast sensor (global cache). - if str(target_prim.GetPath()) in MultiMeshRayCaster.meshes: - wp_mesh_ids.append(MultiMeshRayCaster.meshes[str(target_prim.GetPath())].id) + curr_prim_base_path = re.sub(r"env_\d+", "env_0", str(target_prim.GetPath())) + base_key = (curr_prim_base_path, self._device) + if base_key in MultiMeshRayCaster.meshes: + MultiMeshRayCaster.meshes[(str(target_prim.GetPath()), self._device)] = ( + MultiMeshRayCaster.meshes[base_key] + ) + prim_key = (str(target_prim.GetPath()), self._device) + if prim_key in MultiMeshRayCaster.meshes: + wp_mesh_ids.append(MultiMeshRayCaster.meshes[prim_key].id) loaded_vertices.append(None) continue @@ -220,7 +209,6 @@ def _initialize_warp_meshes(self): trimesh_meshes = [] for mesh_prim in mesh_prims: - # check if valid if mesh_prim is None or not mesh_prim.IsValid(): raise RuntimeError(f"Invalid mesh prim path: {target_prim}") @@ -241,13 +229,11 @@ def _initialize_warp_meshes(self): transform[:3, 3] = relative_pos.numpy() mesh.apply_transform(transform) - # add to list of parsed meshes trimesh_meshes.append(mesh) if len(trimesh_meshes) == 1: trimesh_mesh = trimesh_meshes[0] elif target_cfg.merge_prim_meshes: - # combine all trimesh meshes into a single mesh trimesh_mesh = trimesh.util.concatenate(trimesh_meshes) else: raise RuntimeError( @@ -255,20 +241,17 @@ def _initialize_warp_meshes(self): " enable `merge_prim_meshes` in the configuration or specify each mesh separately." ) - # check if the mesh is already registered, if so only reference the mesh registered_idx = _registered_points_idx(trimesh_mesh.vertices, loaded_vertices) if registered_idx != -1 and self.cfg.reference_meshes: logger.info("Found a duplicate mesh, only reference the mesh.") - # Found a duplicate mesh, only reference the mesh. loaded_vertices.append(None) wp_mesh_ids.append(wp_mesh_ids[registered_idx]) else: loaded_vertices.append(trimesh_mesh.vertices) - wp_mesh = convert_to_warp_mesh(trimesh_mesh.vertices, trimesh_mesh.faces, device=self.device) - MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = wp_mesh + wp_mesh = convert_to_warp_mesh(trimesh_mesh.vertices, trimesh_mesh.faces, device=self._device) + MultiMeshRayCaster.meshes[(str(target_prim.GetPath()), self._device)] = wp_mesh wp_mesh_ids.append(wp_mesh.id) - # print info if registered_idx != -1: logger.info(f"Found duplicate mesh for mesh prims under path '{target_prim.GetPath()}'.") else: @@ -278,12 +261,9 @@ def _initialize_warp_meshes(self): ) if is_global_prim: - # reference the mesh for each environment to ray cast against multi_mesh_ids[target_prim_path] = [wp_mesh_ids] * self._num_envs self._num_meshes_per_env[target_prim_path] = len(wp_mesh_ids) else: - # split up the meshes for each environment. Little bit ugly, since - # the current order is interleaved (env1_obj1, env1_obj2, env2_obj1, env2_obj2, ...) multi_mesh_ids[target_prim_path] = [] mesh_idx = 0 n_meshes_per_env = len(wp_mesh_ids) // self._num_envs @@ -293,26 +273,28 @@ def _initialize_warp_meshes(self): mesh_idx += n_meshes_per_env if target_cfg.track_mesh_transforms: - MultiMeshRayCaster.mesh_views[target_prim_path], MultiMeshRayCaster.mesh_offsets[target_prim_path] = ( - self._obtain_trackable_prim_view(target_prim_path) + MultiMeshRayCaster.mesh_views[target_prim_path] = FrameView( + target_prim_path, device=self._device, stage=self.stage ) - # throw an error if no meshes are found if all([target_cfg.prim_expr not in multi_mesh_ids for target_cfg in self._raycast_targets_cfg]): raise RuntimeError( f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" ) total_n_meshes_per_env = sum(self._num_meshes_per_env.values()) - self._mesh_positions_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 3, device=self.device) - self._mesh_orientations_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 4, device=self.device) + self._mesh_positions_w = wp.zeros((self._num_envs, total_n_meshes_per_env), dtype=wp.vec3, device=self.device) + self._mesh_orientations_w = wp.zeros( + (self._num_envs, total_n_meshes_per_env), dtype=wp.quat, device=self.device + ) + # Zero-copy torch views for writing from physics view results (torch tensors) + self._mesh_positions_w_torch = wp.to_torch(self._mesh_positions_w) + self._mesh_orientations_w_torch = wp.to_torch(self._mesh_orientations_w) - # Update the mesh positions and rotations mesh_idx = 0 for target_cfg in self._raycast_targets_cfg: n_meshes = self._num_meshes_per_env[target_cfg.prim_expr] - # update position of the target meshes pos_w, ori_w = [], [] for prim in sim_utils.find_matching_prims(target_cfg.prim_expr): translation, quat = sim_utils.resolve_prim_pose(prim) @@ -321,11 +303,10 @@ def _initialize_warp_meshes(self): pos_w = torch.tensor(pos_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 3) ori_w = torch.tensor(ori_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 4) - self._mesh_positions_w[:, mesh_idx : mesh_idx + n_meshes] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + n_meshes] = ori_w + self._mesh_positions_w_torch[:, mesh_idx : mesh_idx + n_meshes] = pos_w + self._mesh_orientations_w_torch[:, mesh_idx : mesh_idx + n_meshes] = ori_w mesh_idx += n_meshes - # flatten the list of meshes that are included in mesh_prim_paths of the specific ray caster multi_mesh_ids_flattened = [] for env_idx in range(self._num_envs): meshes_in_env = [] @@ -338,26 +319,30 @@ def _initialize_warp_meshes(self): for target_cfg in self._raycast_targets_cfg ] - # save a warp array with mesh ids that is passed to the raycast function self._mesh_ids_wp = wp.array2d(multi_mesh_ids_flattened, dtype=wp.uint64, device=self.device) def _initialize_rays_impl(self): super()._initialize_rays_impl() + # Persistent buffer for tracking closest-hit distance across meshes (for atomic_min) + self._ray_distance_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.float32, device=self._device) if self.cfg.update_mesh_ids: - self._data.ray_mesh_ids = torch.zeros( - self._num_envs, self.num_rays, 1, device=self.device, dtype=torch.int16 - ) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data. - - Args: - env_ids: The environment ids to update. + self._ray_mesh_id_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.int16, device=self._device) + # Zero-copy torch view with the trailing dim expected by consumers of ray_mesh_ids + self._data.ray_mesh_ids = wp.to_torch(self._ray_mesh_id_w).unsqueeze(-1) + else: + # Dummy 1×1 buffer so the kernel launch always has a valid array to bind + self._ray_mesh_id_w = wp.empty((1, 1), dtype=wp.int16, device=self._device) + # Persistent dummy buffers for unused kernel outputs; allocated once to avoid per-step allocations. + self._dummy_normal_w = wp.empty((1, 1), dtype=wp.vec3, device=self._device) + self._dummy_face_id_w = wp.empty((1, 1), dtype=wp.int32, device=self._device) + + def _update_mesh_transforms(self) -> None: + """Update world-frame mesh positions and orientations for dynamically tracked targets. + + Iterates over all tracked views and writes the current world poses into + ``_mesh_positions_w_torch`` and ``_mesh_orientations_w_torch``. Static (non-tracked) + targets are skipped; their initial poses were set during :meth:`_initialize_warp_meshes`. """ - - self._update_ray_infos(env_ids) - - # Update the mesh positions and rotations mesh_idx = 0 for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): if not target_cfg.track_mesh_transforms: @@ -365,42 +350,75 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): continue # update position of the target meshes - pos_w, ori_w = obtain_world_pose_from_view(view, None) + pos_w, ori_w = view.get_world_poses(None) + pos_w, ori_w = pos_w.torch, ori_w.torch pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w - if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: - pos_offset, ori_offset = MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] - pos_w -= pos_offset - ori_w = quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) - count = view.count - if count != 1: # Mesh is not global, i.e. we have different meshes for each env + if count != 1: count = count // self._num_envs pos_w = pos_w.view(self._num_envs, count, 3) ori_w = ori_w.view(self._num_envs, count, 4) - self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w + self._mesh_positions_w_torch[:, mesh_idx : mesh_idx + count] = pos_w + self._mesh_orientations_w_torch[:, mesh_idx : mesh_idx + count] = ori_w mesh_idx += count - self._data.ray_hits_w[env_ids], _, _, _, mesh_ids = raycast_dynamic_meshes( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env - max_dist=self.cfg.max_distance, - mesh_positions_w=self._mesh_positions_w[env_ids], - mesh_orientations_w=self._mesh_orientations_w[env_ids], - return_mesh_id=self.cfg.update_mesh_ids, + def _update_buffers_impl(self, env_mask: wp.array): + """Fills the buffers of the sensor data.""" + self._update_ray_infos(env_mask) + self._update_mesh_transforms() + + n_meshes = self._mesh_ids_wp.shape[1] + + # Fill output and distance buffers with inf for masked environments + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._data._ray_hits_w], + device=self._device, + ) + wp.launch( + fill_float2d_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_distance_w], + device=self._device, ) - if self.cfg.update_mesh_ids: - self._data.ray_mesh_ids[env_ids] = mesh_ids + # Ray-cast against all meshes; closest hit wins via atomic_min on ray_distance + wp.launch( + warp_kernels.raycast_dynamic_meshes_kernel, + dim=(n_meshes, self._num_envs, self.num_rays), + inputs=[ + env_mask, + self._mesh_ids_wp, + self._ray_starts_w, + self._ray_directions_w, + self._data._ray_hits_w, + self._ray_distance_w, + self._dummy_normal_w, + self._dummy_face_id_w, + self._ray_mesh_id_w, + self._mesh_positions_w, + self._mesh_orientations_w, + float(self.cfg.max_distance), + int(False), + int(False), + int(self.cfg.update_mesh_ids), + ], + device=self._device, + ) + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) + # clear mesh views so they are re-created on the next initialization + MultiMeshRayCaster.mesh_views.clear() def __del__(self): super().__del__() if RayCaster._instance_count == 0: - MultiMeshRayCaster.mesh_offsets.clear() MultiMeshRayCaster.mesh_views.clear() diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py index 970860fa50ae..a14c16f3d0d9 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py @@ -5,17 +5,22 @@ from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING import torch +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.utils.warp import raycast_dynamic_meshes - +from isaaclab.utils.warp import kernels as warp_kernels + +from .kernels import ( + CAMERA_RAYCAST_MAX_DIST, + compute_distance_to_image_plane_masked_kernel, + fill_float2d_masked_kernel, + fill_vec3_inf_kernel, +) from .multi_mesh_ray_caster import MultiMeshRayCaster from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData -from .ray_cast_utils import obtain_world_pose_from_view from .ray_caster_camera import RayCasterCamera if TYPE_CHECKING: @@ -84,138 +89,205 @@ def _create_buffers(self): ) def _initialize_rays_impl(self): - # Create all indices buffer + # NOTE: This method intentionally does NOT call super()._initialize_rays_impl() through the MRO + # chain. The intermediate classes (RayCasterCamera, MultiMeshRayCaster) use different internal + # buffer names and orderings that are incompatible with the camera's full init path: + # - RayCasterCamera creates single-mesh ray buffers (_ray_distance, _ray_normal_w, etc.) + # - MultiMeshRayCaster creates _ray_distance_w / _ray_mesh_id_w for multi-mesh use + # The camera replaces all of these with its own camera-named equivalents below. + # If either parent class gains new shared buffers, they must be added here explicitly. + + # Camera-specific bookkeeping buffers self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - # create buffers + + # Build camera output buffers (intrinsics, image data, etc.) self._create_buffers() - # compute intrinsic matrices self._compute_intrinsic_matrices() - # compute ray stars and directions - self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( + + # Compute local ray starts/directions from the camera pattern (torch, init-time only) + ray_starts_local, ray_directions_local = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device ) - self.num_rays = self.ray_directions.shape[1] - # create buffer to store ray hits - self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) - # set offsets - quat_w = math_utils.convert_camera_frame_orientation_convention( - torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" + self.num_rays = ray_directions_local.shape[1] + + # Store local (sensor-frame) ray arrays as torch tensors for per-env camera-convention rotation + self.ray_starts = ray_starts_local + self.ray_directions = ray_directions_local + + # Camera-frame offset: convert from cfg convention to world convention + quat_offset = math_utils.convert_camera_frame_orientation_convention( + torch.tensor([self.cfg.offset.rot], device=self._device), + origin=self.cfg.offset.convention, + target="world", ) - self._offset_quat = quat_w.repeat(self._view.count, 1) + self._offset_quat = quat_offset.repeat(self._view.count, 1) self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) + # Camera pose buffers (torch, part of CameraData) + self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) + self._data.quat_w_world = torch.zeros(self._view.count, 4, device=self._device) + # Warp-backed camera orientation buffer for warp kernel calls; + # updated from self._data.quat_w_world in _update_ray_infos. + self._quat_w_wp = wp.zeros(self._view.count, dtype=wp.quatf, device=self._device) + self._quat_w_wp_torch = wp.to_torch(self._quat_w_wp) + + # Warp buffer for distance_to_image_plane output (if requested) + if "distance_to_image_plane" in self.cfg.data_types: + self._distance_to_image_plane_wp = wp.zeros( + (self._view.count, self.num_rays), dtype=wp.float32, device=self._device + ) + + # World-frame ray buffers: allocate as warp arrays first, then create zero-copy torch views. + # Keeping warp arrays as primary storage avoids lifetime issues when passing to kernels. + self._ray_starts_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + self._ray_directions_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + # Zero-copy torch views used for indexing and post-processing + self._ray_starts_w_torch = wp.to_torch(self._ray_starts_w) + self._ray_directions_w_torch = wp.to_torch(self._ray_directions_w) + + # Ray hit positions as a warp array; expose a torch view for debug visualisation + self._ray_hits_w_cam = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + self.ray_hits_w = wp.to_torch(self._ray_hits_w_cam) + + # Per-ray closest-hit distance for atomic_min across meshes + self._ray_distance_cam_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.float32, device=self._device) + + # Optional normal buffer (always allocated; filled only when "normals" is requested) + self._ray_normal_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + + # Mesh-id buffers from MultiMeshRayCaster._initialize_rays_impl + if self.cfg.update_mesh_ids: + self._ray_mesh_id_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.int16, device=self._device) + self._data.ray_mesh_ids = wp.to_torch(self._ray_mesh_id_w).unsqueeze(-1) + else: + self._ray_mesh_id_w = wp.empty((1, 1), dtype=wp.int16, device=self._device) - self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + # Dummy face-id buffer (not used by camera but required by kernel signature) + self._ray_face_id_w = wp.empty((1, 1), dtype=wp.int32, device=self._device) - def _update_ray_infos(self, env_ids: Sequence[int]): - """Updates the ray information buffers.""" + def _update_ray_infos(self, env_mask: wp.array): + """Updates camera poses and world-frame ray buffers for masked environments. - # compute poses from current view - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) + Args: + env_mask: Boolean mask selecting which environments to update. Shape is (num_envs,). + """ + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return + + # Compute camera world poses by composing view pose with sensor offset + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) + pos_w, quat_w = self._view.get_world_poses(indices) + pos_w, quat_w = pos_w.torch, quat_w.torch pos_w, quat_w = math_utils.combine_frame_transforms( pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] ) - # update the data + # Store camera pose in CameraData (torch tensors) and warp-backed orientation buffer self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w - self._data.quat_w_ros[env_ids] = quat_w + self._quat_w_wp_torch[env_ids] = quat_w - # note: full orientation is considered - ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) + # Rotate local ray starts and directions into world frame using full camera orientation + quat_w_repeated = quat_w.repeat(1, self.num_rays).reshape(-1, 4) + ray_starts_local = self.ray_starts[env_ids].reshape(-1, 3) + ray_dirs_local = self.ray_directions[env_ids].reshape(-1, 3) + + ray_starts_world = math_utils.quat_apply(quat_w_repeated, ray_starts_local).reshape( + len(env_ids), self.num_rays, 3 + ) + ray_starts_world += pos_w.unsqueeze(1) + ray_dirs_world = math_utils.quat_apply(quat_w_repeated, ray_dirs_local).reshape(len(env_ids), self.num_rays, 3) - self._ray_starts_w[env_ids] = ray_starts_w - self._ray_directions_w[env_ids] = ray_directions_w + # Write back into the warp-backed buffers via zero-copy torch views + self._ray_starts_w_torch[env_ids] = ray_starts_world + self._ray_directions_w_torch[env_ids] = ray_dirs_world - def _update_buffers_impl(self, env_ids: Sequence[int] | torch.Tensor | None): + def _update_buffers_impl(self, env_mask: wp.array): """Fills the buffers of the sensor data.""" - self._update_ray_infos(env_ids) + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return - # increment frame count - if env_ids is None: - env_ids = torch.arange(self._num_envs, device=self.device) - elif not isinstance(env_ids, torch.Tensor): - env_ids = torch.tensor(env_ids, device=self.device) + self._update_ray_infos(env_mask) + # Increment frame count for updated environments self._frame[env_ids] += 1 - # Update the mesh positions and rotations - mesh_idx = 0 - for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): - if not target_cfg.track_mesh_transforms: - mesh_idx += self._num_meshes_per_env[target_cfg.prim_expr] - continue - - # update position of the target meshes - pos_w, ori_w = obtain_world_pose_from_view(view, None) - pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w - ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w - - if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: - pos_offset, ori_offset = MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] - pos_w -= pos_offset - ori_w = math_utils.quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) - - count = view.count - if count != 1: # Mesh is not global, i.e. we have different meshes for each env - count = count // self._num_envs - pos_w = pos_w.view(self._num_envs, count, 3) - ori_w = ori_w.view(self._num_envs, count, 4) - - self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w - mesh_idx += count - - # ray cast and store the hits - self.ray_hits_w[env_ids], ray_depth, ray_normal, _, ray_mesh_ids = raycast_dynamic_meshes( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env - max_dist=self.cfg.max_distance, - mesh_positions_w=self._mesh_positions_w[env_ids], - mesh_orientations_w=self._mesh_orientations_w[env_ids], - return_distance=any( - [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] - ), - return_normal="normals" in self.cfg.data_types, - return_mesh_id=self.cfg.update_mesh_ids, + self._update_mesh_transforms() + + n_meshes = self._mesh_ids_wp.shape[1] + return_normal = "normals" in self.cfg.data_types + + # Fill ray hit and distance buffers with inf for masked environments + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_hits_w_cam], + device=self._device, + ) + wp.launch( + fill_float2d_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_distance_cam_w], + device=self._device, + ) + if return_normal: + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_normal_w], + device=self._device, + ) + + # Ray-cast against all meshes; closest hit wins via atomic_min on ray_distance + wp.launch( + warp_kernels.raycast_dynamic_meshes_kernel, + dim=(n_meshes, self._num_envs, self.num_rays), + inputs=[ + env_mask, + self._mesh_ids_wp, + self._ray_starts_w, + self._ray_directions_w, + self._ray_hits_w_cam, + self._ray_distance_cam_w, + self._ray_normal_w, + self._ray_face_id_w, + self._ray_mesh_id_w, + self._mesh_positions_w, + self._mesh_orientations_w, + float(CAMERA_RAYCAST_MAX_DIST), + int(return_normal), + int(False), + int(self.cfg.update_mesh_ids), + ], + device=self._device, ) - # update output buffers if "distance_to_image_plane" in self.cfg.data_types: - # note: data is in camera frame so we only take the first component (z-axis of camera frame) - distance_to_image_plane = ( - math_utils.quat_apply( - math_utils.quat_inv(self._data.quat_w_world[env_ids]).repeat(1, self.num_rays), - (ray_depth[:, :, None] * self._ray_directions_w[env_ids]), - ) - )[:, :, 0] - # apply the maximum distance after the transformation - if self.cfg.depth_clipping_behavior == "max": - distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance - elif self.cfg.depth_clipping_behavior == "zero": - distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 - self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( - -1, *self.image_shape, 1 + wp.launch( + compute_distance_to_image_plane_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, self._quat_w_wp, self._ray_distance_cam_w, self._ray_directions_w], + outputs=[self._distance_to_image_plane_wp], + device=self._device, ) + # Apply depth clipping on the intermediate buffer (leaves _ray_distance_cam_w unmodified) + self._apply_depth_clipping(env_mask, self._distance_to_image_plane_wp) + d2ip_torch = wp.to_torch(self._distance_to_image_plane_wp) + self._data.output["distance_to_image_plane"][env_ids] = d2ip_torch[env_ids].view(-1, *self.image_shape, 1) if "distance_to_camera" in self.cfg.data_types: - if self.cfg.depth_clipping_behavior == "max": - ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) - elif self.cfg.depth_clipping_behavior == "zero": - ray_depth[ray_depth > self.cfg.max_distance] = 0.0 - self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) + # d2ip (if requested) was computed before this block so _ray_distance_cam_w is still unclipped. + self._apply_depth_clipping(env_mask, self._ray_distance_cam_w) + ray_dist_torch = wp.to_torch(self._ray_distance_cam_w) + self._data.output["distance_to_camera"][env_ids] = ray_dist_torch[env_ids].view(-1, *self.image_shape, 1) - if "normals" in self.cfg.data_types: - self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) + if return_normal: + ray_normal_torch = wp.to_torch(self._ray_normal_w) + self._data.output["normals"][env_ids] = ray_normal_torch[env_ids].view(-1, *self.image_shape, 3) if self.cfg.update_mesh_ids: - self._data.image_mesh_ids[env_ids] = ray_mesh_ids.view(-1, *self.image_shape, 1) + self._data.image_mesh_ids[env_ids] = wp.to_torch(self._ray_mesh_id_w)[env_ids].view( + -1, *self.image_shape, 1 + ) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py index 45df51ce6d80..c625ccffedf4 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py @@ -6,13 +6,16 @@ """Configuration for the ray-cast camera sensor.""" import logging +from typing import TYPE_CHECKING from isaaclab.utils import configclass -from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg from .ray_caster_camera_cfg import RayCasterCameraCfg +if TYPE_CHECKING: + from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera + # import logger logger = logging.getLogger(__name__) @@ -21,7 +24,7 @@ class MultiMeshRayCasterCameraCfg(RayCasterCameraCfg, MultiMeshRayCasterCfg): """Configuration for the multi-mesh ray-cast camera sensor.""" - class_type: type = MultiMeshRayCasterCamera + class_type: type["MultiMeshRayCasterCamera"] | str = "{DIR}.multi_mesh_ray_caster_camera:MultiMeshRayCasterCamera" def __post_init__(self): super().__post_init__() diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py index d2f26abdbf47..21338f0a0616 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py @@ -9,11 +9,15 @@ from isaaclab.sensors.camera import CameraData -from .ray_caster_data import RayCasterData +class MultiMeshRayCasterCameraData(CameraData): + """Data container for the multi-mesh ray-cast camera sensor. -class MultiMeshRayCasterCameraData(CameraData, RayCasterData): - """Data container for the multi-mesh ray-cast sensor.""" + This class extends :class:`CameraData` with additional mesh-id information. + It does not inherit from :class:`RayCasterData` because the camera variant + manages its own torch-based pose and hit buffers independently from the + warp-native :class:`RayCasterData`. + """ image_mesh_ids: torch.Tensor = None """The mesh ids of the image pixels. diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_cfg.py index f5393920162a..3e7de9429c60 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_cfg.py @@ -7,12 +7,15 @@ """Configuration for the ray-cast sensor.""" from dataclasses import MISSING +from typing import TYPE_CHECKING from isaaclab.utils import configclass -from .multi_mesh_ray_caster import MultiMeshRayCaster from .ray_caster_cfg import RayCasterCfg +if TYPE_CHECKING: + from .multi_mesh_ray_caster import MultiMeshRayCaster + @configclass class MultiMeshRayCasterCfg(RayCasterCfg): @@ -49,7 +52,7 @@ class RaycastTargetCfg: Not tracking the mesh transformations is recommended when the meshes are static to increase performance. """ - class_type: type = MultiMeshRayCaster + class_type: type["MultiMeshRayCaster"] | str = "{DIR}.multi_mesh_ray_caster:MultiMeshRayCaster" mesh_prim_paths: list[str | RaycastTargetCfg] = MISSING """The list of mesh primitive paths to ray cast against. diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.py index d43f5437ce01..8d3ef840a761 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.py @@ -5,5 +5,6 @@ """Sub-module for ray-casting patterns used by the ray-caster.""" -from .patterns import bpearl_pattern, grid_pattern, lidar_pattern, pinhole_camera_pattern -from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, LidarPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.pyi b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.pyi new file mode 100644 index 000000000000..0055ebc57d1a --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/__init__.pyi @@ -0,0 +1,25 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "bpearl_pattern", + "grid_pattern", + "lidar_pattern", + "pinhole_camera_pattern", + "BpearlPatternCfg", + "GridPatternCfg", + "LidarPatternCfg", + "PatternBaseCfg", + "PinholeCameraPatternCfg", +] + +from .patterns import bpearl_pattern, grid_pattern, lidar_pattern, pinhole_camera_pattern +from .patterns_cfg import ( + BpearlPatternCfg, + GridPatternCfg, + LidarPatternCfg, + PatternBaseCfg, + PinholeCameraPatternCfg, +) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py index cae68833c4f2..6db80702a5b3 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns.py @@ -97,7 +97,7 @@ def pinhole_camera_pattern( transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2) pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec # normalize ray directions - ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) + ray_directions = (pix_in_cam_frame / torch.linalg.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) # for camera, we always ray-cast from the sensor's origin ray_starts = torch.zeros_like(ray_directions, device=device) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py index f50ba272b708..aa946ffac7bc 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py @@ -92,7 +92,7 @@ class PinholeCameraPatternCfg(PatternBaseCfg): Emulates sensor/film width on a camera. - Note: + .. note:: The default value is the horizontal aperture of a 35 mm spherical projector. """ vertical_aperture: float | None = None @@ -194,7 +194,7 @@ class BpearlPatternCfg(PatternBaseCfg): # fmt: on """Vertical ray angles (in degrees). Defaults to a list of 32 angles. - Note: + .. note:: We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles are not evenly spaced. """ diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py deleted file mode 100644 index 543276e8ea2a..000000000000 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Utility functions for ray-cast sensors.""" - -from __future__ import annotations - -import torch - -import omni.physics.tensors.impl.api as physx - -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.math import convert_quat - - -def obtain_world_pose_from_view( - physx_view: XformPrimView | physx.ArticulationView | physx.RigidBodyView, - env_ids: torch.Tensor, - clone: bool = False, -) -> tuple[torch.Tensor, torch.Tensor]: - """Get the world poses of the prim referenced by the prim view. - - Args: - physx_view: The prim view to get the world poses from. - env_ids: The environment ids of the prims to get the world poses for. - clone: Whether to clone the returned tensors (default: False). - - Returns: - A tuple containing the world positions and orientations of the prims. - Orientation is in (w, x, y, z) format. - - Raises: - NotImplementedError: If the prim view is not of the supported type. - """ - if isinstance(physx_view, XformPrimView): - pos_w, quat_w = physx_view.get_world_poses(env_ids) - elif isinstance(physx_view, physx.ArticulationView): - pos_w, quat_w = physx_view.get_root_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = convert_quat(quat_w, to="wxyz") - elif isinstance(physx_view, physx.RigidBodyView): - pos_w, quat_w = physx_view.get_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = convert_quat(quat_w, to="wxyz") - else: - raise NotImplementedError(f"Cannot get world poses for prim view of type '{type(physx_view)}'.") - - if clone: - return pos_w.clone(), quat_w.clone() - else: - return pos_w, quat_w diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py index e6735a9f4819..ab8ca8b7d4ce 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py @@ -6,7 +6,6 @@ from __future__ import annotations import logging -import re from collections.abc import Sequence from typing import TYPE_CHECKING, ClassVar @@ -14,26 +13,27 @@ import torch import warp as wp -import omni -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics +from pxr import Gf, Usd, UsdGeom import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.markers import VisualizationMarkers -from isaaclab.sim.views import XformPrimView +from isaaclab.sim.views import FrameView from isaaclab.terrains.trimesh.utils import make_plane -from isaaclab.utils.math import quat_apply, quat_apply_yaw -from isaaclab.utils.warp import convert_to_warp_mesh, raycast_mesh +from isaaclab.utils.warp import convert_to_warp_mesh +from isaaclab.utils.warp.kernels import raycast_mesh_masked_kernel from ..sensor_base import SensorBase -from .ray_cast_utils import obtain_world_pose_from_view +from .kernels import ( + apply_z_drift_kernel, + fill_vec3_inf_kernel, + update_ray_caster_kernel, +) from .ray_caster_data import RayCasterData if TYPE_CHECKING: from .ray_caster_cfg import RayCasterCfg -# import logger logger = logging.getLogger(__name__) @@ -45,8 +45,8 @@ class RayCaster(SensorBase): a set of meshes with a given ray pattern. The meshes are parsed from the list of primitive paths provided in the configuration. These are then - converted to warp meshes and stored in the `warp_meshes` list. The ray-caster then ray-casts against - these warp meshes using the ray pattern provided in the configuration. + converted to warp meshes and stored in the :attr:`meshes` dictionary. The ray-caster then ray-casts + against these warp meshes using the ray pattern provided in the configuration. .. note:: Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes @@ -56,11 +56,12 @@ class RayCaster(SensorBase): cfg: RayCasterCfg """The configuration parameters.""" - # Class variables to share meshes across instances - meshes: ClassVar[dict[str, wp.Mesh]] = {} + meshes: ClassVar[dict[tuple[str, str], wp.Mesh]] = {} """A dictionary to store warp meshes for raycasting, shared across all instances. - The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.""" + The keys are ``(prim_path, device)`` tuples and values are the corresponding warp Mesh objects. + Including the device in the key prevents a mesh created on one device (e.g. CPU) from being + reused by a kernel running on a different device (e.g. CUDA).""" _instance_count: ClassVar[int] = 0 """A counter to track the number of RayCaster instances, used to manage class variable lifecycle.""" @@ -71,19 +72,9 @@ def __init__(self, cfg: RayCasterCfg): cfg: The configuration parameters. """ RayCaster._instance_count += 1 - # check if sensor path is valid - # note: currently we do not handle environment indices if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Sensor_[1,2]". - sensor_path = cfg.prim_path.split("/")[-1] - sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None - if sensor_path_is_regex: - raise RuntimeError( - f"Invalid prim path for the ray-caster sensor: {cfg.prim_path}." - "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." - ) - # Initialize base class super().__init__(cfg) - # Create empty variables for storing output data + # Resolve physics-body paths and spawn the sensor Xform child if needed. + self._resolve_and_spawn("raycaster") self._data = RayCasterData() def __str__(self) -> str: @@ -117,19 +108,22 @@ def data(self) -> RayCasterData: Operations. """ - def reset(self, env_ids: Sequence[int] | None = None): + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: + super().reset(env_ids, env_mask) + # resolve to indices for torch indexing + if env_ids is not None: + num_envs_ids = len(env_ids) + elif env_mask is not None: + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + num_envs_ids = len(env_ids) + else: env_ids = slice(None) num_envs_ids = self._view.count - else: - num_envs_ids = len(env_ids) - # resample the drift + # resample drift (uses torch views for indexing) r = torch.empty(num_envs_ids, 3, device=self.device) self.drift[env_ids] = r.uniform_(*self.cfg.drift_range) - # resample the height drift + # resample the ray cast drift range_list = [self.cfg.ray_cast_drift_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] ranges = torch.tensor(range_list, device=self.device) self.ray_cast_drift[env_ids] = math_utils.sample_uniform( @@ -142,21 +136,28 @@ def reset(self, env_ids: Sequence[int] | None = None): def _initialize_impl(self): super()._initialize_impl() - # obtain global simulation view - - self._physics_sim_view = SimulationManager.get_physics_sim_view() - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - available_prims = ",".join([str(p.GetPath()) for p in sim_utils.get_current_stage().Traverse()]) - raise RuntimeError( - f"Failed to find a prim at path expression: {self.cfg.prim_path}. Available prims: {available_prims}" - ) - - self._view, self._offset = self._obtain_trackable_prim_view(self.cfg.prim_path) + # Build a FrameView over the sensor prim paths. The FrameView tracks the spawned + # (non-physics) Xform directly, so no physics-body redirect or offset resolution + # is needed at runtime — the world pose returned already includes any offset + # baked into the prim's local transform. + self._view = FrameView(self.cfg.prim_path, device=self._device, stage=self.stage) + + # Per-env identity offsets (kept for kernel ABI compatibility): the sensor frame is + # already the FrameView's tracked prim, so no additional view-to-sensor offset applies. + self._offset_pos_wp = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + identity_quat = torch.zeros(self._view.count, 4, device=self._device) + identity_quat[:, 3] = 1.0 + self._offset_quat_contiguous = identity_quat.contiguous() + self._offset_quat_wp = wp.from_torch(self._offset_quat_contiguous, dtype=wp.quatf) + + # Resolve alignment mode to integer constant for kernel dispatch + alignment_map = {"world": 0, "yaw": 1, "base": 2} + if self.cfg.ray_alignment not in alignment_map: + raise RuntimeError(f"Unsupported ray_alignment type: {self.cfg.ray_alignment}.") + self._alignment_mode = alignment_map[self.cfg.ray_alignment] # load the meshes by parsing the stage self._initialize_warp_meshes() - # initialize the ray start and directions self._initialize_rays_impl() def _initialize_warp_meshes(self): @@ -168,248 +169,195 @@ def _initialize_warp_meshes(self): # read prims to ray-cast for mesh_prim_path in self.cfg.mesh_prim_paths: - # check if mesh already casted into warp mesh - if mesh_prim_path in RayCaster.meshes: + mesh_key = (mesh_prim_path, self._device) + if mesh_key in RayCaster.meshes: continue - # check if the prim is a plane - handle PhysX plane as a special case - # if a plane exists then we need to create an infinite mesh that is a plane mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane" ) - # if we did not find a plane then we need to read the mesh if mesh_prim is None: - # obtain the mesh prim mesh_prim = sim_utils.get_first_matching_child_prim( mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh" ) - # check if valid if mesh_prim is None or not mesh_prim.IsValid(): raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}") - # cast into UsdGeomMesh mesh_prim = UsdGeom.Mesh(mesh_prim) - # read the vertices and faces points = np.asarray(mesh_prim.GetPointsAttr().Get()) - transform_matrix = np.array(omni.usd.get_world_transform_matrix(mesh_prim)).T + xformable = UsdGeom.Xformable(mesh_prim) + world_transform: Gf.Matrix4d = xformable.ComputeLocalToWorldTransform(Usd.TimeCode.Default()) + transform_matrix = np.array(world_transform).T points = np.matmul(points, transform_matrix[:3, :3].T) points += transform_matrix[:3, 3] indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get()) - wp_mesh = convert_to_warp_mesh(points, indices, device=self.device) - # print info + wp_mesh = convert_to_warp_mesh(points, indices, device=self._device) logger.info( f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces." ) else: mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True) - wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device) - # print info + wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self._device) logger.info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.") - # add the warp mesh to the list - RayCaster.meshes[mesh_prim_path] = wp_mesh + RayCaster.meshes[mesh_key] = wp_mesh - # throw an error if no meshes are found - if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]): + if all((mesh_prim_path, self._device) not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths): raise RuntimeError( f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" ) def _initialize_rays_impl(self): - # compute ray stars and directions - self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device) - self.num_rays = len(self.ray_directions) - # apply offset transformation to the rays + # Compute ray starts and directions from pattern (torch, init-time only) + ray_starts_torch, ray_directions_torch = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device) + self.num_rays = len(ray_directions_torch) + + # Apply sensor offset rotation/position to local ray pattern offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device) offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device) - self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions) - self.ray_starts += offset_pos - # repeat the rays for each sensor - self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1) - self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1) - # prepare drift - self.drift = torch.zeros(self._view.count, 3, device=self.device) - self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) - # fill the data buffer - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - - def _update_ray_infos(self, env_ids: Sequence[int]): - """Updates the ray information buffers.""" - - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset[0][env_ids], self._offset[1][env_ids] + ray_directions_torch = math_utils.quat_apply( + offset_quat.repeat(len(ray_directions_torch), 1), ray_directions_torch ) - # apply drift to ray starting position in world frame - pos_w += self.drift[env_ids] - # store the poses - self._data.pos_w[env_ids] = pos_w - self._data.quat_w[env_ids] = quat_w - - # check if user provided attach_yaw_only flag - if self.cfg.attach_yaw_only is not None: - msg = ( - "Raycaster attribute 'attach_yaw_only' property will be deprecated in a future release." - " Please use the parameter 'ray_alignment' instead." - ) - # set ray alignment to yaw - if self.cfg.attach_yaw_only: - self.cfg.ray_alignment = "yaw" - msg += " Setting ray_alignment to 'yaw'." - else: - self.cfg.ray_alignment = "base" - msg += " Setting ray_alignment to 'base'." - # log the warning - logger.warning(msg) - # ray cast based on the sensor poses - if self.cfg.ray_alignment == "world": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += self.ray_cast_drift[env_ids, 0:2] - # no rotation is considered and directions are not rotated - ray_starts_w = self.ray_starts[env_ids] - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = self.ray_directions[env_ids] - elif self.cfg.ray_alignment == "yaw": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += quat_apply_yaw(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] - # only yaw orientation is considered and directions are not rotated - ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = self.ray_directions[env_ids] - elif self.cfg.ray_alignment == "base": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += quat_apply(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] - # full orientation is considered - ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) - else: - raise RuntimeError(f"Unsupported ray_alignment type: {self.cfg.ray_alignment}.") + ray_starts_torch += offset_pos + + # Repeat for each environment + ray_starts_torch = ray_starts_torch.repeat(self._view.count, 1, 1) + ray_directions_torch = ray_directions_torch.repeat(self._view.count, 1, 1) + + # Create warp arrays from the init-time torch data + # The warp arrays own the memory; torch views provide backward-compat indexing + self._ray_starts_local = wp.from_torch(ray_starts_torch.contiguous(), dtype=wp.vec3f) + self._ray_directions_local = wp.from_torch(ray_directions_torch.contiguous(), dtype=wp.vec3f) + + # Torch views (same attribute names as before for subclass compatibility) + self.ray_starts = wp.to_torch(self._ray_starts_local) + self.ray_directions = wp.to_torch(self._ray_directions_local) + + # Drift buffers (warp-owned, torch views for reset indexing) + self._drift = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + self._ray_cast_drift = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + self.drift = wp.to_torch(self._drift) + self.ray_cast_drift = wp.to_torch(self._ray_cast_drift) - self._ray_starts_w[env_ids] = ray_starts_w - self._ray_directions_w[env_ids] = ray_directions_w + # World-frame ray buffers + self._ray_starts_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + self._ray_directions_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) - def _update_buffers_impl(self, env_ids: Sequence[int]): + # Torch views for subclass compatibility + self._ray_starts_w_torch = wp.to_torch(self._ray_starts_w) + self._ray_directions_w_torch = wp.to_torch(self._ray_directions_w) + + # Data buffers + self._data.create_buffers(self._view.count, self.num_rays, self._device) + + # Dummy distance/normal buffers required by the merged raycast_mesh_masked_kernel signature. + # Sized (1, 1) even though the kernel is launched at (num_envs, num_rays): the kernel only + # writes to these buffers when return_distance==1 or return_normal==1 respectively, and + # RayCaster always passes 0 for both flags. If those flags are ever enabled here, these + # buffers must be resized to (num_envs, num_rays) to avoid an out-of-bounds write. + self._dummy_ray_distance = wp.empty((1, 1), dtype=wp.float32, device=self._device) + self._dummy_ray_normal = wp.empty((1, 1), dtype=wp.vec3f, device=self._device) + + def _get_view_transforms_wp(self) -> wp.array: + """Get world transforms from the frame view as a warp array of ``wp.transformf``. + + Returns: + Warp array of ``wp.transformf`` with shape ``(num_envs,)``. Layout is + ``(tx, ty, tz, qx, qy, qz, qw)`` per element, matching the quaternion + convention returned by :class:`~isaaclab.sim.views.FrameView`. + """ + pos_w, quat_w = self._view.get_world_poses() + pos_torch = pos_w.torch.reshape(-1, 3) + quat_torch = quat_w.torch.reshape(-1, 4) + poses = torch.cat([pos_torch, quat_torch], dim=-1).contiguous() + return wp.from_torch(poses).view(wp.transformf) + + def _update_ray_infos(self, env_mask: wp.array): + """Updates sensor poses and ray world-frame buffers via a single warp kernel.""" + transforms = self._get_view_transforms_wp() + + wp.launch( + update_ray_caster_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[ + transforms, + env_mask, + self._offset_pos_wp, + self._offset_quat_wp, + self._drift, + self._ray_cast_drift, + self._ray_starts_local, + self._ray_directions_local, + self._alignment_mode, + ], + outputs=[ + self._data._pos_w, + self._data._quat_w, + self._ray_starts_w, + self._ray_directions_w, + ], + device=self._device, + ) + + def _update_buffers_impl(self, env_mask: wp.array): """Fills the buffers of the sensor data.""" - self._update_ray_infos(env_ids) + self._update_ray_infos(env_mask) + + # Fill ray hits with inf before raycasting + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._data._ray_hits_w], + device=self._device, + ) - # ray cast and store the hits - # TODO: Make this work for multiple meshes? - self._data.ray_hits_w[env_ids] = raycast_mesh( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - max_dist=self.cfg.max_distance, - mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], - )[0] + # Ray-cast against the mesh + wp.launch( + raycast_mesh_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[ + RayCaster.meshes[(self.cfg.mesh_prim_paths[0], self._device)].id, + env_mask, + self._ray_starts_w, + self._ray_directions_w, + float(self.cfg.max_distance), + int(False), # return_distance: not needed by RayCaster + int(False), # return_normal: not needed by RayCaster + self._data._ray_hits_w, + self._dummy_ray_distance, + self._dummy_ray_normal, + ], + device=self._device, + ) - # apply vertical drift to ray starting position in ray caster frame - self._data.ray_hits_w[env_ids, :, 2] += self.ray_cast_drift[env_ids, 2].unsqueeze(-1) + # Apply vertical drift to ray hits + wp.launch( + apply_z_drift_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, self._ray_cast_drift, self._data._ray_hits_w], + device=self._device, + ) def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility if debug_vis: if not hasattr(self, "ray_visualizer"): self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true self.ray_visualizer.set_visibility(True) else: if hasattr(self, "ray_visualizer"): self.ray_visualizer.set_visibility(False) def _debug_vis_callback(self, event): - if self._data.ray_hits_w is None: + if self._data._ray_hits_w is None: return + ray_hits_torch = wp.to_torch(self._data._ray_hits_w) # remove possible inf values - viz_points = self._data.ray_hits_w.reshape(-1, 3) + viz_points = ray_hits_torch.reshape(-1, 3) viz_points = viz_points[~torch.any(torch.isinf(viz_points), dim=1)] - self.ray_visualizer.visualize(viz_points) - - """ - Internal Helpers. - """ - - def _obtain_trackable_prim_view( - self, target_prim_path: str - ) -> tuple[XformPrimView | any, tuple[torch.Tensor, torch.Tensor]]: - """Obtain a prim view that can be used to track the pose of the parget prim. - - The target prim path is a regex expression that matches one or more mesh prims. While we can track its - pose directly using XFormPrim, this is not efficient and can be slow. Instead, we create a prim view - using the physics simulation view, which provides a more efficient way to track the pose of the mesh prims. - - The function additionally resolves the relative pose between the mesh and its corresponding physics prim. - This is especially useful if the mesh is not directly parented to the physics prim. - - Args: - target_prim_path: The target prim path to obtain the prim view for. - - Returns: - A tuple containing: - - - An XFormPrim or a physics prim view (ArticulationView or RigidBodyView). - - A tuple containing the positions and orientations of the mesh prims in the physics prim frame. - - """ - - mesh_prim = sim_utils.find_first_matching_prim(target_prim_path) - current_prim = mesh_prim - current_path_expr = target_prim_path - - prim_view = None - - while prim_view is None: - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.ArticulationRootAPI): - prim_view = self._physics_sim_view.create_articulation_view(current_path_expr.replace(".*", "*")) - logger.info(f"Created articulation view for mesh prim at path: {target_prim_path}") - break - - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.RigidBodyAPI): - prim_view = self._physics_sim_view.create_rigid_body_view(current_path_expr.replace(".*", "*")) - logger.info(f"Created rigid body view for mesh prim at path: {target_prim_path}") - break - - new_root_prim = current_prim.GetParent() - current_path_expr = current_path_expr.rsplit("/", 1)[0] - if not new_root_prim.IsValid(): - prim_view = XformPrimView(target_prim_path, device=self._device, stage=self.stage) - current_path_expr = target_prim_path - logger.warning( - f"The prim at path {target_prim_path} which is used for raycasting is not a physics prim." - " Defaulting to XFormPrim. \n The pose of the mesh will most likely not" - " be updated correctly when running in headless mode and position lookups will be much slower. \n" - " If possible, ensure that the mesh or its parent is a physics prim (rigid body or articulation)." - ) - break - - # switch the current prim to the parent prim - current_prim = new_root_prim - - # obtain the relative transforms between target prim and the view prims - mesh_prims = sim_utils.find_matching_prims(target_prim_path) - view_prims = sim_utils.find_matching_prims(current_path_expr) - if len(mesh_prims) != len(view_prims): - raise RuntimeError( - f"The number of mesh prims ({len(mesh_prims)}) does not match the number of physics prims" - f" ({len(view_prims)})Please specify the correct mesh and physics prim paths more" - " specifically in your target expressions." - ) - positions = [] - quaternions = [] - for mesh_prim, view_prim in zip(mesh_prims, view_prims): - pos, orientation = sim_utils.resolve_prim_pose(mesh_prim, view_prim) - positions.append(torch.tensor(pos, dtype=torch.float32, device=self.device)) - quaternions.append(torch.tensor(orientation, dtype=torch.float32, device=self.device)) - - positions = torch.stack(positions).to(device=self.device, dtype=torch.float32) - quaternions = torch.stack(quaternions).to(device=self.device, dtype=torch.float32) + # if no points to visualize, skip + if viz_points.shape[0] == 0: + return - return prim_view, (positions, quaternions) + self.ray_visualizer.visualize(viz_points) """ Internal simulation callbacks. @@ -417,9 +365,7 @@ def _obtain_trackable_prim_view( def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" - # call parent super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them self._view = None def __del__(self): diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera.py index e930d3df1837..257b25698deb 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera.py @@ -10,14 +10,23 @@ from typing import TYPE_CHECKING, ClassVar, Literal import torch +import warp as wp from pxr import UsdGeom import isaaclab.utils.math as math_utils from isaaclab.sensors.camera import CameraData -from isaaclab.utils.warp import raycast_mesh - -from .ray_cast_utils import obtain_world_pose_from_view +from isaaclab.utils.warp.kernels import raycast_mesh_masked_kernel + +from .kernels import ( + ALIGNMENT_BASE, + CAMERA_RAYCAST_MAX_DIST, + apply_depth_clipping_masked_kernel, + compute_distance_to_image_plane_masked_kernel, + fill_float2d_masked_kernel, + fill_vec3_inf_kernel, + update_ray_caster_kernel, +) from .ray_caster import RayCaster if TYPE_CHECKING: @@ -142,18 +151,30 @@ def set_intrinsic_matrices( self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device ) - - def reset(self, env_ids: Sequence[int] | None = None): + # Refresh warp views of local ray buffers; .contiguous() may produce a copy so we store + # the contiguous tensors explicitly to prevent GC while the warp views are alive. + if hasattr(self, "_ray_starts_local"): + self._ray_starts_contiguous = self.ray_starts.contiguous() + self._ray_directions_contiguous = self.ray_directions.contiguous() + self._ray_starts_local = wp.from_torch(self._ray_starts_contiguous, dtype=wp.vec3f) + self._ray_directions_local = wp.from_torch(self._ray_directions_contiguous, dtype=wp.vec3f) + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None or isinstance(env_ids, slice): + super().reset(env_ids, env_mask) + # resolve to indices for torch indexing + if env_ids is None and env_mask is not None: + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + elif env_ids is None or isinstance(env_ids, slice): env_ids = self._ALL_INDICES + if not isinstance(env_ids, torch.Tensor): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self._device) # reset the data # note: this recomputation is useful if one performs events such as randomizations on the camera poses. - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) if env_ids is not None else None + pos_w, quat_w = self._view.get_world_poses(indices) pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + pos_w.torch.clone(), quat_w.torch.clone(), self._offset_pos[env_ids], self._offset_quat[env_ids] ) self._data.pos_w[env_ids] = pos_w self._data.quat_w_world[env_ids] = quat_w @@ -176,13 +197,13 @@ def set_world_poses( - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention - See :meth:`isaaclab.utils.maths.convert_camera_frame_orientation_convention` for more details + See :meth:`isaaclab.utils.math.convert_camera_frame_orientation_convention` for more details on the conventions. Args: positions: The cartesian coordinates (in meters). Shape is (N, 3). Defaults to None, in which case the camera position in not changed. - orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). + orientations: The quaternion orientation in (x, y, z, w). Shape is (N, 4). Defaults to None, in which case the camera orientation in not changed. env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. convention: The convention in which the poses are fed. Defaults to "ros". @@ -195,25 +216,28 @@ def set_world_poses( env_ids = self._ALL_INDICES # get current positions - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) if env_ids is not None else None + pos_w, quat_w = self._view.get_world_poses(indices) + pos_w_torch = pos_w.torch + quat_w_torch = quat_w.torch if positions is not None: # transform to camera frame - pos_offset_world_frame = positions - pos_w - self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame) + pos_offset_world_frame = positions - pos_w_torch + self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w_torch), pos_offset_world_frame) if orientations is not None: # convert rotation matrix from input convention to world quat_w_set = math_utils.convert_camera_frame_orientation_convention( orientations, origin=convention, target="world" ) - self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set) + self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w_torch), quat_w_set) # update the data - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + pos_w2, quat_w2 = self._view.get_world_poses(indices) + pos_w_out, quat_w_out = math_utils.combine_frame_transforms( + pos_w2.torch.clone(), quat_w2.torch.clone(), self._offset_pos[env_ids], self._offset_quat[env_ids] ) - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w + self._data.pos_w[env_ids] = pos_w_out + self._data.quat_w_world[env_ids] = quat_w_out def set_world_poses_from_view( self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None @@ -221,7 +245,7 @@ def set_world_poses_from_view( """Set the poses of the camera from the eye position and look-at target position. Args: - eyes: The positions of the camera's eye. Shape is N, 3). + eyes: The positions of the camera's eye. Shape is (N, 3). targets: The target locations to look at. Shape is (N, 3). env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. @@ -250,97 +274,243 @@ def _initialize_rays_impl(self): self._create_buffers() # compute intrinsic matrices self._compute_intrinsic_matrices() - # compute ray stars and directions + # compute ray starts and directions self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device ) self.num_rays = self.ray_directions.shape[1] - # create buffer to store ray hits - self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) - # set offsets + + # Offset buffers: warp-primary so the kernel always sees the current values without re-wrapping. + # Zero-copy torch views (_offset_pos, _offset_quat) are used by set_world_poses for indexed writes. + self._offset_pos_wp = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + self._offset_quat_wp = wp.zeros(self._view.count, dtype=wp.quatf, device=self._device) + self._offset_pos = wp.to_torch(self._offset_pos_wp) + self._offset_quat = wp.to_torch(self._offset_quat_wp) + # Initialize from config quat_w = math_utils.convert_camera_frame_orientation_convention( torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" ) - self._offset_quat = quat_w.repeat(self._view.count, 1) - self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + self._offset_pos[:] = torch.tensor(list(self.cfg.offset.pos), device=self._device) + self._offset_quat[:] = quat_w + + # Warp buffers for world-frame rays (used by update kernel) + self._ray_starts_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + self._ray_directions_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + + # Warp views for ray_starts and ray_directions (from torch tensors returned by pattern_cfg.func) + # These are (num_envs, num_rays, 3) torch tensors; wrap as warp vec3f arrays. + # Store contiguous tensors explicitly so they are not garbage-collected while the + # warp views are alive (mirrors the pattern in RayCaster._initialize_impl). + self._ray_starts_contiguous = self.ray_starts.contiguous() + self._ray_directions_contiguous = self.ray_directions.contiguous() + self._ray_starts_local = wp.from_torch(self._ray_starts_contiguous, dtype=wp.vec3f) + self._ray_directions_local = wp.from_torch(self._ray_directions_contiguous, dtype=wp.vec3f) + + # Wrap the torch drift buffers (created in _create_buffers) as warp arrays (zero-copy). + # Cameras do not apply positional drift, so these remain zero. + self._drift_contiguous = self.drift.contiguous() + self._ray_cast_drift_contiguous = self.ray_cast_drift.contiguous() + self._drift = wp.from_torch(self._drift_contiguous, dtype=wp.vec3f) + self._ray_cast_drift = wp.from_torch(self._ray_cast_drift_contiguous, dtype=wp.vec3f) + + # Warp buffers for camera pose outputs + self._pos_w_wp = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + self._quat_w_wp = wp.zeros(self._view.count, dtype=wp.quatf, device=self._device) + + # Intermediate warp buffers for ray results (filled with inf before each raycasting step) + self._ray_distance = wp.zeros((self._view.count, self.num_rays), dtype=wp.float32, device=self._device) + if "normals" in self.cfg.data_types: + self._ray_normal_w = wp.zeros((self._view.count, self.num_rays), dtype=wp.vec3f, device=self._device) + else: + self._ray_normal_w = wp.zeros((1, 1), dtype=wp.vec3f, device=self._device) + + if "distance_to_image_plane" in self.cfg.data_types: + self._distance_to_image_plane_wp = wp.zeros( + (self._view.count, self.num_rays), dtype=wp.float32, device=self._device + ) - def _update_buffers_impl(self, env_ids: Sequence[int]): + # Torch buffer for ray hits (used by debug visualizer) + self.ray_hits_w = torch.full((self._view.count, self.num_rays, 3), float("inf"), device=self._device) + # Warp view of ray_hits_w + self._ray_hits_w_wp = wp.from_torch(self.ray_hits_w.contiguous(), dtype=wp.vec3f) + + # Cache zero-copy torch views of warp output buffers to avoid per-step wrapper allocation. + self._pos_w_torch = wp.to_torch(self._pos_w_wp) + self._quat_w_torch = wp.to_torch(self._quat_w_wp) + self._ray_distance_torch = wp.to_torch(self._ray_distance) + if "distance_to_image_plane" in self.cfg.data_types: + self._distance_to_image_plane_torch = wp.to_torch(self._distance_to_image_plane_wp) + if "normals" in self.cfg.data_types: + self._ray_normal_w_torch = wp.to_torch(self._ray_normal_w) + + def _update_buffers_impl(self, env_mask: wp.array): """Fills the buffers of the sensor data.""" + # Convert mask to indices for torch-indexed writes + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return # increment frame count self._frame[env_ids] += 1 - # compute poses from current view - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + # Update world-frame ray starts/directions and camera pose via warp kernel. + # Camera always uses ALIGNMENT_BASE (full orientation) and zero drift. + transforms = self._get_view_transforms_wp() + wp.launch( + update_ray_caster_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[ + transforms, + env_mask, + self._offset_pos_wp, + self._offset_quat_wp, + self._drift, + self._ray_cast_drift, + self._ray_starts_local, + self._ray_directions_local, + int(ALIGNMENT_BASE), + ], + outputs=[ + self._pos_w_wp, + self._quat_w_wp, + self._ray_starts_w, + self._ray_directions_w, + ], + device=self._device, + ) + + # Write camera pose to CameraData (torch tensors) + self._data.pos_w[env_ids] = self._pos_w_torch[env_ids] + self._data.quat_w_world[env_ids] = self._quat_w_torch[env_ids] + + # Fill ray hit positions with inf before raycasting + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_hits_w_wp], + device=self._device, + ) + + # Fill ray distance with inf before raycasting + wp.launch( + fill_float2d_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_distance], + device=self._device, ) - # update the data - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w - # note: full orientation is considered - ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) - - # ray cast and store the hits - # note: we set max distance to 1e6 during the ray-casting. THis is because we clip the distance - # to the image plane and distance to the camera to the maximum distance afterwards in-order to - # match the USD camera behavior. - - # TODO: Make ray-casting work for multiple meshes? - # necessary for regular dictionaries. - self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh( - ray_starts_w, - ray_directions_w, - mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], - max_dist=1e6, - return_distance=any( - [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] - ), - return_normal="normals" in self.cfg.data_types, + # Determine whether to compute normals + need_normal = int("normals" in self.cfg.data_types) + if need_normal: + # Fill normal buffer with inf before raycasting + wp.launch( + fill_vec3_inf_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float("inf"), self._ray_normal_w], + device=self._device, + ) + + # Ray-cast against the mesh; use a large upper-bound max_dist so depth clipping + # can be applied per-data-type afterwards (matching the original behaviour). + wp.launch( + raycast_mesh_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[ + RayCaster.meshes[(self.cfg.mesh_prim_paths[0], self._device)].id, + env_mask, + self._ray_starts_w, + self._ray_directions_w, + float(CAMERA_RAYCAST_MAX_DIST), + int(True), # return_distance: always needed for depth output + need_normal, + self._ray_hits_w_wp, + self._ray_distance, + self._ray_normal_w, + ], + device=self._device, ) - # update output buffers + + # Compute distance_to_image_plane using a warp kernel if "distance_to_image_plane" in self.cfg.data_types: - # note: data is in camera frame so we only take the first component (z-axis of camera frame) - distance_to_image_plane = ( - math_utils.quat_apply( - math_utils.quat_inv(quat_w).repeat(1, self.num_rays), - (ray_depth[:, :, None] * ray_directions_w), - ) - )[:, :, 0] - # apply the maximum distance after the transformation - if self.cfg.depth_clipping_behavior == "max": - distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance - elif self.cfg.depth_clipping_behavior == "zero": - distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 - self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( + wp.launch( + compute_distance_to_image_plane_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[ + env_mask, + self._quat_w_wp, + self._ray_distance, + self._ray_directions_w, + ], + outputs=[ + self._distance_to_image_plane_wp, + ], + device=self._device, + ) + # Apply depth clipping on the intermediate buffer (leaves _ray_distance unmodified) + self._apply_depth_clipping(env_mask, self._distance_to_image_plane_wp) + self._data.output["distance_to_image_plane"][env_ids] = self._distance_to_image_plane_torch[env_ids].view( -1, *self.image_shape, 1 ) if "distance_to_camera" in self.cfg.data_types: - if self.cfg.depth_clipping_behavior == "max": - ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) - elif self.cfg.depth_clipping_behavior == "zero": - ray_depth[ray_depth > self.cfg.max_distance] = 0.0 - self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) + # d2ip (if requested) was computed before this block so _ray_distance is still unclipped. + self._apply_depth_clipping(env_mask, self._ray_distance) + self._data.output["distance_to_camera"][env_ids] = self._ray_distance_torch[env_ids].view( + -1, *self.image_shape, 1 + ) if "normals" in self.cfg.data_types: - self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) + self._data.output["normals"][env_ids] = self._ray_normal_w_torch[env_ids].view(-1, *self.image_shape, 3) def _debug_vis_callback(self, event): # in case it crashes be safe if not hasattr(self, "ray_hits_w"): return - # show ray hit positions - self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3)) + # filter out missed rays (inf values) before visualizing + ray_hits_flat = self.ray_hits_w.reshape(-1, 3) + valid_mask = ~torch.isinf(ray_hits_flat).any(dim=-1) + viz_points = ray_hits_flat[valid_mask] + # if no valid hits, skip + if viz_points.shape[0] == 0: + return + self.ray_visualizer.visualize(viz_points) """ Private Helpers """ + def _apply_depth_clipping(self, env_mask: wp.array, depth: wp.array) -> None: + """Apply depth clipping in-place on a warp float32 buffer. + + Uses :attr:`cfg.depth_clipping_behavior` to determine the fill value: + ``"max"`` replaces out-of-range and NaN values with :attr:`cfg.max_distance`; + ``"zero"`` replaces them with 0. No-op when behavior is ``"none"``. + + Args: + env_mask: Boolean mask selecting which environments to update. Shape is (num_envs,). + depth: Warp 2-D float32 buffer to clip in-place. Shape is (num_envs, num_rays). + """ + if self.cfg.depth_clipping_behavior == "max": + wp.launch( + apply_depth_clipping_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float(self.cfg.max_distance), float(self.cfg.max_distance), depth], + device=self._device, + ) + elif self.cfg.depth_clipping_behavior == "zero": + wp.launch( + apply_depth_clipping_masked_kernel, + dim=(self._num_envs, self.num_rays), + inputs=[env_mask, float(self.cfg.max_distance), float(0.0), depth], + device=self._device, + ) + elif self.cfg.depth_clipping_behavior == "none": + pass # no clipping: inf values remain as-is + else: + raise ValueError( + f"Unknown depth_clipping_behavior: {self.cfg.depth_clipping_behavior!r}." + " Valid values are 'max', 'zero', and 'none'." + ) + def _check_supported_data_types(self, cfg: RayCasterCameraCfg): """Checks if the data types are supported by the ray-caster camera.""" # check if there is any intersection in unsupported types @@ -356,7 +526,7 @@ def _check_supported_data_types(self, cfg: RayCasterCameraCfg): def _create_buffers(self): """Create buffers for storing data.""" - # prepare drift + # prepare drift (kept as torch tensors so subclasses may use torch indexing) self.drift = torch.zeros(self._view.count, 3, device=self.device) self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) # create the data object @@ -370,7 +540,7 @@ def _create_buffers(self): # -- output data # create the buffers to store the annotator data. self._data.output = {} - self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count + self._data.info = {name: None for name in self.cfg.data_types} for name in self.cfg.data_types: if name in ["distance_to_image_plane", "distance_to_camera"]: shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 1) @@ -409,21 +579,23 @@ def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tenso """Obtains the pose of the view the camera is attached to in the world frame. .. deprecated v2.3.1: - This function will be removed in a future release in favor of implementation - :meth:`obtain_world_pose_from_view`. + This function will be removed in a future release. Call + ``self._view.get_world_poses(indices)`` directly instead. The returned + ProxyArray pair exposes ``.warp`` and ``.torch`` accessors. Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z). + A tuple of the position (in meters) and quaternion (x, y, z, w). """ - # deprecation logger.warning( - "The function '_compute_view_world_poses' will be deprecated in favor of the util method" - " 'obtain_world_pose_from_view'. Please use 'obtain_world_pose_from_view' instead...." + "The function '_compute_view_world_poses' is deprecated." + " Call 'self._view.get_world_poses(indices)' directly instead." ) - return obtain_world_pose_from_view(self._view, env_ids, clone=True) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) if env_ids is not None else None + pos_w, quat_w = self._view.get_world_poses(indices) + return pos_w.torch.clone(), quat_w.torch.clone() def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: """Computes the pose of the camera in the world frame. @@ -435,22 +607,24 @@ def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Ten .. code-block:: python - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) + pos_w, quat_w = self._view.get_world_poses(indices) + # The returned ProxyArray pair exposes .warp and .torch accessors + pos_w, quat_w = pos_w.torch.clone(), quat_w.torch.clone() pos_w, quat_w = math_utils.combine_frame_transforms( pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] ) Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention. + A tuple of the position (in meters) and quaternion (x, y, z, w) in "world" convention. """ - - # deprecation logger.warning( - "The function '_compute_camera_world_poses' will be deprecated in favor of the combination of methods" - " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms'. Please use" - " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms' instead...." + "The function '_compute_camera_world_poses' is deprecated." + " Call 'self._view.get_world_poses(indices)' and 'math_utils.combine_frame_transforms' directly instead." ) - # get the pose of the view the camera is attached to - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - return math_utils.combine_frame_transforms(pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids]) + indices = wp.from_torch(env_ids.to(dtype=torch.int32), dtype=wp.int32) if env_ids is not None else None + pos_w, quat_w = self._view.get_world_poses(indices) + return math_utils.combine_frame_transforms( + pos_w.torch.clone(), quat_w.torch.clone(), self._offset_pos[env_ids], self._offset_quat[env_ids] + ) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera_cfg.py index 604c586adcc7..574c95020437 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_camera_cfg.py @@ -6,14 +6,16 @@ """Configuration for the ray-cast camera sensor.""" from dataclasses import MISSING -from typing import Literal +from typing import TYPE_CHECKING, Literal from isaaclab.utils import configclass from .patterns import PinholeCameraPatternCfg -from .ray_caster_camera import RayCasterCamera from .ray_caster_cfg import RayCasterCfg +if TYPE_CHECKING: + from .ray_caster_camera import RayCasterCamera + @configclass class RayCasterCameraCfg(RayCasterCfg): @@ -26,8 +28,8 @@ class OffsetCfg: pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" convention: Literal["opengl", "ros", "world"] = "ros" """The convention in which the frame offset is applied. Defaults to "ros". @@ -39,7 +41,7 @@ class OffsetCfg: """ - class_type: type = RayCasterCamera + class_type: type["RayCasterCamera"] | str = "{DIR}.ray_caster_camera:RayCasterCamera" offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" @@ -52,8 +54,8 @@ class OffsetCfg: - ``"max"``: Values are clipped to the maximum value. - ``"zero"``: Values are clipped to zero. - - ``"none``: No clipping is applied. Values will be returned as ``inf`` for ``distance_to_camera`` and ``nan`` - for ``distance_to_image_plane`` data type. + - ``"none"``: No clipping is applied. Values will be returned as ``inf`` for missed rays in both + ``distance_to_camera`` and ``distance_to_image_plane`` data types. """ pattern_cfg: PinholeCameraPatternCfg = MISSING diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py index dbdebfad3a5e..3e862e389c1e 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py @@ -5,16 +5,21 @@ """Configuration for the ray-cast sensor.""" +from __future__ import annotations + from dataclasses import MISSING -from typing import Literal +from typing import TYPE_CHECKING, Literal from isaaclab.markers import VisualizationMarkersCfg from isaaclab.markers.config import RAY_CASTER_MARKER_CFG +from isaaclab.sim.spawners.sensors.sensors_cfg import SensorFrameCfg from isaaclab.utils import configclass from ..sensor_base_cfg import SensorBaseCfg from .patterns.patterns_cfg import PatternBaseCfg -from .ray_caster import RayCaster + +if TYPE_CHECKING: + from .ray_caster import RayCaster @configclass @@ -27,15 +32,30 @@ class OffsetCfg: pos: tuple[float, float, float] = (0.0, 0.0, 0.0) """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Quaternion rotation (x, y, z, w) w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0, 1.0).""" + + class_type: type[RayCaster] | str = "{DIR}.ray_caster:RayCaster" + + spawn: SensorFrameCfg | None = SensorFrameCfg() + """Spawn configuration for the sensor Xform prim. - class_type: type = RayCaster + A plain USD Xform is created at :attr:`prim_path` before initialization, matching the + pattern used by :class:`~isaaclab.sensors.camera.camera_cfg.CameraCfg` (which spawns a + Camera prim). The :attr:`prim_path` can be either: + + - A **new** child path under a parent link (e.g. ``{ENV_REGEX_NS}/Robot/base``). + - A **physics body** path (e.g. ``{ENV_REGEX_NS}/Robot/base``). In this case, the sensor + will automatically create a child Xform at ``{prim_path}``. + + If ``None``, the prim at :attr:`prim_path` must already exist on the USD stage and must + **not** be a physics body. + """ mesh_prim_paths: list[str] = MISSING """The list of mesh primitive paths to ray cast against. - Note: + .. note:: Currently, only a single static mesh is supported. We are working on supporting multiple static meshes and dynamic meshes. """ @@ -43,30 +63,15 @@ class OffsetCfg: offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" - attach_yaw_only: bool | None = None - """Whether the rays' starting positions and directions only track the yaw orientation. - Defaults to None, which doesn't raise a warning of deprecated usage. - - This is useful for ray-casting height maps, where only yaw rotation is needed. - - .. deprecated:: 2.1.1 - - This attribute is deprecated and will be removed in the future. Please use - :attr:`ray_alignment` instead. - - To get the same behavior as setting this parameter to ``True`` or ``False``, set - :attr:`ray_alignment` to ``"yaw"`` or "base" respectively. - - """ - ray_alignment: Literal["base", "yaw", "world"] = "base" """Specify in what frame the rays are projected onto the ground. Default is "base". The options are: * ``base`` if the rays' starting positions and directions track the full root position and orientation. - * ``yaw`` if the rays' starting positions and directions track root position and only yaw component of - the orientation. This is useful for ray-casting height maps. + * ``yaw`` if the rays' starting positions track root position and the yaw component of the orientation, + while ray directions remain fixed in world frame. This is useful for ray-casting height maps where + the scan footprint should follow the body heading without tilting when the body pitches or rolls. * ``world`` if rays' starting positions and directions are always fixed. This is useful in combination with a mapping package on the robot and querying ray-casts in a global frame. """ @@ -93,6 +98,6 @@ class OffsetCfg: visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py index d63e085e752f..1265e1df0fc3 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py @@ -3,28 +3,82 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import dataclass +from __future__ import annotations -import torch +import warp as wp +from isaaclab.utils.leapp import ( + QUAT_XYZW_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) +from isaaclab.utils.warp import ProxyArray -@dataclass -class RayCasterData: - """Data container for the ray-cast sensor.""" - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. +class RayCasterData: + """Data container for the ray-cast sensor. - Shape is (N, 3), where N is the number of sensors. + Public properties return :class:`~isaaclab.utils.warp.ProxyArray` wrappers. + Use ``.torch`` for a cached zero-copy :class:`torch.Tensor` view or + ``.warp`` for the underlying :class:`warp.array`. """ - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. - Shape is (N, 4), where N is the number of sensors. - """ - ray_hits_w: torch.Tensor = None - """The ray hit positions in the world frame. + def __init__(self): + self._pos_w: wp.array | None = None + self._quat_w: wp.array | None = None + self._ray_hits_w: wp.array | None = None - Shape is (N, B, 3), where N is the number of sensors, B is the number of rays - in the scan pattern per sensor. - """ + # _pos_w_ta / _quat_w_ta / _ray_hits_w_ta are created in create_buffers(). + # Accessing the public properties before create_buffers() raises AttributeError. + + @property + @leapp_tensor_semantics(kind="state/sensor/position", element_names=XYZ_ELEMENT_NAMES) + def pos_w(self) -> ProxyArray: + """Position of the sensor origin in world frame [m]. + + Shape is (N,), dtype ``wp.vec3f``. In torch this resolves to (N, 3), + where N is the number of sensors. Use ``.warp`` for the underlying + ``wp.array`` or ``.torch`` for a cached zero-copy ``torch.Tensor`` view. + """ + return self._pos_w_ta + + @property + @leapp_tensor_semantics(kind="state/sensor/rotation", element_names=QUAT_XYZW_ELEMENT_NAMES) + def quat_w(self) -> ProxyArray: + """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. + + Shape is (N,), dtype ``wp.quatf``. In torch this resolves to (N, 4), + where N is the number of sensors. Use ``.warp`` for the underlying + ``wp.array`` or ``.torch`` for a cached zero-copy ``torch.Tensor`` view. + """ + return self._quat_w_ta + + @property + @leapp_tensor_semantics(kind="state/sensor/ray_hit_position") + def ray_hits_w(self) -> ProxyArray: + """The ray hit positions in the world frame [m]. + + Shape is (N, B), dtype ``wp.vec3f``. In torch this resolves to (N, B, 3), + where N is the number of sensors and B is the number of rays per sensor. + Contains ``inf`` for missed hits. Use ``.warp`` for the underlying + ``wp.array`` or ``.torch`` for a cached zero-copy ``torch.Tensor`` view. + """ + return self._ray_hits_w_ta + + def create_buffers(self, num_envs: int, num_rays: int, device: str) -> None: + """Create internal warp buffers and their :class:`ProxyArray` wrappers. + + Args: + num_envs: Number of environments / sensors. + num_rays: Number of rays per sensor. + device: Device for tensor storage. + """ + self._device = device + + self._pos_w = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._quat_w = wp.zeros(num_envs, dtype=wp.quatf, device=device) + self._ray_hits_w = wp.zeros((num_envs, num_rays), dtype=wp.vec3f, device=device) + + self._pos_w_ta = ProxyArray(self._pos_w) + self._quat_w_ta = ProxyArray(self._quat_w) + self._ray_hits_w_ta = ProxyArray(self._ray_hits_w) diff --git a/source/isaaclab/isaaclab/sensors/sensor_base.py b/source/isaaclab/isaaclab/sensors/sensor_base.py index 4ece160bbe5a..728a708b4448 100644 --- a/source/isaaclab/isaaclab/sensors/sensor_base.py +++ b/source/isaaclab/isaaclab/sensors/sensor_base.py @@ -11,26 +11,26 @@ from __future__ import annotations -import builtins import inspect +import logging import re import weakref from abc import ABC, abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING, Any -import torch - -import omni.kit.app -import omni.timeline -from isaacsim.core.simulation_manager import IsaacEvents, SimulationManager +import warp as wp import isaaclab.sim as sim_utils -from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.physics import PhysicsEvent, PhysicsManager + +from .kernels import reset_envs_kernel, update_outdated_envs_kernel, update_timestamp_kernel if TYPE_CHECKING: from .sensor_base_cfg import SensorBaseCfg +logger = logging.getLogger(__name__) + class SensorBase(ABC): """The base class for implementing a sensor. @@ -50,9 +50,6 @@ def __init__(self, cfg: SensorBaseCfg): Args: cfg: The configuration parameters for the sensor. """ - # check that config is valid - if cfg.history_length < 0: - raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}") # check that the config is valid cfg.validate() # store inputs @@ -61,8 +58,7 @@ def __init__(self, cfg: SensorBaseCfg): self._is_initialized = False # flag for whether the sensor is in visualization mode self._is_visualizing = False - # get stage handle - self.stage = get_current_stage() + self.stage = sim_utils.get_current_stage() # register various callback functions self._register_callbacks() @@ -154,41 +150,55 @@ def set_debug_vis(self, debug_vis: bool) -> bool: if debug_vis: # create a subscriber for the post update event if it doesn't exist if self._debug_vis_handle is None: - app_interface = omni.kit.app.get_app_interface() - self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( - lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) - ) + sim_ctx = sim_utils.SimulationContext.instance() + if sim_ctx is not None: + self._debug_vis_handle = sim_ctx.vis_marker_registry.add_debug_vis_callback(self) else: # remove the subscriber if it exists - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() + sim_ctx = sim_utils.SimulationContext.instance() + if sim_ctx is not None: + sim_ctx.vis_marker_registry.clear_debug_vis_callback(self) + else: self._debug_vis_handle = None # return success return True - def reset(self, env_ids: Sequence[int] | None = None): + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: """Resets the sensor internals. Args: - env_ids: The sensor ids to reset. Defaults to None. + env_ids: The environment indices to reset. Defaults to None, in which case all + environments are reset. + env_mask: A boolean warp array indicating which environments to reset. If provided, + takes priority over ``env_ids``. Defaults to None. """ - # Resolve sensor ids - if env_ids is None: - env_ids = slice(None) - # Reset the timestamp for the sensors - self._timestamp[env_ids] = 0.0 - self._timestamp_last_update[env_ids] = 0.0 - # Set all reset sensors to outdated so that they are updated when data is called the next time. - self._is_outdated[env_ids] = True + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + wp.launch( + reset_envs_kernel, + dim=self._num_envs, + inputs=[env_mask, self._is_outdated, self._timestamp, self._timestamp_last_update], + device=self._device, + ) def update(self, dt: float, force_recompute: bool = False): + # Skip update if sensor is not initialized + if not self._is_initialized: + return # Update the timestamp for the sensors - self._timestamp += dt - self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period + wp.launch( + update_timestamp_kernel, + dim=self._num_envs, + inputs=[ + self._is_outdated, + self._timestamp, + self._timestamp_last_update, + dt, + self.cfg.update_period, + ], + device=self._device, + ) # Update the buffers - # TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!? - # It is only for the contact sensor but there we should redefine the update function IMO. - if force_recompute or self._is_visualizing or (self.cfg.history_length > 0): + if force_recompute or self._is_visualizing: self._update_outdated_buffers() """ @@ -210,12 +220,18 @@ def _initialize_impl(self): env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0] self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr) self._num_envs = len(self._parent_prims) - # Boolean tensor indicating whether the sensor data has to be refreshed - self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device) - # Current timestamp (in seconds) - self._timestamp = torch.zeros(self._num_envs, device=self._device) - # Timestamp from last update - self._timestamp_last_update = torch.zeros_like(self._timestamp) + # Create warp env mask arrays for "all envs" cases and resets. + # Note: We use wp.to_torch() to create zero-copy torch tensor views of warp arrays. + # This allows warp arrays to be passed to warp kernels while the corresponding torch + # views support fancy indexing (e.g. tensor[env_ids] = True) without any memory copies. + # Both the warp array and torch view share the same underlying device memory. + self._ALL_ENV_MASK = wp.ones((self._num_envs), dtype=wp.bool, device=self._device) + self._reset_mask = wp.zeros((self._num_envs), dtype=wp.bool, device=self._device) + self._reset_mask_torch = wp.to_torch(self._reset_mask) + # timestamp and outdated flags + self._is_outdated = wp.ones(self._num_envs, dtype=wp.bool, device=self._device) + self._timestamp = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._timestamp_last_update = wp.zeros_like(self._timestamp) # Initialize debug visualization handle if self._debug_vis_handle is None: @@ -223,14 +239,14 @@ def _initialize_impl(self): self.set_debug_vis(self.cfg.debug_vis) @abstractmethod - def _update_buffers_impl(self, env_ids: Sequence[int]): + def _update_buffers_impl(self, env_mask: wp.array): """Fills the sensor data for provided environment ids. This function does not perform any time-based checks and directly fills the data into the data container. Args: - env_ids: The indices of the sensors that are ready to capture. + env_mask: The mask of the environments that are ready to capture. """ raise NotImplementedError @@ -255,73 +271,71 @@ def _debug_vis_callback(self, event): """ def _register_callbacks(self): - """Registers the timeline and prim deletion callbacks.""" - - # register simulator callbacks (with weakref safety to avoid crashes on deletion) - def safe_callback(callback_name, event, obj_ref): - """Safely invoke a callback on a weakly-referenced object, ignoring ReferenceError if deleted.""" - try: - obj = obj_ref - getattr(obj, callback_name)(event) - except ReferenceError: - # Object has been deleted; ignore. - pass - - # note: use weakref on callbacks to ensure that this object can be deleted when its destructor is called. - # add callbacks for stage play/stop + """Registers physics lifecycle callbacks via the current backend's physics manager.""" + physics_mgr_cls = sim_utils.SimulationContext.instance().physics_manager + obj_ref = weakref.proxy(self) - timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() - # the order is set to 10 which is arbitrary but should be lower priority than the default order of 0 - # register timeline PLAY event callback (lower priority with order=10) - self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.PLAY), - lambda event, obj_ref=obj_ref: safe_callback("_initialize_callback", event, obj_ref), + def _invoke(callback_name, event): + getattr(obj_ref, callback_name)(event) + + # Backend-agnostic: PHYSICS_READY (init) and STOP (invalidate) + self._initialize_handle = physics_mgr_cls.register_callback( + lambda payload: PhysicsManager.safe_callback_invoke( + _invoke, "_initialize_callback", payload, physics_manager=physics_mgr_cls + ), + PhysicsEvent.PHYSICS_READY, order=10, ) - # register timeline STOP event callback (lower priority with order=10) - self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.STOP), - lambda event, obj_ref=obj_ref: safe_callback("_invalidate_initialize_callback", event, obj_ref), + self._invalidate_initialize_handle = physics_mgr_cls.register_callback( + lambda payload: PhysicsManager.safe_callback_invoke( + _invoke, "_invalidate_initialize_callback", payload, physics_manager=physics_mgr_cls + ), + PhysicsEvent.STOP, order=10, ) - # register prim deletion callback - self._prim_deletion_callback_id = SimulationManager.register_callback( - lambda event, obj_ref=obj_ref: safe_callback("_on_prim_deletion", event, obj_ref), - event=IsaacEvents.PRIM_DELETION, - ) + # Optional: prim deletion (only supported by PhysX backend) + self._prim_deletion_handle = None + if "physx" in physics_mgr_cls.__name__.lower(): + from isaaclab_physx.physics import IsaacEvents # noqa: PLC0415 + + self._prim_deletion_handle = physics_mgr_cls.register_callback( + lambda event: PhysicsManager.safe_callback_invoke( + _invoke, "_on_prim_deletion", event, physics_manager=physics_mgr_cls + ), + IsaacEvents.PRIM_DELETION, + ) def _initialize_callback(self, event): """Initializes the scene elements. - Note: - PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be - called whenever the simulator "plays" from a "stop" state. + .. note:: + Physics handles are only valid once the simulation is ready. This callback runs when + :attr:`PhysicsEvent.PHYSICS_READY` is dispatched by the current backend. """ if not self._is_initialized: - try: - self._initialize_impl() - except Exception as e: - if builtins.ISAACLAB_CALLBACK_EXCEPTION is None: - builtins.ISAACLAB_CALLBACK_EXCEPTION = e + self._initialize_impl() self._is_initialized = True def _invalidate_initialize_callback(self, event): """Invalidates the scene elements.""" self._is_initialized = False - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() + sim_ctx = sim_utils.SimulationContext.instance() + if sim_ctx is not None: + sim_ctx.vis_marker_registry.clear_debug_vis_callback(self) + else: self._debug_vis_handle = None - def _on_prim_deletion(self, prim_path: str) -> None: + def _on_prim_deletion(self, event) -> None: """Invalidates and deletes the callbacks when the prim is deleted. Args: - prim_path: The path to the prim that is being deleted. + event: The prim deletion event containing the prim path in payload. Note: This function is called when the prim is deleted. """ + prim_path = event.payload["prim_path"] if prim_path == "/": self._clear_callbacks() return @@ -333,18 +347,20 @@ def _on_prim_deletion(self, prim_path: str) -> None: def _clear_callbacks(self) -> None: """Clears the callbacks.""" - if self._prim_deletion_callback_id: - SimulationManager.deregister_callback(self._prim_deletion_callback_id) - self._prim_deletion_callback_id = None - if self._initialize_handle: - self._initialize_handle.unsubscribe() + if self._initialize_handle is not None: + self._initialize_handle.deregister() self._initialize_handle = None - if self._invalidate_initialize_handle: - self._invalidate_initialize_handle.unsubscribe() + if self._invalidate_initialize_handle is not None: + self._invalidate_initialize_handle.deregister() self._invalidate_initialize_handle = None - # clear debug visualization - if self._debug_vis_handle: - self._debug_vis_handle.unsubscribe() + if self._prim_deletion_handle is not None: + self._prim_deletion_handle.deregister() + self._prim_deletion_handle = None + # Clear debug visualization + sim_ctx = sim_utils.SimulationContext.instance() + if sim_ctx is not None: + sim_ctx.vis_marker_registry.clear_debug_vis_callback(self) + else: self._debug_vis_handle = None """ @@ -353,11 +369,92 @@ def _clear_callbacks(self) -> None: def _update_outdated_buffers(self): """Fills the sensor data for the outdated sensors.""" - outdated_env_ids = self._is_outdated.nonzero().squeeze(-1) - if len(outdated_env_ids) > 0: - # obtain new data - self._update_buffers_impl(outdated_env_ids) - # update the timestamp from last update - self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids] - # set outdated flag to false for the updated sensors - self._is_outdated[outdated_env_ids] = False + self._update_buffers_impl(self._is_outdated) + # update timestamps and clear outdated flags + wp.launch( + update_outdated_envs_kernel, + dim=self._num_envs, + inputs=[self._is_outdated, self._timestamp, self._timestamp_last_update], + device=self._device, + ) + + def _resolve_indices_and_mask( + self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None + ) -> wp.array: + """Resolve environment indices to a warp array and mask.""" + if env_ids is None and env_mask is None: + return self._ALL_ENV_MASK + elif env_mask is not None: + return env_mask + else: + self._reset_mask.zero_() + self._reset_mask_torch[env_ids] = True + return self._reset_mask + + def _resolve_and_spawn(self, sensor_name: str, **spawn_kwargs) -> None: + """Resolve physics-body prim paths and spawn the sensor prim if needed. + + Behavior matrix (``spawn`` refers to ``cfg.spawn``): + + +----------------+------------------+--------------------------------------------+ + | ``spawn`` | ``prim_path`` | Action | + +================+==================+============================================+ + | not ``None`` | physics body | Append ``/``, spawn child. | + +----------------+------------------+--------------------------------------------+ + | not ``None`` | non-physics prim | Use existing prim, skip spawn. | + | | (already exists) | | + +----------------+------------------+--------------------------------------------+ + | not ``None`` | does not exist | Spawn prim at ``prim_path``. | + +----------------+------------------+--------------------------------------------+ + | ``None`` | physics body | Raise ``ValueError``. | + +----------------+------------------+--------------------------------------------+ + | ``None`` | non-physics prim | Use as-is (no spawn). | + +----------------+------------------+--------------------------------------------+ + + Args: + sensor_name: Short identifier (e.g. ``"raycaster"``, ``"camera"``). + **spawn_kwargs: Extra keyword arguments forwarded to ``cfg.spawn.func`` + (e.g. ``translation``, ``orientation``). + + Raises: + ValueError: If ``spawn`` is ``None`` and ``prim_path`` is a physics body. + RuntimeError: If the prim does not exist after the spawn attempt. + """ + from pxr import UsdPhysics # noqa: PLC0415 + + spawn = getattr(self.cfg, "spawn", None) + has_spawn = spawn is not None + + # Determine the path to probe for physics-body redirect + spawn_path = (getattr(spawn, "spawn_path", None) or self.cfg.prim_path) if has_spawn else None + probe_path = spawn_path if spawn_path is not None else self.cfg.prim_path + + prim = sim_utils.find_first_matching_prim(probe_path) + if prim is not None and prim.IsValid(): + is_physics = prim.HasAPI(UsdPhysics.ArticulationRootAPI) or prim.HasAPI(UsdPhysics.RigidBodyAPI) + if is_physics: + if not has_spawn: + raise ValueError( + f"Sensor prim_path '{self.cfg.prim_path}' resolves to a physics body but" + f" no spawner is configured (spawn=None). Either set spawn or point" + f" prim_path at a non-physics child (e.g. '{self.cfg.prim_path}/{sensor_name}')." + ) + logger.info( + f"Sensor prim_path '{self.cfg.prim_path}' points at a physics body." + f" Redirecting to '{self.cfg.prim_path}/{sensor_name}'." + ) + self.cfg.prim_path = f"{self.cfg.prim_path}/{sensor_name}" + if getattr(spawn, "spawn_path", None) is not None: + spawn.spawn_path = f"{spawn.spawn_path}/{sensor_name}" + + if not has_spawn: + return + + spawn_target = getattr(spawn, "spawn_path", None) or self.cfg.prim_path + prim = sim_utils.find_first_matching_prim(spawn_target) + if prim is None or not prim.IsValid(): + spawn.func(spawn_target, spawn, **spawn_kwargs) + + check_path = getattr(spawn, "spawn_path", None) or self.cfg.prim_path + if len(sim_utils.find_matching_prims(check_path)) == 0: + raise RuntimeError(f"Could not find prim with path {check_path!r}.") diff --git a/source/isaaclab/isaaclab/sensors/sensor_base_cfg.py b/source/isaaclab/isaaclab/sensors/sensor_base_cfg.py index 85875b2e4996..e2397233be3c 100644 --- a/source/isaaclab/isaaclab/sensors/sensor_base_cfg.py +++ b/source/isaaclab/isaaclab/sensors/sensor_base_cfg.py @@ -4,17 +4,19 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING +from typing import TYPE_CHECKING from isaaclab.utils import configclass -from .sensor_base import SensorBase +if TYPE_CHECKING: + from .sensor_base import SensorBase @configclass class SensorBaseCfg: """Configuration parameters for a sensor.""" - class_type: type[SensorBase] = MISSING + class_type: type["SensorBase"] = MISSING """The associated sensor class. The class should inherit from :class:`isaaclab.sensors.sensor_base.SensorBase`. @@ -34,9 +36,5 @@ class SensorBaseCfg: update_period: float = 0.0 """Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step).""" - history_length: int = 0 - """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only - the current data is stored (no history).""" - debug_vis: bool = False """Whether to visualize the sensor. Defaults to False.""" diff --git a/source/isaaclab/isaaclab/sim/__init__.py b/source/isaaclab/isaaclab/sim/__init__.py index 1dc920f4e100..3c75a3548956 100644 --- a/source/isaaclab/isaaclab/sim/__init__.py +++ b/source/isaaclab/isaaclab/sim/__init__.py @@ -26,10 +26,6 @@ """ -from .converters import * # noqa: F401, F403 -from .schemas import * # noqa: F401, F403 -from .simulation_cfg import PhysxCfg, RenderCfg, SimulationCfg # noqa: F401, F403 -from .simulation_context import SimulationContext, build_simulation_context # noqa: F401, F403 -from .spawners import * # noqa: F401, F403 -from .utils import * # noqa: F401, F403 -from .views import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/__init__.pyi b/source/isaaclab/isaaclab/sim/__init__.pyi new file mode 100644 index 000000000000..a718ccdcb989 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/__init__.pyi @@ -0,0 +1,337 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RenderCfg", + "SimulationCfg", + "SimulationContext", + "build_simulation_context", + "AssetConverterBase", + "AssetConverterBaseCfg", + "MeshConverter", + "MeshConverterCfg", + "MjcfConverter", + "MjcfConverterCfg", + "UrdfConverter", + "UrdfConverterCfg", + "MESH_APPROXIMATION_TOKENS", + "PHYSX_MESH_COLLISION_CFGS", + "USD_MESH_COLLISION_CFGS", + "activate_contact_sensors", + "define_articulation_root_properties", + "define_collision_properties", + "define_mass_properties", + "define_mesh_collision_properties", + "define_rigid_body_properties", + "modify_articulation_root_properties", + "modify_collision_properties", + "modify_fixed_tendon_properties", + "modify_joint_drive_properties", + "modify_mass_properties", + "modify_mesh_collision_properties", + "modify_rigid_body_properties", + "modify_spatial_tendon_properties", + "ArticulationRootPropertiesCfg", + "BoundingCubePropertiesCfg", + "BoundingSpherePropertiesCfg", + "CollisionPropertiesCfg", + "ConvexDecompositionPropertiesCfg", + "ConvexHullPropertiesCfg", + "FixedTendonPropertiesCfg", + "JointDrivePropertiesCfg", + "MassPropertiesCfg", + "MeshCollisionPropertiesCfg", + "RigidBodyPropertiesCfg", + "SDFMeshPropertiesCfg", + "SpatialTendonPropertiesCfg", + "TriangleMeshPropertiesCfg", + "TriangleMeshSimplificationPropertiesCfg", + "SpawnerCfg", + "RigidObjectSpawnerCfg", + "spawn_from_mjcf", + "spawn_from_urdf", + "spawn_from_usd", + "spawn_from_usd_with_compliant_contact_material", + "spawn_ground_plane", + "GroundPlaneCfg", + "MjcfFileCfg", + "UrdfFileCfg", + "UsdFileCfg", + "UsdFileWithCompliantContactCfg", + "spawn_light", + "CylinderLightCfg", + "DiskLightCfg", + "DistantLightCfg", + "DomeLightCfg", + "LightCfg", + "SphereLightCfg", + "spawn_rigid_body_material", + "PhysicsMaterialCfg", + "RigidBodyMaterialCfg", + "spawn_from_mdl_file", + "spawn_preview_surface", + "GlassMdlCfg", + "MdlFileCfg", + "PreviewSurfaceCfg", + "VisualMaterialCfg", + "spawn_mesh_capsule", + "spawn_mesh_cone", + "spawn_mesh_cuboid", + "spawn_mesh_cylinder", + "spawn_mesh_sphere", + "spawn_mesh_square", + "MeshCapsuleCfg", + "MeshCfg", + "MeshConeCfg", + "MeshCuboidCfg", + "MeshCylinderCfg", + "MeshSphereCfg", + "MeshSquareCfg", + "spawn_camera", + "spawn_sensor_frame", + "FisheyeCameraCfg", + "PinholeCameraCfg", + "SensorFrameCfg", + "spawn_capsule", + "spawn_cone", + "spawn_cuboid", + "spawn_cylinder", + "spawn_sphere", + "CapsuleCfg", + "ConeCfg", + "CuboidCfg", + "CylinderCfg", + "ShapeCfg", + "SphereCfg", + "spawn_multi_asset", + "spawn_multi_usd_file", + "MultiAssetSpawnerCfg", + "MultiUsdFileCfg", + "add_reference_to_stage", + "get_stage_up_axis", + "traverse_stage", + "get_prim_at_path", + "get_prim_path", + "is_prim_path_valid", + "define_prim", + "get_prim_type_name", + "get_next_free_path", + "create_prim", + "delete_prim", + "make_uninstanceable", + "set_prim_visibility", + "safe_set_attribute_on_usd_schema", + "safe_set_attribute_on_usd_prim", + "change_prim_property", + "export_prim_to_file", + "apply_nested", + "clone", + "bind_visual_material", + "bind_physics_material", + "add_usd_reference", + "get_usd_references", + "select_usd_variants", + "get_next_free_prim_path", + "get_first_matching_ancestor_prim", + "get_first_matching_child_prim", + "get_all_matching_child_prims", + "find_first_matching_prim", + "find_matching_prims", + "find_matching_prim_paths", + "find_global_fixed_joint_prim", + "add_labels", + "get_labels", + "remove_labels", + "check_missing_labels", + "count_total_labels", + "resolve_paths", + "create_new_stage", + "is_current_stage_in_memory", + "open_stage", + "use_stage", + "update_stage", + "save_stage", + "close_stage", + "clear_stage", + "get_current_stage", + "get_current_stage_id", + "standardize_xform_ops", + "validate_standard_xform_ops", + "resolve_prim_pose", + "resolve_prim_scale", + "convert_world_pose_to_local", + "BaseFrameView", + "UsdFrameView", + "FrameView", + # Deprecated alias + "XformPrimView", +] + +from .simulation_cfg import RenderCfg, SimulationCfg +from .simulation_context import SimulationContext, build_simulation_context +from .converters import ( + AssetConverterBase, + AssetConverterBaseCfg, + MeshConverter, + MeshConverterCfg, + MjcfConverter, + MjcfConverterCfg, + UrdfConverter, + UrdfConverterCfg, +) +from .schemas import ( + MESH_APPROXIMATION_TOKENS, + PHYSX_MESH_COLLISION_CFGS, + USD_MESH_COLLISION_CFGS, + activate_contact_sensors, + define_articulation_root_properties, + define_collision_properties, + define_mass_properties, + define_mesh_collision_properties, + define_rigid_body_properties, + modify_articulation_root_properties, + modify_collision_properties, + modify_fixed_tendon_properties, + modify_joint_drive_properties, + modify_mass_properties, + modify_mesh_collision_properties, + modify_rigid_body_properties, + modify_spatial_tendon_properties, + ArticulationRootPropertiesCfg, + BoundingCubePropertiesCfg, + BoundingSpherePropertiesCfg, + CollisionPropertiesCfg, + ConvexDecompositionPropertiesCfg, + ConvexHullPropertiesCfg, + FixedTendonPropertiesCfg, + JointDrivePropertiesCfg, + MassPropertiesCfg, + MeshCollisionPropertiesCfg, + RigidBodyPropertiesCfg, + SDFMeshPropertiesCfg, + SpatialTendonPropertiesCfg, + TriangleMeshPropertiesCfg, + TriangleMeshSimplificationPropertiesCfg, +) +from .spawners import ( + SpawnerCfg, + RigidObjectSpawnerCfg, + spawn_from_mjcf, + spawn_from_urdf, + spawn_from_usd, + spawn_from_usd_with_compliant_contact_material, + spawn_ground_plane, + GroundPlaneCfg, + MjcfFileCfg, + UrdfFileCfg, + UsdFileCfg, + UsdFileWithCompliantContactCfg, + spawn_light, + CylinderLightCfg, + DiskLightCfg, + DistantLightCfg, + DomeLightCfg, + LightCfg, + SphereLightCfg, + spawn_rigid_body_material, + PhysicsMaterialCfg, + RigidBodyMaterialCfg, + spawn_from_mdl_file, + spawn_preview_surface, + GlassMdlCfg, + MdlFileCfg, + PreviewSurfaceCfg, + VisualMaterialCfg, + spawn_mesh_capsule, + spawn_mesh_cone, + spawn_mesh_cuboid, + spawn_mesh_cylinder, + spawn_mesh_sphere, + spawn_mesh_square, + MeshCapsuleCfg, + MeshCfg, + MeshConeCfg, + MeshCuboidCfg, + MeshCylinderCfg, + MeshSphereCfg, + MeshSquareCfg, + spawn_camera, + spawn_sensor_frame, + FisheyeCameraCfg, + PinholeCameraCfg, + SensorFrameCfg, + spawn_capsule, + spawn_cone, + spawn_cuboid, + spawn_cylinder, + spawn_sphere, + CapsuleCfg, + ConeCfg, + CuboidCfg, + CylinderCfg, + ShapeCfg, + SphereCfg, + spawn_multi_asset, + spawn_multi_usd_file, + MultiAssetSpawnerCfg, + MultiUsdFileCfg, +) +from .utils import ( + add_reference_to_stage, + get_stage_up_axis, + traverse_stage, + get_prim_at_path, + get_prim_path, + is_prim_path_valid, + define_prim, + get_prim_type_name, + get_next_free_path, + create_prim, + delete_prim, + make_uninstanceable, + set_prim_visibility, + safe_set_attribute_on_usd_schema, + safe_set_attribute_on_usd_prim, + change_prim_property, + export_prim_to_file, + apply_nested, + clone, + bind_visual_material, + bind_physics_material, + add_usd_reference, + get_usd_references, + select_usd_variants, + get_next_free_prim_path, + get_first_matching_ancestor_prim, + get_first_matching_child_prim, + get_all_matching_child_prims, + find_first_matching_prim, + find_matching_prims, + find_matching_prim_paths, + find_global_fixed_joint_prim, + add_labels, + get_labels, + remove_labels, + check_missing_labels, + count_total_labels, + resolve_paths, + create_new_stage, + is_current_stage_in_memory, + open_stage, + use_stage, + update_stage, + save_stage, + close_stage, + clear_stage, + get_current_stage, + get_current_stage_id, + standardize_xform_ops, + validate_standard_xform_ops, + resolve_prim_pose, + resolve_prim_scale, + convert_world_pose_to_local, +) +from .views import BaseFrameView, UsdFrameView, FrameView +from .views import XformPrimView # deprecated alias diff --git a/source/isaaclab/isaaclab/sim/converters/__init__.py b/source/isaaclab/isaaclab/sim/converters/__init__.py index 7503c53bdd85..10cdd4c3b4f4 100644 --- a/source/isaaclab/isaaclab/sim/converters/__init__.py +++ b/source/isaaclab/isaaclab/sim/converters/__init__.py @@ -16,11 +16,6 @@ """ -from .asset_converter_base import AssetConverterBase -from .asset_converter_base_cfg import AssetConverterBaseCfg -from .mesh_converter import MeshConverter -from .mesh_converter_cfg import MeshConverterCfg -from .mjcf_converter import MjcfConverter -from .mjcf_converter_cfg import MjcfConverterCfg -from .urdf_converter import UrdfConverter -from .urdf_converter_cfg import UrdfConverterCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/converters/__init__.pyi b/source/isaaclab/isaaclab/sim/converters/__init__.pyi new file mode 100644 index 000000000000..3108576f84a0 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/converters/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "AssetConverterBase", + "AssetConverterBaseCfg", + "MeshConverter", + "MeshConverterCfg", + "MjcfConverter", + "MjcfConverterCfg", + "UrdfConverter", + "UrdfConverterCfg", +] + +from .asset_converter_base import AssetConverterBase +from .asset_converter_base_cfg import AssetConverterBaseCfg +from .mesh_converter import MeshConverter +from .mesh_converter_cfg import MeshConverterCfg +from .mjcf_converter import MjcfConverter +from .mjcf_converter_cfg import MjcfConverterCfg +from .urdf_converter import UrdfConverter +from .urdf_converter_cfg import UrdfConverterCfg diff --git a/source/isaaclab/isaaclab/sim/converters/asset_converter_base_cfg.py b/source/isaaclab/isaaclab/sim/converters/asset_converter_base_cfg.py index 79bb8d17d41c..3ba2a27c18cb 100644 --- a/source/isaaclab/isaaclab/sim/converters/asset_converter_base_cfg.py +++ b/source/isaaclab/isaaclab/sim/converters/asset_converter_base_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from dataclasses import MISSING from isaaclab.utils import configclass diff --git a/source/isaaclab/isaaclab/sim/converters/mesh_converter.py b/source/isaaclab/isaaclab/sim/converters/mesh_converter.py index 4a79a908bab1..74ba8b470c3a 100644 --- a/source/isaaclab/isaaclab/sim/converters/mesh_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/mesh_converter.py @@ -9,7 +9,7 @@ import omni import omni.kit.commands -from isaacsim.core.utils.extensions import enable_extension +from isaacsim.core.experimental.utils.app import enable_extension from pxr import Gf, Tf, Usd, UsdGeom, UsdPhysics, UsdUtils from isaaclab.sim.converters.asset_converter_base import AssetConverterBase @@ -157,7 +157,7 @@ def _convert_asset(self, cfg: MeshConverterCfg): translate_op.Set(Gf.Vec3d(*cfg.translation)) # rotation orient_op = geom_xform.AddOrientOp(UsdGeom.XformOp.PrecisionDouble) - orient_op.Set(Gf.Quatd(*cfg.rotation)) + orient_op.Set(Gf.Quatd(cfg.rotation[3], cfg.rotation[0], cfg.rotation[1], cfg.rotation[2])) # scale scale_op = geom_xform.AddScaleOp(UsdGeom.XformOp.PrecisionDouble) scale_op.Set(Gf.Vec3d(*cfg.scale)) @@ -232,7 +232,7 @@ async def _convert_mesh_to_usd(in_file: str, out_file: str, load_materials: bool # Merge all meshes into one converter_context.merge_all_meshes = True # Sets world units to meters, this will also scale asset if it's centimeters model. - # This does not work right now :(, so we need to scale the mesh manually + # This does not work right now :( so we need to scale the mesh manually converter_context.use_meter_as_world_unit = True converter_context.baking_scales = True # Uses double precision for all transform ops. diff --git a/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py b/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py index 3d231ac1efee..767f6dd04583 100644 --- a/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py +++ b/source/isaaclab/isaaclab/sim/converters/mesh_converter_cfg.py @@ -41,8 +41,8 @@ class MeshConverterCfg(AssetConverterBaseCfg): translation: tuple[float, float, float] = (0.0, 0.0, 0.0) """The translation of the mesh to the origin. Defaults to (0.0, 0.0, 0.0).""" - rotation: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """The rotation of the mesh in quaternion format (w, x, y, z). Defaults to (1.0, 0.0, 0.0, 0.0).""" + rotation: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """The rotation of the mesh in quaternion format (x, y, z, w). Defaults to (0.0, 0.0, 0.0, 1.0).""" scale: tuple[float, float, float] = (1.0, 1.0, 1.0) """The scale of the mesh. Defaults to (1.0, 1.0, 1.0).""" diff --git a/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py b/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py index 17808f59b948..2c8fe992a35a 100644 --- a/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/mjcf_converter.py @@ -6,23 +6,17 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING - -import omni.kit.commands from .asset_converter_base import AssetConverterBase from .mjcf_converter_cfg import MjcfConverterCfg -if TYPE_CHECKING: - import isaacsim.asset.importer.mjcf - class MjcfConverter(AssetConverterBase): """Converter for a MJCF description file to a USD file. This class wraps around the `isaacsim.asset.importer.mjcf`_ extension to provide a lazy implementation - for MJCF to USD conversion. It stores the output USD file in an instanceable format since that is - what is typically used in all learning related applications. + for MJCF to USD conversion. It uses the :class:`MJCFImporter` class and :class:`MJCFImporterConfig` + dataclass from Isaac Sim to perform the conversion. .. caution:: The current lazy conversion implementation does not automatically trigger USD generation if @@ -30,10 +24,15 @@ class MjcfConverter(AssetConverterBase): :obj:`AssetConverterBaseCfg.force_usd_conversion` to True or delete the output directory. .. note:: - From Isaac Sim 4.5 onwards, the extension name changed from ``omni.importer.mjcf`` to - ``isaacsim.asset.importer.mjcf``. This converter class now uses the latest extension from Isaac Sim. + From Isaac Sim 5.0 onwards, the MJCF importer uses the ``mujoco-usd-converter`` library + and the :class:`MJCFImporter` / :class:`MJCFImporterConfig` API. The old command-based API + (``MJCFCreateAsset`` / ``MJCFCreateImportConfig``) is deprecated. + + .. note:: + The :attr:`~AssetConverterBaseCfg.make_instanceable` setting from the base class is not + supported by the new MJCF importer and will be ignored. - .. _isaacsim.asset.importer.mjcf: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_mjcf.html + .. _isaacsim.asset.importer.mjcf: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_mjcf.html """ cfg: MjcfConverterCfg @@ -43,8 +42,12 @@ def __init__(self, cfg: MjcfConverterCfg): """Initializes the class. Args: - cfg: The configuration instance for URDF to USD conversion. + cfg: The configuration instance for MJCF to USD conversion. """ + # The new MJCF importer outputs to: {usd_path}/{robot_name}/{robot_name}.usda + # Pre-adjust usd_file_name to match this output structure so that lazy conversion works correctly. + file_basename = os.path.splitext(os.path.basename(cfg.asset_path))[0] + cfg.usd_file_name = os.path.join(file_basename, f"{file_basename}.usda") super().__init__(cfg=cfg) """ @@ -52,54 +55,32 @@ def __init__(self, cfg: MjcfConverterCfg): """ def _convert_asset(self, cfg: MjcfConverterCfg): - """Calls underlying Omniverse command to convert MJCF to USD. + """Calls underlying Isaac Sim MJCFImporter to convert MJCF to USD. Args: cfg: The configuration instance for MJCF to USD conversion. """ - import_config = self._get_mjcf_import_config() - file_basename, _ = os.path.basename(cfg.asset_path).split(".") - omni.kit.commands.execute( - "MJCFCreateAsset", - mjcf_path=cfg.asset_path, - import_config=import_config, - dest_path=self.usd_path, - prim_path=f"/{file_basename}", - ) + import shutil - def _get_mjcf_import_config(self) -> isaacsim.asset.importer.mjcf._mjcf.ImportConfig: - """Returns the import configuration for MJCF to USD conversion. + from isaacsim.asset.importer.mjcf import MJCFImporter, MJCFImporterConfig - Returns: - The constructed ``ImportConfig`` object containing the desired settings. - """ + # Clean up existing output subdirectory so the importer writes fresh files. + # The MJCFImporter outputs to {usd_dir}/{robot_name}/{robot_name}.usda and may + # skip writing if the output already exists from a previous conversion. + file_basename = os.path.splitext(os.path.basename(cfg.asset_path))[0] + output_subdir = os.path.join(self.usd_dir, file_basename) + if os.path.exists(output_subdir): + shutil.rmtree(output_subdir) + + import_config = MJCFImporterConfig( + mjcf_path=cfg.asset_path, + usd_path=self.usd_dir, + merge_mesh=cfg.merge_mesh, + collision_from_visuals=cfg.collision_from_visuals, + collision_type=cfg.collision_type, + allow_self_collision=cfg.self_collision, + import_scene=cfg.import_physics_scene, + ) - _, import_config = omni.kit.commands.execute("MJCFCreateImportConfig") - - # set the unit scaling factor, 1.0 means meters, 100.0 means cm - # import_config.set_distance_scale(1.0) - # set imported robot as default prim - # import_config.set_make_default_prim(True) - # add a physics scene to the stage on import if none exists - # import_config.set_create_physics_scene(False) - # set flag to parse tag - import_config.set_import_sites(True) - - # -- instancing settings - # meshes will be placed in a separate usd file - import_config.set_make_instanceable(self.cfg.make_instanceable) - import_config.set_instanceable_usd_path(self.usd_instanceable_meshes_path) - - # -- asset settings - # default density used for links, use 0 to auto-compute - import_config.set_density(self.cfg.link_density) - # import inertia tensor from urdf, if it is not specified in urdf it will import as identity - import_config.set_import_inertia_tensor(self.cfg.import_inertia_tensor) - - # -- physics settings - # create fix joint for base link - import_config.set_fix_base(self.cfg.fix_base) - # self collisions between links in the articulation - import_config.set_self_collision(self.cfg.self_collision) - - return import_config + importer = MJCFImporter(import_config) + importer.import_mjcf() diff --git a/source/isaaclab/isaaclab/sim/converters/mjcf_converter_cfg.py b/source/isaaclab/isaaclab/sim/converters/mjcf_converter_cfg.py index 7cbd83e3e9f2..a9dbe620c7da 100644 --- a/source/isaaclab/isaaclab/sim/converters/mjcf_converter_cfg.py +++ b/source/isaaclab/isaaclab/sim/converters/mjcf_converter_cfg.py @@ -3,33 +3,39 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import MISSING - from isaaclab.sim.converters.asset_converter_base_cfg import AssetConverterBaseCfg from isaaclab.utils import configclass @configclass class MjcfConverterCfg(AssetConverterBaseCfg): - """The configuration class for MjcfConverter.""" + """The configuration class for MjcfConverter. - link_density = 0.0 - """Default density used for links. Defaults to 0. + .. note:: + From Isaac Sim 5.0 onwards, the MJCF importer was rewritten to use the ``mujoco-usd-converter`` + library. Several settings from the old importer (``fix_base``, ``link_density``, + ``import_inertia_tensor``, ``import_sites``) are no longer available as they are handled + automatically by the converter based on the MJCF file content. - This setting is only effective if ``"inertial"`` properties are missing in the MJCF. + .. note:: + The :attr:`~AssetConverterBaseCfg.make_instanceable` setting from the base class is not + supported by the new MJCF importer and will be ignored. """ - import_inertia_tensor: bool = True - """Import the inertia tensor from mjcf. Defaults to True. + merge_mesh: bool = False + """Merge meshes where possible to optimize the model. Defaults to False.""" - If the ``"inertial"`` tag is missing, then it is imported as an identity. - """ + collision_from_visuals: bool = False + """Generate collision geometry from visual geometries. Defaults to False.""" - fix_base: bool = MISSING - """Create a fix joint to the root/base link. Defaults to True.""" + collision_type: str = "Convex Hull" + """Type of collision geometry to use. Defaults to ``"Convex Hull"``. - import_sites: bool = True - """Import the sites from the MJCF. Defaults to True.""" + Supported values are ``"Convex Hull"``, and ``"Convex Decomposition"``. + """ self_collision: bool = False """Activate self-collisions between links of the articulation. Defaults to False.""" + + import_physics_scene: bool = False + """Import the physics scene (time step per second, gravity, etc.) from the MJCF file. Defaults to False.""" diff --git a/source/isaaclab/isaaclab/sim/converters/urdf_converter.py b/source/isaaclab/isaaclab/sim/converters/urdf_converter.py index ba80f520355e..a2c41483a46f 100644 --- a/source/isaaclab/isaaclab/sim/converters/urdf_converter.py +++ b/source/isaaclab/isaaclab/sim/converters/urdf_converter.py @@ -5,23 +5,22 @@ from __future__ import annotations +import contextlib +import gc +import importlib import math +import os +import pathlib import re -from typing import TYPE_CHECKING - -from packaging.version import Version +import shutil +import tempfile +import carb import omni.kit.app -import omni.kit.commands - -from isaaclab.utils.version import get_isaac_sim_version from .asset_converter_base import AssetConverterBase from .urdf_converter_cfg import UrdfConverterCfg -if TYPE_CHECKING: - import isaacsim.asset.importer.urdf - class UrdfConverter(AssetConverterBase): """Converter for a URDF description file to a USD file. @@ -40,14 +39,16 @@ class UrdfConverter(AssetConverterBase): ``isaacsim.asset.importer.urdf``. .. note:: - In Isaac Sim 5.1, the URDF importer changed the default behavior of merging fixed joints. - Links connected through ``fixed_joint`` elements are no longer merged when their URDF link - entries specify mass and inertia, even if ``merge-joint`` is set to True. The new behavior - treats links with mass/inertia as full bodies rather than zero-mass reference frames. + In the URDF importer 3.0, the conversion pipeline uses the ``urdf-usd-converter`` library + and the ``isaacsim.asset.transformer.rules`` extension to produce structured USD output. + Features such as ``convert_mimic_joints_to_normal_joints`` and + ``replace_cylinders_with_capsules`` are no longer natively supported by the importer and + will emit warnings if enabled. - To maintain backwards compatibility, **this converter pins to an older version of the - URDF importer extension** (version 2.4.31) that still merges fixed joints by default. - This allows existing URDFs to work as expected without modification. + .. note:: + The ``merge_fixed_joints`` feature is implemented as a URDF XML pre-processing step that + runs *before* the USD conversion. It removes fixed joints from the URDF and merges the + child link's visual, collision, and inertial elements into the parent link. .. _isaacsim.asset.importer.urdf: https://docs.isaacsim.omniverse.nvidia.com/latest/importer_exporter/ext_isaacsim_asset_importer_urdf.html """ @@ -61,16 +62,16 @@ def __init__(self, cfg: UrdfConverterCfg): Args: cfg: The configuration instance for URDF to USD conversion. """ - # switch to older version of the URDF importer extension - if get_isaac_sim_version() >= Version("5.1"): - manager = omni.kit.app.get_app().get_extension_manager() - if not manager.is_extension_enabled("isaacsim.asset.importer.urdf-2.4.31"): - manager.set_extension_enabled_immediate("isaacsim.asset.importer.urdf-2.4.31", True) + # enable the URDF importer extension + manager = omni.kit.app.get_app().get_extension_manager() + if not manager.is_extension_enabled("isaacsim.asset.importer.urdf"): + manager.set_extension_enabled_immediate("isaacsim.asset.importer.urdf", True) - # acquire the URDF interface - from isaacsim.asset.importer.urdf._urdf import acquire_urdf_interface + # set `usd_file_name` to match the new importer's output path structure: + # the importer generates `{usd_path}/{robot_name}/{robot_name}.usda` + robot_name = pathlib.PurePath(cfg.asset_path).stem + cfg.usd_file_name = os.path.join(robot_name, f"{robot_name}.usda") - self._urdf_interface = acquire_urdf_interface() super().__init__(cfg=cfg) """ @@ -78,265 +79,476 @@ def __init__(self, cfg: UrdfConverterCfg): """ def _convert_asset(self, cfg: UrdfConverterCfg): - """Calls underlying Omniverse command to convert URDF to USD. + """Calls the URDF importer 3.0 pipeline to convert URDF to USD. + + This method replicates the ``URDFImporter.import_urdf()`` pipeline from the + ``isaacsim.asset.importer.urdf`` extension, inserting IsaacLab-specific post-processing + (fix base, joint drives, link density) on the intermediate stage before the asset + transformer restructures the output. Args: cfg: The URDF conversion configuration. """ + from isaacsim.asset.importer.utils.impl import importer_utils, stage_utils + from pxr import Sdf + + from .urdf_utils import merge_fixed_joints + + # log warnings for features no longer supported by the URDF importer 3.0 + self._warn_unsupported_features(cfg) + + urdf_path = os.path.normpath(cfg.asset_path) + robot_name = os.path.basename(urdf_path).split(".")[0] + usd_path = os.path.normpath(self.usd_dir) + + # step 0: optionally pre-process the URDF to merge fixed joints + # The merged file is written next to the original so that relative mesh paths + # (e.g. ``meshes/link.stl``) continue to resolve correctly. If the source + # directory is read-only, a temp directory is used as a fallback (relative mesh + # paths may not resolve in that case). + merged_urdf_path: str | None = None + if cfg.merge_fixed_joints: + urdf_dir = os.path.dirname(urdf_path) + try: + fd, merged_urdf_path = tempfile.mkstemp(suffix=".urdf", prefix=".merged_", dir=urdf_dir) + os.close(fd) + except OSError: + carb.log_warn( + "UrdfConverter: Cannot write merged URDF next to the original (read-only directory)." + " Falling back to a temp directory — relative mesh paths may not resolve." + ) + merged_urdf_dir = tempfile.mkdtemp(prefix="isaaclab_urdf_merge_") + merged_urdf_path = os.path.join(merged_urdf_dir, os.path.basename(urdf_path)) + merge_fixed_joints(urdf_path, merged_urdf_path) + urdf_path = merged_urdf_path + + usdex_path = os.path.normpath(os.path.join(usd_path, "usdex")) + intermediate_path = os.path.normpath(os.path.join(usd_path, "temp", f"{robot_name}.usd")) + + # step 1: convert URDF to intermediate USD using urdf-usd-converter + urdf_usd_converter = importlib.import_module("urdf_usd_converter") + converter = urdf_usd_converter.Converter(layer_structure=False, scene=False) + asset: Sdf.AssetPath = converter.convert(urdf_path, usdex_path) + + # step 2: open the intermediate stage and run standard post-processing + stage = stage_utils.open_stage(asset.path) + if not stage: + raise ValueError(f"Failed to open intermediate stage at path: {asset.path}") + + importer_utils.remove_custom_scopes(stage) + importer_utils.add_rigid_body_schemas(stage) + importer_utils.add_joint_schemas(stage) + + # step 3: apply optional importer features + if cfg.collision_from_visuals: + importer_utils.collision_from_visuals(stage, cfg.collision_type) + + importer_utils.enable_self_collision(stage, cfg.self_collision) + + # step 4: IsaacLab-specific post-processing on the intermediate stage + if cfg.fix_base: + self._apply_fix_base(stage) + + if cfg.link_density > 0: + self._apply_link_density(stage, cfg.link_density) + + if cfg.joint_drive: + self._apply_joint_drives(stage, cfg) + + # step 5: save the intermediate stage + stage_utils.save_stage(stage, intermediate_path) + stage = None + gc.collect() + + # step 6: run the asset transformer to produce the final structured output + ext_manager = omni.kit.app.get_app().get_extension_manager() + ext_id = ext_manager.get_enabled_extension_id("isaacsim.asset.transformer.rules") + extension_path = ext_manager.get_extension_path(ext_id) + asset_structure_profile_json_path = os.path.normpath( + os.path.abspath(os.path.join(extension_path, "data", "isaacsim_structure.json")) + ) - import_config = self._get_urdf_import_config() - # parse URDF file - result, self._robot_model = omni.kit.commands.execute( - "URDFParseFile", urdf_path=cfg.asset_path, import_config=import_config + importer_utils.run_asset_transformer_profile( + input_stage_path=intermediate_path, + output_package_root=os.path.normpath(os.path.join(usd_path, robot_name)), + profile_json_path=asset_structure_profile_json_path, ) - if result: - if cfg.joint_drive: - # modify joint parameters - self._update_joint_parameters() - - # set root link name - if cfg.root_link_name: - self._robot_model.root_link = cfg.root_link_name - - # convert the model to USD - omni.kit.commands.execute( - "URDFImportRobot", - urdf_path=cfg.asset_path, - urdf_robot=self._robot_model, - import_config=import_config, - dest_path=self.usd_path, - ) - else: - raise ValueError(f"Failed to parse URDF file: {cfg.asset_path}") + # step 6b: fix ArticulationRootAPI placement for fixed-base articulations. + # After the asset transformer, ArticulationRootAPI ends up on the root rigid body. + # Having a FixedJoint on the same rigid body that has ArticulationRootAPI causes + # PhysX to treat the articulation as a floating-base + constraint (maximal coordinate + # tree) rather than a fixed-base reduced-coordinate articulation. + # Moving ArticulationRootAPI to the parent of the root rigid body resolves this. + if cfg.fix_base: + final_usd_path = os.path.join(usd_path, robot_name, f"{robot_name}.usda") + self._fix_articulation_root_for_fixed_base(final_usd_path) + + # step 7: clean up intermediate files + if os.path.exists(usdex_path): + shutil.rmtree(usdex_path) + temp_dir = os.path.dirname(intermediate_path) + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) + if merged_urdf_path is not None: + with contextlib.suppress(OSError): + os.remove(merged_urdf_path) + # if we used a fallback temp directory, clean that up too + merged_parent = os.path.dirname(merged_urdf_path) + if merged_parent.startswith(tempfile.gettempdir()) and os.path.isdir(merged_parent): + shutil.rmtree(merged_parent, ignore_errors=True) """ Helper methods. """ - def _get_urdf_import_config(self) -> isaacsim.asset.importer.urdf._urdf.ImportConfig: - """Create and fill URDF ImportConfig with desired settings + @staticmethod + def _warn_unsupported_features(cfg: UrdfConverterCfg): + """Log warnings for configuration options no longer supported by the URDF importer 3.0. - Returns: - The constructed ``ImportConfig`` object containing the desired settings. + Args: + cfg: The URDF conversion configuration. """ - # create a new import config - _, import_config = omni.kit.commands.execute("URDFCreateImportConfig") - - # set the unit scaling factor, 1.0 means meters, 100.0 means cm - import_config.set_distance_scale(1.0) - # set imported robot as default prim - import_config.set_make_default_prim(True) - # add a physics scene to the stage on import if none exists - import_config.set_create_physics_scene(False) - - # -- asset settings - # default density used for links, use 0 to auto-compute - import_config.set_density(self.cfg.link_density) - # mesh simplification settings - convex_decomp = self.cfg.collider_type == "convex_decomposition" - import_config.set_convex_decomp(convex_decomp) - # create collision geometry from visual geometry - import_config.set_collision_from_visuals(self.cfg.collision_from_visuals) - # consolidating links that are connected by fixed joints - import_config.set_merge_fixed_joints(self.cfg.merge_fixed_joints) - import_config.set_merge_fixed_ignore_inertia(self.cfg.merge_fixed_joints) - # -- physics settings - # create fix joint for base link - import_config.set_fix_base(self.cfg.fix_base) - # self collisions between links in the articulation - import_config.set_self_collision(self.cfg.self_collision) - # convert mimic joints to normal joints - import_config.set_parse_mimic(self.cfg.convert_mimic_joints_to_normal_joints) - # replace cylinder shapes with capsule shapes - import_config.set_replace_cylinders_with_capsules(self.cfg.replace_cylinders_with_capsules) - - return import_config - - def _update_joint_parameters(self): - """Update the joint parameters based on the configuration.""" - # set the drive type - self._set_joints_drive_type() - # set the drive target type - self._set_joints_drive_target_type() - # set the drive gains - self._set_joint_drive_gains() - - def _set_joints_drive_type(self): - """Set the joint drive type for all joints in the URDF model.""" - from isaacsim.asset.importer.urdf._urdf import UrdfJointDriveType - - drive_type_mapping = { - "force": UrdfJointDriveType.JOINT_DRIVE_FORCE, - "acceleration": UrdfJointDriveType.JOINT_DRIVE_ACCELERATION, - } - - if isinstance(self.cfg.joint_drive.drive_type, str): - for joint in self._robot_model.joints.values(): - joint.drive.set_drive_type(drive_type_mapping[self.cfg.joint_drive.drive_type]) - elif isinstance(self.cfg.joint_drive.drive_type, dict): - for joint_name, drive_type in self.cfg.joint_drive.drive_type.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the drive type config was not found in the URDF file. The" - f" joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - joint.drive.set_drive_type(drive_type_mapping[drive_type]) - - def _set_joints_drive_target_type(self): - """Set the joint drive target type for all joints in the URDF model.""" - from isaacsim.asset.importer.urdf._urdf import UrdfJointTargetType - - target_type_mapping = { - "none": UrdfJointTargetType.JOINT_DRIVE_NONE, - "position": UrdfJointTargetType.JOINT_DRIVE_POSITION, - "velocity": UrdfJointTargetType.JOINT_DRIVE_VELOCITY, - } - - if isinstance(self.cfg.joint_drive.target_type, str): - for joint in self._robot_model.joints.values(): - joint.drive.set_target_type(target_type_mapping[self.cfg.joint_drive.target_type]) - elif isinstance(self.cfg.joint_drive.target_type, dict): - for joint_name, target_type in self.cfg.joint_drive.target_type.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the target type config was not found in the URDF file. The" - f" joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - joint.drive.set_target_type(target_type_mapping[target_type]) - - def _set_joint_drive_gains(self): - """Set the joint drive gains for all joints in the URDF model.""" - - # set the gains directly from stiffness and damping values - if isinstance(self.cfg.joint_drive.gains, UrdfConverterCfg.JointDriveCfg.PDGainsCfg): - # stiffness - if isinstance(self.cfg.joint_drive.gains.stiffness, (float, int)): - for joint in self._robot_model.joints.values(): - self._set_joint_drive_stiffness(joint, self.cfg.joint_drive.gains.stiffness) - elif isinstance(self.cfg.joint_drive.gains.stiffness, dict): - for joint_name, stiffness in self.cfg.joint_drive.gains.stiffness.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the drive stiffness config was not found in the URDF file." - f" The joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - self._set_joint_drive_stiffness(joint, stiffness) - # damping - if isinstance(self.cfg.joint_drive.gains.damping, (float, int)): - for joint in self._robot_model.joints.values(): - self._set_joint_drive_damping(joint, self.cfg.joint_drive.gains.damping) - elif isinstance(self.cfg.joint_drive.gains.damping, dict): - for joint_name, damping in self.cfg.joint_drive.gains.damping.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the drive damping config was not found in the URDF file." - f" The joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - self._set_joint_drive_damping(joint, damping) - - # set the gains from natural frequency and damping ratio - elif isinstance(self.cfg.joint_drive.gains, UrdfConverterCfg.JointDriveCfg.NaturalFrequencyGainsCfg): - # damping ratio - if isinstance(self.cfg.joint_drive.gains.damping_ratio, (float, int)): - for joint in self._robot_model.joints.values(): - joint.drive.damping_ratio = self.cfg.joint_drive.gains.damping_ratio - elif isinstance(self.cfg.joint_drive.gains.damping_ratio, dict): - for joint_name, damping_ratio in self.cfg.joint_drive.gains.damping_ratio.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the damping ratio config was not found in the URDF file." - f" The joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - joint.drive.damping_ratio = damping_ratio - - # natural frequency (this has to be done after damping ratio is set) - if isinstance(self.cfg.joint_drive.gains.natural_frequency, (float, int)): - for joint in self._robot_model.joints.values(): - joint.drive.natural_frequency = self.cfg.joint_drive.gains.natural_frequency - self._set_joint_drive_gains_from_natural_frequency(joint) - elif isinstance(self.cfg.joint_drive.gains.natural_frequency, dict): - for joint_name, natural_frequency in self.cfg.joint_drive.gains.natural_frequency.items(): - # handle joint name being a regex - matches = [s for s in self._robot_model.joints.keys() if re.search(joint_name, s)] - if not matches: - raise ValueError( - f"The joint name {joint_name} in the natural frequency config was not found in the URDF" - f" file. The joint names in the URDF are {list(self._robot_model.joints.keys())}" - ) - for match in matches: - joint = self._robot_model.joints[match] - joint.drive.natural_frequency = natural_frequency - self._set_joint_drive_gains_from_natural_frequency(joint) - - def _set_joint_drive_stiffness(self, joint, stiffness: float): - """Set the joint drive stiffness. + if cfg.convert_mimic_joints_to_normal_joints: + carb.log_warn( + "UrdfConverter: 'convert_mimic_joints_to_normal_joints' is no longer supported" + " by the URDF importer 3.0." + ) + if cfg.replace_cylinders_with_capsules: + carb.log_warn( + "UrdfConverter: 'replace_cylinders_with_capsules' is no longer supported by the URDF importer 3.0." + ) + if cfg.root_link_name: + carb.log_warn("UrdfConverter: 'root_link_name' is no longer supported by the URDF importer 3.0.") + if cfg.joint_drive and isinstance( + cfg.joint_drive.gains, + UrdfConverterCfg.JointDriveCfg.NaturalFrequencyGainsCfg, + ): + import warnings + + warnings.warn( + "UrdfConverter: 'NaturalFrequencyGainsCfg' is deprecated and no longer supported by the" + " URDF importer 3.0. The `compute_natural_stiffness` function has been removed." + " Joint drive gains will be left at the values produced by the URDF importer." + " Please use 'PDGainsCfg' instead.", + DeprecationWarning, + stacklevel=2, + ) + + @staticmethod + def _apply_fix_base(stage): + """Add a fixed joint from the world to the root link of the robot. Args: - joint: The joint from the URDF robot model. - stiffness: The stiffness value. + stage: The USD stage to modify. """ - from isaacsim.asset.importer.urdf._urdf import UrdfJointType + from pxr import UsdPhysics + + default_prim = stage.GetDefaultPrim() + if not default_prim or not default_prim.IsValid(): + carb.log_warn("UrdfConverter: Cannot apply fix_base - no default prim found.") + return + + # find the root link: first child with `RigidBodyAPI` under the prim hierarchy + root_link = None + for prim in stage.Traverse(): + if prim.HasAPI(UsdPhysics.RigidBodyAPI): + root_link = prim + break + + if root_link is None: + carb.log_warn("UrdfConverter: Cannot apply fix_base - no rigid body link found.") + return + + # create a fixed joint connecting the world to the root link + default_prim_path = default_prim.GetPath() + joint_path = default_prim_path.AppendChild("fix_base_joint") + + fixed_joint = UsdPhysics.FixedJoint.Define(stage, joint_path) + # `body0` left empty => connected to the world frame + fixed_joint.CreateBody1Rel().SetTargets([root_link.GetPath()]) + + @staticmethod + def _fix_articulation_root_for_fixed_base(usd_path: str): + """Move ArticulationRootAPI from the root rigid body to its parent prim. + + After the asset transformer, ArticulationRootAPI ends up on the root rigid body. + When combined with a FixedJoint on that same body (``fix_base_joint``), PhysX treats + the articulation as a floating-base + external constraint (maximal coordinate tree) + rather than a proper fixed-base reduced-coordinate articulation. + + Moving ArticulationRootAPI to the parent of the root rigid body (a non-rigid Xform / + Scope ancestor) resolves this, matching the pattern used by ``schemas.py``'s + ``fix_root_link``. + + Changes are authored as **local opinions in the root layer** of the stage, which are + stronger than the variant-payload-sublayer opinions written by the asset transformer. + This means the root layer's ``delete apiSchemas`` overrides the ``prepend apiSchemas`` + in the deeper sublayers without modifying those files. - if joint.type == UrdfJointType.JOINT_PRISMATIC: - joint.drive.set_strength(stiffness) - else: - # we need to convert the stiffness from radians to degrees - joint.drive.set_strength(math.pi / 180 * stiffness) + Args: + usd_path: Absolute path to the final ``.usda`` file produced by the asset transformer. + """ + from pxr import Usd, UsdPhysics - def _set_joint_drive_damping(self, joint, damping: float): - """Set the joint drive damping. + stage = Usd.Stage.Open(usd_path) + if not stage: + carb.log_warn( + f"UrdfConverter: Cannot open final stage at '{usd_path}'" + " for fix_base ArticulationRootAPI post-processing." + ) + return + + # Find the root rigid body that incorrectly has ArticulationRootAPI applied. + root_body_prim = None + for prim in stage.Traverse(): + if prim.HasAPI(UsdPhysics.ArticulationRootAPI) and prim.HasAPI(UsdPhysics.RigidBodyAPI): + root_body_prim = prim + break + + if root_body_prim is None: + # ArticulationRootAPI is already on a non-rigid ancestor (correct) or not present. + return + + parent_prim = root_body_prim.GetParent() + if not parent_prim or not parent_prim.IsValid(): + carb.log_warn("UrdfConverter: Root rigid body has no valid parent prim — skipping ArticulationRootAPI fix.") + return + + # Collect all articulation-related schema names applied to the root rigid body. + articulation_api_names = [ + name + for name in root_body_prim.GetAppliedSchemas() + if "ArticulationRoot" in name or name == "PhysxArticulationAPI" + ] + + # --- Apply ArticulationRootAPI schemas to the parent prim --- + # (edit target is the root layer by default; writes local opinions) + UsdPhysics.ArticulationRootAPI.Apply(parent_prim) + already_on_parent = set(parent_prim.GetAppliedSchemas()) + for name in articulation_api_names: + if name != "PhysicsArticulationRootAPI" and name not in already_on_parent: + parent_prim.AddAppliedSchema(name) + + # --- Copy USD articulation attributes to the parent prim --- + usd_art_api = UsdPhysics.ArticulationRootAPI(root_body_prim) + for attr_name in usd_art_api.GetSchemaAttributeNames(): + attr = root_body_prim.GetAttribute(attr_name) + val = attr.Get() if attr else None + if val is not None: + parent_attr = parent_prim.GetAttribute(attr_name) + if not parent_attr: + parent_attr = parent_prim.CreateAttribute(attr_name, attr.GetTypeName()) + parent_attr.Set(val) + + # --- Copy physxArticulation:* attributes to the parent prim --- + for attr in root_body_prim.GetAttributes(): + aname = attr.GetName() + if aname.startswith("physxArticulation:"): + val = attr.Get() + if val is not None: + parent_attr = parent_prim.GetAttribute(aname) + if not parent_attr: + parent_attr = parent_prim.CreateAttribute(aname, attr.GetTypeName()) + parent_attr.Set(val) + + # --- Remove ArticulationRootAPI schemas from the root rigid body --- + # Writing "delete" list-ops in the root layer overrides "prepend" in sublayers. + root_body_prim.RemoveAppliedSchema("PhysxArticulationAPI") + root_body_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) + for name in articulation_api_names: + if name not in ("PhysicsArticulationRootAPI", "PhysxArticulationAPI"): + root_body_prim.RemoveAppliedSchema(name) + + # Save only the root layer (sublayers produced by the asset transformer are untouched). + stage.GetRootLayer().Save() + + @staticmethod + def _apply_link_density(stage, density: float): + """Set default density on rigid body links that have no explicit mass. Args: - joint: The joint from the URDF robot model. - damping: The damping value. + stage: The USD stage to modify. + density: The density value in kg/m^3. """ - from isaacsim.asset.importer.urdf._urdf import UrdfJointType + from pxr import UsdPhysics + + for prim in stage.Traverse(): + if not prim.HasAPI(UsdPhysics.MassAPI): + continue + mass_api = UsdPhysics.MassAPI(prim) + # only set density if mass is not explicitly specified (0.0 means auto-compute) + mass_attr = mass_api.GetMassAttr() + if mass_attr and mass_attr.HasValue() and mass_attr.Get() > 0.0: + continue + density_attr = mass_api.GetDensityAttr() + if not density_attr: + density_attr = mass_api.CreateDensityAttr() + density_attr.Set(density) + + def _apply_joint_drives(self, stage, cfg: UrdfConverterCfg): + """Set joint drive properties (type, target, gains) on USD joints. - if joint.type == UrdfJointType.JOINT_PRISMATIC: - joint.drive.set_damping(damping) - else: - # we need to convert the damping from radians to degrees - joint.drive.set_damping(math.pi / 180 * damping) + Args: + stage: The USD stage to modify. + cfg: The URDF converter configuration containing joint drive settings. + """ + from pxr import UsdPhysics + + # collect all joints with their metadata + joints: dict[str, tuple] = {} + for prim in stage.Traverse(): + if not (prim.IsA(UsdPhysics.RevoluteJoint) or prim.IsA(UsdPhysics.PrismaticJoint)): + continue + joint_name = prim.GetName() + is_revolute = prim.IsA(UsdPhysics.RevoluteJoint) + instance_name = "angular" if is_revolute else "linear" + joints[joint_name] = (prim, is_revolute, instance_name) + + if not joints: + return + + drive_cfg = cfg.joint_drive + + # apply drive type (force / acceleration) + self._set_drive_type_on_joints(joints, drive_cfg) + # apply target type (none / position / velocity) + self._set_target_type_on_joints(joints, drive_cfg) + # apply gains (stiffness / damping) + self._set_drive_gains_on_joints(joints, drive_cfg) + + # ------------------------------------------------------------------ + # Joint drive helpers + # ------------------------------------------------------------------ + + @staticmethod + def _set_drive_type_on_joints(joints: dict, drive_cfg: UrdfConverterCfg.JointDriveCfg): + """Set the drive type (force or acceleration) on joint prims. - def _set_joint_drive_gains_from_natural_frequency(self, joint): - """Compute the joint drive gains from the natural frequency and damping ratio. + Args: + joints: Mapping of joint name → (prim, is_revolute, instance_name). + drive_cfg: The joint drive configuration. + """ + from pxr import UsdPhysics + + def _apply(prim, instance_name: str, drive_type: str): + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + type_attr = drive.GetTypeAttr() + if not type_attr: + type_attr = drive.CreateTypeAttr() + type_attr.Set(drive_type) + + if isinstance(drive_cfg.drive_type, str): + for _name, (prim, _is_rev, inst) in joints.items(): + _apply(prim, inst, drive_cfg.drive_type) + elif isinstance(drive_cfg.drive_type, dict): + for pattern, drive_type in drive_cfg.drive_type.items(): + matches = [n for n in joints if re.search(pattern, n)] + if not matches: + raise ValueError( + f"Joint name pattern '{pattern}' in drive_type config matched no joints." + f" Available joints: {list(joints.keys())}" + ) + for name in matches: + prim, _, inst = joints[name] + _apply(prim, inst, drive_type) + + @staticmethod + def _set_target_type_on_joints(joints: dict, drive_cfg: UrdfConverterCfg.JointDriveCfg): + """Set the target type (none, position, velocity) on joint prims. + + For ``"none"``, both stiffness and damping are zeroed out. Args: - joint: The joint from the URDF robot model. + joints: Mapping of joint name → (prim, is_revolute, instance_name). + drive_cfg: The joint drive configuration. """ - from isaacsim.asset.importer.urdf._urdf import UrdfJointDriveType, UrdfJointTargetType + from pxr import UsdPhysics + + def _apply(prim, instance_name: str, target_type: str): + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + if target_type == "none": + drive.GetStiffnessAttr().Set(0.0) + drive.GetDampingAttr().Set(0.0) + + if isinstance(drive_cfg.target_type, str): + for _name, (prim, _is_rev, inst) in joints.items(): + _apply(prim, inst, drive_cfg.target_type) + elif isinstance(drive_cfg.target_type, dict): + for pattern, target_type in drive_cfg.target_type.items(): + matches = [n for n in joints if re.search(pattern, n)] + if not matches: + raise ValueError( + f"Joint name pattern '{pattern}' in target_type config matched no joints." + f" Available joints: {list(joints.keys())}" + ) + for name in matches: + prim, _, inst = joints[name] + _apply(prim, inst, target_type) - strength = self._urdf_interface.compute_natural_stiffness( - self._robot_model, - joint.name, - joint.drive.natural_frequency, - ) - self._set_joint_drive_stiffness(joint, strength) - - if joint.drive.target_type == UrdfJointTargetType.JOINT_DRIVE_POSITION: - m_eq = 1.0 - if joint.drive.drive_type == UrdfJointDriveType.JOINT_DRIVE_FORCE: - m_eq = joint.inertia - damping = 2 * m_eq * joint.drive.natural_frequency * joint.drive.damping_ratio - self._set_joint_drive_damping(joint, damping) + @staticmethod + def _set_drive_gains_on_joints(joints: dict, drive_cfg: UrdfConverterCfg.JointDriveCfg): + """Set stiffness and damping on joint drive APIs. + + For revolute joints the user-facing values (Nm/rad) are converted to the USD + convention (Nm/deg) by multiplying by ``pi / 180``. + + Args: + joints: Mapping of joint name → (prim, is_revolute, instance_name). + drive_cfg: The joint drive configuration. + """ + from pxr import UsdPhysics + + gains = drive_cfg.gains + if not isinstance(gains, UrdfConverterCfg.JointDriveCfg.PDGainsCfg): + return + + def _set_stiffness(prim, instance_name: str, is_revolute: bool, value: float): + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + usd_value = value * math.pi / 180.0 if is_revolute else value + stiffness_attr = drive.GetStiffnessAttr() + if not stiffness_attr: + stiffness_attr = drive.CreateStiffnessAttr() + stiffness_attr.Set(usd_value) + + def _set_damping(prim, instance_name: str, is_revolute: bool, value: float): + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + usd_value = value * math.pi / 180.0 if is_revolute else value + damping_attr = drive.GetDampingAttr() + if not damping_attr: + damping_attr = drive.CreateDampingAttr() + damping_attr.Set(usd_value) + + # --- stiffness --- + if isinstance(gains.stiffness, (float, int)): + for _name, (prim, is_rev, inst) in joints.items(): + _set_stiffness(prim, inst, is_rev, gains.stiffness) + elif isinstance(gains.stiffness, dict): + for pattern, stiffness in gains.stiffness.items(): + matches = [n for n in joints if re.search(pattern, n)] + if not matches: + raise ValueError( + f"Joint name pattern '{pattern}' in stiffness config matched no joints." + f" Available joints: {list(joints.keys())}" + ) + for name in matches: + prim, is_rev, inst = joints[name] + _set_stiffness(prim, inst, is_rev, stiffness) + + # --- damping --- + if gains.damping is None: + return + if isinstance(gains.damping, (float, int)): + for _name, (prim, is_rev, inst) in joints.items(): + _set_damping(prim, inst, is_rev, gains.damping) + elif isinstance(gains.damping, dict): + for pattern, damping in gains.damping.items(): + matches = [n for n in joints if re.search(pattern, n)] + if not matches: + raise ValueError( + f"Joint name pattern '{pattern}' in damping config matched no joints." + f" Available joints: {list(joints.keys())}" + ) + for name in matches: + prim, is_rev, inst = joints[name] + _set_damping(prim, inst, is_rev, damping) diff --git a/source/isaaclab/isaaclab/sim/converters/urdf_converter_cfg.py b/source/isaaclab/isaaclab/sim/converters/urdf_converter_cfg.py index c04ede2400ae..feb13f61ff20 100644 --- a/source/isaaclab/isaaclab/sim/converters/urdf_converter_cfg.py +++ b/source/isaaclab/isaaclab/sim/converters/urdf_converter_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from dataclasses import MISSING from typing import Literal @@ -22,10 +24,10 @@ class JointDriveCfg: class PDGainsCfg: """Configuration for the PD gains of the drive.""" - stiffness: dict[str, float] | float = MISSING - """The stiffness of the joint drive in Nm/rad or N/rad. + stiffness: dict[str, float] | float | None = None + """The stiffness of the joint drive in Nm/rad or N/rad. Defaults to None. - If None, the stiffness is set to the value parsed from the URDF file. + If None, the stiffness values produced by the URDF importer are preserved. If :attr:`~UrdfConverterCfg.JointDriveCfg.target_type` is set to ``"velocity"``, this value determines the drive strength in joint velocity space. """ @@ -52,6 +54,12 @@ class NaturalFrequencyGainsCfg: * :math:`r = 1.0` is a critically damped system, * :math:`r < 1.0` is underdamped, * :math:`r > 1.0` is overdamped. + + .. deprecated:: + This gains mode is no longer supported by the URDF importer 3.0. + The ``compute_natural_stiffness`` function has been removed. If used, a + :exc:`DeprecationWarning` is emitted and joint drive gains are left at the values + produced by the URDF importer. Use :class:`PDGainsCfg` instead. """ natural_frequency: dict[str, float] | float = MISSING @@ -102,10 +110,20 @@ class NaturalFrequencyGainsCfg: """ merge_fixed_joints: bool = True - """Consolidate links that are connected by fixed joints. Defaults to True.""" + """Consolidate links that are connected by fixed joints. Defaults to True. + + When enabled, a URDF XML pre-processing step removes all fixed joints and merges each + child link's visual, collision, and inertial elements into the parent link before USD + conversion. Downstream joints are re-parented with composed transforms. Chains of + consecutive fixed joints are handled automatically. + """ convert_mimic_joints_to_normal_joints: bool = False - """Convert mimic joints to normal joints. Defaults to False.""" + """Convert mimic joints to normal joints. Defaults to False. + + .. deprecated:: + This option is no longer supported by the URDF importer 3.0. A warning is logged if enabled. + """ joint_drive: JointDriveCfg | None = JointDriveCfg() """The joint drive settings. Defaults to :class:`JointDriveCfg`. @@ -116,17 +134,24 @@ class NaturalFrequencyGainsCfg: collision_from_visuals = False """Whether to create collision geometry from visual geometry. Defaults to False.""" - collider_type: Literal["convex_hull", "convex_decomposition"] = "convex_hull" + collision_type: Literal["Convex Hull", "Convex Decomposition"] = "Convex Hull" """The collision shape simplification. Defaults to "convex_hull". Supported values are: - * ``"convex_hull"``: The collision shape is simplified to a convex hull. - * ``"convex_decomposition"``: The collision shape is decomposed into smaller convex shapes for a closer fit. + * ``"Convex Hull"``: The collision shape is simplified to a convex hull. + * ``"Convex Decomposition"``: The collision shape is decomposed into smaller convex shapes for a closer fit. """ self_collision: bool = False """Activate self-collisions between links of the articulation. Defaults to False.""" replace_cylinders_with_capsules: bool = False - """Replace cylinder shapes with capsule shapes. Defaults to False.""" + """Replace cylinder shapes with capsule shapes. Defaults to False. + + .. deprecated:: + This option is no longer supported by the URDF importer 3.0. A warning is logged if enabled. + """ + + merge_mesh: bool = False + """Merge meshes where possible to optimize the model. Defaults to False.""" diff --git a/source/isaaclab/isaaclab/sim/converters/urdf_utils.py b/source/isaaclab/isaaclab/sim/converters/urdf_utils.py new file mode 100644 index 000000000000..dfef1ee01e52 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/converters/urdf_utils.py @@ -0,0 +1,350 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utility functions for pre-processing URDF files before USD conversion.""" + +from __future__ import annotations + +import math +import xml.etree.ElementTree as ET + +import numpy as np + + +def merge_fixed_joints(urdf_path: str, output_path: str) -> str: + """Pre-process a URDF file to merge links connected by fixed joints. + + For each fixed joint, the child link's ````, ````, and ```` + elements are merged into the parent link with proper transform composition. Any + downstream joints whose parent was the child link are re-parented to the surviving + parent link (with their origin transforms composed accordingly). + + Chains of consecutive fixed joints are handled by iterating until no fixed joints + remain. + + Args: + urdf_path: Path to the input URDF file. + output_path: Path to write the modified URDF file. + + Returns: + The *output_path* that was written to. + """ + tree = ET.parse(urdf_path) + root = tree.getroot() + + # iterate until no fixed joints remain (handles chains) + while True: + fixed_joints = [j for j in root.findall("joint") if j.get("type") == "fixed"] + if not fixed_joints: + break + + # process the first fixed joint found (order matters for chains) + joint = fixed_joints[0] + parent_link_name = joint.find("parent").get("link") + child_link_name = joint.find("child").get("link") + + T_joint = _parse_origin(joint.find("origin")) + + # locate the corresponding `` elements + parent_link_elem = _find_link(root, parent_link_name) + child_link_elem = _find_link(root, child_link_name) + + if parent_link_elem is None or child_link_elem is None: + # safety guard: drop the joint and continue + root.remove(joint) + continue + + # move `` elements from child to parent (with composed transforms) + for visual in child_link_elem.findall("visual"): + _compose_origin(visual, T_joint) + parent_link_elem.append(visual) + + # move `` elements from child to parent (with composed transforms) + for collision in child_link_elem.findall("collision"): + _compose_origin(collision, T_joint) + parent_link_elem.append(collision) + + # merge `` properties (mass, CoM, inertia tensor) + _merge_inertial(parent_link_elem, child_link_elem, T_joint) + + # re-parent any joints that reference the child link as their parent + for other_joint in root.findall("joint"): + if other_joint is joint: + continue + parent_elem = other_joint.find("parent") + if parent_elem is not None and parent_elem.get("link") == child_link_name: + parent_elem.set("link", parent_link_name) + # compose transforms: new_origin = T_joint @ T_other + T_other = _parse_origin(other_joint.find("origin")) + _set_origin(other_joint, T_joint @ T_other) + + # remove the fixed joint and the now-empty child link + root.remove(joint) + root.remove(child_link_elem) + + tree.write(output_path, xml_declaration=True, encoding="UTF-8") + return output_path + + +# --------------------------------------------------------------------------- +# Transform helpers +# --------------------------------------------------------------------------- + + +def _parse_origin(origin_elem: ET.Element | None) -> np.ndarray: + """Parse an ```` element into a 4x4 homogeneous transform matrix. + + Args: + origin_elem: The ```` XML element (may be ``None``). + + Returns: + A 4x4 numpy array representing the transform. + """ + if origin_elem is None: + return np.eye(4) + xyz = [float(v) for v in origin_elem.get("xyz", "0 0 0").split()] + rpy = [float(v) for v in origin_elem.get("rpy", "0 0 0").split()] + return _make_transform(xyz, rpy) + + +def _make_transform(xyz: list[float], rpy: list[float]) -> np.ndarray: + """Create a 4x4 homogeneous transform from *xyz* translation and *rpy* rotation. + + Args: + xyz: Translation ``[x, y, z]``. + rpy: Euler angles ``[roll, pitch, yaw]`` in radians (URDF convention: ``Rz @ Ry @ Rx``). + + Returns: + A 4x4 numpy array. + """ + T = np.eye(4) + T[:3, :3] = _rpy_to_rotation_matrix(rpy) + T[:3, 3] = xyz + return T + + +def _rpy_to_rotation_matrix(rpy: list[float]) -> np.ndarray: + """Convert roll-pitch-yaw to a 3x3 rotation matrix (``Rz @ Ry @ Rx``). + + Args: + rpy: Euler angles ``[roll, pitch, yaw]`` in radians. + + Returns: + A 3x3 rotation matrix. + """ + roll, pitch, yaw = rpy + cr, sr = math.cos(roll), math.sin(roll) + cp, sp = math.cos(pitch), math.sin(pitch) + cy, sy = math.cos(yaw), math.sin(yaw) + return np.array( + [ + [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr], + [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr], + [-sp, cp * sr, cp * cr], + ] + ) + + +def _rotation_matrix_to_rpy(R: np.ndarray) -> tuple[float, float, float]: + """Convert a 3x3 rotation matrix to roll-pitch-yaw. + + Args: + R: A 3x3 rotation matrix. + + Returns: + Tuple ``(roll, pitch, yaw)`` in radians. + """ + sy = -R[2, 0] + if abs(sy) >= 1.0 - 1e-12: + # gimbal lock + pitch = math.copysign(math.pi / 2, sy) + roll = math.atan2(R[0, 1], R[0, 2]) + yaw = 0.0 + else: + pitch = math.asin(np.clip(sy, -1.0, 1.0)) + roll = math.atan2(R[2, 1], R[2, 2]) + yaw = math.atan2(R[1, 0], R[0, 0]) + return (roll, pitch, yaw) + + +def _set_origin(element: ET.Element, T: np.ndarray) -> None: + """Set or create the ```` sub-element of *element* from a 4x4 transform. + + Args: + element: The parent XML element (e.g. ````, ````). + T: The 4x4 homogeneous transform. + """ + xyz = T[:3, 3] + rpy = _rotation_matrix_to_rpy(T[:3, :3]) + + origin = element.find("origin") + if origin is None: + origin = ET.SubElement(element, "origin") + + origin.set("xyz", f"{_fmt(xyz[0])} {_fmt(xyz[1])} {_fmt(xyz[2])}") + origin.set("rpy", f"{_fmt(rpy[0])} {_fmt(rpy[1])} {_fmt(rpy[2])}") + + +def _compose_origin(element: ET.Element, T_parent: np.ndarray) -> None: + """Compose *element*'s ```` with *T_parent* (``T_parent @ T_element``). + + The composed transform replaces the element's existing origin. + + Args: + element: An XML element that may contain an ```` child. + T_parent: The parent transform to prepend. + """ + T_elem = _parse_origin(element.find("origin")) + _set_origin(element, T_parent @ T_elem) + + +def _find_link(root: ET.Element, name: str) -> ET.Element | None: + """Find a ```` element by its ``name`` attribute. + + Args: + root: The ```` root element. + name: Link name to search for. + + Returns: + The matching ```` element, or ``None``. + """ + for link in root.findall("link"): + if link.get("name") == name: + return link + return None + + +def _fmt(v: float) -> str: + """Format a float for URDF output, suppressing near-zero noise. + + Args: + v: The value to format. + + Returns: + A string representation. + """ + if abs(v) < 1e-12: + return "0" + return f"{v:.10g}" + + +# --------------------------------------------------------------------------- +# Inertial merge +# --------------------------------------------------------------------------- + + +def _parse_inertia_matrix(inertia_elem: ET.Element | None) -> np.ndarray: + """Parse an ```` element into a 3x3 symmetric inertia matrix. + + Args: + inertia_elem: The ```` XML element (may be ``None``). + + Returns: + A 3x3 numpy array. + """ + if inertia_elem is None: + return np.zeros((3, 3)) + ixx = float(inertia_elem.get("ixx", "0")) + ixy = float(inertia_elem.get("ixy", "0")) + ixz = float(inertia_elem.get("ixz", "0")) + iyy = float(inertia_elem.get("iyy", "0")) + iyz = float(inertia_elem.get("iyz", "0")) + izz = float(inertia_elem.get("izz", "0")) + return np.array( + [ + [ixx, ixy, ixz], + [ixy, iyy, iyz], + [ixz, iyz, izz], + ] + ) + + +def _merge_inertial(parent_link: ET.Element, child_link: ET.Element, T_joint: np.ndarray) -> None: + """Merge the child link's inertial properties into the parent link. + + Uses the parallel axis theorem to correctly combine mass, center of mass, and + inertia tensors when the two bodies are rigidly attached. + + Args: + parent_link: The parent ```` element that will absorb the child. + child_link: The child ```` element being merged. + T_joint: The 4x4 transform from parent link frame to child link frame. + """ + child_inertial = child_link.find("inertial") + if child_inertial is None: + return # nothing to merge + + child_mass_elem = child_inertial.find("mass") + child_mass = float(child_mass_elem.get("value", "0")) if child_mass_elem is not None else 0.0 + if child_mass == 0.0: + return # zero mass — nothing to merge + + # -- child inertial in parent link frame -- + T_child_inertial = _parse_origin(child_inertial.find("origin")) + T_child_in_parent = T_joint @ T_child_inertial + R_child = T_child_in_parent[:3, :3] + child_com_in_parent = T_child_in_parent[:3, 3] + + child_I_local = _parse_inertia_matrix(child_inertial.find("inertia")) + child_I_in_parent = R_child @ child_I_local @ R_child.T + + # -- parent inertial -- + parent_inertial = parent_link.find("inertial") + if parent_inertial is not None: + parent_mass_elem = parent_inertial.find("mass") + parent_mass = float(parent_mass_elem.get("value", "0")) if parent_mass_elem is not None else 0.0 + T_parent_inertial = _parse_origin(parent_inertial.find("origin")) + R_parent = T_parent_inertial[:3, :3] + parent_com = T_parent_inertial[:3, 3] + parent_I_local = _parse_inertia_matrix(parent_inertial.find("inertia")) + parent_I_in_link = R_parent @ parent_I_local @ R_parent.T + else: + parent_inertial = ET.SubElement(parent_link, "inertial") + parent_mass = 0.0 + parent_com = np.zeros(3) + parent_I_in_link = np.zeros((3, 3)) + + # -- combined mass and center of mass -- + total_mass = parent_mass + child_mass + if total_mass == 0.0: + return + combined_com = (parent_mass * parent_com + child_mass * child_com_in_parent) / total_mass + + # -- parallel axis theorem: shift each inertia tensor to the combined CoM -- + def _shift_inertia(I_at_com: np.ndarray, mass: float, com: np.ndarray, ref: np.ndarray) -> np.ndarray: + d = ref - com + return I_at_com + mass * (np.dot(d, d) * np.eye(3) - np.outer(d, d)) + + parent_I_shifted = ( + _shift_inertia(parent_I_in_link, parent_mass, parent_com, combined_com) if parent_mass > 0 else parent_I_in_link + ) + child_I_shifted = _shift_inertia(child_I_in_parent, child_mass, child_com_in_parent, combined_com) + + combined_I = parent_I_shifted + child_I_shifted + + # -- write back to parent -- + # origin: combined CoM with identity rotation (tensor is already in link frame) + origin = parent_inertial.find("origin") + if origin is None: + origin = ET.SubElement(parent_inertial, "origin") + origin.set("xyz", f"{_fmt(combined_com[0])} {_fmt(combined_com[1])} {_fmt(combined_com[2])}") + origin.set("rpy", "0 0 0") + + # mass + mass_elem = parent_inertial.find("mass") + if mass_elem is None: + mass_elem = ET.SubElement(parent_inertial, "mass") + mass_elem.set("value", f"{_fmt(total_mass)}") + + # inertia tensor + inertia_elem = parent_inertial.find("inertia") + if inertia_elem is None: + inertia_elem = ET.SubElement(parent_inertial, "inertia") + inertia_elem.set("ixx", _fmt(combined_I[0, 0])) + inertia_elem.set("ixy", _fmt(combined_I[0, 1])) + inertia_elem.set("ixz", _fmt(combined_I[0, 2])) + inertia_elem.set("iyy", _fmt(combined_I[1, 1])) + inertia_elem.set("iyz", _fmt(combined_I[1, 2])) + inertia_elem.set("izz", _fmt(combined_I[2, 2])) diff --git a/source/isaaclab/isaaclab/sim/schemas/__init__.py b/source/isaaclab/isaaclab/sim/schemas/__init__.py index c8402fdb13c8..f56ca862de59 100644 --- a/source/isaaclab/isaaclab/sim/schemas/__init__.py +++ b/source/isaaclab/isaaclab/sim/schemas/__init__.py @@ -32,96 +32,6 @@ """ -from .schemas import ( - MESH_APPROXIMATION_TOKENS, - PHYSX_MESH_COLLISION_CFGS, - USD_MESH_COLLISION_CFGS, - activate_contact_sensors, - define_articulation_root_properties, - define_collision_properties, - define_deformable_body_properties, - define_mass_properties, - define_mesh_collision_properties, - define_rigid_body_properties, - modify_articulation_root_properties, - modify_collision_properties, - modify_deformable_body_properties, - modify_fixed_tendon_properties, - modify_joint_drive_properties, - modify_mass_properties, - modify_mesh_collision_properties, - modify_rigid_body_properties, - modify_spatial_tendon_properties, -) -from .schemas_cfg import ( - ArticulationRootPropertiesCfg, - BoundingCubePropertiesCfg, - BoundingSpherePropertiesCfg, - CollisionPropertiesCfg, - ConvexDecompositionPropertiesCfg, - ConvexHullPropertiesCfg, - DeformableBodyPropertiesCfg, - FixedTendonPropertiesCfg, - JointDrivePropertiesCfg, - MassPropertiesCfg, - MeshCollisionPropertiesCfg, - RigidBodyPropertiesCfg, - SDFMeshPropertiesCfg, - SpatialTendonPropertiesCfg, - TriangleMeshPropertiesCfg, - TriangleMeshSimplificationPropertiesCfg, -) +from isaaclab.utils.module import lazy_export -__all__ = [ - # articulation root - "ArticulationRootPropertiesCfg", - "define_articulation_root_properties", - "modify_articulation_root_properties", - # rigid bodies - "RigidBodyPropertiesCfg", - "define_rigid_body_properties", - "modify_rigid_body_properties", - "activate_contact_sensors", - # colliders - "CollisionPropertiesCfg", - "define_collision_properties", - "modify_collision_properties", - # deformables - "DeformableBodyPropertiesCfg", - "define_deformable_body_properties", - "modify_deformable_body_properties", - # joints - "JointDrivePropertiesCfg", - "modify_joint_drive_properties", - # mass - "MassPropertiesCfg", - "define_mass_properties", - "modify_mass_properties", - # mesh colliders - "MeshCollisionPropertiesCfg", - "define_mesh_collision_properties", - "modify_mesh_collision_properties", - # bounding cube - "BoundingCubePropertiesCfg", - # bounding sphere - "BoundingSpherePropertiesCfg", - # convex decomposition - "ConvexDecompositionPropertiesCfg", - # convex hull - "ConvexHullPropertiesCfg", - # sdf mesh - "SDFMeshPropertiesCfg", - # triangle mesh - "TriangleMeshPropertiesCfg", - # triangle mesh simplification - "TriangleMeshSimplificationPropertiesCfg", - # tendons - "FixedTendonPropertiesCfg", - "SpatialTendonPropertiesCfg", - "modify_fixed_tendon_properties", - "modify_spatial_tendon_properties", - # Constants for configs that use PhysX vs USD API - "PHYSX_MESH_COLLISION_CFGS", - "USD_MESH_COLLISION_CFGS", - "MESH_APPROXIMATION_TOKENS", -] +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/schemas/__init__.pyi b/source/isaaclab/isaaclab/sim/schemas/__init__.pyi new file mode 100644 index 000000000000..f413b3ded12d --- /dev/null +++ b/source/isaaclab/isaaclab/sim/schemas/__init__.pyi @@ -0,0 +1,76 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MESH_APPROXIMATION_TOKENS", + "PHYSX_MESH_COLLISION_CFGS", + "USD_MESH_COLLISION_CFGS", + "activate_contact_sensors", + "define_articulation_root_properties", + "define_collision_properties", + "define_mass_properties", + "define_mesh_collision_properties", + "define_rigid_body_properties", + "modify_articulation_root_properties", + "modify_collision_properties", + "modify_fixed_tendon_properties", + "modify_joint_drive_properties", + "modify_mass_properties", + "modify_mesh_collision_properties", + "modify_rigid_body_properties", + "modify_spatial_tendon_properties", + "ArticulationRootPropertiesCfg", + "BoundingCubePropertiesCfg", + "BoundingSpherePropertiesCfg", + "CollisionPropertiesCfg", + "ConvexDecompositionPropertiesCfg", + "ConvexHullPropertiesCfg", + "FixedTendonPropertiesCfg", + "JointDrivePropertiesCfg", + "MassPropertiesCfg", + "MeshCollisionPropertiesCfg", + "RigidBodyPropertiesCfg", + "SDFMeshPropertiesCfg", + "SpatialTendonPropertiesCfg", + "TriangleMeshPropertiesCfg", + "TriangleMeshSimplificationPropertiesCfg", +] + +from .schemas import ( + MESH_APPROXIMATION_TOKENS, + PHYSX_MESH_COLLISION_CFGS, + USD_MESH_COLLISION_CFGS, + activate_contact_sensors, + define_articulation_root_properties, + define_collision_properties, + define_mass_properties, + define_mesh_collision_properties, + define_rigid_body_properties, + modify_articulation_root_properties, + modify_collision_properties, + modify_fixed_tendon_properties, + modify_joint_drive_properties, + modify_mass_properties, + modify_mesh_collision_properties, + modify_rigid_body_properties, + modify_spatial_tendon_properties, +) +from .schemas_cfg import ( + ArticulationRootPropertiesCfg, + BoundingCubePropertiesCfg, + BoundingSpherePropertiesCfg, + CollisionPropertiesCfg, + ConvexDecompositionPropertiesCfg, + ConvexHullPropertiesCfg, + FixedTendonPropertiesCfg, + JointDrivePropertiesCfg, + MassPropertiesCfg, + MeshCollisionPropertiesCfg, + RigidBodyPropertiesCfg, + SDFMeshPropertiesCfg, + SpatialTendonPropertiesCfg, + TriangleMeshPropertiesCfg, + TriangleMeshSimplificationPropertiesCfg, +) diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas.py b/source/isaaclab/isaaclab/sim/schemas/schemas.py index 91eb3b2cd6a9..0f97b542e031 100644 --- a/source/isaaclab/isaaclab/sim/schemas/schemas.py +++ b/source/isaaclab/isaaclab/sim/schemas/schemas.py @@ -8,19 +8,17 @@ import logging import math -from collections.abc import Callable from typing import Any -import omni.physx.scripts.utils as physx_utils -from omni.physx.scripts import deformableUtils as deformable_utils -from pxr import PhysxSchema, Usd, UsdPhysics +from pxr import Usd, UsdPhysics from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.utils.string import to_camel_case from ..utils import ( apply_nested, find_global_fixed_joint_prim, - get_all_matching_child_prims, + safe_set_attribute_on_usd_prim, safe_set_attribute_on_usd_schema, ) from . import schemas_cfg @@ -44,7 +42,7 @@ "convexHull": UsdPhysics.Tokens.convexHull, "none": UsdPhysics.Tokens.none, "meshSimplification": UsdPhysics.Tokens.meshSimplification, - "sdf": PhysxSchema.Tokens.sdf, + "sdf": "sdf", # PhysX SDF mesh token; use string (pxr.Tf.Token not available in all envs) } @@ -155,19 +153,21 @@ def modify_articulation_root_properties( # check if prim has articulation applied on it if not UsdPhysics.ArticulationRootAPI(articulation_prim): return False - # retrieve the articulation api - physx_articulation_api = PhysxSchema.PhysxArticulationAPI(articulation_prim) - if not physx_articulation_api: - physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(articulation_prim) + # ensure PhysX articulation API is applied + applied_schemas = articulation_prim.GetAppliedSchemas() + if "PhysxArticulationAPI" not in applied_schemas: + articulation_prim.AddAppliedSchema("PhysxArticulationAPI") # convert to dict cfg = cfg.to_dict() # extract non-USD properties fix_root_link = cfg.pop("fix_root_link", None) - # set into physx api + # set into physx api (prim attributes under physxArticulation:*) for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_articulation_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim( + articulation_prim, f"physxArticulation:{to_camel_case(attr_name, 'cC')}", value, camel_case=False + ) # fix root link based on input # we do the fixed joint processing later to not interfere with setting other properties @@ -196,6 +196,8 @@ def modify_articulation_root_properties( ) # create a fixed joint between the root link and the world frame + from omni.physx.scripts import utils as physx_utils + physx_utils.createJoint(stage=stage, joint_type="Fixed", from_prim=None, to_prim=articulation_prim) # Having a fixed joint on a rigid body is not treated as "fixed base articulation". @@ -205,23 +207,31 @@ def modify_articulation_root_properties( parent_prim = articulation_prim.GetParent() # apply api to parent UsdPhysics.ArticulationRootAPI.Apply(parent_prim) - PhysxSchema.PhysxArticulationAPI.Apply(parent_prim) + parent_applied = parent_prim.GetAppliedSchemas() + if "PhysxArticulationAPI" not in parent_applied: + parent_prim.AddAppliedSchema("PhysxArticulationAPI") # copy the attributes # -- usd attributes usd_articulation_api = UsdPhysics.ArticulationRootAPI(articulation_prim) for attr_name in usd_articulation_api.GetSchemaAttributeNames(): attr = articulation_prim.GetAttribute(attr_name) - parent_prim.GetAttribute(attr_name).Set(attr.Get()) - # -- physx attributes - physx_articulation_api = PhysxSchema.PhysxArticulationAPI(articulation_prim) - for attr_name in physx_articulation_api.GetSchemaAttributeNames(): - attr = articulation_prim.GetAttribute(attr_name) - parent_prim.GetAttribute(attr_name).Set(attr.Get()) + parent_attr = parent_prim.GetAttribute(attr_name) + if not parent_attr: + parent_attr = parent_prim.CreateAttribute(attr_name, attr.GetTypeName()) + parent_attr.Set(attr.Get()) + # -- physx attributes (copy by name prefix) + for attr in articulation_prim.GetAttributes(): + aname = attr.GetName() + if aname.startswith("physxArticulation:"): + parent_attr = parent_prim.GetAttribute(aname) + if not parent_attr: + parent_attr = parent_prim.CreateAttribute(aname, attr.GetTypeName()) + parent_attr.Set(attr.Get()) # remove api from root + articulation_prim.RemoveAppliedSchema("PhysxArticulationAPI") articulation_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) - articulation_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI) # success return True @@ -307,10 +317,10 @@ def modify_rigid_body_properties( return False # retrieve the USD rigid-body api usd_rigid_body_api = UsdPhysics.RigidBodyAPI(rigid_body_prim) - # retrieve the physx rigid-body api - physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI(rigid_body_prim) - if not physx_rigid_body_api: - physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI.Apply(rigid_body_prim) + # ensure PhysX rigid body API is applied + applied_schemas = rigid_body_prim.GetAppliedSchemas() + if "PhysxRigidBodyAPI" not in applied_schemas: + rigid_body_prim.AddAppliedSchema("PhysxRigidBodyAPI") # convert to dict cfg = cfg.to_dict() @@ -318,9 +328,11 @@ def modify_rigid_body_properties( for attr_name in ["rigid_body_enabled", "kinematic_enabled"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_rigid_body_api, attr_name, value, camel_case=True) - # set into PhysX API + # set into PhysX API (prim attributes under physxRigidBody:*) for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_rigid_body_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim( + rigid_body_prim, f"physxRigidBody:{to_camel_case(attr_name, 'cC')}", value, camel_case=False + ) # success return True @@ -402,20 +414,27 @@ def modify_collision_properties( return False # retrieve the USD collision api usd_collision_api = UsdPhysics.CollisionAPI(collider_prim) - # retrieve the collision api - physx_collision_api = PhysxSchema.PhysxCollisionAPI(collider_prim) - if not physx_collision_api: - physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(collider_prim) - + # ensure PhysX collision API is applied + applied_schemas = collider_prim.GetAppliedSchemas() + if "PhysxCollisionAPI" not in applied_schemas: + collider_prim.AddAppliedSchema("PhysxCollisionAPI") + + mesh_collision_cfg = getattr(cfg, "mesh_collision_property", None) + if mesh_collision_cfg is not None: + modify_mesh_collision_properties(prim_path, mesh_collision_cfg, stage) # convert to dict cfg = cfg.to_dict() + # pop the mesh_collision_property since it is already set + cfg.pop("mesh_collision_property", None) # set into USD API for attr_name in ["collision_enabled"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_collision_api, attr_name, value, camel_case=True) - # set into PhysX API + # set into PhysX API (prim attributes under physxCollision:*) for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_collision_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim( + collider_prim, f"physxCollision:{to_camel_case(attr_name, 'cC')}", value, camel_case=False + ) # success return True @@ -550,17 +569,14 @@ def activate_contact_sensors(prim_path: str, threshold: float = 0.0, stage: Usd. # check its children if child_prim.HasAPI(UsdPhysics.RigidBodyAPI): # set sleep threshold to zero - rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPrimPath()) - rb.CreateSleepThresholdAttr().Set(0.0) + child_applied = child_prim.GetAppliedSchemas() + if "PhysxRigidBodyAPI" not in child_applied: + child_prim.AddAppliedSchema("PhysxRigidBodyAPI") + safe_set_attribute_on_usd_prim(child_prim, "physxRigidBody:sleepThreshold", 0.0, camel_case=False) # add contact report API with threshold of zero - if not child_prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - logger.debug(f"Adding contact report API to prim: '{child_prim.GetPrimPath()}'") - cr_api = PhysxSchema.PhysxContactReportAPI.Apply(child_prim) - else: - logger.debug(f"Contact report API already exists on prim: '{child_prim.GetPrimPath()}'") - cr_api = PhysxSchema.PhysxContactReportAPI.Get(stage, child_prim.GetPrimPath()) - # set threshold to zero - cr_api.CreateThresholdAttr().Set(threshold) + if "PhysxContactReportAPI" not in child_applied: + child_prim.AddAppliedSchema("PhysxContactReportAPI") + safe_set_attribute_on_usd_prim(child_prim, "physxContactReport:threshold", threshold, camel_case=False) # increment number of contact sensors num_contact_sensors += 1 else: @@ -568,6 +584,19 @@ def activate_contact_sensors(prim_path: str, threshold: float = 0.0, stage: Usd. all_prims += child_prim.GetChildren() # check if no contact sensors were found if num_contact_sensors == 0: + descendant_count = 0 + frontier = [prim] + while frontier: + node = frontier.pop(0) + children = list(node.GetChildren()) + descendant_count += len(children) + frontier.extend(children) + logger.warning( + "[activate_contact_sensors] no rigid bodies found under prim=%r (type=%r, descendants=%d)", + prim_path, + prim.GetTypeName(), + descendant_count, + ) raise ValueError( f"No contact sensors added to the prim: '{prim_path}'. This means that no rigid bodies" " are present under this prim. Please check the prim path." @@ -634,18 +663,17 @@ def modify_joint_drive_properties( else: return False # check that prim is not a tendon child prim - # note: root prim is what "controls" the tendon so we still want to apply the drive to it - if prim.HasAPI(PhysxSchema.PhysxTendonAxisAPI) and not prim.HasAPI(PhysxSchema.PhysxTendonAxisRootAPI): + applied_schemas_str = str(prim.GetAppliedSchemas()) + if "PhysxTendonAxisAPI" in applied_schemas_str and "PhysxTendonAxisRootAPI" not in applied_schemas_str: return False # check if prim has joint drive applied on it usd_drive_api = UsdPhysics.DriveAPI(prim, drive_api_name) if not usd_drive_api: usd_drive_api = UsdPhysics.DriveAPI.Apply(prim, drive_api_name) - # check if prim has Physx joint drive applied on it - physx_joint_api = PhysxSchema.PhysxJointAPI(prim) - if not physx_joint_api: - physx_joint_api = PhysxSchema.PhysxJointAPI.Apply(prim) + # ensure PhysX joint API is applied + if "PhysxJointAPI" not in applied_schemas_str: + prim.AddAppliedSchema("PhysxJointAPI") # mapping from configuration name to USD attribute name cfg_to_usd_map = { @@ -656,6 +684,16 @@ def modify_joint_drive_properties( # convert to dict cfg = cfg.to_dict() + # ensure_drives_exist: if both stiffness and damping are zero on the authored drive, + # set a minimal stiffness so that backends like Newton recognise the drive as active. + ensure_drives = cfg.pop("ensure_drives_exist", False) + if ensure_drives and cfg["stiffness"] is None and cfg["damping"] is None: + # read the current values from the drive + cur_stiffness = usd_drive_api.GetStiffnessAttr().Get() + cur_damping = usd_drive_api.GetDampingAttr().Get() + if (cur_stiffness is None or cur_stiffness == 0.0) and (cur_damping is None or cur_damping == 0.0): + cfg["stiffness"] = 1e-3 + # check if linear drive is_linear_drive = prim.IsA(UsdPhysics.PrismaticJoint) # convert values for angular drives from radians to degrees units @@ -670,11 +708,13 @@ def modify_joint_drive_properties( # N-m-s/rad --> N-m-s/deg cfg["damping"] = cfg["damping"] * math.pi / 180.0 - # set into PhysX API + # set into PhysX API (prim attributes under physxJoint:*) for attr_name in ["max_velocity"]: value = cfg.pop(attr_name, None) - attr_name = cfg_to_usd_map[attr_name] - safe_set_attribute_on_usd_schema(physx_joint_api, attr_name, value, camel_case=True) + usd_attr_name = cfg_to_usd_map[attr_name] + safe_set_attribute_on_usd_prim( + prim, f"physxJoint:{to_camel_case(usd_attr_name, 'cC')}", value, camel_case=False + ) # set into USD API for attr_name, attr_value in cfg.items(): attr_name = cfg_to_usd_map.get(attr_name, attr_name) @@ -726,24 +766,23 @@ def modify_fixed_tendon_properties( # get USD prim tendon_prim = stage.GetPrimAtPath(prim_path) # check if prim has fixed tendon applied on it - has_root_fixed_tendon = tendon_prim.HasAPI(PhysxSchema.PhysxTendonAxisRootAPI) - if not has_root_fixed_tendon: + applied_schemas = tendon_prim.GetAppliedSchemas() + if not any("PhysxTendonAxisRootAPI" in s for s in applied_schemas): return False # resolve all available instances of the schema since it is multi-instance - for schema_name in tendon_prim.GetAppliedSchemas(): - # only consider the fixed tendon schema + cfg = cfg.to_dict() + for schema_name in applied_schemas: if "PhysxTendonAxisRootAPI" not in schema_name: continue - # retrieve the USD tendon api - instance_name = schema_name.split(":")[-1] - physx_tendon_axis_api = PhysxSchema.PhysxTendonAxisRootAPI(tendon_prim, instance_name) - - # convert to dict - cfg = cfg.to_dict() - # set into PhysX API + # set into PhysX API by attribute prefix schema_name: (e.g. PhysxTendonAxisRootAPI:default:stiffness) for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_tendon_axis_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim( + tendon_prim, + f"{schema_name}:{to_camel_case(attr_name, 'cC')}", + value, + camel_case=False, + ) # success return True @@ -792,248 +831,90 @@ def modify_spatial_tendon_properties( # get USD prim tendon_prim = stage.GetPrimAtPath(prim_path) # check if prim has spatial tendon applied on it - has_spatial_tendon = tendon_prim.HasAPI(PhysxSchema.PhysxTendonAttachmentRootAPI) or tendon_prim.HasAPI( - PhysxSchema.PhysxTendonAttachmentLeafAPI + applied_schemas = tendon_prim.GetAppliedSchemas() + has_spatial = any( + "PhysxTendonAttachmentRootAPI" in s or "PhysxTendonAttachmentLeafAPI" in s for s in applied_schemas ) - if not has_spatial_tendon: + if not has_spatial: return False - # resolve all available instances of the schema since it is multi-instance - for schema_name in tendon_prim.GetAppliedSchemas(): - # only consider the spatial tendon schema - # retrieve the USD tendon api - if "PhysxTendonAttachmentRootAPI" in schema_name: - instance_name = schema_name.split(":")[-1] - physx_tendon_spatial_api = PhysxSchema.PhysxTendonAttachmentRootAPI(tendon_prim, instance_name) - elif "PhysxTendonAttachmentLeafAPI" in schema_name: - instance_name = schema_name.split(":")[-1] - physx_tendon_spatial_api = PhysxSchema.PhysxTendonAttachmentLeafAPI(tendon_prim, instance_name) - else: + cfg = cfg.to_dict() + for schema_name in applied_schemas: + if "PhysxTendonAttachmentRootAPI" not in schema_name and "PhysxTendonAttachmentLeafAPI" not in schema_name: continue - # convert to dict - cfg = cfg.to_dict() - # set into PhysX API for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_tendon_spatial_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim( + tendon_prim, + f"{schema_name}:{to_camel_case(attr_name, 'cC')}", + value, + camel_case=False, + ) # success return True """ -Deformable body properties. +Collision mesh properties. """ -def define_deformable_body_properties( - prim_path: str, cfg: schemas_cfg.DeformableBodyPropertiesCfg, stage: Usd.Stage | None = None -): - """Apply the deformable body schema on the input prim and set its properties. - - See :func:`modify_deformable_body_properties` for more details on how the properties are set. - - .. note:: - If the input prim is not a mesh, this function will traverse the prim and find the first mesh - under it. If no mesh or multiple meshes are found, an error is raised. This is because the deformable - body schema can only be applied to a single mesh. - - Args: - prim_path: The prim path where to apply the deformable body schema. - cfg: The configuration for the deformable body. - stage: The stage where to find the prim. Defaults to None, in which case the - current stage is used. - - Raises: - ValueError: When the prim path is not valid. - ValueError: When the prim has no mesh or multiple meshes. - """ - # get stage handle - if stage is None: - stage = get_current_stage() - - # get USD prim - prim = stage.GetPrimAtPath(prim_path) - # check if prim path is valid - if not prim.IsValid(): - raise ValueError(f"Prim path '{prim_path}' is not valid.") - - # traverse the prim and get the mesh - matching_prims = get_all_matching_child_prims(prim_path, lambda p: p.GetTypeName() == "Mesh") - # check if the mesh is valid - if len(matching_prims) == 0: - raise ValueError(f"Could not find any mesh in '{prim_path}'. Please check asset.") - if len(matching_prims) > 1: - # get list of all meshes found - mesh_paths = [p.GetPrimPath() for p in matching_prims] - raise ValueError( - f"Found multiple meshes in '{prim_path}': {mesh_paths}." - " Deformable body schema can only be applied to one mesh." - ) - - # get deformable-body USD prim - mesh_prim = matching_prims[0] - # check if prim has deformable-body applied on it - if not PhysxSchema.PhysxDeformableBodyAPI(mesh_prim): - PhysxSchema.PhysxDeformableBodyAPI.Apply(mesh_prim) - # set deformable body properties - modify_deformable_body_properties(mesh_prim.GetPrimPath(), cfg, stage) - - -@apply_nested -def modify_deformable_body_properties( - prim_path: str, cfg: schemas_cfg.DeformableBodyPropertiesCfg, stage: Usd.Stage | None = None -): - """Modify PhysX parameters for a deformable body prim. - - A `deformable body`_ is a single body that can be simulated by PhysX. Unlike rigid bodies, deformable bodies - support relative motion of the nodes in the mesh. Consequently, they can be used to simulate deformations - under applied forces. - - PhysX soft body simulation employs Finite Element Analysis (FEA) to simulate the deformations of the mesh. - It uses two tetrahedral meshes to represent the deformable body: +def _get_physx_collision_namespace(schema_name: str) -> str: + """Convert PhysX schema name to attribute namespace used on the prim.""" + if not schema_name: + raise ValueError("PhysX schema name must be provided for mesh collision properties.") + schema_name = schema_name.removesuffix("API") + return schema_name[0].lower() + schema_name[1:] - 1. **Simulation mesh**: This mesh is used for the simulation and is the one that is deformed by the solver. - 2. **Collision mesh**: This mesh only needs to match the surface of the simulation mesh and is used for - collision detection. - For most applications, we assume that the above two meshes are computed from the "render mesh" of the deformable - body. The render mesh is the mesh that is visible in the scene and is used for rendering purposes. It is composed - of triangles and is the one that is used to compute the above meshes based on PhysX cookings. - - The schema comprises of attributes that belong to the `PhysxDeformableBodyAPI`_. schemas containing the PhysX - parameters for the deformable body. - - .. caution:: - The deformable body schema is still under development by the Omniverse team. The current implementation - works with the PhysX schemas shipped with Isaac Sim 4.0.0 onwards. It may change in future releases. - - .. note:: - This function is decorated with :func:`apply_nested` that sets the properties to all the prims - (that have the schema applied on them) under the input prim path. - - .. _deformable body: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/SoftBodies.html - .. _PhysxDeformableBodyAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_deformable_a_p_i.html - - Args: - prim_path: The prim path to the deformable body. - cfg: The configuration for the deformable body. - stage: The stage where to find the prim. Defaults to None, in which case the - current stage is used. - - Returns: - True if the properties were successfully set, False otherwise. - """ - # get stage handle - if stage is None: - stage = get_current_stage() - - # get deformable-body USD prim - deformable_body_prim = stage.GetPrimAtPath(prim_path) - - # check if the prim is valid and has the deformable-body API - if not deformable_body_prim.IsValid() or not PhysxSchema.PhysxDeformableBodyAPI(deformable_body_prim): - return False - - # retrieve the physx deformable-body api - physx_deformable_body_api = PhysxSchema.PhysxDeformableBodyAPI(deformable_body_prim) - # retrieve the physx deformable api - physx_deformable_api = PhysxSchema.PhysxDeformableAPI(physx_deformable_body_api) - - # convert to dict - cfg = cfg.to_dict() - # set into deformable body API - attr_kwargs = { - attr_name: cfg.pop(attr_name) - for attr_name in [ - "kinematic_enabled", - "collision_simplification", - "collision_simplification_remeshing", - "collision_simplification_remeshing_resolution", - "collision_simplification_target_triangle_count", - "collision_simplification_force_conforming", - "simulation_hexahedral_resolution", - "solver_position_iteration_count", - "vertex_velocity_damping", - "sleep_damping", - "sleep_threshold", - "settling_threshold", - "self_collision", - "self_collision_filter_distance", - ] - } - status = deformable_utils.add_physx_deformable_body(stage, prim_path=prim_path, **attr_kwargs) - # check if the deformable body was successfully added - if not status: - return False - - # obtain the PhysX collision API (this is set when the deformable body is added) - physx_collision_api = PhysxSchema.PhysxCollisionAPI(deformable_body_prim) - - # set into PhysX API - for attr_name, value in cfg.items(): - if attr_name in ["rest_offset", "contact_offset"]: - safe_set_attribute_on_usd_schema(physx_collision_api, attr_name, value, camel_case=True) - else: - safe_set_attribute_on_usd_schema(physx_deformable_api, attr_name, value, camel_case=True) - - # success - return True - - -""" -Collision mesh properties. -""" +def _get_usd_mesh_collision_api(api_name: str): + """Resolve the USD mesh collision API from a string name.""" + if not api_name: + raise ValueError("USD schema name must be provided for mesh collision properties.") + usd_api = getattr(UsdPhysics, api_name, None) + if usd_api is None: + raise ValueError(f"USD schema '{api_name}' not found in UsdPhysics.") + return usd_api def extract_mesh_collision_api_and_attrs( cfg: schemas_cfg.MeshCollisionPropertiesCfg, -) -> tuple[Callable, dict[str, Any]]: - """Extract the mesh collision API function and custom attributes from the configuration. +) -> tuple[tuple[str, Any], dict[str, Any]]: + """Extract the mesh collision API type/value and custom attributes from the configuration. Args: cfg: The configuration for the mesh collision properties. Returns: - A tuple containing the API function to use and a dictionary of custom attributes. + A tuple of ((api_type, api_value), custom_attrs). api_type is "usd" or "physx"; + api_value is the USD API class (callable) or PhysX schema name string. Raises: ValueError: When neither USD nor PhysX API can be determined to be used. """ - # We use the number of user set attributes outside of the API function - # to determine which API to use in ambiguous cases, so collect them here custom_attrs = { key: value for key, value in cfg.to_dict().items() - if value is not None and key not in ["usd_func", "physx_func", "mesh_approximation_name"] + if value is not None and key not in ["usd_api", "physx_api", "mesh_approximation_name"] } use_usd_api = False - use_phsyx_api = False + use_physx_api = False - # We have some custom attributes and allow them if len(custom_attrs) > 0 and type(cfg) in PHYSX_MESH_COLLISION_CFGS: - use_phsyx_api = True - # We have no custom attributes + use_physx_api = True elif len(custom_attrs) == 0: if type(cfg) in USD_MESH_COLLISION_CFGS: - # Use the USD API use_usd_api = True else: - # Use the PhysX API - use_phsyx_api = True - + use_physx_api = True elif len(custom_attrs) > 0 and type(cfg) in USD_MESH_COLLISION_CFGS: raise ValueError("Args are specified but the USD Mesh API doesn't support them!") - if use_usd_api: - # Use USD API for corresponding attributes - # For mesh collision approximation attribute, we set it explicitly in `modify_mesh_collision_properties`` - api_func = cfg.usd_func - elif use_phsyx_api: - api_func = cfg.physx_func - else: - raise ValueError("Either USD or PhysX API should be used for modifying mesh collision attributes!") - - return api_func, custom_attrs + if use_usd_api and getattr(cfg, "usd_api", None): + return ("usd", cfg.usd_api), custom_attrs + if use_physx_api and getattr(cfg, "physx_api", None): + return ("physx", cfg.physx_api), custom_attrs + raise ValueError("Either USD or PhysX API should be used for modifying mesh collision attributes!") def define_mesh_collision_properties( @@ -1058,11 +939,15 @@ def define_mesh_collision_properties( if not prim.IsValid(): raise ValueError(f"Prim path '{prim_path}' is not valid.") - api_func, _ = extract_mesh_collision_api_and_attrs(cfg=cfg) + (api_type, api_value), _ = extract_mesh_collision_api_and_attrs(cfg=cfg) - # Only enable if not already enabled - if not api_func(prim): - api_func.Apply(prim) + if api_type == "usd": + usd_api_class = _get_usd_mesh_collision_api(api_value) + if not usd_api_class(prim): + usd_api_class.Apply(prim) + else: + if api_value not in prim.GetAppliedSchemas(): + prim.AddAppliedSchema(api_value) modify_mesh_collision_properties(prim_path=prim_path, cfg=cfg, stage=stage) @@ -1108,19 +993,23 @@ def modify_mesh_collision_properties( UsdPhysics.MeshCollisionAPI(prim), "Approximation", approximation_token, camel_case=False ) - api_func, custom_attrs = extract_mesh_collision_api_and_attrs(cfg=cfg) - - # retrieve the mesh collision API - mesh_collision_api = api_func(prim) + (api_type, api_value), custom_attrs = extract_mesh_collision_api_and_attrs(cfg=cfg) - # set custom attributes into mesh collision API - for attr_name, value in custom_attrs.items(): - # Only "Attribute" attr should be in format "boundingSphere", so set camel_case to be False - if attr_name == "Attribute": - camel_case = False - else: - camel_case = True - safe_set_attribute_on_usd_schema(mesh_collision_api, attr_name, value, camel_case=camel_case) + if api_type == "usd": + usd_api_class = _get_usd_mesh_collision_api(api_value) + mesh_collision_api = usd_api_class(prim) + if not mesh_collision_api: + return False + for attr_name, value in custom_attrs.items(): + camel_case = attr_name != "Attribute" + safe_set_attribute_on_usd_schema(mesh_collision_api, attr_name, value, camel_case=camel_case) + else: + if api_value not in prim.GetAppliedSchemas(): + return False + attr_namespace = _get_physx_collision_namespace(api_value) + for attr_name, value in custom_attrs.items(): + attr_token = attr_name if attr_name == "Attribute" else to_camel_case(attr_name, "cC") + safe_set_attribute_on_usd_prim(prim, f"{attr_namespace}:{attr_token}", value, camel_case=False) # success return True diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py b/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py index 446d7faa105d..69cbc8bd5304 100644 --- a/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py +++ b/source/isaaclab/isaaclab/sim/schemas/schemas_cfg.py @@ -3,10 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import MISSING -from typing import Literal +from __future__ import annotations -from pxr import PhysxSchema, UsdPhysics +from typing import Literal from isaaclab.utils import configclass @@ -232,6 +231,18 @@ class JointDrivePropertiesCfg: * For angular joints, the unit is kg-m^2/s/rad (N-m-s/rad). """ + ensure_drives_exist: bool = False + """If True, ensure every joint has a non-zero drive so that physics backends + (e.g. Newton) create proper actuators for it. + + When a USD asset defines ``PhysicsDriveAPI`` with ``stiffness=0`` and + ``damping=0``, some backends treat the joint as passive (no PD control). + Enabling this flag writes a minimal stiffness (``1e-3``) to any drive whose + stiffness *and* damping are both zero, guaranteeing that the backend + recognises the drive as active. The actual gains are expected to be + overridden later by the actuator model. + """ + @configclass class FixedTendonPropertiesCfg: @@ -298,162 +309,21 @@ class SpatialTendonPropertiesCfg: """ -@configclass -class DeformableBodyPropertiesCfg: - """Properties to apply to a deformable body. - - A deformable body is a body that can deform under forces. The configuration allows users to specify - the properties of the deformable body, such as the solver iteration counts, damping, and self-collision. - - An FEM-based deformable body is created by providing a collision mesh and simulation mesh. The collision mesh - is used for collision detection and the simulation mesh is used for simulation. The collision mesh is usually - a simplified version of the simulation mesh. - - Based on the above, the PhysX team provides APIs to either set the simulation and collision mesh directly - (by specifying the points) or to simplify the collision mesh based on the simulation mesh. The simplification - process involves remeshing the collision mesh and simplifying it based on the target triangle count. - - Since specifying the collision mesh points directly is not a common use case, we only expose the parameters - to simplify the collision mesh based on the simulation mesh. If you want to provide the collision mesh points, - please open an issue on the repository and we can add support for it. - - See :meth:`modify_deformable_body_properties` for more information. - - .. note:: - If the values are :obj:`None`, they are not modified. This is useful when you want to set only a subset of - the properties and leave the rest as-is. - """ - - deformable_enabled: bool | None = None - """Enables deformable body.""" - - kinematic_enabled: bool = False - """Enables kinematic body. Defaults to False, which means that the body is not kinematic. - - Similar to rigid bodies, this allows setting user-driven motion for the deformable body. For more information, - please refer to the `documentation `__. - """ - - self_collision: bool | None = None - """Whether to enable or disable self-collisions for the deformable body based on the rest position distances.""" - - self_collision_filter_distance: float | None = None - """Penetration value that needs to get exceeded before contacts for self collision are generated. - - This parameter must be greater than of equal to twice the :attr:`rest_offset` value. - - This value has an effect only if :attr:`self_collision` is enabled. - """ - - settling_threshold: float | None = None - """Threshold vertex velocity (in m/s) under which sleep damping is applied in addition to velocity damping.""" - - sleep_damping: float | None = None - """Coefficient for the additional damping term if fertex velocity drops below setting threshold.""" - - sleep_threshold: float | None = None - """The velocity threshold (in m/s) under which the vertex becomes a candidate for sleeping in the next step.""" - - solver_position_iteration_count: int | None = None - """Number of the solver positional iterations per step. Range is [1,255]""" - - vertex_velocity_damping: float | None = None - """Coefficient for artificial damping on the vertex velocity. - - This parameter can be used to approximate the effect of air drag on the deformable body. - """ - - simulation_hexahedral_resolution: int = 10 - """The target resolution for the hexahedral mesh used for simulation. Defaults to 10. - - Note: - This value is ignored if the user provides the simulation mesh points directly. However, we assume that - most users will not provide the simulation mesh points directly. If you want to provide the simulation mesh - directly, please set this value to :obj:`None`. - """ - - collision_simplification: bool = True - """Whether or not to simplify the collision mesh before creating a soft body out of it. Defaults to True. - - Note: - This flag is ignored if the user provides the simulation mesh points directly. However, we assume that - most users will not provide the simulation mesh points directly. Hence, this flag is enabled by default. - - If you want to provide the simulation mesh points directly, please set this flag to False. - """ - - collision_simplification_remeshing: bool = True - """Whether or not the collision mesh should be remeshed before simplification. Defaults to True. - - This parameter is ignored if :attr:`collision_simplification` is False. - """ - - collision_simplification_remeshing_resolution: int = 0 - """The resolution used for remeshing. Defaults to 0, which means that a heuristic is used to determine the - resolution. - - This parameter is ignored if :attr:`collision_simplification_remeshing` is False. - """ - - collision_simplification_target_triangle_count: int = 0 - """The target triangle count used for the simplification. Defaults to 0, which means that a heuristic based on - the :attr:`simulation_hexahedral_resolution` is used to determine the target count. - - This parameter is ignored if :attr:`collision_simplification` is False. - """ - - collision_simplification_force_conforming: bool = True - """Whether or not the simplification should force the output mesh to conform to the input mesh. Defaults to True. - - The flag indicates that the tretrahedralizer used to generate the collision mesh should produce tetrahedra - that conform to the triangle mesh. If False, the simplifier uses the output from the tretrahedralizer used. - - This parameter is ignored if :attr:`collision_simplification` is False. - """ - - contact_offset: float | None = None - """Contact offset for the collision shape (in m). - - The collision detector generates contact points as soon as two shapes get closer than the sum of their - contact offsets. This quantity should be non-negative which means that contact generation can potentially start - before the shapes actually penetrate. - """ - - rest_offset: float | None = None - """Rest offset for the collision shape (in m). - - The rest offset quantifies how close a shape gets to others at rest, At rest, the distance between two - vertically stacked objects is the sum of their rest offsets. If a pair of shapes have a positive rest - offset, the shapes will be separated at rest by an air gap. - """ - - max_depenetration_velocity: float | None = None - """Maximum depenetration velocity permitted to be introduced by the solver (in m/s).""" - - @configclass class MeshCollisionPropertiesCfg: """Properties to apply to a mesh in regards to collision. See :meth:`set_mesh_collision_properties` for more information. .. note:: - If the values are MISSING, they are not modified. This is useful when you want to set only a subset of + If the values are None, they are not modified. This is useful when you want to set only a subset of the properties and leave the rest as-is. """ - usd_func: callable = MISSING - """USD API function for modifying mesh collision properties. - Refer to - `original USD Documentation `_ - for more information. - """ + usd_api: str | None = None + """USD API name for mesh collision (e.g. 'MeshCollisionAPI').""" - physx_func: callable = MISSING - """PhysX API function for modifying mesh collision properties. - Refer to - `original PhysX Documentation `_ - for more information. - """ + physx_api: str | None = None + """PhysX schema name for mesh collision (e.g. 'PhysxConvexDecompositionCollisionAPI').""" mesh_approximation_name: str = "none" """Name of mesh collision approximation method. Default: "none". @@ -463,7 +333,7 @@ class MeshCollisionPropertiesCfg: @configclass class BoundingCubePropertiesCfg(MeshCollisionPropertiesCfg): - usd_func: callable = UsdPhysics.MeshCollisionAPI + usd_api: str = "MeshCollisionAPI" """Original USD Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html """ @@ -476,7 +346,7 @@ class BoundingCubePropertiesCfg(MeshCollisionPropertiesCfg): @configclass class BoundingSpherePropertiesCfg(MeshCollisionPropertiesCfg): - usd_func: callable = UsdPhysics.MeshCollisionAPI + usd_api: str = "MeshCollisionAPI" """Original USD Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html """ @@ -489,12 +359,12 @@ class BoundingSpherePropertiesCfg(MeshCollisionPropertiesCfg): @configclass class ConvexDecompositionPropertiesCfg(MeshCollisionPropertiesCfg): - usd_func: callable = UsdPhysics.MeshCollisionAPI + usd_api: str = "MeshCollisionAPI" """Original USD Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html """ - physx_func: callable = PhysxSchema.PhysxConvexDecompositionCollisionAPI + physx_api: str = "PhysxConvexDecompositionCollisionAPI" """Original PhysX Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_convex_decomposition_collision_a_p_i.html """ @@ -538,12 +408,12 @@ class ConvexDecompositionPropertiesCfg(MeshCollisionPropertiesCfg): @configclass class ConvexHullPropertiesCfg(MeshCollisionPropertiesCfg): - usd_func: callable = UsdPhysics.MeshCollisionAPI + usd_api: str = "MeshCollisionAPI" """Original USD Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html """ - physx_func: callable = PhysxSchema.PhysxConvexHullCollisionAPI + physx_api: str = "PhysxConvexHullCollisionAPI" """Original PhysX Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_convex_hull_collision_a_p_i.html """ @@ -567,7 +437,7 @@ class ConvexHullPropertiesCfg(MeshCollisionPropertiesCfg): @configclass class TriangleMeshPropertiesCfg(MeshCollisionPropertiesCfg): - physx_func: callable = PhysxSchema.PhysxTriangleMeshCollisionAPI + physx_api: str = "PhysxTriangleMeshCollisionAPI" """Triangle mesh is only supported by PhysX API. Original PhysX Documentation: @@ -589,12 +459,12 @@ class TriangleMeshPropertiesCfg(MeshCollisionPropertiesCfg): @configclass class TriangleMeshSimplificationPropertiesCfg(MeshCollisionPropertiesCfg): - usd_func: callable = UsdPhysics.MeshCollisionAPI + usd_api: str = "MeshCollisionAPI" """Original USD Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_usd_physics_mesh_collision_a_p_i.html """ - physx_func: callable = PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI + physx_api: str = "PhysxTriangleMeshSimplificationCollisionAPI" """Original PhysX Documentation: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/class_physx_schema_physx_triangle_mesh_simplification_collision_a_p_i.html """ @@ -619,7 +489,7 @@ class TriangleMeshSimplificationPropertiesCfg(MeshCollisionPropertiesCfg): @configclass class SDFMeshPropertiesCfg(MeshCollisionPropertiesCfg): - physx_func: callable = PhysxSchema.PhysxSDFMeshCollisionAPI + physx_api: str = "PhysxSDFMeshCollisionAPI" """SDF mesh is only supported by PhysX API. Original PhysX documentation: diff --git a/source/isaaclab/isaaclab/sim/simulation_cfg.py b/source/isaaclab/isaaclab/sim/simulation_cfg.py index 06ed4826af68..6c2e97bb7e82 100644 --- a/source/isaaclab/isaaclab/sim/simulation_cfg.py +++ b/source/isaaclab/isaaclab/sim/simulation_cfg.py @@ -9,190 +9,14 @@ configuring the environment instances, viewer settings, and simulation parameters. """ -from typing import Any, Literal +from __future__ import annotations -from isaaclab.utils import configclass - -from .spawners.materials import RigidBodyMaterialCfg - - -@configclass -class PhysxCfg: - """Configuration for PhysX solver-related parameters. - - These parameters are used to configure the PhysX solver. For more information, see the `PhysX 5 SDK - documentation`_. - - PhysX 5 supports GPU-accelerated physics simulation. This is enabled by default, but can be disabled - by setting the :attr:`~SimulationCfg.device` to ``cpu`` in :class:`SimulationCfg`. Unlike CPU PhysX, the GPU - simulation feature is unable to dynamically grow all the buffers. Therefore, it is necessary to provide - a reasonable estimate of the buffer sizes for GPU features. If insufficient buffer sizes are provided, the - simulation will fail with errors and lead to adverse behaviors. The buffer sizes can be adjusted through the - ``gpu_*`` parameters. - - .. _PhysX 5 SDK documentation: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/_api_build/classPxSceneDesc.html - - """ - - solver_type: Literal[0, 1] = 1 - """The type of solver to use.Default is 1 (TGS). - - Available solvers: - - * :obj:`0`: PGS (Projective Gauss-Seidel) - * :obj:`1`: TGS (Temporal Gauss-Seidel) - """ - - solve_articulation_contact_last: bool = False - """Changes the ordering inside the articulation solver. Default is False. - - PhysX employs a strict ordering for handling constraints in an articulation. The outcome of - each constraint resolution modifies the joint and associated link speeds. However, the default - ordering may not be ideal for gripping scenarios because the solver favours the constraint - types that are resolved last. This is particularly true of stiff constraint systems that are hard - to resolve without resorting to vanishingly small simulation timesteps. - - With dynamic contact resolution being such an important part of gripping, it may make - more sense to solve dynamic contact towards the end of the solver rather than at the - beginning. This parameter modifies the default ordering to enable this change. - - For more information, please check `here `__. - - .. versionadded:: v2.3 - This parameter is only available with Isaac Sim 5.1. - - """ - - min_position_iteration_count: int = 1 - """Minimum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 1. - - .. note:: - - Each physics actor in Omniverse specifies its own solver iteration count. The solver takes - the number of iterations specified by the actor with the highest iteration and clamps it to - the range ``[min_position_iteration_count, max_position_iteration_count]``. - """ - - max_position_iteration_count: int = 255 - """Maximum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 255. - - .. note:: - - Each physics actor in Omniverse specifies its own solver iteration count. The solver takes - the number of iterations specified by the actor with the highest iteration and clamps it to - the range ``[min_position_iteration_count, max_position_iteration_count]``. - """ - - min_velocity_iteration_count: int = 0 - """Minimum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 0. - - .. note:: - - Each physics actor in Omniverse specifies its own solver iteration count. The solver takes - the number of iterations specified by the actor with the highest iteration and clamps it to - the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``. - """ - - max_velocity_iteration_count: int = 255 - """Maximum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 255. - - .. note:: - - Each physics actor in Omniverse specifies its own solver iteration count. The solver takes - the number of iterations specified by the actor with the highest iteration and clamps it to - the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``. - """ - - enable_ccd: bool = False - """Enable a second broad-phase pass that makes it possible to prevent objects from tunneling through each other. - Default is False.""" - - enable_stabilization: bool = False - """Enable/disable additional stabilization pass in solver. Default is False. - - .. note:: - - We recommend setting this flag to true only when the simulation step size is large - (i.e., less than 30 Hz or more than 0.0333 seconds). - - .. warning:: - - Enabling this flag may lead to incorrect contact forces report from the contact sensor. - """ - - enable_external_forces_every_iteration: bool = False - """Enable/disable external forces every position iteration in the TGS solver. Default is False. - - When using the TGS solver (:attr:`solver_type` is 1), this flag allows enabling external forces every solver - position iteration. This can help improve the accuracy of velocity updates. Consider enabling this flag if - the velocities generated by the simulation are noisy. Increasing the number of velocity iterations, together - with this flag, can help improve the accuracy of velocity updates. - - .. note:: - - This flag is ignored when using the PGS solver (:attr:`solver_type` is 0). - """ - - enable_enhanced_determinism: bool = False - """Enable/disable improved determinism at the expense of performance. Defaults to False. - - For more information on PhysX determinism, please check `here`_. - - .. _here: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/RigidBodyDynamics.html#enhanced-determinism - """ - - bounce_threshold_velocity: float = 0.5 - """Relative velocity threshold for contacts to bounce (in m/s). Default is 0.5 m/s.""" - - friction_offset_threshold: float = 0.04 - """Threshold for contact point to experience friction force (in m). Default is 0.04 m.""" - - friction_correlation_distance: float = 0.025 - """Distance threshold for merging contacts into a single friction anchor point (in m). Default is 0.025 m.""" - - gpu_max_rigid_contact_count: int = 2**23 - """Size of rigid contact stream buffer allocated in pinned host memory. Default is 2 ** 23.""" +from typing import Any, Literal # Literal used by RenderCfg - gpu_max_rigid_patch_count: int = 5 * 2**15 - """Size of the rigid contact patch stream buffer allocated in pinned host memory. Default is 5 * 2 ** 15.""" - - gpu_found_lost_pairs_capacity: int = 2**21 - """Capacity of found and lost buffers allocated in GPU global memory. Default is 2 ** 21. - - This is used for the found/lost pair reports in the BP. - """ - - gpu_found_lost_aggregate_pairs_capacity: int = 2**25 - """Capacity of found and lost buffers in aggregate system allocated in GPU global memory. - Default is 2 ** 25. - - This is used for the found/lost pair reports in AABB manager. - """ - - gpu_total_aggregate_pairs_capacity: int = 2**21 - """Capacity of total number of aggregate pairs allocated in GPU global memory. Default is 2 ** 21.""" - - gpu_collision_stack_size: int = 2**26 - """Size of the collision stack buffer allocated in pinned host memory. Default is 2 ** 26.""" - - gpu_heap_capacity: int = 2**26 - """Initial capacity of the GPU and pinned host memory heaps. Additional memory will be allocated - if more memory is required. Default is 2 ** 26.""" - - gpu_temp_buffer_capacity: int = 2**24 - """Capacity of temp buffer allocated in pinned host memory. Default is 2 ** 24.""" - - gpu_max_num_partitions: int = 8 - """Limitation for the partitions in the GPU dynamics pipeline. Default is 8. - - This variable must be power of 2. A value greater than 32 is currently not supported. Range: (1, 32) - """ - - gpu_max_soft_body_contacts: int = 2**20 - """Size of soft body contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" - - gpu_max_particle_contacts: int = 2**20 - """Size of particle contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" +from isaaclab.physics import PhysicsCfg +from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg +from isaaclab.utils import configclass +from isaaclab.visualizers import VisualizerCfg @configclass @@ -214,9 +38,9 @@ class RenderCfg: """ enable_translucency: bool | None = None - """Enables translucency for specular transmissive surfaces such as glass at the cost of some performance. - Default is False. + """Enables translucency for specular transmissive surfaces such as glass. + This comes at the cost of some performance. Default is False. This is set by the variable: ``/rtx/translucency/enabled``. """ @@ -236,8 +60,8 @@ class RenderCfg: """Selects the anti-aliasing mode to use. Defaults to DLSS. - **DLSS**: Boosts performance by using AI to output higher resolution frames from a lower resolution input. - DLSS samples multiple lower resolution images and uses motion data and feedback from prior frames to - reconstruct native quality images. + DLSS samples multiple lower resolution images and uses motion data and feedback from prior frames to reconstruct + native quality images. - **DLAA**: Provides higher image quality with an AI-based anti-aliasing technique. DLAA uses the same Super Resolution technology developed for DLSS, reconstructing a native resolution image to maximize image quality. @@ -331,11 +155,8 @@ class RenderCfg: """A general dictionary for users to supply all carb rendering settings with native names. The keys of the dictionary can be formatted like a carb setting, .kit file setting, or python variable. - For instance, a key value pair can be: - - - ``/rtx/translucency/enabled: False`` (carb) - - ``rtx.translucency.enabled: False`` (.kit) - - ``rtx_translucency_enabled: False`` (python) + For instance, a key value pair can be ``/rtx/translucency/enabled: False`` (carb), + ``rtx.translucency.enabled: False`` (.kit), or ``rtx_translucency_enabled: False`` (python). """ rendering_mode: Literal["performance", "balanced", "quality"] | None = None @@ -347,10 +168,11 @@ class RenderCfg: @configclass class SimulationCfg: - """Configuration for simulation physics.""" + """Configuration for simulation physics. - physics_prim_path: str = "/physicsScene" - """The prim path where the USD PhysicsScene is created. Default is "/physicsScene".""" + This class contains the main simulation parameters including physics time-step, gravity, + device settings, and physics backend configuration. + """ device: str = "cuda:0" """The device to run the simulation on. Default is ``"cuda:0"``. @@ -365,28 +187,19 @@ class SimulationCfg: dt: float = 1.0 / 60.0 """The physics simulation time-step (in seconds). Default is 0.0167 seconds.""" - render_interval: int = 1 - """The number of physics simulation steps per rendering step. Default is 1.""" - gravity: tuple[float, float, float] = (0.0, 0.0, -9.81) - """The gravity vector (in m/s^2). Default is (0.0, 0.0, -9.81). - - If set to (0.0, 0.0, 0.0), gravity is disabled. - """ + """The gravity vector (in m/s^2). Default is (0.0, 0.0, -9.81).""" - enable_scene_query_support: bool = False - """Enable/disable scene query support for collision shapes. Default is False. + physics_prim_path: str = "/physicsScene" + """The prim path where the USD PhysicsScene is created. Default is "/physicsScene".""" - This flag allows performing collision queries (raycasts, sweeps, and overlaps) on actors and - attached shapes in the scene. This is useful for implementing custom collision detection logic - outside of the physics engine. + physics_material: RigidBodyMaterialCfg = RigidBodyMaterialCfg() + """Default physics material settings for rigid bodies. Default is RigidBodyMaterialCfg. - If set to False, the physics engine does not create the scene query manager and the scene query - functionality will not be available. However, this provides some performance speed-up. + The physics engine defaults to this physics material for all the rigid body prims that do not have any + physics material specified on them. - Note: - This flag is overridden to True inside the :class:`SimulationContext` class when running the simulation - with the GUI enabled. This is to allow certain GUI features to work properly. + The material is created at the path: ``{physics_prim_path}/defaultMaterial``. """ use_fabric: bool = True @@ -398,27 +211,31 @@ class SimulationCfg: It is recommended to set this flag to :obj:`True` when running the simulation with a large number of primitives in the scene. + """ - Note: - When enabled, the GUI will not update the physics parameters in real-time. To enable real-time - updates, please set this flag to :obj:`False`. + render_interval: int = 1 + """The number of physics simulation steps per rendering step. Default is 1.""" - When using GPU simulation, it is required to enable Fabric to visualize updates in the renderer. - Transform updates are propagated to the renderer through Fabric. If Fabric is disabled with GPU simulation, - the renderer will not be able to render any updates in the simulation, although simulation will still be - running under the hood. - """ + enable_scene_query_support: bool = False + """Enable/disable scene query support for collision shapes. Default is False. - physx: PhysxCfg = PhysxCfg() - """PhysX solver settings. Default is PhysxCfg().""" + This flag allows performing collision queries (raycasts, sweeps, and overlaps) on actors and + attached shapes in the scene. This is useful for implementing custom collision detection logic + outside of the physics engine. - physics_material: RigidBodyMaterialCfg = RigidBodyMaterialCfg() - """Default physics material settings for rigid bodies. Default is RigidBodyMaterialCfg(). + If set to False, the physics engine does not create the scene query manager and the scene query + functionality will not be available. However, this provides some performance speed-up. - The physics engine defaults to this physics material for all the rigid body prims that do not have any - physics material specified on them. + Note: + This flag is overridden to True inside the :class:`SimulationContext` class when running the simulation + with the GUI enabled. This is to allow certain GUI features to work properly. + """ - The material is created at the path: ``{physics_prim_path}/defaultMaterial``. + physics: PhysicsCfg | None = None + """Physics manager configuration. Default is None (uses PhysxCfg()). + + This configuration determines which physics manager to use. Override with + a different config (e.g., NewtonManagerCfg) to use a different physics backend. """ render: RenderCfg = RenderCfg() @@ -442,3 +259,6 @@ class SimulationCfg: If :attr:`save_logs_to_file` is True, the logs will be saved to the directory specified by :attr:`log_dir`. If None, the logs will be saved to the temp directory. """ + + visualizer_cfgs: list[VisualizerCfg] | VisualizerCfg = [] + """The visualizer configuration(s). Default is an empty list.""" diff --git a/source/isaaclab/isaaclab/sim/simulation_context.py b/source/isaaclab/isaaclab/sim/simulation_context.py index d022c2d32a75..d5ef6f64e9c5 100644 --- a/source/isaaclab/isaaclab/sim/simulation_context.py +++ b/source/isaaclab/isaaclab/sim/simulation_context.py @@ -3,1031 +3,899 @@ # # SPDX-License-Identifier: BSD-3-Clause -import builtins -import enum -import glob +from __future__ import annotations + +import gc import logging import os -import re -import time import traceback -import weakref from collections.abc import Iterator from contextlib import contextmanager -from datetime import datetime -from typing import Any +from typing import TYPE_CHECKING, Any -import flatdict -import numpy as np import toml import torch -import carb -import omni.physx -import omni.usd -from isaacsim.core.api.simulation_context import SimulationContext as _SimulationContext -from isaacsim.core.simulation_manager import SimulationManager -from isaacsim.core.utils.viewports import set_camera_view -from pxr import Gf, PhysxSchema, Sdf, Usd, UsdPhysics, UsdUtils +from pxr import Gf, Usd, UsdGeom, UsdPhysics, UsdUtils import isaaclab.sim as sim_utils -from isaaclab.utils.logger import configure_logging -from isaaclab.utils.version import get_isaac_sim_version +import isaaclab.sim.utils.stage as stage_utils +from isaaclab.app.settings_manager import SettingsManager +from isaaclab.envs.utils.recording_hooks import run_recording_hooks_after_visualizers +from isaaclab.markers.vis_marker_registry import VisMarkerRegistry +from isaaclab.physics import BaseSceneDataProvider, PhysicsManager, SceneDataProvider +from isaaclab.physics.scene_data_requirements import ( + SceneDataRequirement, + resolve_scene_data_requirements, +) +from isaaclab.renderers.render_context import RenderContext +from isaaclab.sim.utils import create_new_stage +from isaaclab.utils.string import clear_resolve_matching_names_cache +from isaaclab.utils.version import has_kit +from isaaclab.visualizers.base_visualizer import BaseVisualizer + +if TYPE_CHECKING: + from isaaclab.cloner.clone_plan import ClonePlan from .simulation_cfg import SimulationCfg from .spawners import DomeLightCfg, GroundPlaneCfg -from .utils import bind_physics_material -# import logger logger = logging.getLogger(__name__) +# Visualizer type names (CLI and config). App launcher parses CSV and stores as a space-separated setting. +_VISUALIZER_TYPES = ("newton", "rerun", "viser", "kit") -class SimulationContext(_SimulationContext): - """A class to control simulation-related events such as physics stepping and rendering. - - The simulation context helps control various simulation aspects. This includes: - * configure the simulator with different settings such as the physics time-step, the number of physics substeps, - and the physics solver parameters (for more information, see :class:`isaaclab.sim.SimulationCfg`) - * playing, pausing, stepping and stopping the simulation - * adding and removing callbacks to different simulation events such as physics stepping, rendering, etc. +class SettingsHelper: + """Helper for typed settings access via SettingsManager.""" - This class inherits from the :class:`isaacsim.core.api.simulation_context.SimulationContext` class and - adds additional functionalities such as setting up the simulation context with a configuration object, - exposing other commonly used simulator-related functions, and performing version checks of Isaac Sim - to ensure compatibility between releases. + def __init__(self, settings: SettingsManager): + self._settings = settings - The simulation context is a singleton object. This means that there can only be one instance - of the simulation context at any given time. This is enforced by the parent class. Therefore, it is - not possible to create multiple instances of the simulation context. Instead, the simulation context - can be accessed using the ``instance()`` method. - - .. attention:: - Since we only support the `PyTorch `_ backend for simulation, the - simulation context is configured to use the ``torch`` backend by default. This means that - all the data structures used in the simulation are ``torch.Tensor`` objects. + def set(self, name: str, value: Any) -> None: + """Set a setting with automatic type routing.""" + if isinstance(value, bool): + self._settings.set_bool(name, value) + elif isinstance(value, int): + self._settings.set_int(name, value) + elif isinstance(value, float): + self._settings.set_float(name, value) + elif isinstance(value, str): + self._settings.set_string(name, value) + elif isinstance(value, (list, tuple)): + self._settings.set(name, value) + else: + raise ValueError(f"Unsupported value type for setting '{name}': {type(value)}") - The simulation context can be used in two different modes of operations: + def get(self, name: str) -> Any: + """Get a setting value.""" + return self._settings.get(name) - 1. **Standalone python script**: In this mode, the user has full control over the simulation and - can trigger stepping events synchronously (i.e. as a blocking call). In this case the user - has to manually call :meth:`step` step the physics simulation and :meth:`render` to - render the scene. - 2. **Omniverse extension**: In this mode, the user has limited control over the simulation stepping - and all the simulation events are triggered asynchronously (i.e. as a non-blocking call). In this - case, the user can only trigger the simulation to start, pause, and stop. The simulation takes - care of stepping the physics simulation and rendering the scene. - Based on above, for most functions in this class there is an equivalent function that is suffixed - with ``_async``. The ``_async`` functions are used in the Omniverse extension mode and - the non-``_async`` functions are used in the standalone python script mode. - """ +class SimulationContext: + """Controls simulation lifecycle including physics stepping and rendering. - class RenderMode(enum.IntEnum): - """Different rendering modes for the simulation. + This singleton class manages: - Render modes correspond to how the viewport and other UI elements (such as listeners to keyboard or mouse - events) are updated. There are three main components that can be updated when the simulation is rendered: + * Physics configuration (time-step, solver parameters via :class:`isaaclab.sim.SimulationCfg`) + * Simulation state (play, pause, step, stop) + * Rendering and visualization - 1. **UI elements and other extensions**: These are UI elements (such as buttons, sliders, etc.) and other - extensions that are running in the background that need to be updated when the simulation is running. - 2. **Cameras**: These are typically based on Hydra textures and are used to render the scene from different - viewpoints. They can be attached to a viewport or be used independently to render the scene. - 3. **Viewports**: These are windows where you can see the rendered scene. + The singleton instance can be accessed using the ``instance()`` class method. + """ - Updating each of the above components has a different overhead. For example, updating the viewports is - computationally expensive compared to updating the UI elements. Therefore, it is useful to be able to - control what is updated when the simulation is rendered. This is where the render mode comes in. There are - four different render modes: + # SINGLETON PATTERN - * :attr:`NO_GUI_OR_RENDERING`: The simulation is running without a GUI and off-screen rendering flag - is disabled, so none of the above are updated. - * :attr:`NO_RENDERING`: No rendering, where only 1 is updated at a lower rate. - * :attr:`PARTIAL_RENDERING`: Partial rendering, where only 1 and 2 are updated. - * :attr:`FULL_RENDERING`: Full rendering, where everything (1, 2, 3) is updated. + _instance: SimulationContext | None = None - .. _Viewports: https://docs.omniverse.nvidia.com/extensions/latest/ext_viewport.html - """ + def __new__(cls, cfg: SimulationCfg | None = None): + """Enforce singleton pattern.""" + if cls._instance is not None: + return cls._instance + return super().__new__(cls) - NO_GUI_OR_RENDERING = -1 - """The simulation is running without a GUI and off-screen rendering is disabled.""" - NO_RENDERING = 0 - """No rendering, where only other UI elements are updated at a lower rate.""" - PARTIAL_RENDERING = 1 - """Partial rendering, where the simulation cameras and UI elements are updated.""" - FULL_RENDERING = 2 - """Full rendering, where all the simulation viewports, cameras and UI elements are updated.""" + @classmethod + def instance(cls) -> SimulationContext | None: + """Get the singleton instance, or None if not created.""" + return cls._instance def __init__(self, cfg: SimulationCfg | None = None): - """Creates a simulation context to control the simulator. + """Initialize the simulation context. Args: - cfg: The configuration of the simulation. Defaults to None, - in which case the default configuration is used. + cfg: Simulation configuration. Defaults to None (uses default config). """ - # store input - if cfg is None: - cfg = SimulationCfg() - # check that the config is valid - cfg.validate() - self.cfg = cfg - # check that simulation is running - if sim_utils.get_current_stage() is None: - raise RuntimeError("The stage has not been created. Did you run the simulator?") - - # setup logger - self.logger = configure_logging( - logging_level=self.cfg.logging_level, - save_logs_to_file=self.cfg.save_logs_to_file, - log_dir=self.cfg.log_dir, - ) + if type(self)._instance is not None: + return # Already initialized - # create stage in memory if requested - if self.cfg.create_stage_in_memory: - self._initial_stage = sim_utils.create_new_stage_in_memory() - else: - self._initial_stage = omni.usd.get_context().get_stage() - # cache stage if it is not already cached - stage_cache = UsdUtils.StageCache.Get() - stage_id = stage_cache.GetId(self._initial_stage).ToLongInt() - if stage_id < 0: - stage_cache.Insert(self._initial_stage) - - # acquire settings interface - self.carb_settings = carb.settings.get_settings() - - # apply carb physics settings - self._apply_physics_settings() - - # note: we read this once since it is not expected to change during runtime - # read flag for whether a local GUI is enabled - self._local_gui = self.carb_settings.get("/app/window/enabled") - # read flag for whether livestreaming GUI is enabled - self._livestream_gui = self.carb_settings.get("/app/livestream/enabled") - # read flag for whether XR GUI is enabled - self._xr_gui = self.carb_settings.get("/app/xr/enabled") - - # read flags anim recording config and init timestamps - self._setup_anim_recording() - - # read flag for whether the Isaac Lab viewport capture pipeline will be used, - # casting None to False if the flag doesn't exist - # this flag is set from the AppLauncher class - self._offscreen_render = bool(self.carb_settings.get("/isaaclab/render/offscreen")) - # read flag for whether the default viewport should be enabled - self._render_viewport = bool(self.carb_settings.get("/isaaclab/render/active_viewport")) - # flag for whether any GUI will be rendered (local, livestreamed or viewport) - self._has_gui = self._local_gui or self._livestream_gui or self._xr_gui - - # apply render settings from render config - self._apply_render_settings_from_cfg() - - # store the default render mode - if not self._has_gui and not self._offscreen_render: - # set default render mode - # note: this is the terminal state: cannot exit from this render mode - self.render_mode = self.RenderMode.NO_GUI_OR_RENDERING - # set viewport context to None - self._viewport_context = None - self._viewport_window = None - elif not self._has_gui and self._offscreen_render: - # set default render mode - # note: this is the terminal state: cannot exit from this render mode - self.render_mode = self.RenderMode.PARTIAL_RENDERING - # set viewport context to None - self._viewport_context = None - self._viewport_window = None - else: - # note: need to import here in case the UI is not available (ex. headless mode) - import omni.ui as ui - from omni.kit.viewport.utility import get_active_viewport - - # set default render mode - # note: this can be changed by calling the `set_render_mode` function - self.render_mode = self.RenderMode.FULL_RENDERING - # acquire viewport context - self._viewport_context = get_active_viewport() - self._viewport_context.updates_enabled = True # pyright: ignore [reportOptionalMemberAccess] - # acquire viewport window - # TODO @mayank: Why not just use get_active_viewport_and_window() directly? - self._viewport_window = ui.Workspace.get_window("Viewport") - # counter for periodic rendering - self._render_throttle_counter = 0 - # rendering frequency in terms of number of render calls - self._render_throttle_period = 5 - - # check the case where we don't need to render the viewport - # since render_viewport can only be False in headless mode, we only need to check for offscreen_render - if not self._render_viewport and self._offscreen_render: - # disable the viewport if offscreen_render is enabled - from omni.kit.viewport.utility import get_active_viewport - - get_active_viewport().updates_enabled = False - - # override enable scene querying if rendering is enabled - # this is needed for some GUI features - if self._has_gui: - self.cfg.enable_scene_query_support = True - # set up flatcache/fabric interface (default is None) - # this is needed to flush the flatcache data into Hydra manually when calling `render()` - # ref: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html - # note: need to do this here because super().__init__ calls render and this variable is needed - self._fabric_iface = None - - # create a tensor for gravity - # note: this line is needed to create a "tensor" in the device to avoid issues with torch 2.1 onwards. - # the issue is with some heap memory corruption when torch tensor is created inside the asset class. - # you can reproduce the issue by commenting out this line and running the test `test_articulation.py`. - self._gravity_tensor = torch.tensor(self.cfg.gravity, dtype=torch.float32, device=self.cfg.device) - - # define a global variable to store the exceptions raised in the callback stack - builtins.ISAACLAB_CALLBACK_EXCEPTION = None - - # add callback to deal the simulation app when simulation is stopped. - # this is needed because physics views go invalid once we stop the simulation - if not builtins.ISAAC_LAUNCHED_FROM_TERMINAL: - timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() - self._app_control_on_stop_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.STOP), - lambda *args, obj=weakref.proxy(self): obj._app_control_on_stop_handle_fn(*args), - order=15, - ) - else: - self._app_control_on_stop_handle = None - self._disable_app_control_on_stop_handle = False - - # flatten out the simulation dictionary - sim_params = self.cfg.to_dict() - if sim_params is not None: - if "physx" in sim_params: - physx_params = sim_params.pop("physx") - sim_params.update(physx_params) - - # add warning about enabling stabilization for large step sizes - if not self.cfg.physx.enable_stabilization and (self.cfg.dt > 0.0333): - self.logger.warning( - "Large simulation step size (> 0.0333 seconds) is not recommended without enabling stabilization." - " Consider setting the `enable_stabilization` flag to True in the PhysxCfg, or reducing the" - " simulation step size if you run into physics issues." - ) + # Store config + self.cfg = SimulationCfg() if cfg is None else cfg - # set simulation device - # note: Although Isaac Sim sets the physics device in the init function, - # it does a render call which gets the wrong device. - SimulationManager.set_physics_sim_device(self.cfg.device) - - # obtain the parsed device - # This device should be the same as "self.cfg.device". However, for cases, where users specify the device - # as "cuda" and not "cuda:X", then it fetches the current device from SimulationManager. - # Note: Since we fix the device from the configuration and don't expect users to change it at runtime, - # we can obtain the device once from the SimulationManager.get_physics_sim_device() function. - # This reduces the overhead of calling the function. - self._physics_device = SimulationManager.get_physics_sim_device() - - # create a simulation context to control the simulator - if get_isaac_sim_version().major < 5: - # stage arg is not supported before isaac sim 5.0 - super().__init__( - stage_units_in_meters=1.0, - physics_dt=self.cfg.dt, - rendering_dt=self.cfg.dt * self.cfg.render_interval, - backend="torch", - sim_params=sim_params, - physics_prim_path=self.cfg.physics_prim_path, - device=self.cfg.device, - ) + # Get or create stage based on config + stage_cache = UsdUtils.StageCache.Get() + if self.cfg.create_stage_in_memory: + self.stage = create_new_stage() else: - super().__init__( - stage_units_in_meters=1.0, - physics_dt=self.cfg.dt, - rendering_dt=self.cfg.dt * self.cfg.render_interval, - backend="torch", - sim_params=sim_params, - physics_prim_path=self.cfg.physics_prim_path, - device=self.cfg.device, - stage=self._initial_stage, - ) - - """ - Properties - Override. - """ + # Prefer the thread-local current stage (set by create_new_stage / test fixtures) + # over cache lookup, since the cache may contain stale stages from prior tests. + current = getattr(stage_utils._context, "stage", None) + if current is not None: + self.stage = current + else: + all_stages = stage_cache.GetAllStages() if stage_cache.Size() > 0 else [] # type: ignore[union-attr] + self.stage = all_stages[0] if all_stages else create_new_stage() - @property - def device(self) -> str: - """Device used by the simulation. + # Ensure stage is in the USD cache + stage_id = stage_cache.GetId(self.stage).ToLongInt() # type: ignore[union-attr] + if stage_id < 0: + stage_cache.Insert(self.stage) # type: ignore[union-attr] + + # Set as current stage in thread-local context for get_current_stage() + stage_utils._context.stage = self.stage + + # When Kit is running, attach the stage to Kit's USD context so that + # Kit extensions (PhysX views, Articulation, viewport) can discover it. + if has_kit(): + import omni.usd + + kit_context = omni.usd.get_context() + if kit_context is not None and kit_context.get_stage() is not self.stage: + kit_context.attach_stage_with_callback(stage_cache.GetId(self.stage).ToLongInt()) + + # Acquire settings interface (SettingsManager: standalone dict or Omniverse when available) + self.settings = SettingsManager.instance() + self._settings_helper = SettingsHelper(self.settings) + + # Initialize USD physics scene and physics manager + self._init_usd_physics_scene() + + # Normalize "cuda" -> "cuda:" now that the USD physics scene is initialized + # and /physics/cudaDevice is available. Update cfg.device in-place so all + # downstream code (physics backends, assets, sensors) sees a consistent value. + if "cuda" in self.cfg.device and ":" not in self.cfg.device: + cuda_device = self.get_setting("/physics/cudaDevice") + device_id = max(0, int(cuda_device) if cuda_device is not None else 0) + self.cfg.device = f"cuda:{device_id}" + + # Set default physics backend if not specified + if self.cfg.physics is None: + from isaaclab_physx.physics import PhysxCfg + + self.cfg.physics = PhysxCfg() + self._physics = self.cfg.physics + # If physics is a PresetCfg wrapper (has a 'default' field but no 'class_type'), + # resolve to the default preset so downstream code always sees a concrete PhysicsCfg. + if not hasattr(self._physics, "class_type") and hasattr(self._physics, "default"): + self._physics = self._physics.default + self.cfg.physics = self._physics + self.physics_manager: type[PhysicsManager] = self._physics.class_type + self.physics_manager.initialize(self) + self._apply_render_cfg_settings() + + # Initialize visualizer state (provider/visualizers are created lazily during initialize_visualizers()). + self._scene_data_provider: BaseSceneDataProvider | None = None + self._visualizers: list[BaseVisualizer] = [] + self._scene_data_requirements = SceneDataRequirement() + # Per-group clone plans published by InteractiveScene after cloning. Providers (e.g. + # the Newton visualizer model rebuilder on a PhysX backend) consume these to derive + # their own backend args. Empty dict until :meth:`InteractiveScene.clone_environments` + # runs. + self._clone_plans: dict[str, ClonePlan] = {} + self._visualizer_step_counter = 0 + # Default visualization dt used before/without visualizer initialization. + physics_dt = getattr(self.cfg.physics, "dt", None) + self._viz_dt = (physics_dt if physics_dt is not None else self.cfg.dt) * self.cfg.render_interval + + # Cache commonly-used settings (these don't change during runtime) + self._has_gui = bool(self.get_setting("/isaaclab/has_gui")) + self._has_offscreen_render = bool(self.get_setting("/isaaclab/render/offscreen")) + self._xr_enabled = bool(self.get_setting("/isaaclab/xr/enabled")) + # Note: has_rtx_sensors is NOT cached because it changes when Camera sensors are created + self._pending_camera_view: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None + self.vis_marker_registry = VisMarkerRegistry() + + # Simulation state + self._is_playing = False + self._is_stopped = True + + # Monotonic physics-step counter used by camera sensors for + self._physics_step_count: int = 0 + # Monotonic render-generation counter. This increments whenever render() + # is executed and lets downstream camera freshness logic distinguish + # render/reset transitions that occur without advancing physics steps. + self._render_generation: int = 0 + + # Shared renderers for all Camera sensors (compatible renderer_cfg only). + self._render_context = RenderContext() + + type(self)._instance = self # Mark as valid singleton only after successful init + + def _apply_render_cfg_settings(self) -> None: + """Apply render preset and overrides from SimulationCfg.render.""" + # TODO: Refactor render preset + override handling to a dedicated RenderingQualityCfg + # (name subject to change) to keep quality profiles and carb mappings centralized. + render_cfg = getattr(self.cfg, "render", None) + if render_cfg is None: + return - Note: - In Omniverse, it is possible to configure multiple GPUs for rendering, while physics engine - operates on a single GPU. This function returns the device that is used for physics simulation. - """ - return self._physics_device + # Priority: + # 1) CLI/AppLauncher setting if present, 2) SimulationCfg.render.rendering_mode. + rendering_mode = self.get_setting("/isaaclab/rendering/rendering_mode") + if not rendering_mode: + rendering_mode = getattr(render_cfg, "rendering_mode", None) - """ - Operations - New. - """ + if rendering_mode: + supported_rendering_modes = {"performance", "balanced", "quality"} + if rendering_mode not in supported_rendering_modes: + raise ValueError( + f"RenderCfg rendering mode '{rendering_mode}' not in supported modes " + f"{sorted(supported_rendering_modes)}." + ) - def has_gui(self) -> bool: - """Returns whether the simulation has a GUI enabled. + isaaclab_app_exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 4, "apps") + from isaaclab.utils.version import get_isaac_sim_version - True if the simulation has a GUI enabled either locally or live-streamed. - """ - return self._has_gui + if get_isaac_sim_version().major < 6: + isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_5") - def has_rtx_sensors(self) -> bool: - """Returns whether the simulation has any RTX-rendering related sensors. + preset_filename = os.path.join(isaaclab_app_exp_path, f"rendering_modes/{rendering_mode}.kit") + if os.path.exists(preset_filename): + with open(preset_filename) as file: + preset_dict = toml.load(file) + + def _apply_nested(data: dict[str, Any], path: str = "") -> None: + for key, value in data.items(): + key_path = f"{path}/{key}" if path else f"/{key}" + if isinstance(value, dict): + _apply_nested(value, key_path) + else: + self.set_setting(key_path.replace(".", "/"), value) + + _apply_nested(preset_dict) + else: + logger.warning("[SimulationContext] Render preset file not found: %s", preset_filename) - This function returns the value of the simulation parameter ``"/isaaclab/render/rtx_sensors"``. - The parameter is set to True when instances of RTX-related sensors (cameras or LiDARs) are - created using Isaac Lab's sensor classes. + # RenderCfg fields mapped to setting paths (stored via SettingsManager) + field_to_setting = { + "enable_translucency": "/rtx/translucency/enabled", + "enable_reflections": "/rtx/reflections/enabled", + "enable_global_illumination": "/rtx/indirectDiffuse/enabled", + "enable_dlssg": "/rtx-transient/dlssg/enabled", + "enable_dl_denoiser": "/rtx-transient/dldenoiser/enabled", + "dlss_mode": "/rtx/post/dlss/execMode", + "enable_direct_lighting": "/rtx/directLighting/enabled", + "samples_per_pixel": "/rtx/directLighting/sampledLighting/samplesPerPixel", + "enable_shadows": "/rtx/shadows/enabled", + "enable_ambient_occlusion": "/rtx/ambientOcclusion/enabled", + "dome_light_upper_lower_strategy": "/rtx/domeLight/upperLowerStrategy", + } - True if the simulation has RTX sensors (such as USD Cameras or LiDARs). + for key, value in vars(render_cfg).items(): + if value is None or key in {"rendering_mode", "carb_settings", "antialiasing_mode"}: + continue + setting_path = field_to_setting.get(key) + if setting_path is not None: + self.set_setting(setting_path, value) + + # Raw overrides from render_cfg (stored via SettingsManager) + extra_settings = getattr(render_cfg, "carb_settings", None) + if extra_settings: + for key, value in extra_settings.items(): + if "_" in key: + path = "/" + key.replace("_", "/") + elif "." in key: + path = "/" + key.replace(".", "/") + else: + path = key + self.set_setting(path, value) + + # Optional anti-aliasing mode via Replicator (best-effort, may use Omniverse APIs) + antialiasing_mode = getattr(render_cfg, "antialiasing_mode", None) + if antialiasing_mode is not None: + try: + import omni.replicator.core as rep - For more information, please check `NVIDIA RTX documentation`_. + rep.settings.set_render_rtx_realtime(antialiasing=antialiasing_mode) + except Exception: + pass - .. _NVIDIA RTX documentation: https://developer.nvidia.com/rendering-technologies - """ - return self._settings.get_as_bool("/isaaclab/render/rtx_sensors") + def _init_usd_physics_scene(self) -> None: + """Create and configure the USD physics scene.""" + cfg = self.cfg + with sim_utils.use_stage(self.stage): + # Set stage conventions for metric units + UsdGeom.SetStageUpAxis(self.stage, "Z") + UsdGeom.SetStageMetersPerUnit(self.stage, 1.0) + UsdPhysics.SetStageKilogramsPerUnit(self.stage, 1.0) + + # Find and delete any existing physics scene. + # Collect paths first to avoid mutating the stage while traversing, + # which can invalidate the USD iterator. + physics_scene_paths = [ + prim.GetPath().pathString for prim in self.stage.Traverse() if prim.GetTypeName() == "PhysicsScene" + ] + for path in physics_scene_paths: + sim_utils.delete_prim(path, stage=self.stage) + + # Create a new physics scene + if self.stage.GetPrimAtPath(cfg.physics_prim_path).IsValid(): + raise RuntimeError(f"A prim already exists at path '{cfg.physics_prim_path}'.") + + physics_scene = UsdPhysics.Scene.Define(self.stage, cfg.physics_prim_path) + + # Pre-create gravity tensor to avoid torch heap corruption issues (torch 2.1+) + gravity = torch.tensor(cfg.gravity, dtype=torch.float32, device=self.cfg.device) + gravity_magnitude = torch.norm(gravity).item() + + if gravity_magnitude == 0.0: + gravity_direction = [0.0, 0.0, -1.0] + else: + gravity_direction = (gravity / gravity_magnitude).tolist() - def is_fabric_enabled(self) -> bool: - """Returns whether the fabric interface is enabled. + physics_scene.CreateGravityDirectionAttr(Gf.Vec3f(*gravity_direction)) + physics_scene.CreateGravityMagnitudeAttr(gravity_magnitude) - When fabric interface is enabled, USD read/write operations are disabled. Instead all applications - read and write the simulation state directly from the fabric interface. This reduces a lot of overhead - that occurs during USD read/write operations. + @property + def physics_sim_view(self): + """Returns the physics simulation view.""" + return self.physics_manager.get_physics_sim_view() - For more information, please check `Fabric documentation`_. + @property + def device(self) -> str: + """Returns the device on which the simulation is running.""" + return self.physics_manager.get_device() - .. _Fabric documentation: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usd_fabric_usdrt.html - """ - return self._fabric_iface is not None + @property + def backend(self) -> str: + """Returns the tensor backend being used ("numpy" or "torch").""" + return self.physics_manager.get_backend() - def get_version(self) -> tuple[int, int, int]: - """Returns the version of the simulator. + @property + def has_gui(self) -> bool: + """Returns whether GUI is enabled (cached at init).""" + return self._has_gui - The returned tuple contains the following information: + @property + def has_offscreen_render(self) -> bool: + """Returns whether offscreen rendering is enabled (cached at init).""" + return self._has_offscreen_render + + def has_active_visualizers(self) -> bool: + """Return whether any visualizer path is active for rendering/camera control.""" + return bool(self.get_setting("/isaaclab/visualizer/types")) or bool( + self.get_setting("/isaaclab/video/auto_start_kit") + ) - * Major version: This is the year of the release (e.g. 2022). - * Minor version: This is the half-year of the release (e.g. 1 or 2). - * Patch version: This is the patch number of the release (e.g. 0). + def can_render_rgb_array(self) -> bool: + """Return whether rgb-array rendering is currently available.""" + return self.has_gui or self.has_offscreen_render or self.has_active_visualizers() - .. attention:: - This function is deprecated and will be removed in the future. - We recommend using :func:`isaaclab.utils.version.get_isaac_sim_version` - instead of this function. + @property + def is_rendering(self) -> bool: + """Returns whether rendering is active (GUI, RTX sensors, visualizers, or XR).""" + return ( + self._has_gui + or self._has_offscreen_render + or self.get_setting("/isaaclab/render/rtx_sensors") + or bool(self.resolve_visualizer_types()) + or self._xr_enabled + ) - Returns: - A tuple containing the major, minor, and patch versions. + def get_physics_dt(self) -> float: + """Returns the physics time step.""" + return self.physics_manager.get_physics_dt() - Example: - >>> sim = SimulationContext() - >>> sim.get_version() - (2022, 1, 0) - """ - return get_isaac_sim_version().major, get_isaac_sim_version().minor, get_isaac_sim_version().micro + def get_physics_step_count(self) -> int: + """Return the monotonic physics step counter (incremented each :meth:`step`).""" + return self._physics_step_count - """ - Operations - New utilities. - """ + @property + def render_context(self) -> RenderContext: + """Shared :class:`~isaaclab.renderers.render_context.RenderContext` for camera renderers.""" + return self._render_context - def set_camera_view( - self, - eye: tuple[float, float, float], - target: tuple[float, float, float], - camera_prim_path: str = "/OmniverseKit_Persp", - ): - """Set the location and target of the viewport camera in the stage. + @property + def render_generation(self) -> int: + """Returns a monotonic counter for render() executions.""" + return self._render_generation - Note: - This is a wrapper around the :math:`isaacsim.core.utils.viewports.set_camera_view` function. - It is provided here for convenience to reduce the amount of imports needed. + def _create_default_visualizer_configs(self, requested_visualizers: list[str]) -> list: + """Create default visualizer configs for requested types. - Args: - eye: The location of the camera eye. - target: The location of the camera target. - camera_prim_path: The path to the camera primitive in the stage. Defaults to - "/OmniverseKit_Persp". + Loads only the requested visualizer submodule (e.g. isaaclab_visualizers.rerun) + so dependencies for other backends are not imported. """ - # safe call only if we have a GUI or viewport rendering enabled - if self._has_gui or self._offscreen_render or self._render_viewport: - set_camera_view(eye, target, camera_prim_path) - - def set_render_mode(self, mode: RenderMode): - """Change the current render mode of the simulation. - - Please see :class:`RenderMode` for more information on the different render modes. - - .. note:: - When no GUI is available (locally or livestreamed), we do not need to choose whether the viewport - needs to render or not (since there is no GUI). Thus, in this case, calling the function will not - change the render mode. - - Args: - mode (RenderMode): The rendering mode. If different than SimulationContext's rendering mode, - SimulationContext's mode is changed to the new mode. - - Raises: - ValueError: If the input mode is not supported. + import importlib + + default_configs = [] + cfg_class_names = { + "kit": "KitVisualizerCfg", + "newton": "NewtonVisualizerCfg", + "rerun": "RerunVisualizerCfg", + "viser": "ViserVisualizerCfg", + } + for viz_type in requested_visualizers: + try: + if viz_type not in _VISUALIZER_TYPES: + logger.warning( + f"[SimulationContext] Unknown visualizer type '{viz_type}' requested. " + f"Valid types: {', '.join(repr(t) for t in _VISUALIZER_TYPES)}. Skipping." + ) + continue + mod = importlib.import_module(f"isaaclab_visualizers.{viz_type}") + cfg_cls = getattr(mod, cfg_class_names[viz_type]) + default_configs.append(cfg_cls()) + except (ImportError, ModuleNotFoundError) as exc: + # isaaclab_visualizers is optional; log once at warning level + if "isaaclab_visualizers" in str(exc): + logger.warning( + "[SimulationContext] Visualizer '%s' skipped: isaaclab_visualizers is not installed. " + "Install with: pip install isaaclab_visualizers[%s]", + viz_type, + viz_type, + ) + else: + logger.error( + "[SimulationContext] Failed to create default config for visualizer '%s': %s", + viz_type, + exc, + ) + except Exception as exc: + logger.error(f"[SimulationContext] Failed to create default config for visualizer '{viz_type}': {exc}") + return default_configs + + def _get_cli_visualizer_types(self) -> list[str]: + """Return list of visualizer types requested via CLI (setting).""" + requested = self.get_setting("/isaaclab/visualizer/types") + if not isinstance(requested, str) or not requested.strip(): + return [] + # App launcher writes this as a single string; accept comma and/or whitespace separators. + return [value for chunk in requested.split(",") for value in chunk.split() if value] + + def _apply_visualizer_cli_overrides(self, visualizer_cfgs: list[Any]) -> None: + """Apply ``--max_visible_envs`` to every resolved visualizer cfg when set in settings. + + AppLauncher stores ``/isaaclab/visualizer/max_visible_envs`` as ``-1`` when the flag was + omitted; any non-negative int overrides :attr:`VisualizerCfg.max_visible_envs` on each cfg. """ - # check if mode change is possible -- not possible when no GUI is available - if not self._has_gui: - self.logger.warning( - f"Cannot change render mode when GUI is disabled. Using the default render mode: {self.render_mode}." - ) + raw = self.get_setting("/isaaclab/visualizer/max_visible_envs") + try: + max_visible = int(raw) if raw is not None else -1 + except (TypeError, ValueError): + logger.warning("[SimulationContext] Invalid /isaaclab/visualizer/max_visible_envs: %r", raw) return - # check if there is a mode change - # note: this is mostly needed for GUI when we want to switch between full rendering and no rendering. - if mode != self.render_mode: - if mode == self.RenderMode.FULL_RENDERING: - # display the viewport and enable updates - self._viewport_context.updates_enabled = True # pyright: ignore [reportOptionalMemberAccess] - self._viewport_window.visible = True # pyright: ignore [reportOptionalMemberAccess] - elif mode == self.RenderMode.PARTIAL_RENDERING: - # hide the viewport and disable updates - self._viewport_context.updates_enabled = False # pyright: ignore [reportOptionalMemberAccess] - self._viewport_window.visible = False # pyright: ignore [reportOptionalMemberAccess] - elif mode == self.RenderMode.NO_RENDERING: - # hide the viewport and disable updates - if self._viewport_context is not None: - self._viewport_context.updates_enabled = False # pyright: ignore [reportOptionalMemberAccess] - self._viewport_window.visible = False # pyright: ignore [reportOptionalMemberAccess] - # reset the throttle counter - self._render_throttle_counter = 0 - else: - raise ValueError(f"Unsupported render mode: {mode}! Please check `RenderMode` for details.") - # update render mode - self.render_mode = mode + if max_visible < 0: + return + for cfg in visualizer_cfgs: + if hasattr(cfg, "max_visible_envs"): + cfg.max_visible_envs = max_visible + + def _is_cli_visualizer_explicit(self) -> bool: + """Return ``True`` when visualizers were explicitly provided via CLI.""" + return bool(self.get_setting("/isaaclab/visualizer/explicit")) + + def _is_cli_visualizer_disable_all(self) -> bool: + """Return ``True`` when CLI requested ``--viz none`` semantics.""" + return bool(self.get_setting("/isaaclab/visualizer/disable_all")) + + def resolve_visualizer_types(self) -> list[str]: + """Resolve visualizer types from config or CLI settings.""" + if self._is_cli_visualizer_disable_all(): + return [] + if self._is_cli_visualizer_explicit(): + return self._get_cli_visualizer_types() + + visualizer_cfgs = self.cfg.visualizer_cfgs + if visualizer_cfgs is None: + return [] + if not isinstance(visualizer_cfgs, list): + visualizer_cfgs = [visualizer_cfgs] + return [cfg.visualizer_type for cfg in visualizer_cfgs if getattr(cfg, "visualizer_type", None)] + + def _resolve_visualizer_cfgs(self) -> list[Any]: + """Resolve final visualizer configs from cfg and optional CLI override. + + When visualizers are explicitly requested via ``--visualizer`` CLI flag, + a :class:`RuntimeError` is raised if any requested type cannot be + resolved (unknown type or missing package). + """ + visualizer_cfgs: list[Any] = [] + if self.cfg.visualizer_cfgs is not None: + visualizer_cfgs = ( + self.cfg.visualizer_cfgs if isinstance(self.cfg.visualizer_cfgs, list) else [self.cfg.visualizer_cfgs] + ) - def set_setting(self, name: str, value: Any): - """Set simulation settings using the Carbonite SDK. + cli_requested = self._get_cli_visualizer_types() + cli_explicit = self._is_cli_visualizer_explicit() + cli_disable_all = self._is_cli_visualizer_disable_all() + + if cli_disable_all: + resolved = [] + elif not cli_explicit: + self._apply_visualizer_cli_overrides(visualizer_cfgs) + resolved = visualizer_cfgs + elif not visualizer_cfgs: + resolved = self._create_default_visualizer_configs(cli_requested) if cli_requested else [] + self._apply_visualizer_cli_overrides(resolved) + else: + # CLI selection is explicit: keep only requested cfg types, then add defaults for missing. + cli_requested_set = set(cli_requested) + resolved = [cfg for cfg in visualizer_cfgs if getattr(cfg, "visualizer_type", None) in cli_requested_set] + existing_types = {getattr(cfg, "visualizer_type", None) for cfg in resolved} + for viz_type in cli_requested: + if viz_type not in existing_types and viz_type in _VISUALIZER_TYPES: + resolved.extend(self._create_default_visualizer_configs([viz_type])) + existing_types.add(viz_type) + self._apply_visualizer_cli_overrides(resolved) + + # When visualizers were explicitly requested via CLI, verify all + # requested types were resolved. This catches unknown types and + # missing packages that _create_default_visualizer_configs silently + # skips. + if cli_explicit and cli_requested: + resolved_types = {getattr(cfg, "visualizer_type", None) for cfg in resolved} + missing = [t for t in cli_requested if t not in resolved_types] + if missing: + raise RuntimeError( + f"Explicitly requested visualizer(s) {missing} could not be configured. " + f"Valid types: {', '.join(repr(t) for t in _VISUALIZER_TYPES)}. " + "Ensure the required package is installed " + "(e.g., pip install isaaclab_visualizers[])." + ) - .. note:: - If the input setting name does not exist, it will be created. If it does exist, the value will be - overwritten. Please make sure to use the correct setting name. + # XR auto-start: auto-inject a KitVisualizer when XR is active and no + # Kit visualizer is already present. The KitVisualizer pumps + # app.update() and triggers forward() (via requires_forward_before_step) + # to sync Fabric data so the XR runtime receives up-to-date hand/joint + # transforms each frame. + if self._xr_enabled and bool(self.get_setting("/isaaclab/xr/auto_start")): + has_kit = any(getattr(cfg, "visualizer_type", None) == "kit" for cfg in resolved) + if not has_kit: + try: + import importlib + + mod = importlib.import_module("isaaclab_visualizers.kit") + kit_cfg_cls = getattr(mod, "KitVisualizerCfg") + resolved.append(kit_cfg_cls()) + logger.info("[SimulationContext] Auto-injecting KitVisualizer for XR app-update pumping.") + except (ImportError, ModuleNotFoundError, AttributeError) as exc: + logger.warning( + "[SimulationContext] XR mode could not auto-inject a KitVisualizer: %s. " + "Install isaaclab_visualizers[kit] or pass --visualizer kit.", + exc, + ) + + return resolved + + def initialize_visualizers(self) -> None: + """Initialize visualizers from SimulationCfg.visualizer_cfgs.""" + if self._visualizers: + return - To understand the settings interface, please refer to the - `Carbonite SDK `_ - documentation. + physics_dt = getattr(self.cfg.physics, "dt", None) + self._viz_dt = (physics_dt if physics_dt is not None else self.cfg.dt) * self.cfg.render_interval - Args: - name: The name of the setting. - value: The value of the setting. - """ - # Route through typed setters for correctness and consistency for common scalar types. - if isinstance(value, bool): - self.carb_settings.set_bool(name, value) - elif isinstance(value, int): - self.carb_settings.set_int(name, value) - elif isinstance(value, float): - self.carb_settings.set_float(name, value) - elif isinstance(value, str): - self.carb_settings.set_string(name, value) - elif isinstance(value, (list, tuple)): - self.carb_settings.set(name, value) - else: - raise ValueError(f"Unsupported value type for setting '{name}': {type(value)}") - - def get_setting(self, name: str) -> Any: - """Read the simulation setting using the Carbonite SDK. + visualizer_cfgs = self._resolve_visualizer_cfgs() + if not visualizer_cfgs: + return - Args: - name: The name of the setting. + cli_explicit = self._is_cli_visualizer_explicit() - Returns: - The value of the setting. - """ - return self.carb_settings.get(name) + # Resolve visualizer-driven requirements once and keep optional artifact payload untouched. + visualizer_types = [ + cfg.visualizer_type for cfg in visualizer_cfgs if getattr(cfg, "visualizer_type", None) is not None + ] + requirements = resolve_scene_data_requirements(visualizer_types=visualizer_types) + self._scene_data_requirements = requirements + self.initialize_scene_data_provider() + self._visualizers = [] - def get_initial_stage(self) -> Usd.Stage: - """Returns stage handle used during scene creation. + for cfg in visualizer_cfgs: + try: + visualizer = cfg.create_visualizer() + visualizer.initialize(self._scene_data_provider) + self._visualizers.append(visualizer) + except Exception as exc: + if cli_explicit: + raise RuntimeError( + f"Visualizer '{cfg.visualizer_type}' was explicitly requested " + f"but failed to create or initialize: {exc}" + ) from exc + logger.exception( + "Failed to initialize visualizer '%s' (%s): %s", + cfg.visualizer_type, + type(cfg).__name__, + exc, + ) - Returns: - The stage used during scene creation. + # Replay any camera pose requested before visualizers were initialized. + pending = getattr(self, "_pending_camera_view", None) + if pending is not None: + eye, target = pending + for viz in self._visualizers: + viz.set_camera_view(eye, target) + self._pending_camera_view = None + + if not self._visualizers and self._scene_data_provider is not None: + close_provider = getattr(self._scene_data_provider, "close", None) + if callable(close_provider): + close_provider() + self._scene_data_provider = None + + def initialize_scene_data_provider(self) -> BaseSceneDataProvider: + if self._scene_data_provider is None: + self._scene_data_provider = SceneDataProvider(self.stage, self) + return self._scene_data_provider + + def get_scene_data_requirements(self) -> SceneDataRequirement: + """Return scene-data requirements resolved from visualizers/renderers.""" + return self._scene_data_requirements + + def update_scene_data_requirements(self, requirements: SceneDataRequirement) -> None: + """Update scene-data requirements.""" + self._scene_data_requirements = requirements + + def get_clone_plans(self) -> dict[str, ClonePlan]: + """Return per-group clone plans published by the scene, keyed by destination template. + + Set by :meth:`InteractiveScene.clone_environments` after replication. Consumed by + scene data providers that build backend models (e.g. Newton visualizer model on a + PhysX backend) from the same plan the cloner used. Empty dict until the scene clones. """ - return self._initial_stage + return self._clone_plans - """ - Operations - Override (standalone) - """ + def set_clone_plans(self, plans: dict[str, ClonePlan]) -> None: + """Set the cloner's per-group clone-plan map.""" + self._clone_plans = plans - def reset(self, soft: bool = False): - self._disable_app_control_on_stop_handle = True - # check if we need to raise an exception that was raised in a callback - if builtins.ISAACLAB_CALLBACK_EXCEPTION is not None: - exception_to_raise = builtins.ISAACLAB_CALLBACK_EXCEPTION - builtins.ISAACLAB_CALLBACK_EXCEPTION = None - raise exception_to_raise - super().reset(soft=soft) - # app.update() may be changing the cuda device in reset, so we force it back to our desired device here - if "cuda" in self.device: - torch.cuda.set_device(self.device) - # enable kinematic rendering with fabric - if self.physics_sim_view: - self.physics_sim_view._backend.initialize_kinematic_bodies() - # perform additional rendering steps to warm up replicator buffers - # this is only needed for the first time we set the simulation - if not soft: - for _ in range(2): - self.render() - self._disable_app_control_on_stop_handle = False + @property + def visualizers(self) -> list[BaseVisualizer]: + """Returns the list of active visualizers.""" + return self._visualizers + + def get_rendering_dt(self) -> float: + """Return rendering dt, allowing visualizer-specific override.""" + for viz in self._visualizers: + viz_dt = viz.get_rendering_dt() + if viz_dt is not None and viz_dt > 0: + return float(viz_dt) + return self._viz_dt + + def set_camera_view(self, eye: tuple, target: tuple) -> None: + """Set camera view on all visualizers that support it.""" + self._pending_camera_view = (tuple(eye), tuple(target)) + for viz in self._visualizers: + viz.set_camera_view(eye, target) def forward(self) -> None: - """Updates articulation kinematics and fabric for rendering.""" - if self._fabric_iface is not None: - if self.physics_sim_view is not None and self.is_playing(): - # Update the articulations' link's poses before rendering - self.physics_sim_view.update_articulations_kinematic() - self._update_fabric(0.0, 0.0) - - def step(self, render: bool = True): - """Steps the simulation. + """Update kinematics without stepping physics.""" + self.physics_manager.forward() - .. note:: - This function blocks if the timeline is paused. It only returns when the timeline is playing. + def reset(self, soft: bool = False) -> None: + """Reset the simulation. Args: - render: Whether to render the scene after stepping the physics simulation. - If set to False, the scene is not rendered and only the physics simulation is stepped. + soft: If True, skip full reinitialization. """ - # check if we need to raise an exception that was raised in a callback - if builtins.ISAACLAB_CALLBACK_EXCEPTION is not None: - exception_to_raise = builtins.ISAACLAB_CALLBACK_EXCEPTION - builtins.ISAACLAB_CALLBACK_EXCEPTION = None - raise exception_to_raise - - # update anim recording if needed - if self._anim_recording_enabled: - is_anim_recording_finished = self._update_anim_recording() - if is_anim_recording_finished: - logger.warning("[INFO][SimulationContext]: Animation recording finished. Closing app.") - self._app.shutdown() - - # check if the simulation timeline is paused. in that case keep stepping until it is playing - if not self.is_playing(): - # step the simulator (but not the physics) to have UI still active - while not self.is_playing(): - self.render() - # meantime if someone stops, break out of the loop - if self.is_stopped(): - break - # need to do one step to refresh the app - # reason: physics has to parse the scene again and inform other extensions like hydra-delegate. - # without this the app becomes unresponsive. - # FIXME: This steps physics as well, which we is not good in general. - self.app.update() - - # step the simulation - super().step(render=render) - - # app.update() may be changing the cuda device in step, so we force it back to our desired device here - if "cuda" in self.device: - torch.cuda.set_device(self.device) - - def render(self, mode: RenderMode | None = None): - """Refreshes the rendering components including UI elements and view-ports depending on the render mode. - - This function is used to refresh the rendering components of the simulation. This includes updating the - view-ports, UI elements, and other extensions (besides physics simulation) that are running in the - background. The rendering components are refreshed based on the render mode. - - Please see :class:`RenderMode` for more information on the different render modes. + self.physics_manager.reset(soft) + for viz in self._visualizers: + viz.reset(soft) + # Start the timeline so the play button is pressed + self.physics_manager.play() + if not self._visualizers: + # Initialize visualizers after PhysX sim view is ready. + self.initialize_visualizers() + self._is_playing = True + self._is_stopped = False + + def step(self, render: bool = True) -> None: + """Step physics and optionally render. + + If the timeline is paused (e.g. via the GUI), this method blocks and keeps + the visualizer responsive until the timeline is resumed or stopped. Args: - mode: The rendering mode. Defaults to None, in which case the current rendering mode is used. + render: Whether to render the scene after stepping. Defaults to True. """ - # check if we need to raise an exception that was raised in a callback - if builtins.ISAACLAB_CALLBACK_EXCEPTION is not None: - exception_to_raise = builtins.ISAACLAB_CALLBACK_EXCEPTION - builtins.ISAACLAB_CALLBACK_EXCEPTION = None - raise exception_to_raise - # check if we need to change the render mode - if mode is not None: - self.set_render_mode(mode) - # render based on the render mode - if self.render_mode == self.RenderMode.NO_GUI_OR_RENDERING: - # we never want to render anything here (this is for complete headless mode) - pass - elif self.render_mode == self.RenderMode.NO_RENDERING: - # throttle the rendering frequency to keep the UI responsive - self._render_throttle_counter += 1 - if self._render_throttle_counter % self._render_throttle_period == 0: - self._render_throttle_counter = 0 - # here we don't render viewport so don't need to flush fabric data - # note: we don't call super().render() anymore because they do flush the fabric data - self.set_setting("/app/player/playSimulations", False) - self._app.update() - self.set_setting("/app/player/playSimulations", True) - else: - # manually flush the fabric data to update Hydra textures - self.forward() - # render the simulation - # note: we don't call super().render() anymore because they do above operation inside - # and we don't want to do it twice. We may remove it once we drop support for Isaac Sim 2022.2. - self.set_setting("/app/player/playSimulations", False) - self._app.update() - self.set_setting("/app/player/playSimulations", True) - - # app.update() may be changing the cuda device, so we force it back to our desired device here - if "cuda" in self.device: - torch.cuda.set_device(self.device) - - """ - Operations - Override (extension) - """ - - async def reset_async(self, soft: bool = False): - # need to load all "physics" information from the USD file - if not soft: - omni.physx.acquire_physx_interface().force_load_physics_from_usd() - # play the simulation - await super().reset_async(soft=soft) - - """ - Initialization/Destruction - Override. - """ - - def _init_stage(self, *args, **kwargs) -> Usd.Stage: - _ = super()._init_stage(*args, **kwargs) - with sim_utils.use_stage(self.get_initial_stage()): - # a stage update here is needed for the case when physics_dt != rendering_dt, otherwise the app crashes - # when in headless mode - self.set_setting("/app/player/playSimulations", False) - self._app.update() - self.set_setting("/app/player/playSimulations", True) - # set additional physx parameters and bind material - self._set_additional_physx_params() - # load flatcache/fabric interface - self._load_fabric_interface() - # return the stage - return self.stage - - async def _initialize_stage_async(self, *args, **kwargs) -> Usd.Stage: - await super()._initialize_stage_async(*args, **kwargs) - # set additional physx parameters and bind material - self._set_additional_physx_params() - # load flatcache/fabric interface - self._load_fabric_interface() - # return the stage - return self.stage + # Block while the GUI timeline is paused so the entire training loop freezes. + # See: https://github.com/isaac-sim/IsaacLab/issues/4279 + self.physics_manager.wait_for_playing() + self._physics_step_count += 1 + self.physics_manager.step() + if render and self.is_rendering: + self.render() + + def render(self, mode: int | None = None, skip_app_pumping: bool = False) -> None: + """Update visualizers and render the scene. + + Calls update_visualizers() so visualizers run at the render cadence (not at + every physics step). Camera sensors drive their configured renderer when + fetching data. Recording-related follow-up (Kit/RTX headless video, Newton GL + video, etc.) runs in :mod:`isaaclab.envs.utils.recording_hooks` so it is not tied to a + specific :class:`~isaaclab.physics.PhysicsManager` subclass. + + **Kit vs. standalone visualizers:** The Kit app loop (``app.update()``) is the + only way to drive camera/RTX sensor rendering and viewport GUI updates; it + cannot be split into "cameras only" and "GUI only". Standalone visualizers + (Newton, Rerun, Viser) have self-contained ``step()`` methods that never call + ``app.update()``, so they can run independently of camera rendering. The + ``skip_app_pumping`` flag exploits this distinction: when True, Kit is skipped + while standalone visualizers continue to update. - @classmethod - def clear_instance(cls): - # clear the callback - if cls._instance is not None: - if cls._instance._app_control_on_stop_handle is not None: - cls._instance._app_control_on_stop_handle.unsubscribe() - cls._instance._app_control_on_stop_handle = None - # call parent to clear the instance - super().clear_instance() - - """ - Helper Functions - """ - - def _apply_physics_settings(self): - """Sets various carb physics settings.""" - # enable hydra scene-graph instancing - # note: this allows rendering of instanceable assets on the GUI - self.carb_settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - # change dispatcher to use the default dispatcher in PhysX SDK instead of carb tasking - # note: dispatcher handles how threads are launched for multi-threaded physics - self.carb_settings.set_bool("/physics/physxDispatcher", True) - # disable contact processing in omni.physx - # note: we disable it by default to avoid the overhead of contact processing when it isn't needed. - # The physics flag gets enabled when a contact sensor is created. - if hasattr(self.cfg, "disable_contact_processing"): - self.logger.warning( - "The `disable_contact_processing` attribute is deprecated and always set to True" - " to avoid unnecessary overhead. Contact processing is automatically enabled when" - " a contact sensor is created, so manual configuration is no longer required." - ) - # FIXME: From investigation, it seems this flag only affects CPU physics. For GPU physics, contacts - # are always processed. The issue is reported to the PhysX team by @mmittal. - self.carb_settings.set_bool("/physics/disableContactProcessing", True) - # disable custom geometry for cylinder and cone collision shapes to allow contact reporting for them - # reason: cylinders and cones aren't natively supported by PhysX so we need to use custom geometry flags - # reference: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/Geometry.html?highlight=capsule#geometry - self.carb_settings.set_bool("/physics/collisionConeCustomGeometry", False) - self.carb_settings.set_bool("/physics/collisionCylinderCustomGeometry", False) - # hide the Simulation Settings window - self.carb_settings.set_bool("/physics/autoPopupSimulationOutputWindow", False) - self.carb_settings.set_bool("/physics/visualizationSimulationOutput", False) - # set fabric enabled flag - self.carb_settings.set_bool("/physics/fabricEnabled", self.cfg.use_fabric) - - def _apply_render_settings_from_cfg(self): # noqa: C901 - """Sets rtx settings specified in the RenderCfg.""" - - # define mapping of user-friendly RenderCfg names to native carb names - rendering_setting_name_mapping = { - "enable_translucency": "/rtx/translucency/enabled", - "enable_reflections": "/rtx/reflections/enabled", - "enable_global_illumination": "/rtx/indirectDiffuse/enabled", - "enable_dlssg": "/rtx-transient/dlssg/enabled", - "enable_dl_denoiser": "/rtx-transient/dldenoiser/enabled", - "dlss_mode": "/rtx/post/dlss/execMode", - "enable_direct_lighting": "/rtx/directLighting/enabled", - "samples_per_pixel": "/rtx/directLighting/sampledLighting/samplesPerPixel", - "enable_shadows": "/rtx/shadows/enabled", - "enable_ambient_occlusion": "/rtx/ambientOcclusion/enabled", - "dome_light_upper_lower_strategy": "/rtx/domeLight/upperLowerStrategy", - } + Args: + mode: Unused. Kept for backward compatibility. + skip_app_pumping: When True, skip visualizers whose :meth:`~BaseVisualizer.pumps_app_update` + returns True (e.g. KitVisualizer). This disables the Kit app loop and camera + updates while still stepping standalone visualizers (Newton, Rerun, Viser). + Used by environment ``step()`` when ``render_enabled`` is False. + """ + self.physics_manager.pre_render() + self.update_visualizers(self.get_rendering_dt(), skip_app_pumping=skip_app_pumping) + self.physics_manager.after_visualizers_render() + run_recording_hooks_after_visualizers(self) + self._render_generation += 1 - not_carb_settings = ["rendering_mode", "carb_settings", "antialiasing_mode"] + # Call render callbacks + if hasattr(self, "_render_callbacks"): + for callback in self._render_callbacks.values(): + callback(None) # Pass None as event data - # grab the rendering mode using the following priority: - # 1. command line argument --rendering_mode, if provided - # 2. rendering_mode from Render Config, if set - # 3. lastly, default to "balanced" mode, if neither is specified - rendering_mode = self.carb_settings.get("/isaaclab/rendering/rendering_mode") - if not rendering_mode: - rendering_mode = self.cfg.render.rendering_mode - if not rendering_mode: - rendering_mode = "balanced" + def update_visualizers(self, dt: float, skip_app_pumping: bool = False) -> None: + """Update visualizers without triggering renderer/GUI. - # set preset settings (same behavior as the CLI arg --rendering_mode) - if rendering_mode is not None: - # check if preset is supported - supported_rendering_modes = ["performance", "balanced", "quality"] - if rendering_mode not in supported_rendering_modes: - raise ValueError( - f"RenderCfg rendering mode '{rendering_mode}' not in supported modes {supported_rendering_modes}." - ) - - # grab isaac lab apps path - isaaclab_app_exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 4, "apps") - # for Isaac Sim 4.5 compatibility, we use the 4.5 rendering mode app files in a different folder - if get_isaac_sim_version().major < 5: - isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_4_5") + Args: + dt: Simulation time-step in seconds. + skip_app_pumping: When True, skip visualizers whose :meth:`~BaseVisualizer.pumps_app_update` + returns True (e.g. KitVisualizer). This is used when the environment's ``render_enabled`` + flag is False — cameras and the Kit app loop are skipped, but standalone visualizers + (Newton, Rerun, Viser) still receive updates. + """ + if not self._visualizers: + return - # grab preset settings - preset_filename = os.path.join(isaaclab_app_exp_path, f"rendering_modes/{rendering_mode}.kit") - with open(preset_filename) as file: - preset_dict = toml.load(file) - preset_dict = dict(flatdict.FlatDict(preset_dict, delimiter=".")) - - # set presets - for key, value in preset_dict.items(): - key = "/" + key.replace(".", "/") # convert to carb setting format - self.set_setting(key, value) - - # set user-friendly named settings - for key, value in vars(self.cfg.render).items(): - if value is None or key in not_carb_settings: - # skip unset settings and non-carb settings - continue - if key not in rendering_setting_name_mapping: - raise ValueError( - f"'{key}' in RenderCfg not found. Note: internal 'rendering_setting_name_mapping' dictionary might" - " need to be updated." - ) - key = rendering_setting_name_mapping[key] - self.set_setting(key, value) + self.update_scene_data_provider() - # set general carb settings - carb_settings = self.cfg.render.carb_settings - if carb_settings is not None: - for key, value in carb_settings.items(): - if "_" in key: - key = "/" + key.replace("_", "/") # convert from python variable style string - elif "." in key: - key = "/" + key.replace(".", "/") # convert from .kit file style string - if self.get_setting(key) is None: - raise ValueError(f"'{key}' in RenderCfg.general_parameters does not map to a carb setting.") - self.set_setting(key, value) + # Marker callbacks update VisualizationMarkers state; visualizer step() + # consumes that state later in this method. + if any(viz.supports_markers() for viz in self._visualizers): + self.vis_marker_registry.dispatch_callbacks() - # set denoiser mode - if self.cfg.render.antialiasing_mode is not None: + visualizers_to_remove = [] + for viz in self._visualizers: try: - import omni.replicator.core as rep - - rep.settings.set_render_rtx_realtime(antialiasing=self.cfg.render.antialiasing_mode) - except Exception: - pass - - # WAR: Ensure /rtx/renderMode RaytracedLighting is correctly cased. - if self.carb_settings.get("/rtx/rendermode").lower() == "raytracedlighting": - self.carb_settings.set_string("/rtx/rendermode", "RaytracedLighting") - - def _set_additional_physx_params(self): - """Sets additional PhysX parameters that are not directly supported by the parent class.""" - # obtain the physics scene api - physics_scene: UsdPhysics.Scene = self._physics_context._physics_scene - physx_scene_api: PhysxSchema.PhysxSceneAPI = self._physics_context._physx_scene_api - # assert that scene api is not None - if physx_scene_api is None: - raise RuntimeError("Physics scene API is None! Please create the scene first.") - # set parameters not directly supported by the constructor - # -- Continuous Collision Detection (CCD) - # ref: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/AdvancedCollisionDetection.html?highlight=ccd#continuous-collision-detection - self._physics_context.enable_ccd(self.cfg.physx.enable_ccd) - # -- GPU collision stack size - physx_scene_api.CreateGpuCollisionStackSizeAttr(self.cfg.physx.gpu_collision_stack_size) - # -- Improved determinism by PhysX - physx_scene_api.CreateEnableEnhancedDeterminismAttr(self.cfg.physx.enable_enhanced_determinism) - # -- Set solve_articulation_contact_last by add attribute to the PhysxScene prim, and add attribute there. - physx_prim = physx_scene_api.GetPrim() - physx_prim.CreateAttribute("physxScene:solveArticulationContactLast", Sdf.ValueTypeNames.Bool).Set( - self.cfg.physx.solve_articulation_contact_last - ) - # -- Enable external forces every iteration, helps improve the accuracy of velocity updates. - - if self.cfg.physx.solver_type == 1: - if not self.cfg.physx.enable_external_forces_every_iteration: - logger.warning( - "The `enable_external_forces_every_iteration` parameter in the PhysxCfg is set to False. If you are" - " experiencing noisy velocities, consider enabling this flag. You may need to slightly increase the" - " number of velocity iterations (setting it to 1 or 2 rather than 0), together with this flag, to" - " improve the accuracy of velocity updates." - ) - physx_scene_api.CreateEnableExternalForcesEveryIterationAttr( - self.cfg.physx.enable_external_forces_every_iteration - ) - - # -- Gravity - # note: Isaac sim only takes the "up-axis" as the gravity direction. But physics allows any direction so we - # need to convert the gravity vector to a direction and magnitude pair explicitly. - gravity = np.asarray(self.cfg.gravity) - gravity_magnitude = np.linalg.norm(gravity) - - # Avoid division by zero - if gravity_magnitude != 0.0: - gravity_direction = gravity / gravity_magnitude - else: - gravity_direction = gravity - - physics_scene.CreateGravityDirectionAttr(Gf.Vec3f(*gravity_direction)) - physics_scene.CreateGravityMagnitudeAttr(gravity_magnitude) - - # position iteration count - physx_scene_api.CreateMinPositionIterationCountAttr(self.cfg.physx.min_position_iteration_count) - physx_scene_api.CreateMaxPositionIterationCountAttr(self.cfg.physx.max_position_iteration_count) - # velocity iteration count - physx_scene_api.CreateMinVelocityIterationCountAttr(self.cfg.physx.min_velocity_iteration_count) - physx_scene_api.CreateMaxVelocityIterationCountAttr(self.cfg.physx.max_velocity_iteration_count) - - # create the default physics material - # this material is used when no material is specified for a primitive - # check: https://isaac-sim.github.io/IsaacLab/main/source/api/lab/isaaclab.sim.html#isaaclab.sim.SimulationCfg.physics_material - material_path = f"{self.cfg.physics_prim_path}/defaultMaterial" - self.cfg.physics_material.func(material_path, self.cfg.physics_material) - # bind the physics material to the scene - bind_physics_material(self.cfg.physics_prim_path, material_path) - - def _load_fabric_interface(self): - """Loads the fabric interface if enabled.""" - if self.cfg.use_fabric: - from omni.physxfabric import get_physx_fabric_interface - - # acquire fabric interface - self._fabric_iface = get_physx_fabric_interface() - if hasattr(self._fabric_iface, "force_update"): - # The update method in the fabric interface only performs an update if a physics step has occurred. - # However, for rendering, we need to force an update since any element of the scene might have been - # modified in a reset (which occurs after the physics step) and we want the renderer to be aware of - # these changes. - self._update_fabric = self._fabric_iface.force_update - else: - # Needed for backward compatibility with older Isaac Sim versions - self._update_fabric = self._fabric_iface.update - - def _update_anim_recording(self): - """Tracks anim recording timestamps and triggers finish animation recording if the total time has elapsed.""" - if self._anim_recording_started_timestamp is None: - self._anim_recording_started_timestamp = time.time() - - if self._anim_recording_started_timestamp is not None: - anim_recording_total_time = time.time() - self._anim_recording_started_timestamp - if anim_recording_total_time > self._anim_recording_stop_time: - self._finish_anim_recording() - return True - return False - - def _setup_anim_recording(self): - """Sets up anim recording settings and initializes the recording.""" - - self._anim_recording_enabled = bool(self.carb_settings.get("/isaaclab/anim_recording/enabled")) - if not self._anim_recording_enabled: + # When skip_app_pumping is set, skip Kit-like visualizers that call app.update() + if skip_app_pumping and viz.pumps_app_update(): + continue + if viz.is_closed or not viz.is_running(): + if viz.is_closed: + logger.info("Visualizer closed: %s", type(viz).__name__) + else: + logger.info("Visualizer not running: %s", type(viz).__name__) + visualizers_to_remove.append(viz) + continue + if viz.is_rendering_paused(): + # Keep non-Kit visualizer event loops responsive while rendering is paused. + # Newton/Rerun/Viser need step(0.0) so GL/UI can process input (e.g. Resume). + # Kit is skipped: step() would call app.update(), which must not run during pause. + if not viz.pumps_app_update(): + viz.step(0.0) + continue + while viz.is_training_paused() and viz.is_running(): + viz.step(0.0) + viz.step(dt) + except Exception as exc: + logger.error("Error stepping visualizer '%s': %s", type(viz).__name__, exc) + visualizers_to_remove.append(viz) + + for viz in visualizers_to_remove: + try: + viz.close() + self._visualizers.remove(viz) + logger.info("Removed visualizer: %s", type(viz).__name__) + except Exception as exc: + logger.error("Error closing visualizer: %s", exc) + + def update_scene_data_provider(self, force_require_forward: bool = False): + if force_require_forward or self._should_forward_before_visualizer_update(): + self.physics_manager.forward() + self._visualizer_step_counter += 1 + if self._scene_data_provider is None: return + self._scene_data_provider.update() + + def _should_forward_before_visualizer_update(self) -> bool: + """Return True if any visualizer requires pre-step forward kinematics.""" + return any(viz.requires_forward_before_step() for viz in self._visualizers) + + def play(self) -> None: + """Start or resume the simulation.""" + self.physics_manager.play() + for viz in self._visualizers: + viz.play() + self._is_playing = True + self._is_stopped = False + + def pause(self) -> None: + """Pause the simulation (can be resumed with play).""" + self.physics_manager.pause() + for viz in self._visualizers: + viz.pause() + self._is_playing = False + + def stop(self) -> None: + """Stop the simulation completely.""" + self.physics_manager.stop() + for viz in self._visualizers: + viz.stop() + self._is_playing = False + self._is_stopped = True + + def is_playing(self) -> bool: + """Returns True if simulation is playing (not paused or stopped).""" + return self._is_playing + + def is_stopped(self) -> bool: + """Returns True if simulation is stopped (not just paused).""" + return self._is_stopped + + def set_setting(self, name: str, value: Any) -> None: + """Set a setting value.""" + self._settings_helper.set(name, value) - # Import omni.physx.pvd.bindings here since it is not available by default - from omni.physxpvd.bindings import _physxPvd - - # Init anim recording settings - self._anim_recording_start_time = self.carb_settings.get("/isaaclab/anim_recording/start_time") - self._anim_recording_stop_time = self.carb_settings.get("/isaaclab/anim_recording/stop_time") - self._anim_recording_first_step_timestamp = None - self._anim_recording_started_timestamp = None - - # Make output path relative to repo path - repo_path = os.path.join(carb.tokens.get_tokens_interface().resolve("${app}"), "..") - self._anim_recording_timestamp = datetime.now().strftime("%Y_%m_%d_%H%M%S") - self._anim_recording_output_dir = ( - os.path.join(repo_path, "anim_recordings", self._anim_recording_timestamp).replace("\\", "/").rstrip("/") - + "/" - ) - os.makedirs(self._anim_recording_output_dir, exist_ok=True) - - # Acquire physx pvd interface and set output directory - self._physxPvdInterface = _physxPvd.acquire_physx_pvd_interface() - - # Set carb settings for the output path and enabling pvd recording - self.carb_settings.set_string( - "/persistent/physics/omniPvdOvdRecordingDirectory", self._anim_recording_output_dir - ) - self.carb_settings.set_bool("/physics/omniPvdOutputEnabled", True) - - def _update_usda_start_time(self, file_path, start_time): - """Updates the start time of the USDA baked anim recordingfile.""" - - # Read the USDA file - with open(file_path) as file: - content = file.read() - - # Extract the timeCodesPerSecond value - time_code_match = re.search(r"timeCodesPerSecond\s*=\s*(\d+)", content) - if not time_code_match: - raise ValueError("timeCodesPerSecond not found in the file.") - time_codes_per_second = int(time_code_match.group(1)) - - # Compute the new start time code - new_start_time_code = int(start_time * time_codes_per_second) - - # Replace the startTimeCode in the file - content = re.sub(r"startTimeCode\s*=\s*\d+", f"startTimeCode = {new_start_time_code}", content) - - # Write the updated content back to the file - with open(file_path, "w") as file: - file.write(content) - - def _finish_anim_recording(self): - """Finishes the animation recording and outputs the baked animation recording.""" - - logger.warning( - "[INFO][SimulationContext]: Finishing animation recording. Stage must be saved. Might take a few minutes." - ) - - # Detaching the stage will also close it and force the serialization of the OVD file - physx = omni.physx.get_physx_simulation_interface() - physx.detach_stage() + def get_setting(self, name: str) -> Any: + """Get a setting value.""" + return self._settings_helper.get(name) - # Save stage to disk - stage_path = os.path.join(self._anim_recording_output_dir, "stage_simulation.usdc") - sim_utils.save_stage(stage_path, save_and_reload_in_place=False) + @classmethod + def clear_instance(cls) -> None: + """Clean up resources and clear the singleton instance.""" + if cls._instance is not None: + # Close physics manager FIRST to detach PhysX from the stage + # This must happen before clearing USD prims to avoid PhysX cleanup errors + cls._instance.physics_manager.close() - # Find the latest ovd file not named tmp.ovd - ovd_files = [ - f for f in glob.glob(os.path.join(self._anim_recording_output_dir, "*.ovd")) if not f.endswith("tmp.ovd") - ] - input_ovd_path = max(ovd_files, key=os.path.getctime) - - # Invoke pvd interface to create recording - stage_filename = "baked_animation_recording.usda" - result = self._physxPvdInterface.ovd_to_usd_over_with_layer_creation( - input_ovd_path, - stage_path, - self._anim_recording_output_dir, - stage_filename, - self._anim_recording_start_time, - self._anim_recording_stop_time, - True, # True: ASCII layers / False : USDC layers - False, # True: verify over layer - ) + # Close all visualizers + for viz in cls._instance._visualizers: + viz.close() + cls._instance._visualizers.clear() + if cls._instance._scene_data_provider is not None: + close_provider = getattr(cls._instance._scene_data_provider, "close", None) + if callable(close_provider): + close_provider() + cls._instance._scene_data_provider = None - # Workaround for manually setting the truncated start time in the baked animation recording - self._update_usda_start_time( - os.path.join(self._anim_recording_output_dir, stage_filename), self._anim_recording_start_time - ) + # Tear down the stage. We skip clear_stage() (prim-by-prim deletion) since + # close_stage() + app shutdown destroy the entire stage at once. + stage_utils.close_stage() - # Disable recording - self.carb_settings.set_bool("/physics/omniPvdOutputEnabled", False) + # Discard cached name-resolution data from destroyed assets + clear_resolve_matching_names_cache() - return result + # Clear instance + cls._instance = None - """ - Callbacks. - """ + gc.collect() + logger.info("SimulationContext cleared") - def _app_control_on_stop_handle_fn(self, event: carb.events.IEvent): - """Callback to deal with the app when the simulation is stopped. + @classmethod + def clear_stage(cls) -> None: + """Clear the current USD stage (preserving /World and PhysicsScene). - Once the simulation is stopped, the physics handles go invalid. After that, it is not possible to - resume the simulation from the last state. This leaves the app in an inconsistent state, where - two possible actions can be taken: + Uses a predicate that preserves /World and PhysicsScene while also + respecting the default deletability checks (ancestral prims, etc.). + """ + if cls._instance is None: + return - 1. **Keep the app rendering**: In this case, the simulation is kept running and the app is not shutdown. - However, the physics is not updated and the script cannot be resumed from the last state. The - user has to manually close the app to stop the simulation. - 2. **Shutdown the app**: This is the default behavior. In this case, the app is shutdown and - the simulation is stopped. + def _predicate(prim: Usd.Prim) -> bool: + path = prim.GetPath().pathString + if path == "/World": + return False + if prim.GetTypeName() == "PhysicsScene": + return False + return True - Note: - This callback is used only when running the simulation in a standalone python script. In an extension, - it is expected that the user handles the extension shutdown. - """ - if not self._disable_app_control_on_stop_handle: - while not omni.timeline.get_timeline_interface().is_playing(): - self.render() - return + sim_utils.clear_stage(predicate=_predicate) @contextmanager @@ -1040,94 +908,58 @@ def build_simulation_context( add_ground_plane: bool = False, add_lighting: bool = False, auto_add_lighting: bool = False, + visualizers: list[str] | None = None, ) -> Iterator[SimulationContext]: """Context manager to build a simulation context with the provided settings. - This function facilitates the creation of a simulation context and provides flexibility in configuring various - aspects of the simulation, such as time step, gravity, device, and scene elements like ground plane and - lighting. - - If :attr:`sim_cfg` is None, then an instance of :class:`SimulationCfg` is created with default settings, - with parameters overwritten based on arguments to the function. - - An example usage of the context manager function: - - .. code-block:: python - - with build_simulation_context() as sim: - # Design the scene - - # Play the simulation - sim.reset() - while sim.is_playing(): - sim.step() - Args: create_new_stage: Whether to create a new stage. Defaults to True. - gravity_enabled: Whether to enable gravity in the simulation. Defaults to True. + gravity_enabled: Whether to enable gravity. Defaults to True. device: Device to run the simulation on. Defaults to "cuda:0". - dt: Time step for the simulation: Defaults to 0.01. - sim_cfg: :class:`isaaclab.sim.SimulationCfg` to use for the simulation. Defaults to None. - add_ground_plane: Whether to add a ground plane to the simulation. Defaults to False. - add_lighting: Whether to add a dome light to the simulation. Defaults to False. - auto_add_lighting: Whether to automatically add a dome light to the simulation if the simulation has a GUI. - Defaults to False. This is useful for debugging tests in the GUI. + dt: Time step for the simulation. Defaults to 0.01. + sim_cfg: SimulationCfg to use. Defaults to None. + add_ground_plane: Whether to add a ground plane. Defaults to False. + add_lighting: Whether to add a dome light. Defaults to False. + auto_add_lighting: Whether to auto-add lighting if GUI present. Defaults to False. + visualizers: List of visualizer backend keys to enable (e.g. ``["kit", "newton", "rerun"]``). + Valid types: ``"kit"``, ``"newton"``, ``"rerun"``, ``"viser"``. + When provided, sets the ``/isaaclab/visualizer/types`` setting so the + existing visualizer resolution machinery picks them up. Defaults to None. Yields: The simulation context to use for the simulation. - """ + sim: SimulationContext | None = None try: if create_new_stage: sim_utils.create_new_stage() if sim_cfg is None: - # Construct one and overwrite the dt, gravity, and device - sim_cfg = SimulationCfg(dt=dt) - - # Set up gravity - if gravity_enabled: - sim_cfg.gravity = (0.0, 0.0, -9.81) - else: - sim_cfg.gravity = (0.0, 0.0, 0.0) - - # Set device - sim_cfg.device = device + gravity = (0.0, 0.0, -9.81) if gravity_enabled else (0.0, 0.0, 0.0) + sim_cfg = SimulationCfg(device=device, dt=dt, gravity=gravity) - # Construct simulation context sim = SimulationContext(sim_cfg) + if visualizers: + sim.set_setting("/isaaclab/visualizer/types", " ".join(visualizers)) + if add_ground_plane: - # Ground-plane cfg = GroundPlaneCfg() cfg.func("/World/defaultGroundPlane", cfg) - if add_lighting or (auto_add_lighting and sim.has_gui()): - # Lighting + if add_lighting or (auto_add_lighting and (sim.get_setting("/isaaclab/has_gui") or visualizers)): cfg = DomeLightCfg( - color=(0.1, 0.1, 0.1), - enable_color_temperature=True, - color_temperature=5500, - intensity=10000, + color=(0.1, 0.1, 0.1), enable_color_temperature=True, color_temperature=5500, intensity=10000 ) - # Dome light named specifically to avoid conflicts cfg.func(prim_path="/World/defaultDomeLight", cfg=cfg, translation=(0.0, 0.0, 10.0)) yield sim except Exception: - sim.logger.error(traceback.format_exc()) + logger.error(traceback.format_exc()) raise finally: - if not sim.has_gui(): - # Stop simulation only if we aren't rendering otherwise the app will hang indefinitely - sim.stop() - - # Clear the stage - sim.clear_all_callbacks() - sim.clear_instance() - # check if we need to raise an exception that was raised in a callback - if builtins.ISAACLAB_CALLBACK_EXCEPTION is not None: - exception_to_raise = builtins.ISAACLAB_CALLBACK_EXCEPTION - builtins.ISAACLAB_CALLBACK_EXCEPTION = None - raise exception_to_raise + if sim is not None: + if not sim.get_setting("/isaaclab/has_gui"): + sim.stop() + sim.clear_instance() diff --git a/source/isaaclab/isaaclab/sim/spawners/__init__.py b/source/isaaclab/isaaclab/sim/spawners/__init__.py index 916141906e1e..8e2541507d5d 100644 --- a/source/isaaclab/isaaclab/sim/spawners/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/__init__.py @@ -54,11 +54,6 @@ class and the function call in a single line of code. """ -from .from_files import * # noqa: F401, F403 -from .lights import * # noqa: F401, F403 -from .materials import * # noqa: F401, F403 -from .meshes import * # noqa: F401, F403 -from .sensors import * # noqa: F401, F403 -from .shapes import * # noqa: F401, F403 -from .spawner_cfg import * # noqa: F401, F403 -from .wrappers import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/__init__.pyi new file mode 100644 index 000000000000..dae1a432b47e --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/__init__.pyi @@ -0,0 +1,132 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "SpawnerCfg", + "RigidObjectSpawnerCfg", + "spawn_from_mjcf", + "spawn_from_urdf", + "spawn_from_usd", + "spawn_from_usd_with_compliant_contact_material", + "spawn_ground_plane", + "GroundPlaneCfg", + "MjcfFileCfg", + "UrdfFileCfg", + "UsdFileCfg", + "UsdFileWithCompliantContactCfg", + "spawn_light", + "CylinderLightCfg", + "DiskLightCfg", + "DistantLightCfg", + "DomeLightCfg", + "LightCfg", + "SphereLightCfg", + "spawn_rigid_body_material", + "PhysicsMaterialCfg", + "RigidBodyMaterialCfg", + "spawn_from_mdl_file", + "spawn_preview_surface", + "GlassMdlCfg", + "MdlFileCfg", + "PreviewSurfaceCfg", + "VisualMaterialCfg", + "spawn_mesh_capsule", + "spawn_mesh_cone", + "spawn_mesh_cuboid", + "spawn_mesh_cylinder", + "spawn_mesh_sphere", + "spawn_mesh_square", + "MeshCapsuleCfg", + "MeshCfg", + "MeshConeCfg", + "MeshCuboidCfg", + "MeshCylinderCfg", + "MeshSphereCfg", + "MeshSquareCfg", + "spawn_camera", + "spawn_sensor_frame", + "FisheyeCameraCfg", + "PinholeCameraCfg", + "SensorFrameCfg", + "spawn_capsule", + "spawn_cone", + "spawn_cuboid", + "spawn_cylinder", + "spawn_sphere", + "CapsuleCfg", + "ConeCfg", + "CuboidCfg", + "CylinderCfg", + "ShapeCfg", + "SphereCfg", + "spawn_multi_asset", + "spawn_multi_usd_file", + "MultiAssetSpawnerCfg", + "MultiUsdFileCfg", +] + +from .spawner_cfg import SpawnerCfg, RigidObjectSpawnerCfg +from .from_files import ( + spawn_from_mjcf, + spawn_from_urdf, + spawn_from_usd, + spawn_from_usd_with_compliant_contact_material, + spawn_ground_plane, + GroundPlaneCfg, + MjcfFileCfg, + UrdfFileCfg, + UsdFileCfg, + UsdFileWithCompliantContactCfg, +) +from .lights import ( + spawn_light, + CylinderLightCfg, + DiskLightCfg, + DistantLightCfg, + DomeLightCfg, + LightCfg, + SphereLightCfg, +) +from .materials import ( + spawn_rigid_body_material, + PhysicsMaterialCfg, + RigidBodyMaterialCfg, + spawn_from_mdl_file, + spawn_preview_surface, + GlassMdlCfg, + MdlFileCfg, + PreviewSurfaceCfg, + VisualMaterialCfg, +) +from .meshes import ( + spawn_mesh_capsule, + spawn_mesh_cone, + spawn_mesh_cuboid, + spawn_mesh_cylinder, + spawn_mesh_sphere, + spawn_mesh_square, + MeshCapsuleCfg, + MeshCfg, + MeshConeCfg, + MeshCuboidCfg, + MeshCylinderCfg, + MeshSquareCfg, + MeshSphereCfg, +) +from .sensors import spawn_camera, spawn_sensor_frame, FisheyeCameraCfg, PinholeCameraCfg, SensorFrameCfg +from .shapes import ( + spawn_capsule, + spawn_cone, + spawn_cuboid, + spawn_cylinder, + spawn_sphere, + CapsuleCfg, + ConeCfg, + CuboidCfg, + CylinderCfg, + ShapeCfg, + SphereCfg, +) +from .wrappers import spawn_multi_asset, spawn_multi_usd_file, MultiAssetSpawnerCfg, MultiUsdFileCfg diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.py b/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.py index a95ac491b0a8..5d953905821e 100644 --- a/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.py @@ -13,11 +13,6 @@ """ -from .from_files import ( - spawn_from_mjcf, - spawn_from_urdf, - spawn_from_usd, - spawn_from_usd_with_compliant_contact_material, - spawn_ground_plane, -) -from .from_files_cfg import GroundPlaneCfg, MjcfFileCfg, UrdfFileCfg, UsdFileCfg, UsdFileWithCompliantContactCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.pyi new file mode 100644 index 000000000000..6003fa170fea --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/__init__.pyi @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_from_mjcf", + "spawn_from_urdf", + "spawn_from_usd", + "spawn_from_usd_with_compliant_contact_material", + "spawn_ground_plane", + "GroundPlaneCfg", + "MjcfFileCfg", + "UrdfFileCfg", + "UsdFileCfg", + "UsdFileWithCompliantContactCfg", +] + +from .from_files import ( + spawn_from_mjcf, + spawn_from_urdf, + spawn_from_usd, + spawn_from_usd_with_compliant_contact_material, + spawn_ground_plane, +) +from .from_files_cfg import ( + GroundPlaneCfg, + MjcfFileCfg, + UrdfFileCfg, + UsdFileCfg, + UsdFileWithCompliantContactCfg, +) diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py index 242829d777e8..be7bd6e7074e 100644 --- a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py @@ -5,11 +5,17 @@ from __future__ import annotations +import fcntl import logging +import os +import tempfile from typing import TYPE_CHECKING -import omni.kit.commands -from pxr import Gf, Sdf, Usd +# deformables only supported on PhysX backend +from isaaclab_physx.sim import schemas as schemas_physx +from isaaclab_physx.sim.spawners.materials import SurfaceDeformableBodyMaterialCfg + +from pxr import Gf, Sdf, Usd, UsdGeom from isaaclab.sim import converters, schemas from isaaclab.sim.spawners.materials import RigidBodyMaterialCfg @@ -25,7 +31,8 @@ select_usd_variants, set_prim_visibility, ) -from isaaclab.utils.assets import check_usd_path_with_timeout +from isaaclab.utils.assets import check_file_path, retrieve_file_path +from isaaclab.utils.version import has_kit if TYPE_CHECKING: from . import from_files_cfg @@ -62,7 +69,7 @@ def spawn_from_usd( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the USD file is used. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -104,7 +111,7 @@ def spawn_from_urdf( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the generated USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the generated USD file is used. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -147,7 +154,7 @@ def spawn_from_mjcf( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the generated USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the generated USD file is used. Returns: @@ -184,7 +191,7 @@ def spawn_ground_plane( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the USD file is used. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -238,9 +245,12 @@ def spawn_ground_plane( stage=stage, type_to_create_if_not_exist=Sdf.ValueTypeNames.Color3f, ) - # Remove the light from the ground plane + # Remove the light from the ground plane (USD API, works without Kit/Newton) # It isn't bright enough and messes up with the user's lighting settings - omni.kit.commands.execute("ToggleVisibilitySelectedPrims", selected_paths=[f"{prim_path}/SphereLight"], stage=stage) + light_prim = stage.GetPrimAtPath(f"{prim_path}/SphereLight") + if light_prim.IsValid(): + imageable = UsdGeom.Imageable(light_prim) + imageable.MakeInvisible() prim = stage.GetPrimAtPath(prim_path) # Apply semantic tags @@ -286,7 +296,7 @@ def _spawn_from_usd_file( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the generated USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the generated USD file is used. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -296,31 +306,38 @@ def _spawn_from_usd_file( Raises: FileNotFoundError: If the USD file does not exist at the given path. """ - # check if usd path exists with periodic logging until timeout - if not check_usd_path_with_timeout(usd_path): - if "4.5" in usd_path: - usd_5_0_path = usd_path.replace("http", "https").replace("/4.5", "/5.0") - if not check_usd_path_with_timeout(usd_5_0_path): - raise FileNotFoundError(f"USD file not found at path at either: '{usd_path}' or '{usd_5_0_path}'.") - usd_path = usd_5_0_path + # In distributed training, serialize asset download and USD stage composition + # across ranks to prevent file I/O races. Concurrent mmap reads/writes on + # the same cached USD files cause segfaults in Sdf_CrateFile::_MmapStream::Read. + _world_size = int(os.environ.get("LOCAL_WORLD_SIZE", "1")) + + file_status = check_file_path(usd_path) + if file_status == 0: + raise FileNotFoundError(f"USD file not found at path: '{usd_path}'.") + + if _world_size > 1: + lock_path = os.path.join(tempfile.gettempdir(), "isaaclab_usd_spawn.lock") + lock_fd = open(lock_path, "w") # noqa: SIM115 + fcntl.flock(lock_fd, fcntl.LOCK_EX) + try: + if file_status == 2: + usd_path = retrieve_file_path(usd_path, force_download=False) + stage = get_current_stage() + if not stage.GetPrimAtPath(prim_path).IsValid(): + create_prim( + prim_path, + usd_path=usd_path, + translation=translation, + orientation=orientation, + scale=cfg.scale, + stage=stage, + ) else: - raise FileNotFoundError(f"USD file not found at path at: '{usd_path}'.") - - # Obtain current stage - stage = get_current_stage() - # spawn asset if it doesn't exist. - if not stage.GetPrimAtPath(prim_path).IsValid(): - # add prim as reference to stage - create_prim( - prim_path, - usd_path=usd_path, - translation=translation, - orientation=orientation, - scale=cfg.scale, - stage=stage, - ) - else: - logger.warning(f"A prim already exists at prim path: '{prim_path}'.") + logger.warning(f"A prim already exists at prim path: '{prim_path}'.") + finally: + if _world_size > 1: + fcntl.flock(lock_fd, fcntl.LOCK_UN) + lock_fd.close() # modify variants if hasattr(cfg, "variants") and cfg.variants is not None: @@ -350,12 +367,25 @@ def _spawn_from_usd_file( if cfg.joint_drive_props is not None: schemas.modify_joint_drive_properties(prim_path, cfg.joint_drive_props) - # modify deformable body properties + # define deformable body properties, or modify if deformable body API is present (PhysX only) if cfg.deformable_props is not None: - schemas.modify_deformable_body_properties(prim_path, cfg.deformable_props) + prim = stage.GetPrimAtPath(prim_path) + deformable_type = "surface" if isinstance(cfg.physics_material, SurfaceDeformableBodyMaterialCfg) else "volume" + if "OmniPhysicsDeformableBodyAPI" in prim.GetAppliedSchemas(): + schemas_physx.modify_deformable_body_properties(prim_path, cfg.deformable_props, stage) + else: + schemas_physx.define_deformable_body_properties(prim_path, cfg.deformable_props, stage, deformable_type) + if cfg.mass_props is not None: + raise ValueError( + """MassPropertiesCfg are not supported for deformable bodies + and should be set through DeformableBodyPropertiesCfg(mass=).""" + ) # apply visual material if cfg.visual_material is not None: + if not has_kit(): + logger.warning("Skipping visual material application for '%s' in kitless mode.", prim_path) + return stage.GetPrimAtPath(prim_path) if not cfg.visual_material_path.startswith("/"): material_path = f"{prim_path}/{cfg.visual_material_path}" else: @@ -365,6 +395,17 @@ def _spawn_from_usd_file( # apply material bind_visual_material(prim_path, material_path, stage=stage) + # apply physics material + if cfg.physics_material is not None: + if not cfg.physics_material_path.startswith("/"): + material_path = f"{prim_path}/{cfg.physics_material_path}" + else: + material_path = cfg.physics_material_path + # create material + cfg.physics_material.func(material_path, cfg.physics_material) + # apply material + bind_physics_material(prim_path, material_path, stage=stage) + # return the prim return stage.GetPrimAtPath(prim_path) @@ -389,7 +430,7 @@ def spawn_from_usd_with_compliant_contact_material( cfg: The configuration instance containing the USD file path and physics material settings. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the translation specified in the USD file is used. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case the orientation specified in the USD file is used. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py index ad9f55859040..78aaf1b15c94 100644 --- a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py @@ -8,14 +8,15 @@ from collections.abc import Callable from dataclasses import MISSING +# deformables only supported on PhysX backend +from isaaclab_physx.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg + from isaaclab.sim import converters, schemas from isaaclab.sim.spawners import materials -from isaaclab.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg, RigidObjectSpawnerCfg, SpawnerCfg +from isaaclab.sim.spawners.spawner_cfg import RigidObjectSpawnerCfg, SpawnerCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from . import from_files - @configclass class FileCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): @@ -69,6 +70,20 @@ class FileCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): If None, then no visual material will be added. """ + physics_material_path: str = "material" + """Path to the physics material to use for the prim. Defaults to "material". + + If the path is relative, then it will be relative to the prim's path. + This parameter is ignored if `physics_material` is not None. + """ + + physics_material: materials.PhysicsMaterialCfg | None = None + """Physics material properties. + + Note: + If None, then no custom physics material will be added. + """ + @configclass class UsdFileCfg(FileCfg): @@ -96,7 +111,7 @@ class UsdFileCfg(FileCfg): This is done by calling the respective function with the specified properties. """ - func: Callable = from_files.spawn_from_usd + func: Callable | str = "{DIR}.from_files:spawn_from_usd" usd_path: str = MISSING """Path to the USD file to spawn asset from.""" @@ -129,7 +144,7 @@ class UrdfFileCfg(FileCfg, converters.UrdfConverterCfg): """ - func: Callable = from_files.spawn_from_urdf + func: Callable | str = "{DIR}.from_files:spawn_from_urdf" @configclass @@ -151,7 +166,7 @@ class MjcfFileCfg(FileCfg, converters.MjcfConverterCfg): """ - func: Callable = from_files.spawn_from_mjcf + func: Callable | str = "{DIR}.from_files:spawn_from_mjcf" """ @@ -169,7 +184,7 @@ class UsdFileWithCompliantContactCfg(UsdFileCfg): material application. """ - func: Callable = from_files.spawn_from_usd_with_compliant_contact_material + func: Callable | str = "{DIR}.from_files:spawn_from_usd_with_compliant_contact_material" compliant_contact_stiffness: float | None = None """Stiffness of the compliant contact. Defaults to None. @@ -201,7 +216,7 @@ class GroundPlaneCfg(SpawnerCfg): This uses the USD for the standard grid-world ground plane from Isaac Sim by default. """ - func: Callable = from_files.spawn_ground_plane + func: Callable | str = "{DIR}.from_files:spawn_ground_plane" usd_path: str = f"{ISAAC_NUCLEUS_DIR}/Environments/Grid/default_environment.usd" """Path to the USD file to spawn asset from. Defaults to the grid-world ground plane.""" diff --git a/source/isaaclab/isaaclab/sim/spawners/lights/__init__.py b/source/isaaclab/isaaclab/sim/spawners/lights/__init__.py index df0c638f58f1..0cb0b03b2a25 100644 --- a/source/isaaclab/isaaclab/sim/spawners/lights/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/lights/__init__.py @@ -10,5 +10,6 @@ `_. """ -from .lights import spawn_light -from .lights_cfg import CylinderLightCfg, DiskLightCfg, DistantLightCfg, DomeLightCfg, LightCfg, SphereLightCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/lights/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/lights/__init__.pyi new file mode 100644 index 000000000000..8486f38b0298 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/lights/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_light", + "CylinderLightCfg", + "DiskLightCfg", + "DistantLightCfg", + "DomeLightCfg", + "LightCfg", + "SphereLightCfg", +] + +from .lights import spawn_light +from .lights_cfg import ( + CylinderLightCfg, + DiskLightCfg, + DistantLightCfg, + DomeLightCfg, + LightCfg, + SphereLightCfg, +) diff --git a/source/isaaclab/isaaclab/sim/spawners/lights/lights.py b/source/isaaclab/isaaclab/sim/spawners/lights/lights.py index 9b0106c6ecdd..b033a97aff15 100644 --- a/source/isaaclab/isaaclab/sim/spawners/lights/lights.py +++ b/source/isaaclab/isaaclab/sim/spawners/lights/lights.py @@ -37,7 +37,7 @@ def spawn_light( then the asset is spawned at all the matching prim paths. cfg: The configuration for the light source. translation: The translation of the prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation of the prim as (w, x, y, z). Defaults to None, in which case this + orientation: The orientation of the prim as (x, y, z, w). Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -59,7 +59,7 @@ def spawn_light( # delete spawner func specific parameters del cfg["prim_type"] # delete custom attributes in the config that are not USD parameters - non_usd_cfg_param_names = ["func", "copy_from_source", "visible", "semantic_tags"] + non_usd_cfg_param_names = ["func", "copy_from_source", "visible", "semantic_tags", "spawn_path"] for param_name in non_usd_cfg_param_names: del cfg[param_name] # set into USD API diff --git a/source/isaaclab/isaaclab/sim/spawners/lights/lights_cfg.py b/source/isaaclab/isaaclab/sim/spawners/lights/lights_cfg.py index 60060ea22e52..653e83a108cc 100644 --- a/source/isaaclab/isaaclab/sim/spawners/lights/lights_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/lights/lights_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from collections.abc import Callable from dataclasses import MISSING from typing import Literal @@ -10,8 +12,6 @@ from isaaclab.sim.spawners.spawner_cfg import SpawnerCfg from isaaclab.utils import configclass -from . import lights - @configclass class LightCfg(SpawnerCfg): @@ -24,7 +24,7 @@ class LightCfg(SpawnerCfg): The default values for the attributes are those specified in the their official documentation. """ - func: Callable = lights.spawn_light + func: Callable | str = "{DIR}.lights:spawn_light" prim_type: str = MISSING """The prim type name for the light prim.""" diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py index 0b0e7851494f..b86e98599fc0 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.py @@ -52,7 +52,6 @@ .. _Physics Scene: https://openusd.org/dev/api/usd_physics_page_front.html """ -from .physics_materials import spawn_deformable_body_material, spawn_rigid_body_material -from .physics_materials_cfg import DeformableBodyMaterialCfg, PhysicsMaterialCfg, RigidBodyMaterialCfg -from .visual_materials import spawn_from_mdl_file, spawn_preview_surface -from .visual_materials_cfg import GlassMdlCfg, MdlFileCfg, PreviewSurfaceCfg, VisualMaterialCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.pyi new file mode 100644 index 000000000000..93142ddab389 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/materials/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_rigid_body_material", + "PhysicsMaterialCfg", + "RigidBodyMaterialCfg", + "spawn_from_mdl_file", + "spawn_preview_surface", + "GlassMdlCfg", + "MdlFileCfg", + "PreviewSurfaceCfg", + "VisualMaterialCfg", +] + +from .physics_materials import spawn_rigid_body_material +from .physics_materials_cfg import ( + PhysicsMaterialCfg, + RigidBodyMaterialCfg, +) +from .visual_materials import spawn_from_mdl_file, spawn_preview_surface +from .visual_materials_cfg import GlassMdlCfg, MdlFileCfg, PreviewSurfaceCfg, VisualMaterialCfg diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials.py b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials.py index 29818d830951..240a0ff00746 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials.py @@ -7,10 +7,11 @@ from typing import TYPE_CHECKING -from pxr import PhysxSchema, Usd, UsdPhysics, UsdShade +from pxr import Usd, UsdPhysics, UsdShade -from isaaclab.sim.utils import clone, safe_set_attribute_on_usd_schema +from isaaclab.sim.utils import clone, safe_set_attribute_on_usd_prim, safe_set_attribute_on_usd_schema from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.utils.string import to_camel_case if TYPE_CHECKING: from . import physics_materials_cfg @@ -56,10 +57,10 @@ def spawn_rigid_body_material(prim_path: str, cfg: physics_materials_cfg.RigidBo usd_physics_material_api = UsdPhysics.MaterialAPI(prim) if not usd_physics_material_api: usd_physics_material_api = UsdPhysics.MaterialAPI.Apply(prim) - # retrieve the collision api - physx_material_api = PhysxSchema.PhysxMaterialAPI(prim) - if not physx_material_api: - physx_material_api = PhysxSchema.PhysxMaterialAPI.Apply(prim) + # ensure PhysX material API is applied + applied = prim.GetAppliedSchemas() + if "PhysxMaterialAPI" not in applied: + prim.AddAppliedSchema("PhysxMaterialAPI") # convert to dict cfg = cfg.to_dict() @@ -68,61 +69,8 @@ def spawn_rigid_body_material(prim_path: str, cfg: physics_materials_cfg.RigidBo for attr_name in ["static_friction", "dynamic_friction", "restitution"]: value = cfg.pop(attr_name, None) safe_set_attribute_on_usd_schema(usd_physics_material_api, attr_name, value, camel_case=True) - # set into PhysX API + # set into PhysX API (prim attributes: physxMaterial:*) for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_material_api, attr_name, value, camel_case=True) - # return the prim - return prim - - -@clone -def spawn_deformable_body_material(prim_path: str, cfg: physics_materials_cfg.DeformableBodyMaterialCfg) -> Usd.Prim: - """Create material with deformable-body physics properties. - - Deformable body materials are used to define the physical properties to meshes of a deformable body. These - include the friction and deformable body properties. For more information on deformable body material, - please refer to the documentation on `PxFEMSoftBodyMaterial`_. - - .. note:: - This function is decorated with :func:`clone` that resolves prim path into list of paths - if the input prim path is a regex pattern. This is done to support spawning multiple assets - from a single and cloning the USD prim at the given path expression. - - Args: - prim_path: The prim path or pattern to spawn the asset at. If the prim path is a regex pattern, - then the asset is spawned at all the matching prim paths. - cfg: The configuration for the physics material. - - Returns: - The spawned deformable body material prim. - - Raises: - ValueError: When a prim already exists at the specified prim path and is not a material. - - .. _PxFEMSoftBodyMaterial: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/_api_build/structPxFEMSoftBodyMaterialModel.html - """ - # get stage handle - stage = get_current_stage() - - # create material prim if no prim exists - if not stage.GetPrimAtPath(prim_path).IsValid(): - _ = UsdShade.Material.Define(stage, prim_path) - - # obtain prim - prim = stage.GetPrimAtPath(prim_path) - # check if prim is a material - if not prim.IsA(UsdShade.Material): - raise ValueError(f"A prim already exists at path: '{prim_path}' but is not a material.") - # retrieve the deformable-body api - physx_deformable_body_material_api = PhysxSchema.PhysxDeformableBodyMaterialAPI(prim) - if not physx_deformable_body_material_api: - physx_deformable_body_material_api = PhysxSchema.PhysxDeformableBodyMaterialAPI.Apply(prim) - - # convert to dict - cfg = cfg.to_dict() - del cfg["func"] - # set into PhysX API - for attr_name, value in cfg.items(): - safe_set_attribute_on_usd_schema(physx_deformable_body_material_api, attr_name, value, camel_case=True) + safe_set_attribute_on_usd_prim(prim, f"physxMaterial:{to_camel_case(attr_name, 'cC')}", value, camel_case=False) # return the prim return prim diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py index ce05c2b9ea46..dde9aec6d905 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/physics_materials_cfg.py @@ -3,14 +3,14 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from collections.abc import Callable from dataclasses import MISSING from typing import Literal from isaaclab.utils import configclass -from . import physics_materials - @configclass class PhysicsMaterialCfg: @@ -33,7 +33,7 @@ class RigidBodyMaterialCfg(PhysicsMaterialCfg): See :meth:`spawn_rigid_body_material` for more information. """ - func: Callable = physics_materials.spawn_rigid_body_material + func: Callable | str = "{DIR}.physics_materials:spawn_rigid_body_material" static_friction: float = 0.5 """The static friction coefficient. Defaults to 0.5.""" @@ -77,44 +77,3 @@ class RigidBodyMaterialCfg(PhysicsMaterialCfg): Irrelevant if compliant contacts are disabled when :obj:`compliant_contact_stiffness` is set to zero and rigid contacts are active. """ - - -@configclass -class DeformableBodyMaterialCfg(PhysicsMaterialCfg): - """Physics material parameters for deformable bodies. - - See :meth:`spawn_deformable_body_material` for more information. - - """ - - func: Callable = physics_materials.spawn_deformable_body_material - - density: float | None = None - """The material density. Defaults to None, in which case the simulation decides the default density.""" - - dynamic_friction: float = 0.25 - """The dynamic friction. Defaults to 0.25.""" - - youngs_modulus: float = 50000000.0 - """The Young's modulus, which defines the body's stiffness. Defaults to 50000000.0. - - The Young's modulus is a measure of the material's ability to deform under stress. It is measured in Pascals (Pa). - """ - - poissons_ratio: float = 0.45 - """The Poisson's ratio which defines the body's volume preservation. Defaults to 0.45. - - The Poisson's ratio is a measure of the material's ability to expand in the lateral direction when compressed - in the axial direction. It is a dimensionless number between 0 and 0.5. Using a value of 0.5 will make the - material incompressible. - """ - - elasticity_damping: float = 0.005 - """The elasticity damping for the deformable material. Defaults to 0.005.""" - - damping_scale: float = 1.0 - """The damping scale for the deformable material. Defaults to 1.0. - - A scale of 1 corresponds to default damping. A value of 0 will only apply damping to certain motions leading - to special effects that look similar to water filled soft bodies. - """ diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials.py b/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials.py index 074d6ac0e432..e4b330223334 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials.py @@ -8,12 +8,12 @@ import logging from typing import TYPE_CHECKING -from omni.usd.commands import CreateMdlMaterialPrimCommand, CreateShaderPrimFromSdrCommand from pxr import Usd, UsdShade from isaaclab.sim.utils import clone, safe_set_attribute_on_usd_prim from isaaclab.sim.utils.stage import get_current_stage from isaaclab.utils.assets import NVIDIA_NUCLEUS_DIR +from isaaclab.utils.version import has_kit if TYPE_CHECKING: from . import visual_materials_cfg @@ -50,6 +50,11 @@ def spawn_preview_surface(prim_path: str, cfg: visual_materials_cfg.PreviewSurfa Raises: ValueError: If a prim already exists at the given path. """ + # check if Kit is available (required for shader creation commands) + if not has_kit(): + logger.warning("Skipping preview surface material at '%s' — Kit is not available.", prim_path) + return None + # get stage handle stage = get_current_stage() @@ -61,11 +66,13 @@ def spawn_preview_surface(prim_path: str, cfg: visual_materials_cfg.PreviewSurfa # handle scene creation on a custom stage. material_prim = UsdShade.Material.Define(stage, prim_path) if material_prim: + from omni.usd.commands import CreateShaderPrimFromSdrCommand + shader_prim = CreateShaderPrimFromSdrCommand( parent_path=prim_path, identifier="UsdPreviewSurface", stage_or_context=stage, - name="Shader", + prim_name="Shader", ).do() # bind the shader graph to the material if shader_prim: @@ -126,6 +133,11 @@ def spawn_from_mdl_file( Raises: ValueError: If a prim already exists at the given path. """ + # check if Kit is available (required for MDL material creation commands) + if not has_kit(): + logger.warning("Skipping MDL material at '%s' — Kit is not available.", prim_path) + return None + # get stage handle stage = get_current_stage() @@ -133,6 +145,8 @@ def spawn_from_mdl_file( if not stage.GetPrimAtPath(prim_path).IsValid(): # extract material name from path material_name = cfg.mdl_path.split("/")[-1].split(".")[0] + from omni.usd.commands import CreateMdlMaterialPrimCommand + CreateMdlMaterialPrimCommand( mtl_url=cfg.mdl_path.format(NVIDIA_NUCLEUS_DIR=NVIDIA_NUCLEUS_DIR), mtl_name=material_name, diff --git a/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials_cfg.py b/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials_cfg.py index a0c2874b0e9a..961b351b6ce4 100644 --- a/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/materials/visual_materials_cfg.py @@ -3,13 +3,13 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from collections.abc import Callable from dataclasses import MISSING from isaaclab.utils import configclass -from . import visual_materials - @configclass class VisualMaterialCfg: @@ -26,7 +26,7 @@ class PreviewSurfaceCfg(VisualMaterialCfg): See :meth:`spawn_preview_surface` for more information. """ - func: Callable = visual_materials.spawn_preview_surface + func: Callable | str = "{DIR}.visual_materials:spawn_preview_surface" diffuse_color: tuple[float, float, float] = (0.18, 0.18, 0.18) """The RGB diffusion color. This is the base color of the surface. Defaults to a dark gray.""" @@ -51,7 +51,7 @@ class MdlFileCfg(VisualMaterialCfg): See :meth:`spawn_from_mdl_file` for more information. """ - func: Callable = visual_materials.spawn_from_mdl_file + func: Callable | str = "{DIR}.visual_materials:spawn_from_mdl_file" mdl_path: str = MISSING """The path to the MDL material. @@ -93,7 +93,7 @@ class GlassMdlCfg(VisualMaterialCfg): The default values are taken from the glass material in the NVIDIA Nucleus. """ - func: Callable = visual_materials.spawn_from_mdl_file + func: Callable | str = "{DIR}.visual_materials:spawn_from_mdl_file" mdl_path: str = "OmniGlass.mdl" """The path to the MDL material. Defaults to the glass material in the NVIDIA Nucleus.""" diff --git a/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.py b/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.py index 49836dc5cbd4..fe8d5a81fcc1 100644 --- a/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.py @@ -20,5 +20,6 @@ .. _USDGeomMesh: https://openusd.org/release/api/class_usd_geom_mesh.html """ -from .meshes import spawn_mesh_capsule, spawn_mesh_cone, spawn_mesh_cuboid, spawn_mesh_cylinder, spawn_mesh_sphere -from .meshes_cfg import MeshCapsuleCfg, MeshCfg, MeshConeCfg, MeshCuboidCfg, MeshCylinderCfg, MeshSphereCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.pyi new file mode 100644 index 000000000000..06490befb33f --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/meshes/__init__.pyi @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_mesh_capsule", + "spawn_mesh_cone", + "spawn_mesh_cuboid", + "spawn_mesh_cylinder", + "spawn_mesh_sphere", + "spawn_mesh_square", + "MeshCapsuleCfg", + "MeshCfg", + "MeshConeCfg", + "MeshCuboidCfg", + "MeshCylinderCfg", + "MeshSphereCfg", + "MeshSquareCfg", +] + +from .meshes import ( + spawn_mesh_capsule, + spawn_mesh_cone, + spawn_mesh_cuboid, + spawn_mesh_cylinder, + spawn_mesh_sphere, + spawn_mesh_square, +) +from .meshes_cfg import ( + MeshCapsuleCfg, + MeshCfg, + MeshConeCfg, + MeshCuboidCfg, + MeshCylinderCfg, + MeshSphereCfg, + MeshSquareCfg, +) diff --git a/source/isaaclab/isaaclab/sim/spawners/meshes/meshes.py b/source/isaaclab/isaaclab/sim/spawners/meshes/meshes.py index 066ca0bea188..75ead1c8fb67 100644 --- a/source/isaaclab/isaaclab/sim/spawners/meshes/meshes.py +++ b/source/isaaclab/isaaclab/sim/spawners/meshes/meshes.py @@ -11,12 +11,16 @@ import trimesh import trimesh.transformations +# deformables only supported on PhysX backend +from isaaclab_physx.sim import schemas as schemas_physx +from isaaclab_physx.sim.spawners.materials import DeformableBodyMaterialCfg, SurfaceDeformableBodyMaterialCfg + from pxr import Usd, UsdPhysics from isaaclab.sim import schemas from isaaclab.sim.utils import bind_physics_material, bind_visual_material, clone, create_prim, get_current_stage -from ..materials import DeformableBodyMaterialCfg, RigidBodyMaterialCfg +from ..materials import RigidBodyMaterialCfg if TYPE_CHECKING: from . import meshes_cfg @@ -43,7 +47,7 @@ def spawn_mesh_sphere( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -85,7 +89,7 @@ def spawn_mesh_cuboid( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -127,7 +131,7 @@ def spawn_mesh_cylinder( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -177,7 +181,7 @@ def spawn_mesh_capsule( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -227,7 +231,7 @@ def spawn_mesh_cone( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -256,6 +260,51 @@ def spawn_mesh_cone( return stage.GetPrimAtPath(prim_path) +@clone +def spawn_mesh_square( + prim_path: str, + cfg: meshes_cfg.MeshSquareCfg, + translation: tuple[float, float, float] | None = None, + orientation: tuple[float, float, float, float] | None = None, + **kwargs, +) -> Usd.Prim: + """Create a USD-Mesh 2D square prim with the given attributes. + + .. note:: + This function is decorated with :func:`clone` that resolves prim path into list of paths + if the input prim path is a regex pattern. This is done to support spawning multiple assets + from a single and cloning the USD prim at the given path expression. + + Args: + prim_path: The prim path or pattern to spawn the asset at. If the prim path is a regex pattern, + then the asset is spawned at all the matching prim paths. + cfg: The configuration instance. + translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case + this is set to the origin. + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, + in which case this is set to identity. + **kwargs: Additional keyword arguments, like ``clone_in_fabric``. + + Returns: + The created prim. + + Raises: + ValueError: If a prim already exists at the given path. + """ + # create a 2D triangle mesh grid + from omni.physx.scripts import deformableUtils + + vertices, faces = deformableUtils.create_triangle_mesh_square(cfg.resolution[0], cfg.resolution[1], scale=cfg.size) + grid = trimesh.Trimesh(vertices=vertices, faces=np.array(faces).reshape(-1, 3), process=False) + + # obtain stage handle + stage = get_current_stage() + # spawn the square as a mesh + _spawn_mesh_geom_from_mesh(prim_path, cfg, grid, translation, orientation, None, stage=stage) + # return the prim + return stage.GetPrimAtPath(prim_path) + + """ Helper functions. """ @@ -288,7 +337,7 @@ def _spawn_mesh_geom_from_mesh( mesh: The mesh to spawn the prim from. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. scale: The scale to apply to the prim. Defaults to None, in which case this is set to identity. stage: The stage to spawn the asset at. Defaults to None, in which case the current stage is used. @@ -342,14 +391,17 @@ def _spawn_mesh_geom_from_mesh( stage=stage, ) - # note: in case of deformable objects, we need to apply the deformable properties to the mesh prim. - # this is different from rigid objects where we apply the properties to the parent prim. if cfg.deformable_props is not None: - # apply mass properties - if cfg.mass_props is not None: - schemas.define_mass_properties(mesh_prim_path, cfg.mass_props, stage=stage) # apply deformable body properties - schemas.define_deformable_body_properties(mesh_prim_path, cfg.deformable_props, stage=stage) + deformable_type = "surface" if isinstance(cfg.physics_material, SurfaceDeformableBodyMaterialCfg) else "volume" + schemas_physx.define_deformable_body_properties( + prim_path, cfg.deformable_props, stage=stage, deformable_type=deformable_type + ) + if cfg.mass_props is not None: + raise ValueError( + """MassPropertiesCfg are not supported for deformable bodies + and should be set through DeformableBodyPropertiesCfg(mass=).""" + ) elif cfg.collision_props is not None: # decide on type of collision approximation based on the mesh if cfg.__class__.__name__ == "MeshSphereCfg": @@ -386,7 +438,7 @@ def _spawn_mesh_geom_from_mesh( # create material cfg.physics_material.func(material_path, cfg.physics_material) # apply material - bind_physics_material(mesh_prim_path, material_path, stage=stage) + bind_physics_material(prim_path, material_path, stage=stage) # note: we apply the rigid properties to the parent prim in case of rigid objects. if cfg.rigid_props is not None: diff --git a/source/isaaclab/isaaclab/sim/spawners/meshes/meshes_cfg.py b/source/isaaclab/isaaclab/sim/spawners/meshes/meshes_cfg.py index d5c39a505b8b..a9e7e44586d0 100644 --- a/source/isaaclab/isaaclab/sim/spawners/meshes/meshes_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/meshes/meshes_cfg.py @@ -9,12 +9,13 @@ from dataclasses import MISSING from typing import Literal +# deformables only supported on PhysX backend +from isaaclab_physx.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg + from isaaclab.sim.spawners import materials -from isaaclab.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg, RigidObjectSpawnerCfg +from isaaclab.sim.spawners.spawner_cfg import RigidObjectSpawnerCfg from isaaclab.utils import configclass -from . import meshes - @configclass class MeshCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): @@ -74,7 +75,7 @@ class MeshSphereCfg(MeshCfg): See :meth:`spawn_mesh_sphere` for more information. """ - func: Callable = meshes.spawn_mesh_sphere + func: Callable | str = "{DIR}.meshes:spawn_mesh_sphere" radius: float = MISSING """Radius of the sphere (in m).""" @@ -87,7 +88,7 @@ class MeshCuboidCfg(MeshCfg): See :meth:`spawn_mesh_cuboid` for more information. """ - func: Callable = meshes.spawn_mesh_cuboid + func: Callable | str = "{DIR}.meshes:spawn_mesh_cuboid" size: tuple[float, float, float] = MISSING """Size of the cuboid (in m).""" @@ -100,7 +101,7 @@ class MeshCylinderCfg(MeshCfg): See :meth:`spawn_cylinder` for more information. """ - func: Callable = meshes.spawn_mesh_cylinder + func: Callable | str = "{DIR}.meshes:spawn_mesh_cylinder" radius: float = MISSING """Radius of the cylinder (in m).""" @@ -117,7 +118,7 @@ class MeshCapsuleCfg(MeshCfg): See :meth:`spawn_capsule` for more information. """ - func: Callable = meshes.spawn_mesh_capsule + func: Callable | str = "{DIR}.meshes:spawn_mesh_capsule" radius: float = MISSING """Radius of the capsule (in m).""" @@ -134,7 +135,7 @@ class MeshConeCfg(MeshCfg): See :meth:`spawn_cone` for more information. """ - func: Callable = meshes.spawn_mesh_cone + func: Callable | str = "{DIR}.meshes:spawn_mesh_cone" radius: float = MISSING """Radius of the cone (in m).""" @@ -142,3 +143,18 @@ class MeshConeCfg(MeshCfg): """Height of the v (in m).""" axis: Literal["X", "Y", "Z"] = "Z" """Axis of the cone. Defaults to "Z".""" + + +@configclass +class MeshSquareCfg(MeshCfg): + """Configuration parameters for a 2D square mesh prim. + + See :meth:`spawn_mesh_square` for more information. + """ + + func: Callable | str = "{DIR}.meshes:spawn_mesh_square" + + size: float = MISSING + """Edge length of the square (in m).""" + resolution: tuple[int, int] = (5, 5) + """Resolution of the square (in elements/edges per side).""" diff --git a/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.py b/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.py index ac61868c0255..49249f9b6eb8 100644 --- a/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.py @@ -11,5 +11,6 @@ """ -from .sensors import spawn_camera -from .sensors_cfg import FisheyeCameraCfg, PinholeCameraCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.pyi new file mode 100644 index 000000000000..ba5b96a44c7d --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/sensors/__init__.pyi @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_camera", + "spawn_sensor_frame", + "FisheyeCameraCfg", + "PinholeCameraCfg", + "SensorFrameCfg", +] + +from .sensors import spawn_camera, spawn_sensor_frame +from .sensors_cfg import FisheyeCameraCfg, PinholeCameraCfg, SensorFrameCfg diff --git a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors.py b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors.py index 6270447169e9..db68d21d8a90 100644 --- a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors.py +++ b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors.py @@ -72,7 +72,7 @@ def spawn_camera( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -118,6 +118,7 @@ def spawn_camera( "visible", "semantic_tags", "from_intrinsic_matrix", + "spawn_path", ] # get camera prim prim = stage.GetPrimAtPath(prim_path) @@ -144,3 +145,46 @@ def spawn_camera( prim.GetAttribute(prim_prop_name).Set(param_value) # return the prim return prim + + +@clone +def spawn_sensor_frame( + prim_path: str, + cfg: sensors_cfg.SensorFrameCfg, + translation: tuple[float, float, float] | None = None, + orientation: tuple[float, float, float, float] | None = None, + **kwargs, +) -> Usd.Prim: + """Create a plain USD Xform prim as a sensor attachment frame. + + .. note:: + This function is decorated with :func:`clone` that resolves prim path into list of paths + if the input prim path is a regex pattern. + + Args: + prim_path: The prim path or pattern to spawn the asset at. + cfg: The configuration instance. + translation: Local translation (x, y, z) [m] w.r.t. the parent prim. Defaults to None + (origin). + orientation: Local orientation as quaternion (x, y, z, w) w.r.t. the parent prim. + Defaults to None (identity). + **kwargs: Additional keyword arguments, like ``clone_in_fabric``. + + Returns: + The created USD prim. + + Raises: + ValueError: If a prim already exists at the given path. + """ + stage = get_current_stage() + if not stage.GetPrimAtPath(prim_path).IsValid(): + prim = create_prim( + prim_path, + "Xform", + translation=translation, + orientation=orientation, + stage=stage, + ) + else: + raise ValueError(f"A prim already exists at path: '{prim_path}'.") + return prim diff --git a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py index 44e5eb061733..1ad9dcd73bf2 100644 --- a/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/sensors/sensors_cfg.py @@ -12,8 +12,6 @@ from isaaclab.sim.spawners.spawner_cfg import SpawnerCfg from isaaclab.utils import configclass -from . import sensors - @configclass class PinholeCameraCfg(SpawnerCfg): @@ -26,7 +24,7 @@ class PinholeCameraCfg(SpawnerCfg): world unit is Meter s.t. all of these values are set in cm. """ - func: Callable = sensors.spawn_camera + func: Callable | str = "{DIR}.sensors:spawn_camera" projection_type: str = "pinhole" """Type of projection to use for the camera. Defaults to "pinhole". @@ -172,7 +170,7 @@ class FisheyeCameraCfg(PinholeCameraCfg): .. _fish-eye camera: https://en.wikipedia.org/wiki/Fisheye_lens """ - func: Callable = sensors.spawn_camera + func: Callable | str = "{DIR}.sensors:spawn_camera" projection_type: Literal[ "fisheyePolynomial", @@ -224,3 +222,15 @@ class FisheyeCameraCfg(PinholeCameraCfg): fisheye_polynomial_f: float = 0.0 """Sixth component of fisheye polynomial. Defaults to 0.0.""" + + +@configclass +class SensorFrameCfg(SpawnerCfg): + """Spawns a plain USD Xform as a sensor attachment frame. + + The spawned prim carries no rigid body or collision API. It serves as a + non-physics child under a link so that :class:`~isaaclab.sim.views.FrameView` + can track it on all backends (including Newton, which rejects physics body prims). + """ + + func: Callable | str = "{DIR}.sensors:spawn_sensor_frame" diff --git a/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.py b/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.py index 8f6cab9439cd..d911de7bcacc 100644 --- a/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.py @@ -14,5 +14,6 @@ """ -from .shapes import spawn_capsule, spawn_cone, spawn_cuboid, spawn_cylinder, spawn_sphere -from .shapes_cfg import CapsuleCfg, ConeCfg, CuboidCfg, CylinderCfg, ShapeCfg, SphereCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.pyi new file mode 100644 index 000000000000..04c7330c7b59 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/shapes/__init__.pyi @@ -0,0 +1,21 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_capsule", + "spawn_cone", + "spawn_cuboid", + "spawn_cylinder", + "spawn_sphere", + "CapsuleCfg", + "ConeCfg", + "CuboidCfg", + "CylinderCfg", + "ShapeCfg", + "SphereCfg", +] + +from .shapes import spawn_capsule, spawn_cone, spawn_cuboid, spawn_cylinder, spawn_sphere +from .shapes_cfg import CapsuleCfg, ConeCfg, CuboidCfg, CylinderCfg, ShapeCfg, SphereCfg diff --git a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py index a7780c25596d..9e8eafc1c578 100644 --- a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py +++ b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes.py @@ -39,7 +39,7 @@ def spawn_sphere( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -85,7 +85,7 @@ def spawn_cuboid( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -130,7 +130,7 @@ def spawn_cylinder( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -172,7 +172,7 @@ def spawn_capsule( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -214,7 +214,7 @@ def spawn_cone( cfg: The configuration instance. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. **kwargs: Additional keyword arguments, like ``clone_in_fabric``. @@ -269,7 +269,7 @@ def _spawn_geom_from_prim_type( attributes: The attributes to apply to the prim. translation: The translation to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to the origin. - orientation: The orientation in (w, x, y, z) to apply to the prim w.r.t. its parent prim. Defaults to None, + orientation: The orientation in (x, y, z, w) to apply to the prim w.r.t. its parent prim. Defaults to None, in which case this is set to identity. scale: The scale to apply to the prim. Defaults to None, in which case this is set to identity. stage: The stage to spawn the asset at. Defaults to None, in which case the current stage is used. diff --git a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes_cfg.py b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes_cfg.py index d2de5a7f9416..2baa75d19e66 100644 --- a/source/isaaclab/isaaclab/sim/spawners/shapes/shapes_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/shapes/shapes_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + from collections.abc import Callable from dataclasses import MISSING from typing import Literal @@ -11,8 +13,6 @@ from isaaclab.sim.spawners.spawner_cfg import RigidObjectSpawnerCfg from isaaclab.utils import configclass -from . import shapes - @configclass class ShapeCfg(RigidObjectSpawnerCfg): @@ -52,7 +52,7 @@ class SphereCfg(ShapeCfg): See :meth:`spawn_sphere` for more information. """ - func: Callable = shapes.spawn_sphere + func: Callable | str = "{DIR}.shapes:spawn_sphere" radius: float = MISSING """Radius of the sphere (in m).""" @@ -65,7 +65,7 @@ class CuboidCfg(ShapeCfg): See :meth:`spawn_cuboid` for more information. """ - func: Callable = shapes.spawn_cuboid + func: Callable | str = "{DIR}.shapes:spawn_cuboid" size: tuple[float, float, float] = MISSING """Size of the cuboid.""" @@ -78,7 +78,7 @@ class CylinderCfg(ShapeCfg): See :meth:`spawn_cylinder` for more information. """ - func: Callable = shapes.spawn_cylinder + func: Callable | str = "{DIR}.shapes:spawn_cylinder" radius: float = MISSING """Radius of the cylinder (in m).""" @@ -95,7 +95,7 @@ class CapsuleCfg(ShapeCfg): See :meth:`spawn_capsule` for more information. """ - func: Callable = shapes.spawn_capsule + func: Callable | str = "{DIR}.shapes:spawn_capsule" radius: float = MISSING """Radius of the capsule (in m).""" @@ -112,7 +112,7 @@ class ConeCfg(ShapeCfg): See :meth:`spawn_cone` for more information. """ - func: Callable = shapes.spawn_cone + func: Callable | str = "{DIR}.shapes:spawn_cone" radius: float = MISSING """Radius of the cone (in m).""" diff --git a/source/isaaclab/isaaclab/sim/spawners/spawner_cfg.py b/source/isaaclab/isaaclab/sim/spawners/spawner_cfg.py index 2dea8db8fcb6..7a803ad0e0dd 100644 --- a/source/isaaclab/isaaclab/sim/spawners/spawner_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/spawner_cfg.py @@ -7,12 +7,15 @@ from collections.abc import Callable from dataclasses import MISSING +from typing import TYPE_CHECKING -from pxr import Usd - -from isaaclab.sim import schemas from isaaclab.utils import configclass +if TYPE_CHECKING: + from pxr import Usd + + from isaaclab.sim import schemas + @configclass class SpawnerCfg: @@ -66,6 +69,9 @@ class SpawnerCfg: the source prim, i.e. all USD changes to the source prim will be reflected in the cloned prims. """ + spawn_path: str | None = None + """Path where the prototype is spawned. Defaults to None.""" + @configclass class RigidObjectSpawnerCfg(SpawnerCfg): @@ -94,25 +100,3 @@ class RigidObjectSpawnerCfg(SpawnerCfg): This adds the PhysxContactReporter API to all the rigid bodies in the given prim path and its children. """ - - -@configclass -class DeformableObjectSpawnerCfg(SpawnerCfg): - """Configuration parameters for spawning a deformable asset. - - Unlike rigid objects, deformable objects are affected by forces and can deform when subjected to - external forces. This class is used to configure the properties of the deformable object. - - Deformable bodies don't have a separate collision mesh. The collision mesh is the same as the visual mesh. - The collision properties such as rest and collision offsets are specified in the :attr:`deformable_props`. - - Note: - By default, all properties are set to None. This means that no properties will be added or modified - to the prim outside of the properties available by default when spawning the prim. - """ - - mass_props: schemas.MassPropertiesCfg | None = None - """Mass properties.""" - - deformable_props: schemas.DeformableBodyPropertiesCfg | None = None - """Deformable body properties.""" diff --git a/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.py b/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.py index 4006fa1a6abc..154a1019e1c6 100644 --- a/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.py +++ b/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.py @@ -10,5 +10,6 @@ different configurations. """ -from .wrappers import spawn_multi_asset, spawn_multi_usd_file -from .wrappers_cfg import MultiAssetSpawnerCfg, MultiUsdFileCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.pyi b/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.pyi new file mode 100644 index 000000000000..67d4213aa0bb --- /dev/null +++ b/source/isaaclab/isaaclab/sim/spawners/wrappers/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_multi_asset", + "spawn_multi_usd_file", + "MultiAssetSpawnerCfg", + "MultiUsdFileCfg", +] + +from .wrappers import spawn_multi_asset, spawn_multi_usd_file +from .wrappers_cfg import MultiAssetSpawnerCfg, MultiUsdFileCfg diff --git a/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers.py b/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers.py index 64d0c4f4ab91..285dd0373063 100644 --- a/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers.py +++ b/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers.py @@ -5,12 +5,10 @@ from __future__ import annotations -import random -import re +import logging from typing import TYPE_CHECKING -import carb -from pxr import Sdf, Usd +from pxr import Usd import isaaclab.sim as sim_utils from isaaclab.sim.spawners.from_files import UsdFileCfg @@ -18,6 +16,8 @@ if TYPE_CHECKING: from . import wrappers_cfg +logger = logging.getLogger(__name__) + def spawn_multi_asset( prim_path: str, @@ -27,49 +27,38 @@ def spawn_multi_asset( clone_in_fabric: bool = False, replicate_physics: bool = False, ) -> Usd.Prim: - """Spawn multiple assets based on the provided configurations. + """Spawn multiple assets into numbered prim paths derived from the provided configuration. - This function spawns multiple assets based on the provided configurations. The assets are spawned - in the order they are provided in the list. If the :attr:`~MultiAssetSpawnerCfg.random_choice` parameter is - set to True, a random asset configuration is selected for each spawn. + Assets are created in the order they appear in ``cfg.assets_cfg`` using the base name in ``prim_path``, + which must contain ``.*`` (for example, ``/World/Env_0/asset_.*`` spawns ``asset_0``, ``asset_1``, ...). + The prefix portion of ``prim_path`` may also include ``.*`` (for example, ``/World/env_.*/asset_.*``); + in this case, assets are spawned under the first match (``env_0``) and that structure is cloned to + other matching environments by the scene's cloner. Args: prim_path: The prim path to spawn the assets. cfg: The configuration for spawning the assets. translation: The translation of the spawned assets. Default is None. - orientation: The orientation of the spawned assets in (w, x, y, z) order. Default is None. + orientation: The orientation of the spawned assets in (x, y, z, w) order. Default is None. clone_in_fabric: Whether to clone in fabric. Default is False. replicate_physics: Whether to replicate physics. Default is False. Returns: The created prim at the first prim path. """ - # get stage handle - stage = sim_utils.get_current_stage() - - # resolve: {SPAWN_NS}/AssetName - # note: this assumes that the spawn namespace already exists in the stage - root_path, asset_path = prim_path.rsplit("/", 1) - # check if input is a regex expression - # note: a valid prim path can only contain alphanumeric characters, underscores, and forward slashes - is_regex_expression = re.match(r"^[a-zA-Z0-9/_]+$", root_path) is None - - # resolve matching prims for source prim path expression - if is_regex_expression and root_path != "": - source_prim_paths = sim_utils.find_matching_prim_paths(root_path) - # if no matching prims are found, raise an error - if len(source_prim_paths) == 0: - raise RuntimeError( - f"Unable to find source prim path: '{root_path}'. Please create the prim before spawning." - ) - else: - source_prim_paths = [root_path] - - # find a free prim path to hold all the template prims - template_prim_path = sim_utils.get_next_free_prim_path("/World/Template", stage=stage) - sim_utils.create_prim(template_prim_path, "Scope", stage=stage) + split_path = prim_path.split("/") + prefix_path, base_name = "/".join(split_path[:-1]), split_path[-1] + if ".*" not in base_name: + raise ValueError( + f" The base name '{base_name}' in the prim path '{prim_path}' must contain '.*' to indicate" + " the path each individual multiple-asset to be spawned." + ) + if cfg.random_choice: + logger.warning( + "`random_choice` parameter in `spawn_multi_asset` is deprecated, and nothing will happen. " + "Use `isaaclab.scene.interactive_scene_cfg.InteractiveSceneCfg.random_heterogeneous_cloning` instead." + ) - # spawn everything first in a "Dataset" prim proto_prim_paths = list() for index, asset_cfg in enumerate(cfg.assets_cfg): # append semantic tags if specified @@ -84,8 +73,8 @@ def spawn_multi_asset( attr_value = getattr(cfg, attr_name) if hasattr(asset_cfg, attr_name) and attr_value is not None: setattr(asset_cfg, attr_name, attr_value) - # spawn single instance - proto_prim_path = f"{template_prim_path}/Asset_{index:04d}" + + proto_prim_path = f"{prefix_path}/{base_name.replace('.*', str(index))}" asset_cfg.func( proto_prim_path, asset_cfg, @@ -97,35 +86,7 @@ def spawn_multi_asset( # append to proto prim paths proto_prim_paths.append(proto_prim_path) - # resolve prim paths for spawning and cloning - prim_paths = [f"{source_prim_path}/{asset_path}" for source_prim_path in source_prim_paths] - - # manually clone prims if the source prim path is a regex expression - # note: unlike in the cloner API from Isaac Sim, we do not "reset" xforms on the copied prims. - # This is because the "spawn" calls during the creation of the proto prims already handles this operation. - with Sdf.ChangeBlock(): - for index, prim_path in enumerate(prim_paths): - # spawn single instance - env_spec = Sdf.CreatePrimInLayer(stage.GetRootLayer(), prim_path) - # randomly select an asset configuration - if cfg.random_choice: - proto_path = random.choice(proto_prim_paths) - else: - proto_path = proto_prim_paths[index % len(proto_prim_paths)] - # copy the proto prim - Sdf.CopySpec(env_spec.layer, Sdf.Path(proto_path), env_spec.layer, Sdf.Path(prim_path)) - - # delete the dataset prim after spawning - sim_utils.delete_prim(template_prim_path, stage=stage) - - # set carb setting to indicate Isaac Lab's environments that different prims have been spawned - # at varying prim paths. In this case, PhysX parser shouldn't optimize the stage parsing. - # the flag is mainly used to inform the user that they should disable `InteractiveScene.replicate_physics` - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/isaaclab/spawn/multi_assets", True) - - # return the prim - return stage.GetPrimAtPath(prim_paths[0]) + return sim_utils.find_first_matching_prim(proto_prim_paths[0]) def spawn_multi_usd_file( @@ -145,7 +106,7 @@ def spawn_multi_usd_file( prim_path: The prim path to spawn the assets. cfg: The configuration for spawning the assets. translation: The translation of the spawned assets. Default is None. - orientation: The orientation of the spawned assets in (w, x, y, z) order. Default is None. + orientation: The orientation of the spawned assets in (x, y, z, w) order. Default is None. clone_in_fabric: Whether to clone in fabric. Default is False. replicate_physics: Whether to replicate physics. Default is False. @@ -165,7 +126,7 @@ def spawn_multi_usd_file( usd_template_cfg = UsdFileCfg() for attr_name, attr_value in cfg.__dict__.items(): # skip names we know are not present - if attr_name in ["func", "usd_path", "random_choice"]: + if attr_name in ["func", "usd_path", "random_choice", "spawn_path"]: continue # set the attribute into the template setattr(usd_template_cfg, attr_name, attr_value) @@ -175,8 +136,6 @@ def spawn_multi_usd_file( for usd_path in usd_paths: usd_cfg = usd_template_cfg.replace(usd_path=usd_path) multi_asset_cfg.assets_cfg.append(usd_cfg) - # set random choice - multi_asset_cfg.random_choice = cfg.random_choice # propagate the contact sensor settings # note: the default value for activate_contact_sensors in MultiAssetSpawnerCfg is False. diff --git a/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers_cfg.py b/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers_cfg.py index 07c585f7c4c3..d9b7d9ed0c35 100644 --- a/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/wrappers/wrappers_cfg.py @@ -3,14 +3,29 @@ # # SPDX-License-Identifier: BSD-3-Clause +import warnings from dataclasses import MISSING +# deformables only supported in PhysX backend +try: + from isaaclab_physx.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg +except ImportError as e: + warnings.warn( + f"""Could not import DeformableObjectSpawnerCfg, is isaaclab_physx installed? + Safe to ignore if using newton only. Complete exception: {e}""" + ) + # import dummy class to avoid errors in type hints + from isaaclab.utils import configclass + + @configclass + class DeformableObjectSpawnerCfg: + deformable_props = None + + from isaaclab.sim.spawners.from_files import UsdFileCfg -from isaaclab.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg, RigidObjectSpawnerCfg, SpawnerCfg +from isaaclab.sim.spawners.spawner_cfg import RigidObjectSpawnerCfg, SpawnerCfg from isaaclab.utils import configclass -from . import wrappers - @configclass class MultiAssetSpawnerCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): @@ -29,16 +44,19 @@ class MultiAssetSpawnerCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): """ - func = wrappers.spawn_multi_asset + func: str = "{DIR}.wrappers:spawn_multi_asset" assets_cfg: list[SpawnerCfg] = MISSING """List of asset configurations to spawn.""" random_choice: bool = True - """Whether to randomly select an asset configuration. Default is True. + """ This parameter is ignored. + See :attr:`isaaclab.scene.interactive_scene_cfg.InteractiveSceneCfg.random_heterogeneous_cloning` for details. - If False, the asset configurations are spawned in the order they are provided in the list. - If True, a random asset configuration is selected for each spawn. + .. warning:: + + This attribute is deprecated. Use + :attr:`~isaaclab.scene.interactive_scene_cfg.InteractiveSceneCfg.random_heterogeneous_cloning` instead. """ @@ -54,7 +72,7 @@ class MultiUsdFileCfg(UsdFileCfg): """ - func = wrappers.spawn_multi_usd_file + func: str = "{DIR}.wrappers:spawn_multi_usd_file" usd_path: str | list[str] = MISSING """Path or a list of paths to the USD files to spawn asset from.""" @@ -64,4 +82,9 @@ class MultiUsdFileCfg(UsdFileCfg): If False, the asset configurations are spawned in the order they are provided in the list. If True, a random asset configuration is selected for each spawn. + + .. warning:: + + This attribute is deprecated. Use + :attr:`~isaaclab.scene.interactive_scene_cfg.InteractiveSceneCfg.random_heterogeneous_cloning` instead. """ diff --git a/source/isaaclab/isaaclab/sim/utils/__init__.py b/source/isaaclab/isaaclab/sim/utils/__init__.py index 3a85ae44c2f1..bcaeee0ee606 100644 --- a/source/isaaclab/isaaclab/sim/utils/__init__.py +++ b/source/isaaclab/isaaclab/sim/utils/__init__.py @@ -5,9 +5,6 @@ """Utilities built around USD operations.""" -from .legacy import * # noqa: F401, F403 -from .prims import * # noqa: F401, F403 -from .queries import * # noqa: F401, F403 -from .semantics import * # noqa: F401, F403 -from .stage import * # noqa: F401, F403 -from .transforms import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/utils/__init__.pyi b/source/isaaclab/isaaclab/sim/utils/__init__.pyi new file mode 100644 index 000000000000..de4ab9c93266 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/utils/__init__.pyi @@ -0,0 +1,126 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "add_reference_to_stage", + "get_stage_up_axis", + "traverse_stage", + "get_prim_at_path", + "get_prim_path", + "is_prim_path_valid", + "define_prim", + "get_prim_type_name", + "get_next_free_path", + "create_prim", + "delete_prim", + "make_uninstanceable", + "set_prim_visibility", + "safe_set_attribute_on_usd_schema", + "safe_set_attribute_on_usd_prim", + "change_prim_property", + "export_prim_to_file", + "apply_nested", + "clone", + "bind_visual_material", + "bind_physics_material", + "add_usd_reference", + "get_usd_references", + "select_usd_variants", + "get_next_free_prim_path", + "get_first_matching_ancestor_prim", + "get_first_matching_child_prim", + "get_all_matching_child_prims", + "find_first_matching_prim", + "find_matching_prims", + "find_matching_prim_paths", + "find_global_fixed_joint_prim", + "add_labels", + "get_labels", + "remove_labels", + "check_missing_labels", + "count_total_labels", + "resolve_paths", + "create_new_stage", + "is_current_stage_in_memory", + "open_stage", + "use_stage", + "update_stage", + "save_stage", + "close_stage", + "clear_stage", + "get_current_stage", + "get_current_stage_id", + "standardize_xform_ops", + "validate_standard_xform_ops", + "resolve_prim_pose", + "resolve_prim_scale", + "convert_world_pose_to_local", +] + +from .legacy import ( + add_reference_to_stage, + get_stage_up_axis, + traverse_stage, + get_prim_at_path, + get_prim_path, + is_prim_path_valid, + define_prim, + get_prim_type_name, + get_next_free_path, +) +from .prims import ( + create_prim, + delete_prim, + make_uninstanceable, + set_prim_visibility, + safe_set_attribute_on_usd_schema, + safe_set_attribute_on_usd_prim, + change_prim_property, + export_prim_to_file, + apply_nested, + clone, + bind_visual_material, + bind_physics_material, + add_usd_reference, + get_usd_references, + select_usd_variants, +) +from .queries import ( + get_next_free_prim_path, + get_first_matching_ancestor_prim, + get_first_matching_child_prim, + get_all_matching_child_prims, + find_first_matching_prim, + find_matching_prims, + find_matching_prim_paths, + find_global_fixed_joint_prim, +) +from .semantics import ( + add_labels, + get_labels, + remove_labels, + check_missing_labels, + count_total_labels, +) +from .stage import ( + resolve_paths, + create_new_stage, + is_current_stage_in_memory, + open_stage, + use_stage, + update_stage, + save_stage, + close_stage, + clear_stage, + get_current_stage, + get_current_stage_id, +) +from .transforms import ( + standardize_xform_ops, + validate_standard_xform_ops, + resolve_prim_pose, + resolve_prim_scale, + convert_world_pose_to_local, +) diff --git a/source/isaaclab/isaaclab/sim/utils/newton_model_utils.py b/source/isaaclab/isaaclab/sim/utils/newton_model_utils.py new file mode 100644 index 000000000000..d94a309e5b55 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/utils/newton_model_utils.py @@ -0,0 +1,352 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""DO NOT USE ANY FUNCTION IN THIS FILE. + +This module exists only while Isaac Lab and Isaac Sim content still relies on NVIDIA-specific MDL and OmniPBR +materials; after migration to neutral USD materials that Newton can consume directly, this module is expected +to be deprecated and removed. +""" + +from __future__ import annotations + +import logging +import os +import warnings +from typing import Any + +import numpy as np +import warp as wp + +from pxr import Usd, UsdGeom, UsdShade + +__all__ = ["replace_newton_shape_colors"] + +logger = logging.getLogger(__name__) + +# MDL OmniPBR defaults when inputs are not authored (typical MDL defaults). Keys match shader input names. +_OMNIPBR_DEFAULTS: dict[str, tuple[float, float, float]] = { + "diffuse_color_constant": (0.2, 0.2, 0.2), + "diffuse_tint": (1.0, 1.0, 1.0), +} + +# Neutral linear RGB when a shape has no material binding and no ``displayColor`` override. +_UNBOUND_DEFAULT_FALLBACK_GRAY = (0.18, 0.18, 0.18) + + +@wp.func +def _linear_channel_to_srgb_warp(c: float) -> float: + """Per-channel sRGB OETF on device: linear ``[0, 1]`` to sRGB-encoded ``[0, 1]``.""" + if c <= 0.0: + return 0.0 + if c <= 0.0031308: + return 12.92 * c + if c >= 1.0: + return 1.0 + return 1.055 * wp.pow(c, 1.0 / 2.4) - 0.055 + + +@wp.func +def _linear_rgb_to_srgb_warp(linear_rgb: wp.vec3) -> wp.vec3: + """Apply sRGB OETF per channel: linear RGB ``[0, 1]`` to sRGB-encoded ``[0, 1]``.""" + return wp.vec3( + _linear_channel_to_srgb_warp(linear_rgb[0]), + _linear_channel_to_srgb_warp(linear_rgb[1]), + _linear_channel_to_srgb_warp(linear_rgb[2]), + ) + + +@wp.kernel +def _scatter_shape_color_rows_kernel( + shape_colors: wp.array(dtype=wp.vec3), # type: ignore + row_indices: wp.array(dtype=wp.int32), # type: ignore + row_colors: wp.array(dtype=wp.vec3), # type: ignore +): + """Write per-row sRGB colors into ``shape_colors``.""" + tid = wp.tid() + index = row_indices[tid] + color = row_colors[tid] + shape_colors[index] = _linear_rgb_to_srgb_warp(color) + + +def _canonical_prim_lookup_key(prim: Usd.Prim) -> str: + """Pick a single USD path for lookup, to maximize cache hits.""" + assert prim.IsValid() + + if prim.IsInstanceProxy(): + proto = prim.GetPrimInPrototype() + if proto.IsValid(): + return proto.GetPath().pathString + + return prim.GetPath().pathString + + +def _asset_path_to_str(asset_path: Any) -> str: + """Stringify an asset path.""" + if asset_path is None: + return "" + return str(asset_path.path) if hasattr(asset_path, "path") else str(asset_path) + + +def _is_omnipbr_shader(shader_prim: Usd.Prim) -> bool: + """Return True if the shader prim references the OmniPBR MDL module (MDL-in-USD metadata).""" + if shader_prim.IsValid(): + attr = shader_prim.GetAttribute("info:mdl:sourceAsset") + if attr and attr.HasAuthoredValue() and _asset_path_to_str(attr.Get()).endswith("OmniPBR.mdl"): + return True + + attr = shader_prim.GetAttribute("info:mdl:sourceAsset:subIdentifier") + if attr and attr.HasAuthoredValue() and str(attr.Get()) == "OmniPBR": + return True + + return False + + +def _get_bound_material_prim(shape_prim: Usd.Prim) -> Usd.Prim: + """Resolve the effective bound *visual* material path for a geometry prim. + + This uses :meth:`UsdShade.MaterialBindingAPI.ComputeBoundMaterial` so inherited bindings and + binding-strength semantics (e.g. ``strongerThanDescendants``) are handled correctly. + """ + if shape_prim.IsValid(): + material, _ = UsdShade.MaterialBindingAPI(shape_prim).ComputeBoundMaterial() + if material: + material_prim = material.GetPrim() + if material_prim.IsValid(): + return material_prim + + return Usd.Prim() + + +def _get_input_value(shader: UsdShade.Shader, name: str) -> tuple[float, float, float] | None: + """Fetch the effective input value from a shader, following connections.""" + inp = shader.GetInput(name) + if inp is not None: + attrs = UsdShade.Utils.GetValueProducingAttributes(inp) + if attrs and len(attrs) > 0: + value = attrs[0].Get() + if value is not None: + return _coerce_color(value) + + return None + + +def _get_surface_shader(material_prim: Usd.Prim) -> Usd.Prim: + """Get the surface shader from a material.""" + material = UsdShade.Material(material_prim) + surface_output = material.GetSurfaceOutput() + if not surface_output: + surface_output = material.GetOutput("surface") + if not surface_output: + surface_output = material.GetOutput("mdl:surface") + + shader_prim = Usd.Prim() + + if surface_output: + connected_source = surface_output.GetConnectedSource() + if connected_source: + shader_prim = connected_source[0].GetPrim() + + if not shader_prim.IsValid(): + for child in material_prim.GetChildren(): + if child.IsA(UsdShade.Shader): + shader_prim = child + break + + return shader_prim + + +def _get_omnipbr_input(shader: UsdShade.Shader, input_name: str) -> tuple[float, float, float]: + """Return authored linear RGB for ``input_name`` if it exists, else the MDL OmniPBR default.""" + value = _get_input_value(shader, input_name) + return value or _OMNIPBR_DEFAULTS[input_name] + + +def _get_omnipbr_albedo(shader_prim: Usd.Prim) -> tuple[float, float, float]: + """Return diffuse albedo as linear RGB (``diffuse_color_constant`` × ``diffuse_tint``).""" + surface_shader = UsdShade.Shader(shader_prim) + c0, c1, c2 = _get_omnipbr_input(surface_shader, "diffuse_color_constant") + t0, t1, t2 = _get_omnipbr_input(surface_shader, "diffuse_tint") + return (c0 * t0, c1 * t1, c2 * t2) + + +def _coerce_color(value: Any) -> tuple[float, float, float] | None: + """Coerce a value to an RGB color tuple, or None if not possible.""" + if value is None: + return None + color_np = np.array(value, dtype=np.float32).reshape(-1) + if color_np.size >= 3: + return (float(color_np[0]), float(color_np[1]), float(color_np[2])) + return None + + +def _get_primvar_display_color(shape_prim: Usd.Prim) -> tuple[float, float, float] | None: + """Get authored ``primvars:displayColor`` from a shape prim as linear RGB.""" + primvars_api = UsdGeom.PrimvarsAPI(shape_prim) + if not primvars_api.HasPrimvar("displayColor"): + return None + + primvar = primvars_api.GetPrimvar("displayColor") + if primvar is None: + return None + + return _coerce_color(primvar.Get()) + + +def _resolve_shape_color( + stage: Usd.Stage, + prim_path: str, + material_color_cache: dict[str, tuple[float, float, float] | None], +) -> tuple[float, float, float] | None: + """Resolve replacement linear RGB for one prim path (sRGB encoding is applied in the scatter kernel). + + Returns: + Linear RGB to pass to :func:`_scatter_shape_color_rows_kernel`, or ``None`` to leave the row unchanged. + """ + shape_prim = stage.GetPrimAtPath(prim_path) + if not shape_prim.IsValid(): + return None + + # Newton's random color palette is designed for guide shapes so we keep them unchanged. + imageable = UsdGeom.Imageable(shape_prim) + if bool(imageable) and imageable.ComputePurpose() == UsdGeom.Tokens.guide: + return None + + material_prim = _get_bound_material_prim(shape_prim) + if not material_prim.IsValid(): + display_color = _get_primvar_display_color(shape_prim) + return display_color or _UNBOUND_DEFAULT_FALLBACK_GRAY + + material_key = _canonical_prim_lookup_key(material_prim) + if material_key in material_color_cache: + return material_color_cache[material_key] + + # We only overwrite color if the material is OmniPBR. Otherwise, we leave the existing color unchanged. + shader_prim = _get_surface_shader(material_prim) + material_color = _get_omnipbr_albedo(shader_prim) if _is_omnipbr_shader(shader_prim) else None + + material_color_cache[material_key] = material_color + return material_color + + +def replace_newton_shape_colors(model: Any, stage: Usd.Stage | None = None) -> int: + """Align Newton visualization colors with the USD stage. + + Newton assigns a per-shape palette to ``shape_color``. This overwrites those rows so rendering matches authored USD + data where supported: + + - **No bound material**: use authored ``primvars:displayColor`` (treated as linear RGB), or a neutral 18% linear + gray if ``displayColor`` is not authored; linear values are encoded to sRGB in the scatter kernel. + - **OmniPBR**: use ``diffuse_color_constant`` times ``diffuse_tint`` (linear RGB, with MDL defaults when inputs are + not authored), encoded to sRGB in the scatter kernel. + - **Other materials**: leave the existing Newton color for that shape. + - **Guide purpose** prims (``UsdGeom.Tokens.guide``): leave unchanged so guide visualization stays on the palette. + + Args: + model: Object with ``shape_label`` (``list`` of USD prim paths) and ``shape_color`` (``wp.array`` of + ``wp.vec3``), typically a finalized Newton model. + stage: USD stage to read from. If ``None``, uses :func:`~isaaclab.sim.utils.stage.get_current_stage`. + + Returns: + Number of shapes that had their colors replaced. + + Note: + Set ``ISAACLAB_REPLACE_NEWTON_SHAPE_COLORS`` to ``0``, ``false``, ``off``, or ``no`` to skip this pass + entirely (returns ``0``). + + This pass exists only while Isaac Lab and Isaac Sim content still relies on NVIDIA-specific MDL and OmniPBR + materials; after migration to neutral USD materials that Newton can consume directly, this path is expected to + be deprecated and removed. + + Wall time for USD resolution and the GPU scatter is measured with :class:`~isaaclab.utils.timer.Timer`, which + may print a timing summary when the timer is enabled. + """ + env_val = os.getenv("ISAACLAB_REPLACE_NEWTON_SHAPE_COLORS") + if env_val is not None and env_val.strip().lower() in ["false", "0", "off", "no"]: + logger.debug("Newton shape color replacement is disabled") + return 0 + + warnings.warn( + "Newton shape color replacement is enabled; this workaround will be deprecated in a future release.", + FutureWarning, + stacklevel=2, + ) + + # Use duck typing to avoid introducing hard dependencies on newton. + shape_labels = getattr(model, "shape_label", None) + shape_colors = getattr(model, "shape_color", None) + + if not isinstance(shape_labels, list): + logger.debug("shape_label must be a list, got %s", type(shape_labels)) + return 0 + + if not isinstance(shape_colors, wp.array): + logger.debug("shape_color must be a Warp array, got %s", type(shape_colors)) + return 0 + + num_shapes = len(shape_labels) + if num_shapes == 0: + logger.debug("Found empty list of shape labels") + return 0 + + if num_shapes != len(shape_colors): + logger.debug("Mismatching length of shape_labels and shape_colors: %d != %d", num_shapes, len(shape_colors)) + return 0 + + from isaaclab.utils.timer import Timer + + num_color_updates = 0 + + with Timer(f"[INFO]: Time taken for replace_newton_shape_colors for {num_shapes} shapes", enable=False): + if stage is None: + from .stage import get_current_stage + + stage = get_current_stage() + + shape_keys: list[str] = [] + + for label in shape_labels: + prim = stage.GetPrimAtPath(label) + shape_keys.append(_canonical_prim_lookup_key(prim) if prim.IsValid() else label) + + # shape_keys must stay the same length as shape labels, to guarantee the correctness of + # shape indices that will be used in the scatter kernel. + assert num_shapes == len(shape_keys) + + resolved_color_cache: dict[str, tuple[float, float, float] | None] = {} + material_color_cache: dict[str, tuple[float, float, float] | None] = {} + + unique_keys = dict.fromkeys(shape_keys) + for key in unique_keys: + color = _resolve_shape_color(stage, key, material_color_cache) + resolved_color_cache[key] = color + + # Prepare the indices and colors for the scatter kernel: + # - Indices point to the slots in the shape_colors array that should be updated + # - Colors are the new values to write into the slots + indices_np = np.empty(num_shapes, dtype=np.int32) + colors_np = np.empty((num_shapes, 3), dtype=np.float32) + + for i, shape_key in enumerate(shape_keys): + if rgb := resolved_color_cache.get(shape_key): + indices_np[num_color_updates] = i + colors_np[num_color_updates] = rgb + num_color_updates += 1 + + # If there are any color updates, launch the scatter kernel to update the shape_colors array. + if num_color_updates != 0: + indices_wp = wp.from_numpy(indices_np[:num_color_updates], dtype=wp.int32, device=shape_colors.device) + colors_wp = wp.from_numpy(colors_np[:num_color_updates], dtype=wp.vec3, device=shape_colors.device) + + wp.launch( + kernel=_scatter_shape_color_rows_kernel, + dim=num_color_updates, + inputs=[shape_colors, indices_wp, colors_wp], + device=shape_colors.device, + ) + + logger.debug("Replaced colors for %d / %d shapes", num_color_updates, num_shapes) + + return num_color_updates diff --git a/source/isaaclab/isaaclab/sim/utils/prims.py b/source/isaaclab/isaaclab/sim/utils/prims.py index 6fb7850a5b5d..d54ca890d2b7 100644 --- a/source/isaaclab/isaaclab/sim/utils/prims.py +++ b/source/isaaclab/isaaclab/sim/utils/prims.py @@ -16,17 +16,15 @@ import torch -import omni.kit.commands -import omni.usd -from isaacsim.core.cloner import Cloner -from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, UsdShade, UsdUtils +from pxr import Sdf, Usd, UsdGeom, UsdPhysics, UsdShade, UsdUtils +from isaaclab.utils.assets import check_file_path, retrieve_file_path from isaaclab.utils.string import to_camel_case -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.version import has_kit from .queries import find_matching_prim_paths from .semantics import add_labels -from .stage import get_current_stage, get_current_stage_id +from .stage import get_current_stage, resolve_paths from .transforms import convert_world_pose_to_local, standardize_xform_ops if TYPE_CHECKING: @@ -88,7 +86,7 @@ def create_prim( any coordinate transformation. Cannot be used with ``position``. Defaults to None, in which case no translation is applied. orientation: - Prim rotation as a quaternion (w, x, y, z). When used with ``position``, the + Prim rotation as a quaternion (x, y, z, w). When used with ``position``, the orientation is also converted from world space to local space. When used with ``translation``, it is applied directly as local orientation. Defaults to None. scale: @@ -215,52 +213,12 @@ def delete_prim(prim_path: str | Sequence[str], stage: Usd.Stage | None = None) stage_id = stage_cache.GetId(stage).ToLongInt() if stage_id < 0: stage_id = stage_cache.Insert(stage).ToLongInt() - # delete prims - success, _ = omni.kit.commands.execute( - "DeletePrimsCommand", - paths=prim_path, - stage=stage, - ) - return success - - -def move_prim(path_from: str, path_to: str, keep_world_transform: bool = True, stage: Usd.Stage | None = None) -> bool: - """Moves a prim from one path to another within a USD stage. - - This function moves the prim from the source path to the destination path. If the :attr:`keep_world_transform` - is set to True, the world transform of the prim is kept. This implies that the prim's local transform is reset - such that the prim's world transform is the same as the source path's world transform. If it is set to False, - the prim's local transform is preserved. - - .. warning:: - Reparenting or moving prims in USD is an expensive operation that may trigger - significant recomposition costs, especially in large or deeply layered stages. - Args: - path_from: Path of the USD Prim you wish to move - path_to: Final destination of the prim - keep_world_transform: Whether to keep the world transform of the prim. Defaults to True. - stage: The stage to move the prim in. Defaults to None, in which case the current stage is used. - - Returns: - True if the prim was moved successfully, False otherwise. - - Example: - >>> import isaaclab.sim as sim_utils - >>> - >>> # given the stage: /World/Cube. Move the prim Cube outside the prim World - >>> sim_utils.move_prim("/World/Cube", "/Cube") - """ - # get stage handle - stage = get_current_stage() if stage is None else stage - # move prim - success, _ = omni.kit.commands.execute( - "MovePrimCommand", - path_from=path_from, - path_to=path_to, - keep_world_transform=keep_world_transform, - stage_or_context=stage, - ) + # delete prims via the Sdf API directly + success = True + for path in prim_path: + if not stage.RemovePrim(path): + success = False return success @@ -400,6 +358,8 @@ def safe_set_attribute_on_usd_prim(prim: Usd.Prim, attr_name: str, value: Any, c sdf_type = Sdf.ValueTypeNames.Int elif isinstance(value, float): sdf_type = Sdf.ValueTypeNames.Float + elif isinstance(value, str): + sdf_type = Sdf.ValueTypeNames.String elif isinstance(value, (tuple, list)) and len(value) == 3 and any(isinstance(v, float) for v in value): sdf_type = Sdf.ValueTypeNames.Float3 elif isinstance(value, (tuple, list)) and len(value) == 2 and any(isinstance(v, float) for v in value): @@ -573,8 +533,8 @@ def export_prim_to_file( Sdf.CopySpec(source_layer, source_prim_path, target_layer, target_prim_path) # set the default prim target_layer.defaultPrim = Sdf.Path(target_prim_path).name - # resolve all paths relative to layer path - omni.usd.resolve_paths(source_layer.identifier, target_layer.identifier) + # resolve paths so asset references remain valid from the new location + resolve_paths(source_layer.identifier, target_layer.identifier) # save the stage target_layer.Save() @@ -709,10 +669,23 @@ def wrapper(prim_path: str | Sdf.Path, cfg: SpawnerCfg, *args, **kwargs): else: source_prim_paths = [root_path] - # resolve prim paths for spawning and cloning - prim_paths = [f"{source_prim_path}/{asset_path}" for source_prim_path in source_prim_paths] + # Build a prototype prim path to spawn once, then copy to ALL matching parents. + # + # Octi: Leaf note wild card and root not wild card should be treated differently: + # (A) ".*" in root_path e.g. /World/Origin_0.*/CameraSensor + # source_prim_paths holds ALL matching parent prims already in the stage. + # We spawn the child once at source_prim_paths[0] as the prototype, then + # Sdf.CopySpec it to every remaining parent so every parent ends up with + # the child prim. + # + # (B) ".*" in asset_path only e.g. /World/template/Object/proto_asset_.* + # No matching prims exist yet; source_prim_paths == [root_path] (one entry). + # Replacing ".*" → "0" in asset_path gives the intended name proto_asset_0. + # No copy step runs because there is only one parent. + # + prim_spawn_path = f"{source_prim_paths[0]}/{asset_path.replace('.*', '0')}" # spawn single instance - prim = func(prim_paths[0], cfg, *args, **kwargs) + prim = func(prim_spawn_path, cfg, *args, **kwargs) # set the prim visibility if hasattr(cfg, "visible"): imageable = UsdGeom.Imageable(prim) @@ -722,7 +695,6 @@ def wrapper(prim_path: str | Sdf.Path, cfg: SpawnerCfg, *args, **kwargs): imageable.MakeInvisible() # set the semantic annotations if hasattr(cfg, "semantic_tags") and cfg.semantic_tags is not None: - # note: taken from replicator scripts.utils.utils.py for semantic_type, semantic_value in cfg.semantic_tags: # deal with spaces by replacing them with underscores semantic_type_sanitized = semantic_type.replace(" ", "_") @@ -732,30 +704,19 @@ def wrapper(prim_path: str | Sdf.Path, cfg: SpawnerCfg, *args, **kwargs): prim, labels=[semantic_value_sanitized], instance_name=semantic_type_sanitized, overwrite=False ) # activate rigid body contact sensors (lazy import to avoid circular import with schemas) - if hasattr(cfg, "activate_contact_sensors") and cfg.activate_contact_sensors: # type: ignore + if hasattr(cfg, "activate_contact_sensors") and cfg.activate_contact_sensors: from ..schemas import schemas as _schemas - _schemas.activate_contact_sensors(prim_paths[0]) + _schemas.activate_contact_sensors(prim_spawn_path) # clone asset using cloner API - if len(prim_paths) > 1: - cloner = Cloner(stage=stage) - # check version of Isaac Sim to determine whether clone_in_fabric is valid - if get_isaac_sim_version().major < 5: - # clone the prim - cloner.clone( - prim_paths[0], prim_paths[1:], replicate_physics=False, copy_from_source=cfg.copy_from_source - ) - else: - # clone the prim - clone_in_fabric = kwargs.get("clone_in_fabric", False) - replicate_physics = kwargs.get("replicate_physics", False) - cloner.clone( - prim_paths[0], - prim_paths[1:], - replicate_physics=replicate_physics, - copy_from_source=cfg.copy_from_source, - clone_in_fabric=clone_in_fabric, - ) + if len(source_prim_paths) > 1: + sanitized_asset = asset_path.replace(".*", "0") + rl = stage.GetRootLayer() + with Sdf.ChangeBlock(): + for src_parent in source_prim_paths[1:]: + dest_path = f"{src_parent}/{sanitized_asset}" + Sdf.CreatePrimInLayer(rl, dest_path) + Sdf.CopySpec(rl, Sdf.Path(prim_spawn_path), rl, Sdf.Path(dest_path)) # return the source prim return prim @@ -795,6 +756,8 @@ def bind_visual_material( Raises: ValueError: If the provided prim paths do not exist on stage. """ + if not has_kit(): + return None # get stage handle if stage is None: stage = get_current_stage() @@ -813,6 +776,8 @@ def bind_visual_material( binding_strength = "weakerThanDescendants" # obtain material binding API # note: we prefer using the command here as it is more robust than the USD API + import omni.kit.commands + success, _ = omni.kit.commands.execute( "BindMaterialCommand", prim_path=prim_path, @@ -866,10 +831,11 @@ def bind_physics_material( # get USD prim prim = stage.GetPrimAtPath(prim_path) # check if prim has collision applied on it - has_physics_scene_api = prim.HasAPI(PhysxSchema.PhysxSceneAPI) + applied = prim.GetAppliedSchemas() + has_physics_scene_api = "PhysxSceneAPI" in applied has_collider = prim.HasAPI(UsdPhysics.CollisionAPI) - has_deformable_body = prim.HasAPI(PhysxSchema.PhysxDeformableBodyAPI) - has_particle_system = prim.IsA(PhysxSchema.PhysxParticleSystem) + has_deformable_body = "OmniPhysicsDeformableBodyAPI" in applied + has_particle_system = prim.GetTypeName() == "PhysxParticleSystem" if not (has_physics_scene_api or has_collider or has_deformable_body or has_particle_system): logger.debug( f"Cannot apply physics material '{material_path}' on prim '{prim_path}'. It is neither a" @@ -925,6 +891,16 @@ def add_usd_reference( Raises: FileNotFoundError: When the input USD file is not found at the specified path. """ + # resolve remote USD paths to local (same as Newton / add_reference_to_stage) + file_status = check_file_path(usd_path) + if file_status == 0: + raise FileNotFoundError(f"Unable to open the usd file at path: {usd_path}") + if file_status == 2: + try: + usd_path = retrieve_file_path(usd_path, force_download=False) + except Exception as e: + raise FileNotFoundError(f"Failed to retrieve USD file from {usd_path}") from e + # get current stage stage = get_current_stage() if stage is None else stage # get prim at path @@ -941,32 +917,6 @@ def _add_reference_to_prim(prim: Usd.Prim) -> Usd.Prim: ) return prim - # Compatibility with Isaac Sim 4.5 where omni.metrics is not available - if get_isaac_sim_version().major < 5: - return _add_reference_to_prim(prim) - - # check if the USD file is valid and add reference to the prim - sdf_layer = Sdf.Layer.FindOrOpen(usd_path) - if not sdf_layer: - raise FileNotFoundError(f"Unable to open the usd file at path: {usd_path}") - - # import metrics assembler interface - # note: this is only available in Isaac Sim 5.0 and above - from omni.metrics.assembler.core import get_metrics_assembler_interface - - # obtain the stage ID - stage_id = get_current_stage_id() - # check if the layers are compatible (i.e. the same units) - ret_val = get_metrics_assembler_interface().check_layers( - stage.GetRootLayer().identifier, sdf_layer.identifier, stage_id - ) - # log that metric assembler did not detect any issues - if ret_val["ret_val"]: - logger.info( - "Metric assembler detected no issues between the current stage and the referenced USD file at path:" - f" {usd_path}" - ) - # add reference to the prim return _add_reference_to_prim(prim) diff --git a/source/isaaclab/isaaclab/sim/utils/queries.py b/source/isaaclab/isaaclab/sim/utils/queries.py index 035681a726b4..1929e70941c7 100644 --- a/source/isaaclab/isaaclab/sim/utils/queries.py +++ b/source/isaaclab/isaaclab/sim/utils/queries.py @@ -11,8 +11,6 @@ import re from collections.abc import Callable -import omni -import omni.kit.app from pxr import Sdf, Usd, UsdPhysics from .stage import get_current_stage @@ -34,6 +32,9 @@ def get_next_free_prim_path(path: str, stage: Usd.Stage | None = None) -> str: Returns: A new path that is guaranteed to not exist on the current stage + Raises: + ValueError: If the path is not a valid prim path string. + Example: >>> import isaaclab.sim as sim_utils >>> @@ -44,8 +45,36 @@ def get_next_free_prim_path(path: str, stage: Usd.Stage | None = None) -> str: """ # get current stage stage = get_current_stage() if stage is None else stage - # get next free path - return omni.usd.get_stage_next_free_path(stage, path, True) + + # validate and convert path + if not Sdf.Path.IsValidPathString(path): + raise ValueError(f"'{path}' is not a valid prim path") + sdf_path = Sdf.Path(path) + + # ensure path is absolute + corrected_path = sdf_path.MakeAbsolutePath(Sdf.Path.absoluteRootPath) + if sdf_path != corrected_path: + logger.warning(f"Path '{sdf_path}' auto-corrected to '{corrected_path}'.") + sdf_path = corrected_path + + # prepend default prim if needed + if stage.HasDefaultPrim(): + default_prim = stage.GetDefaultPrim() + if default_prim and not (sdf_path.HasPrefix(default_prim.GetPath()) and sdf_path != default_prim.GetPath()): + sdf_path = sdf_path.ReplacePrefix(Sdf.Path.absoluteRootPath, default_prim.GetPath()) + + def _increment_path(path_str: str) -> str: + match = re.search(r"_(\d+)$", path_str) + if match: + new_num = int(match.group(1)) + 1 + return re.sub(r"_(\d+)$", f"_{new_num:02d}", path_str) + return path_str + "_01" + + path_string = sdf_path.pathString + while stage.GetPrimAtPath(path_string).IsValid(): + path_string = _increment_path(path_string) + + return path_string def get_first_matching_ancestor_prim( diff --git a/source/isaaclab/isaaclab/sim/utils/semantics.py b/source/isaaclab/isaaclab/sim/utils/semantics.py index 463eb8b5d42f..054c68e4e991 100644 --- a/source/isaaclab/isaaclab/sim/utils/semantics.py +++ b/source/isaaclab/isaaclab/sim/utils/semantics.py @@ -7,32 +7,17 @@ from __future__ import annotations -import contextlib import logging -from pxr import Usd, UsdGeom - -# USD Semantics is only available in Isaac Sim 5.0 and later. -with contextlib.suppress(ModuleNotFoundError, ImportError): - from pxr import UsdSemantics - -from isaaclab.utils.version import get_isaac_sim_version +from pxr import Usd, UsdGeom, UsdSemantics from .stage import get_current_stage -# import logger logger = logging.getLogger(__name__) def add_labels(prim: Usd.Prim, labels: list[str], instance_name: str = "class", overwrite: bool = True) -> None: - """Apply semantic labels to a prim using the :class:`UsdSemantics.LabelsAPI`. - - This function is a wrapper around the :func:`omni.replicator.core.functional.modify.semantics` function. - It applies the labels to the prim using the :class:`UsdSemantics.LabelsAPI`. - - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later, which introduces the :class:`UsdSemantics.LabelsAPI`. - For previous versions, the function falls back to use the deprecated :class:`UsdSemantics.SemanticsAPI` instead. + """Apply semantic labels to a prim using :class:`UsdSemantics.LabelsAPI`. Example: >>> prim = sim_utils.create_prim("/World/Test/Sphere", "Sphere", stage=stage, attributes={"radius": 10.0}) @@ -45,53 +30,22 @@ def add_labels(prim: Usd.Prim, labels: list[str], instance_name: str = "class", overwrite: Whether to overwrite existing labels for this instance. If False, the new labels are appended to existing ones (if any). Defaults to True. """ - # Try modern approach (Isaac Sim >= 5.0) - try: - import omni.replicator.core.functional as rep_functional - - mode = "replace" if overwrite else "add" - rep_functional.modify.semantics(prim, {instance_name: labels}, mode=mode) - - return - except (ModuleNotFoundError, ImportError) as e: - # check if we are using isaac sim 5.0 - if get_isaac_sim_version().major >= 5: - logger.warning( - f"Failed to add labels to prim {prim.GetPath()} using Replicator API: {e}. " - "\nPlease ensure Replicator API is enabled by passing '--enable_cameras' to the AppLauncher." - "\nFalling back to legacy approach." - ) - - # Try legacy approach (Isaac Sim < 5.0) - try: - import Semantics - - # check we have only one label - if len(labels) != 1: - raise ValueError(f"Only one label can be applied to a prim. Received: {labels}") - # set the semantic API for the instance - instance_name = f"{instance_name}_{labels[0]}" - sem = Semantics.SemanticsAPI.Apply(prim, instance_name) - # create semantic type and data attributes - sem.CreateSemanticTypeAttr() - sem.CreateSemanticDataAttr() - sem.GetSemanticTypeAttr().Set(instance_name) - sem.GetSemanticDataAttr().Set(labels[0]) - except Exception as e: - logger.warning( - f"Failed to add labels to prim {prim.GetPath()} using legacy API: {e}. " - "\nSemantics functionality may not be available in this Isaac Sim version." - " Please open an issue at https://github.com/isaac-sim/IsaacLab/issues if you believe this is a bug." - ) + labels_api = UsdSemantics.LabelsAPI.Apply(prim, instance_name) + labels_attr = labels_api.CreateLabelsAttr() + if overwrite: + labels_attr.Set(labels) + else: + existing = labels_attr.Get() + if existing: + combined = list(existing) + [lbl for lbl in labels if lbl not in existing] + labels_attr.Set(combined) + else: + labels_attr.Set(labels) def get_labels(prim: Usd.Prim) -> dict[str, list[str]]: """Get all semantic labels (:class:`UsdSemantics.LabelsAPI`) applied to a prim. - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For previous versions, - please use :mod:`isaacsim.core.utils.semantics` module instead. - Args: prim: The USD prim to return labels for. @@ -116,10 +70,6 @@ def get_labels(prim: Usd.Prim) -> dict[str, list[str]]: def remove_labels(prim: Usd.Prim, instance_name: str | None = None, include_descendants: bool = False): """Removes semantic labels (:class:`UsdSemantics.LabelsAPI`) from a prim and optionally its descendants. - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For previous versions, - please use :mod:`isaacsim.core.utils.semantics` module instead. - Args: prim: The USD prim to remove labels from. instance_name: The specific instance name to remove. Defaults to None, in which case @@ -154,10 +104,6 @@ def check_missing_labels(prim_path: str | None = None, stage: Usd.Stage | None = .. note:: The function checks only prims that are :class:`UsdGeom.Gprim` type. - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For previous versions, - please use :mod:`isaacsim.core.utils.semantics` module instead. - Args: prim_path: The prim path to search from. If None, the entire stage is inspected. stage: The stage to search from. If None, the current stage is used. @@ -197,10 +143,6 @@ def count_total_labels(prim_path: str | None = None, stage: Usd.Stage | None = N This function iterates over all the prims from the provided path and counts the number of times each label is applied to the prims. It returns a dictionary of labels and their corresponding count. - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For previous versions, - please use :mod:`isaacsim.core.utils.semantics` module instead. - Args: prim_path: The prim path to search from. If None, the entire stage is inspected. stage: The stage to search from. If None, the current stage is used. diff --git a/source/isaaclab/isaaclab/sim/utils/stage.py b/source/isaaclab/isaaclab/sim/utils/stage.py index 88c61744e7db..3b2f5eb15a9e 100644 --- a/source/isaaclab/isaaclab/sim/utils/stage.py +++ b/source/isaaclab/isaaclab/sim/utils/stage.py @@ -5,127 +5,217 @@ """Utilities for operating on the USD stage.""" -import builtins +from __future__ import annotations + import contextlib import logging +import os import threading from collections.abc import Callable, Generator -import omni.kit.app -import omni.usd -from isaacsim.core.utils import stage as sim_stage from pxr import Sdf, Usd, UsdUtils -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.version import get_isaac_sim_version, has_kit # import logger logger = logging.getLogger(__name__) _context = threading.local() # thread-local storage to handle nested contexts and concurrent access -# _context is a singleton design in isaacsim and for that reason -# until we fully replace all modules that references the singleton(such as XformPrim, Prim ....), we have to point -# that singleton to this _context -sim_stage._context = _context # type: ignore +# Kit-dependent imports (only available when running with Kit/Isaac Sim) +if has_kit(): + import omni.kit.app -def create_new_stage() -> Usd.Stage: - """Create a new stage attached to the USD context. +def _check_ancestral(prim: Usd.Prim) -> bool: + """Check if a prim is brought into composition by its ancestor (an ancestral prim). + + This is a pure USD implementation of ``omni.usd.check_ancestral``. + + An ancestral prim is one that exists due to a reference, payload, or other composition arc + on an ancestor prim. Such prims cannot be directly deleted because they are "opinions" from + the referenced asset, not locally authored prims. + + Args: + prim: The USD prim to check. Returns: - Usd.Stage: The created USD stage. + True if the prim is an ancestral prim, False otherwise. + """ + if not prim or not prim.IsValid(): + return False + + def _check_ancestral_node(node) -> bool: + """Recursively check if any composition node is due to an ancestor.""" + if node.IsDueToAncestor(): + return True + return any(_check_ancestral_node(child) for child in node.children) + + prim_index = prim.GetPrimIndex() + if not prim_index: + return False + + return _check_ancestral_node(prim_index.rootNode) - Raises: - RuntimeError: When failed to create a new stage. + +def resolve_paths( + src_layer_identifier: str, + dst_layer_identifier: str, + store_relative_path: bool = True, +) -> None: + """Resolve external asset paths in a destination layer relative to a source layer. + + When content is copied from one USD layer to another (e.g., via ``Sdf.CopySpec`` or + ``layer.TransferContent``), relative asset paths that were valid from the source + layer's location may become invalid from the destination layer's location. This + function recalculates those paths. + + This uses USD's built-in ``UsdUtils.ModifyAssetPaths`` to update all external references + (sublayers, references, payloads, asset paths) in the destination layer. + + Args: + src_layer_identifier: The identifier (path) of the source layer. + dst_layer_identifier: The identifier (path) of the destination layer. + store_relative_path: Whether to store paths as relative. Defaults to True. Example: + >>> from pxr import Sdf >>> import isaaclab.sim as sim_utils >>> - >>> sim_utils.create_new_stage() - Usd.Stage.Open(rootLayer=Sdf.Find('anon:0x7fba6c04f840:World7.usd'), - sessionLayer=Sdf.Find('anon:0x7fba6c01c5c0:World7-session.usda'), - pathResolverContext=) + >>> # After copying content to a new layer + >>> source_layer = stage.GetRootLayer() + >>> target_layer = Sdf.Layer.CreateNew("/path/to/output.usd") + >>> target_layer.TransferContent(source_layer) + >>> sim_utils.resolve_paths(source_layer.identifier, target_layer.identifier) + >>> target_layer.Save() """ - result = omni.usd.get_context().new_stage() - if result: - return omni.usd.get_context().get_stage() - else: - raise RuntimeError("Failed to create a new stage. Please check if the USD context is valid.") + src_layer = Sdf.Layer.FindOrOpen(src_layer_identifier) + dst_layer = Sdf.Layer.FindOrOpen(dst_layer_identifier) + if not src_layer: + logger.warning(f"Source layer not found: {src_layer_identifier}") + return + if not dst_layer: + logger.warning(f"Destination layer not found: {dst_layer_identifier}") + return -def create_new_stage_in_memory() -> Usd.Stage: - """Creates a new stage in memory, if supported. + dst_dir = os.path.dirname(dst_layer.realPath or dst_layer.identifier) + + def _modify_path(asset_path: str) -> str: + if not asset_path: + return asset_path + resolved = src_layer.ComputeAbsolutePath(asset_path) + if store_relative_path and resolved and dst_dir: + try: + return os.path.relpath(resolved, dst_dir) + except ValueError: + return resolved + return resolved or asset_path + + UsdUtils.ModifyAssetPaths(dst_layer, _modify_path) + + +# ############################################################################## +# Public API +# ############################################################################## + + +try: + # _context is a singleton design in isaacsim and for that reason + # until we fully replace all modules that references the singleton(such as XformPrim, Prim ....), we have to point + # that singleton to this _context + from isaacsim.core.experimental.utils import stage as sim_stage + + sim_stage._context = _context # type: ignore +except ImportError: + pass - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For backwards - compatibility, it falls back to creating a new stage attached to the USD context. + +def create_new_stage() -> Usd.Stage: + """Create a new in-memory USD stage. + + Creates a new stage using pure USD (``Usd.Stage.CreateInMemory()``). + + If Kit is running and Kit extensions need to discover this stage (e.g. + PhysX, ``isaacsim.core.experimental.prims.Articulation``), call + :func:`attach_stage_to_usd_context` after scene setup. Returns: - The new stage in memory. + Usd.Stage: The created USD stage. Example: >>> import isaaclab.sim as sim_utils >>> - >>> sim_utils.create_new_stage_in_memory() - Usd.Stage.Open(rootLayer=Sdf.Find('anon:0xf7b00e0:tmp.usda'), - sessionLayer=Sdf.Find('anon:0xf7cd2e0:tmp-session.usda'), + >>> sim_utils.create_new_stage() + Usd.Stage.Open(rootLayer=Sdf.Find('anon:0x7fba6c04f840:World7.usd'), + sessionLayer=Sdf.Find('anon:0x7fba6c01c5c0:World7-session.usda'), pathResolverContext=) """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Isaac Sim < 5.0 does not support creating a new stage in memory. Falling back to creating a new" - " stage attached to USD context." - ) - return create_new_stage() - else: - return Usd.Stage.CreateInMemory() + stage: Usd.Stage = Usd.Stage.CreateInMemory() + _context.stage = stage + UsdUtils.StageCache.Get().Insert(stage) + return stage def is_current_stage_in_memory() -> bool: - """Checks if the current stage is in memory. + """Checks if the current stage is NOT attached to the USD context. + + This function compares the current stage (from :func:`get_current_stage`) with + the stage attached to Kit's ``omni.usd`` context. If they are different, + the current stage is considered "in memory" - meaning it's not the stage + that the viewport/UI displays. - This function compares the stage id of the current USD stage with the stage id of the USD context stage. + This is useful for determining if we're working with a separate in-memory + stage created via :func:`create_new_stage_in_memory` with + ``SimulationCfg(create_stage_in_memory=True)``. + + In kitless mode (no USD context), this always returns True. Returns: - Whether the current stage is in memory. + True if the current stage is different from (not attached to) the context stage. + Also returns True if there is no context stage at all. """ - # grab current stage id - stage_id = get_current_stage_id() + if not has_kit(): + return True + + import omni.usd + + context = omni.usd.get_context() + if context is None: + return True - # grab context stage id - context_stage = omni.usd.get_context().get_stage() - with use_stage(context_stage): - context_stage_id = get_current_stage_id() + context_stage = context.get_stage() + if context_stage is None: + return True - # check if stage ids are the same - return stage_id != context_stage_id + return get_current_stage() is not context_stage -def open_stage(usd_path: str) -> bool: - """Open the given usd file and replace currently opened stage. +def open_stage(usd_path: str) -> Usd.Stage: + """Open the given USD file. + + Opens a USD file using pure USD (``Usd.Stage.Open()``). If Kit is available and + context attachment is needed for viewport/UI display, use + :func:`attach_stage_to_usd_context` after opening the stage. Args: usd_path: The path to the USD file to open. Returns: - True if operation is successful, otherwise False. + The opened USD stage. Raises: ValueError: When input path is not a supported file type by USD. + RuntimeError: When failed to open the stage. """ - # check if USD file is supported if not Usd.Stage.IsSupportedFile(usd_path): raise ValueError(f"The USD file at path '{usd_path}' is not supported.") - # get USD context - usd_context = omni.usd.get_context() - # disable save to recent files - usd_context.disable_save_to_recent_files() - # open stage - result = usd_context.open_stage(usd_path) - # enable save to recent files - usd_context.enable_save_to_recent_files() - # return result - return result + stage = Usd.Stage.Open(usd_path) + if stage is None: + raise RuntimeError(f"Failed to open USD stage at path '{usd_path}'.") + # Set as current stage so get_current_stage() can find it + _context.stage = stage + return stage @contextlib.contextmanager @@ -160,7 +250,7 @@ def use_stage(stage: Usd.Stage) -> Generator[None, None, None]: ... pass >>> # operate on the default stage attached to the USD context """ - if get_isaac_sim_version().major < 5: + if has_kit() and get_isaac_sim_version().major < 5: logger.warning("Isaac Sim < 5.0 does not support thread-local stage contexts. Skipping use_stage().") yield # no-op else: @@ -182,24 +272,54 @@ def use_stage(stage: Usd.Stage) -> Generator[None, None, None]: def update_stage() -> None: - """Updates the current stage by triggering an application update cycle. - - This function triggers a single update cycle of the application interface, which - in turn updates the stage and all associated systems (rendering, physics, etc.). - This is necessary to ensure that changes made to the stage are properly processed - and reflected in the simulation. - - Note: - This function calls the application update interface rather than directly - updating the stage because the stage update is part of the broader - application update cycle that includes rendering, physics, and other systems. + """Triggers a full application update cycle to process USD stage changes. + + This function calls ``omni.kit.app.get_app_interface().update()`` which triggers + a complete application update including: + + * Physics simulation step (if ``/app/player/playSimulations`` is True) + * Rendering (RTX path tracing, viewport updates) + * UI updates (widgets, windows) + * Timeline events and callbacks + * Extension updates + * USD/Fabric synchronization + + When to Use: + * **After creating a new stage**: ``create_new_stage()`` → ``update_stage()`` + * **After spawning prims**: ``cfg.func("/World/Robot", cfg)`` → ``update_stage()`` + * **After USD authoring**: Creating materials, lights, meshes, etc. + * **Before simulation starts**: During setup phase, before ``sim.reset()`` + * **In test fixtures**: To ensure consistent state before each test + + When NOT to Use: + * **During active simulation** (after ``sim.play()``): Can interfere with + physics stepping and cause double-stepping or timing issues. + * **During sensor updates**: Can reset RTX renderer state mid-cycle, + causing incorrect sensor outputs (e.g., ``inf`` depth values). + * **Inside physics/render callbacks**: Can cause recursion or timing issues. + * **Inside ``sim.step()`` or ``sim.render()``**: These already perform + app updates internally with proper safeguards. + + For rendering during simulation without physics stepping, use:: + + sim.set_setting("/app/player/playSimulations", False) + omni.kit.app.get_app().update() + sim.set_setting("/app/player/playSimulations", True) Example: >>> import isaaclab.sim as sim_utils >>> - >>> sim_utils.update_stage() + >>> # Setup phase - safe to use + >>> sim_utils.create_new_stage() + >>> robot_cfg.func("/World/Robot", robot_cfg) + >>> sim_utils.update_stage() # Commit USD changes + >>> + >>> # Simulation phase - DO NOT use update_stage() + >>> sim.reset() + >>> sim.play() + >>> for _ in range(100): + ... sim.step() # Handles updates internally """ - # TODO: Why is this updating the simulation and not the stage? omni.kit.app.get_app_interface().update() @@ -232,8 +352,10 @@ def save_stage(usd_path: str, save_and_reload_in_place: bool = True) -> bool: root_layer = get_current_stage().GetRootLayer() # transfer content from root layer to new layer layer.TransferContent(root_layer) - # resolve paths - omni.usd.resolve_paths(root_layer.identifier, layer.identifier) + + # resolve paths so asset references remain valid from the new location + resolve_paths(root_layer.identifier, layer.identifier) + # save layer result = layer.Save() if not result: @@ -246,46 +368,71 @@ def save_stage(usd_path: str, save_and_reload_in_place: bool = True) -> bool: return result -def close_stage(callback_fn: Callable[[bool, str], None] | None = None) -> bool: +def close_stage() -> bool: """Closes the current USD stage. + If Kit is running, this first closes the stage via the Kit USD context + (``omni.usd.get_context().close_stage()``), then clears the stage cache. + Without Kit, only the stage cache is cleared. + .. note:: Once the stage is closed, it is necessary to open a new stage or create a new one in order to work on it. - Args: - callback_fn: A callback function to call while closing the stage. - The function should take two arguments: a boolean indicating whether the stage is closing - and a string indicating the error message if the stage closing fails. Defaults to None, - in which case the stage will be closed without a callback. - Returns: - True if operation is successful, otherwise False. + True if operation is successful. Example: >>> import isaaclab.sim as sim_utils >>> >>> sim_utils.close_stage() True - >>> + """ + # Close Kit's USD context first (while the stage is still in the cache), + # then clear the cache. Reversing this order causes Kit to fail with + # "Removal of UsdStage from cache failed" and can hang during teardown. + if has_kit(): + import omni.usd - Example with callback function: - >>> import isaaclab.sim as sim_utils - >>> - >>> def callback(*args, **kwargs): - ... print("callback:", args, kwargs) - >>> sim_utils.close_stage(callback) - True - >>> sim_utils.close_stage(callback) - callback: (False, 'Stage opening or closing already in progress!!') {} - False + omni.usd.get_context().close_stage() + + stage_cache = UsdUtils.StageCache.Get() + stage_cache.Clear() + _context.stage = None + + return True + + +def _is_prim_deletable(prim: Usd.Prim) -> bool: + """Check if a prim can be safely deleted. + + This function checks various conditions to determine if a prim should be deleted: + - Root prim ("/") cannot be deleted + - Prims under "/Render" namespace are preserved + - Prims with "no_delete" metadata are preserved + - Prims hidden in stage window are preserved + - Ancestral prims (from USD references) cannot be deleted + + Args: + prim: The USD prim to check. + + Returns: + True if the prim can be deleted, False otherwise. """ - if callback_fn is None: - result = omni.usd.get_context().close_stage() - else: - result = omni.usd.get_context().close_stage_with_callback(callback_fn) - return result + prim_path = prim.GetPath().pathString + if prim_path == "/": + return False + if prim_path.startswith("/Render"): + return False + if prim.GetMetadata("no_delete"): + return False + if prim.GetMetadata("hide_in_stage_window"): + return False + # Check ancestral prims (from USD references) using pure USD helper + if _check_ancestral(prim): + return False + return True def clear_stage(predicate: Callable[[Usd.Prim], bool] | None = None) -> None: @@ -316,60 +463,22 @@ def clear_stage(predicate: Callable[[Usd.Prim], bool] | None = None) -> None: from .prims import delete_prim from .queries import get_all_matching_child_prims - def _default_predicate(prim: Usd.Prim) -> bool: - """Check if the prim should be deleted.""" - prim_path = prim.GetPath().pathString - if prim_path == "/": - return False - if prim_path.startswith("/Render"): - return False - if prim.GetMetadata("no_delete"): - return False - if prim.GetMetadata("hide_in_stage_window"): - return False - if omni.usd.check_ancestral(prim): - return False - return True - def _predicate_from_path(prim: Usd.Prim) -> bool: if predicate is None: - return _default_predicate(prim) - return predicate(prim) + return _is_prim_deletable(prim) + # Custom predicate must also pass the deletable check + return predicate(prim) and _is_prim_deletable(prim) # get all prims to delete - if predicate is None: - prims = get_all_matching_child_prims("/", _default_predicate) - else: - prims = get_all_matching_child_prims("/", _predicate_from_path) + prims = get_all_matching_child_prims("/", _predicate_from_path) # convert prims to prim paths prim_paths_to_delete = [prim.GetPath().pathString for prim in prims] # delete prims delete_prim(prim_paths_to_delete) - - if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False: # type: ignore + if has_kit(): omni.kit.app.get_app_interface().update() -def is_stage_loading() -> bool: - """Convenience function to see if any files are being loaded. - - Returns: - True if loading, False otherwise - - Example: - >>> import isaaclab.sim as sim_utils - >>> - >>> sim_utils.is_stage_loading() - False - """ - context = omni.usd.get_context() - if context is None: - return False - else: - _, _, loading = context.get_stage_loading_status() - return loading > 0 - - def get_current_stage(fabric: bool = False) -> Usd.Stage: """Get the current open USD or Fabric stage @@ -387,14 +496,16 @@ def get_current_stage(fabric: bool = False) -> Usd.Stage: sessionLayer=Sdf.Find('anon:0x7fba6c01c5c0:World7-session.usda'), pathResolverContext=) """ - stage = getattr(_context, "stage", omni.usd.get_context().get_stage()) + # First check thread-local context for an in-memory stage + stage = getattr(_context, "stage", None) + if stage is not None: + if fabric: + import usdrt - if fabric: - import usdrt - - # Get stage ID and attach to Fabric stage - stage_id = get_current_stage_id() - return usdrt.Usd.Stage.Attach(stage_id) + # Get stage ID and attach to Fabric stage + stage_id = get_current_stage_id() + return usdrt.Usd.Stage.Attach(stage_id) + return stage return stage @@ -413,80 +524,17 @@ def get_current_stage_id() -> int: """ # get current stage stage = get_current_stage() + if stage is None: + raise RuntimeError("No current stage available. Did you create a stage?") + # retrieve stage ID from stage cache stage_cache = UsdUtils.StageCache.Get() stage_id = stage_cache.GetId(stage).ToLongInt() # if stage ID is not found, insert it into the stage cache if stage_id < 0: + # Ensure stage has a valid root layer before inserting + if not stage.GetRootLayer(): + raise RuntimeError("Stage has no root layer - cannot cache an incomplete stage.") stage_id = stage_cache.Insert(stage).ToLongInt() # return stage ID return stage_id - - -def attach_stage_to_usd_context(attaching_early: bool = False): - """Attaches the current USD stage in memory to the USD context. - - This function should be called during or after scene is created and before stage is simulated or rendered. - If the stage is not in memory or rendering is not enabled, this function will return without attaching. - - .. versionadded:: 2.3.0 - This function is available in Isaac Sim 5.0 and later. For backwards - compatibility, it returns without attaching to the USD context. - - Args: - attaching_early: Whether to attach the stage to the usd context before stage is created. Defaults to False. - """ - - import carb - import omni.physx - import omni.usd - from isaacsim.core.simulation_manager import SimulationManager - - from isaaclab.sim.simulation_context import SimulationContext - - # if Isaac Sim version is less than 5.0, stage in memory is not supported - if get_isaac_sim_version().major < 5: - return - - # if stage is not in memory, we can return early - if not is_current_stage_in_memory(): - return - - # attach stage to physx - stage_id = get_current_stage_id() - physx_sim_interface = omni.physx.get_physx_simulation_interface() - physx_sim_interface.attach_stage(stage_id) - - # this carb flag is equivalent to if rendering is enabled - carb_setting = carb.settings.get_settings() # type: ignore - is_rendering_enabled = carb_setting.get("/physics/fabricUpdateTransformations") - - # if rendering is not enabled, we don't need to attach it - if not is_rendering_enabled: - return - - # early attach warning msg - if attaching_early: - logger.warning( - "Attaching stage in memory to USD context early to support an operation which" - " does not support stage in memory." - ) - - # skip this callback to avoid wiping the stage after attachment - SimulationContext.instance().skip_next_stage_open_callback() - - # disable stage open callback to avoid clearing callbacks - SimulationManager.enable_stage_open_callback(False) - - # enable physics fabric - SimulationContext.instance()._physics_context.enable_fabric(True) # type: ignore - - # attach stage to usd context - omni.usd.get_context().attach_stage_with_callback(stage_id) - - # attach stage to physx - physx_sim_interface = omni.physx.get_physx_simulation_interface() - physx_sim_interface.attach_stage(stage_id) - - # re-enable stage open callback - SimulationManager.enable_stage_open_callback(True) diff --git a/source/isaaclab/isaaclab/sim/utils/transforms.py b/source/isaaclab/isaaclab/sim/utils/transforms.py index d7ae57a16a6a..b38cd8fa1cae 100644 --- a/source/isaaclab/isaaclab/sim/utils/transforms.py +++ b/source/isaaclab/isaaclab/sim/utils/transforms.py @@ -86,7 +86,7 @@ def standardize_xform_ops( translation: Optional translation vector (x, y, z) in local space. If provided, overrides the prim's current translation. If None, preserves the current local translation. Defaults to None. - orientation: Optional orientation quaternion (w, x, y, z) in local space. If provided, + orientation: Optional orientation quaternion (x, y, z, w) in local space. If provided, overrides the prim's current orientation. If None, preserves the current local orientation. Defaults to None. scale: Optional scale vector (x, y, z). If provided, overrides the prim's current scale. @@ -115,7 +115,7 @@ def standardize_xform_ops( >>> sim_utils.standardize_xform_ops( ... prim, ... translation=(1.0, 2.0, 3.0), - ... orientation=(1.0, 0.0, 0.0, 0.0), # identity rotation (w, x, y, z) + ... orientation=(0.0, 0.0, 0.0, 1.0), # identity rotation (x, y, z, w) ... scale=(2.0, 2.0, 2.0), ... ) >>> @@ -151,7 +151,8 @@ def standardize_xform_ops( if translation is not None: xform_pos = Gf.Vec3d(*translation) if orientation is not None: - xform_quat = Gf.Quatd(*orientation) + # orientation is (x, y, z, w), Gf.Quatd expects (w, x, y, z) + xform_quat = Gf.Quatd(orientation[3], orientation[0], orientation[1], orientation[2]) # Handle scale resolution if scale is not None: @@ -171,6 +172,19 @@ def standardize_xform_ops( # Verify if xform stack is reset has_reset = xformable.GetResetXformStack() + + # Ensure the prim has an "over" spec on the edit target layer. Prims from + # referenced USD files may only exist in the reference layer with no spec on + # the edit target. Inside an Sdf.ChangeBlock, AddXformOp calls CreateAttribute + # which needs an existing prim spec on the edit target — it cannot create one + # while stage recomposition is deferred. + edit_layer = prim.GetStage().GetEditTarget().GetLayer() + if not edit_layer.GetPrimAtPath(prim.GetPath()): + for prefix in prim.GetPath().GetPrefixes(): + if not edit_layer.GetPrimAtPath(prefix): + parent_spec = edit_layer.GetPrimAtPath(prefix.GetParentPath()) or edit_layer.pseudoRoot + Sdf.PrimSpec(parent_spec, prefix.name, Sdf.SpecifierOver) + # Batch the operations with Sdf.ChangeBlock(): # Clear the existing transform operation order @@ -272,7 +286,7 @@ def resolve_prim_pose( Returns: A tuple containing the position (as a 3D vector) and the quaternion orientation - in the (w, x, y, z) format. + in the (x, y, z, w) format. Raises: ValueError: If the prim or ref prim is not valid. @@ -319,7 +333,9 @@ def resolve_prim_pose( # extract position and orientation prim_pos = [*prim_tf.ExtractTranslation()] - prim_quat = [prim_tf.ExtractRotationQuat().real, *prim_tf.ExtractRotationQuat().imaginary] + # prim_quat = [prim_tf.ExtractRotationQuat().real, *prim_tf.ExtractRotationQuat().imaginary] + prim_quat = [*prim_tf.ExtractRotationQuat().imaginary, prim_tf.ExtractRotationQuat().real] + return tuple(prim_pos), tuple(prim_quat) @@ -385,7 +401,7 @@ def convert_world_pose_to_local( Args: position: The world-space position as (x, y, z). - orientation: The world-space orientation as quaternion (w, x, y, z). If None, only position is converted + orientation: The world-space orientation as quaternion (x, y, z, w). If None, only position is converted and None is returned for orientation. ref_prim: The reference USD prim to compute the local transform relative to. If this is the root prim ("/"), the world pose is returned unchanged. @@ -394,7 +410,7 @@ def convert_world_pose_to_local( A tuple of (local_translation, local_orientation) where: - local_translation is a tuple of (x, y, z) in local space relative to ref_prim - - local_orientation is a tuple of (w, x, y, z) in local space relative to ref_prim, + - local_orientation is a tuple of (x, y, z, w) in local space relative to ref_prim, or None if no orientation was provided Raises: @@ -410,7 +426,7 @@ def convert_world_pose_to_local( >>> >>> # Convert world pose to local (relative to ref_prim) >>> world_pos = (10.0, 5.0, 0.0) - >>> world_quat = (1.0, 0.0, 0.0, 0.0) # identity rotation + >>> world_quat = (0.0, 0.0, 0.0, 1.0) # identity rotation (x, y, z, w) >>> local_pos, local_quat = sim_utils.convert_world_pose_to_local(world_pos, world_quat, ref_prim) >>> print(f"Local position: {local_pos}") >>> print(f"Local orientation: {local_quat}") @@ -433,8 +449,8 @@ def convert_world_pose_to_local( desired_world_tf.SetTranslateOnly(Gf.Vec3d(*position)) if orientation is not None: - # Set rotation from quaternion (w, x, y, z) - quat = Gf.Quatd(*orientation) + # Set rotation from quaternion (x, y, z, w) - Gf.Quatd expects (w, x, y, z) + quat = Gf.Quatd(orientation[3], orientation[0], orientation[1], orientation[2]) desired_world_tf.SetRotateOnly(quat) # Convert world transform to local: local = world * inv(ref_world) @@ -448,6 +464,7 @@ def convert_world_pose_to_local( local_orientation = None if orientation is not None: quat_result = local_transform.GetRotation().GetQuat() - local_orientation = (quat_result.GetReal(), *quat_result.GetImaginary()) + # Gf.Quatd stores (w, x, y, z), return (x, y, z, w) for our convention + local_orientation = (*quat_result.GetImaginary(), quat_result.GetReal()) return local_translation, local_orientation diff --git a/source/isaaclab/isaaclab/sim/views/__init__.py b/source/isaaclab/isaaclab/sim/views/__init__.py index eb5bea7690cb..08bd01a1822f 100644 --- a/source/isaaclab/isaaclab/sim/views/__init__.py +++ b/source/isaaclab/isaaclab/sim/views/__init__.py @@ -5,4 +5,6 @@ """Views for manipulating USD prims.""" -from .xform_prim_view import XformPrimView +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/sim/views/__init__.pyi b/source/isaaclab/isaaclab/sim/views/__init__.pyi new file mode 100644 index 000000000000..d578f85d6ada --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseFrameView", + "UsdFrameView", + "FrameView", + # Deprecated alias + "XformPrimView", +] + +from .base_frame_view import BaseFrameView +from .usd_frame_view import UsdFrameView +from .frame_view import FrameView +# Deprecated alias +from .xform_prim_view import XformPrimView diff --git a/source/isaaclab/isaaclab/sim/views/base_frame_view.py b/source/isaaclab/isaaclab/sim/views/base_frame_view.py new file mode 100644 index 000000000000..d58cb0fbb534 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/base_frame_view.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Abstract base class for batched prim transform views.""" + +from __future__ import annotations + +import abc + +import warp as wp + +from isaaclab.utils.warp import ProxyArray + + +class BaseFrameView(abc.ABC): + """Abstract interface for reading and writing world-space transforms of multiple prims. + + Backend-specific implementations (USD/Fabric, Newton GPU state, etc.) subclass + this to provide efficient batched pose queries. The factory + :class:`~isaaclab.sim.views.FrameView` selects the correct + implementation at runtime based on the active physics backend. + + All pose getters return :class:`~isaaclab.utils.warp.ProxyArray`. Setters accept ``wp.array``. + """ + + @property + @abc.abstractmethod + def count(self) -> int: + """Number of prims in this view.""" + ... + + @property + @abc.abstractmethod + def device(self) -> str: + """Device where arrays are allocated (``"cpu"`` or ``"cuda:0"``).""" + ... + + @abc.abstractmethod + def get_world_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get world-space positions and orientations for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A tuple ``(positions, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + ... + + @abc.abstractmethod + def set_world_poses( + self, + positions: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set world-space positions and/or orientations for prims in the view. + + Args: + positions: World-space positions ``(M, 3)``. ``None`` leaves positions unchanged. + orientations: World-space quaternions ``(M, 4)``. ``None`` leaves orientations unchanged. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... + + @abc.abstractmethod + def get_local_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get local-space positions and orientations for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A tuple ``(translations, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + ... + + @abc.abstractmethod + def set_local_poses( + self, + translations: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set local-space translations and/or orientations for prims in the view. + + Args: + translations: Local-space translations ``(M, 3)``. ``None`` leaves translations unchanged. + orientations: Local-space quaternions ``(M, 4)``. ``None`` leaves orientations unchanged. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... + + @abc.abstractmethod + def get_scales(self, indices: wp.array | None = None) -> wp.array: + """Get scales for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A ``wp.array`` of shape ``(M, 3)``. + """ + ... + + @abc.abstractmethod + def set_scales(self, scales: wp.array, indices: wp.array | None = None) -> None: + """Set scales for prims in the view. + + Args: + scales: Scales ``(M, 3)`` as ``wp.array``. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... diff --git a/source/isaaclab/isaaclab/sim/views/frame_view.py b/source/isaaclab/isaaclab/sim/views/frame_view.py new file mode 100644 index 000000000000..ea9d5bfbeea9 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/frame_view.py @@ -0,0 +1,48 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Backend-dispatching FrameView. + +``FrameView(path, device=...)`` automatically selects the right backend: +- PhysX: :class:`~isaaclab_physx.sim.views.FabricFrameView` +- Newton: :class:`~isaaclab_newton.sim.views.NewtonSiteFrameView` +""" + +from __future__ import annotations + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_frame_view import BaseFrameView + + +class FrameView(FactoryBase, BaseFrameView): + """FrameView that dispatches to the active physics backend. + + Callers use ``FrameView(prim_path, device=device)`` and get the + correct implementation automatically: + + - **PhysX / no backend**: :class:`~isaaclab_physx.sim.views.FabricFrameView` + (Fabric GPU acceleration with USD fallback). + - **Newton**: :class:`~isaaclab_newton.sim.views.NewtonSiteFrameView` + (GPU-resident site-based transforms). + """ + + _backend_class_names = {"physx": "FabricFrameView", "newton": "NewtonSiteFrameView"} + + @classmethod + def _get_backend(cls, *args, **kwargs) -> str: + from isaaclab.sim.simulation_context import SimulationContext # noqa: PLC0415 + + ctx = SimulationContext.instance() + if ctx is None: + return "physx" + manager_name = ctx.physics_manager.__name__.lower() + if "newton" in manager_name: + return "newton" + return "physx" + + def __new__(cls, *args, **kwargs) -> BaseFrameView: + """Create a new FrameView for the active physics backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sim/views/usd_frame_view.py b/source/isaaclab/isaaclab/sim/views/usd_frame_view.py new file mode 100644 index 000000000000..7730c3dd735d --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/usd_frame_view.py @@ -0,0 +1,362 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging + +import numpy as np +import torch +import warp as wp + +from pxr import Gf, Sdf, Usd, UsdGeom, Vt + +import isaaclab.sim as sim_utils +from isaaclab.utils.warp import ProxyArray + +from .base_frame_view import BaseFrameView + +logger = logging.getLogger(__name__) + + +class UsdFrameView(BaseFrameView): + """Batched interface for reading and writing transforms of multiple USD prims. + + Provides batch operations for getting and setting poses (position and orientation) + of multiple prims at once via USD's ``XformCache``. + + The class supports both world-space and local-space pose operations: + + - **World poses**: Positions and orientations in the global world frame + - **Local poses**: Positions and orientations relative to each prim's parent + + For GPU-accelerated Fabric operations, use the PhysX backend variant + obtained via :class:`~isaaclab.sim.views.FrameView`. + + Pose getters return :class:`~isaaclab.utils.warp.ProxyArray`. Setters accept ``wp.array``. + + .. note:: + **Transform Requirements:** + + All prims in the view must be Xformable and have standardized transform operations: + ``[translate, orient, scale]``. Non-standard prims will raise a ValueError during + initialization if :attr:`validate_xform_ops` is True. Please use the function + :func:`isaaclab.sim.utils.standardize_xform_ops` to prepare prims before using this view. + + .. warning:: + This class operates at the USD default time code. Any animation or time-sampled data + will not be affected by write operations. For animated transforms, you need to handle + time-sampled keyframes separately. + """ + + def __init__( + self, + prim_path: str, + device: str = "cpu", + validate_xform_ops: bool = True, + stage: Usd.Stage | None = None, + **kwargs, + ): + """Initialize the view with matching prims. + + Args: + prim_path: USD prim path pattern to match prims. Supports wildcards (``*``) and + regex patterns (e.g., ``"/World/Env_.*/Robot"``). See + :func:`isaaclab.sim.utils.find_matching_prims` for pattern syntax. + device: Device to place arrays on. Can be ``"cpu"`` or CUDA devices like + ``"cuda:0"``. Defaults to ``"cpu"``. + validate_xform_ops: Whether to validate that the prims have standard xform operations. + Defaults to True. + stage: USD stage to search for prims. Defaults to None, in which case the current + active stage from the simulation context is used. + **kwargs: Additional keyword arguments (ignored). Allows forward-compatible + construction when callers pass backend-specific options like + ``sync_usd_on_fabric_write``. + + Raises: + ValueError: If any matched prim is not Xformable or doesn't have standardized + transform operations (translate, orient, scale in that order). + """ + self._prim_path = prim_path + self._device = device + + stage = sim_utils.get_current_stage() if stage is None else stage + self._prims: list[Usd.Prim] = sim_utils.find_matching_prims(prim_path, stage=stage) + + if validate_xform_ops: + for prim in self._prims: + sim_utils.standardize_xform_ops(prim) + if not sim_utils.validate_standard_xform_ops(prim): + raise ValueError( + f"Prim at path '{prim.GetPath().pathString}' is not a xformable prim with standard transform" + f" operations [translate, orient, scale]. Received type: '{prim.GetTypeName()}'." + " Use sim_utils.standardize_xform_ops() to prepare the prim." + ) + + self._ALL_INDICES = list(range(len(self._prims))) + + # ------------------------------------------------------------------ + # Properties + # ------------------------------------------------------------------ + + @property + def count(self) -> int: + """Number of prims in this view.""" + return len(self._prims) + + @property + def device(self) -> str: + """Device where arrays are allocated (cpu or cuda).""" + return self._device + + @property + def prims(self) -> list[Usd.Prim]: + """List of USD prims being managed by this view.""" + return self._prims + + @property + def prim_paths(self) -> list[str]: + """List of prim paths (as strings) for all prims being managed by this view. + + The conversion is performed lazily on first access and cached. + """ + if not hasattr(self, "_prim_paths"): + self._prim_paths = [prim.GetPath().pathString for prim in self._prims] + return self._prim_paths + + # ------------------------------------------------------------------ + # Setters + # ------------------------------------------------------------------ + + def set_world_poses( + self, + positions: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ): + """Set world-space poses for prims in the view. + + Converts the desired world pose to local-space relative to each prim's + parent before writing to USD xform ops. + + Args: + positions: World-space positions of shape ``(M, 3)``. + orientations: World-space quaternions ``(w, x, y, z)`` of shape ``(M, 4)``. + indices: Indices of prims to set poses for. Defaults to None (all prims). + """ + indices_list = self._resolve_indices(indices) + + positions_array = Vt.Vec3dArray.FromNumpy(self._to_numpy(positions)) if positions is not None else None + orientations_array = Vt.QuatdArray.FromNumpy(self._to_numpy(orientations)) if orientations is not None else None + + xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) + + with Sdf.ChangeBlock(): + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + parent_prim = prim.GetParent() + + world_pos = positions_array[idx] if positions_array is not None else None + world_quat = orientations_array[idx] if orientations_array is not None else None + + if parent_prim.IsValid() and parent_prim.GetPath() != Sdf.Path.absoluteRootPath: + if positions_array is None or orientations_array is None: + prim_tf = xform_cache.GetLocalToWorldTransform(prim) + prim_tf.Orthonormalize() + if world_pos is not None: + prim_tf.SetTranslateOnly(world_pos) + if world_quat is not None: + prim_tf.SetRotateOnly(world_quat) + else: + prim_tf = Gf.Matrix4d() + prim_tf.SetTranslateOnly(world_pos) + prim_tf.SetRotateOnly(world_quat) + + parent_world_tf = xform_cache.GetLocalToWorldTransform(parent_prim) + local_tf = prim_tf * parent_world_tf.GetInverse() + local_pos = local_tf.ExtractTranslation() + local_quat = local_tf.ExtractRotationQuat() + else: + # Root-level prim: world == local + local_pos = world_pos + local_quat = world_quat + + if local_pos is not None: + prim.GetAttribute("xformOp:translate").Set(local_pos) + if local_quat is not None: + prim.GetAttribute("xformOp:orient").Set(local_quat) + + def set_local_poses( + self, + translations: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ): + """Set local-space poses for prims in the view. + + Args: + translations: Local-space translations of shape ``(M, 3)``. + orientations: Local-space quaternions ``(w, x, y, z)`` of shape ``(M, 4)``. + indices: Indices of prims to set poses for. Defaults to None (all prims). + """ + indices_list = self._resolve_indices(indices) + + translations_array = Vt.Vec3dArray.FromNumpy(self._to_numpy(translations)) if translations is not None else None + orientations_array = Vt.QuatdArray.FromNumpy(self._to_numpy(orientations)) if orientations is not None else None + + with Sdf.ChangeBlock(): + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + if translations_array is not None: + prim.GetAttribute("xformOp:translate").Set(translations_array[idx]) + if orientations_array is not None: + prim.GetAttribute("xformOp:orient").Set(orientations_array[idx]) + + def set_scales(self, scales: wp.array, indices: wp.array | None = None): + """Set scales for prims in the view. + + Args: + scales: Scales of shape ``(M, 3)``. + indices: Indices of prims to set scales for. Defaults to None (all prims). + """ + indices_list = self._resolve_indices(indices) + scales_array = Vt.Vec3dArray.FromNumpy(self._to_numpy(scales)) + + with Sdf.ChangeBlock(): + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + prim.GetAttribute("xformOp:scale").Set(scales_array[idx]) + + def set_visibility(self, visibility: torch.Tensor, indices: wp.array | None = None): + """Set visibility for prims in the view. + + Args: + visibility: Visibility as a boolean tensor of shape ``(M,)``. + indices: Indices of prims to set visibility for. Defaults to None (all prims). + """ + indices_list = self._resolve_indices(indices) + + if visibility.shape != (len(indices_list),): + raise ValueError(f"Expected visibility shape ({len(indices_list)},), got {visibility.shape}.") + + with Sdf.ChangeBlock(): + for idx, prim_idx in enumerate(indices_list): + imageable = UsdGeom.Imageable(self._prims[prim_idx]) + if visibility[idx]: + imageable.MakeVisible() + else: + imageable.MakeInvisible() + + # ------------------------------------------------------------------ + # Getters + # ------------------------------------------------------------------ + + def get_world_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get world-space poses for prims in the view. + + Args: + indices: Indices of prims to get poses for. Defaults to None (all prims). + + Returns: + A tuple ``(positions, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + indices_list = self._resolve_indices(indices) + + positions = Vt.Vec3dArray(len(indices_list)) + orientations = Vt.QuatdArray(len(indices_list)) + xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) + + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + prim_tf = xform_cache.GetLocalToWorldTransform(prim) + prim_tf.Orthonormalize() + positions[idx] = prim_tf.ExtractTranslation() + orientations[idx] = prim_tf.ExtractRotationQuat() + + pos_wp = wp.array(np.array(positions, dtype=np.float32), dtype=wp.float32, device=self._device) + quat_wp = wp.array(np.array(orientations, dtype=np.float32), dtype=wp.float32, device=self._device) + return ProxyArray(pos_wp), ProxyArray(quat_wp) + + def get_local_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get local-space poses for prims in the view. + + Args: + indices: Indices of prims to get poses for. Defaults to None (all prims). + + Returns: + A tuple ``(translations, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + indices_list = self._resolve_indices(indices) + + translations = Vt.Vec3dArray(len(indices_list)) + orientations = Vt.QuatdArray(len(indices_list)) + xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) + + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + prim_tf = xform_cache.GetLocalTransformation(prim)[0] + prim_tf.Orthonormalize() + translations[idx] = prim_tf.ExtractTranslation() + orientations[idx] = prim_tf.ExtractRotationQuat() + + pos_wp = wp.array(np.array(translations, dtype=np.float32), dtype=wp.float32, device=self._device) + quat_wp = wp.array(np.array(orientations, dtype=np.float32), dtype=wp.float32, device=self._device) + return ProxyArray(pos_wp), ProxyArray(quat_wp) + + def get_scales(self, indices: wp.array | None = None) -> wp.array: + """Get scales for prims in the view. + + Args: + indices: Indices of prims to get scales for. Defaults to None (all prims). + + Returns: + A ``wp.array`` of shape ``(M, 3)``. + """ + indices_list = self._resolve_indices(indices) + + scales = Vt.Vec3dArray(len(indices_list)) + for idx, prim_idx in enumerate(indices_list): + prim = self._prims[prim_idx] + scales[idx] = prim.GetAttribute("xformOp:scale").Get() + + return wp.array(np.array(scales, dtype=np.float32), dtype=wp.float32, device=self._device) + + def get_visibility(self, indices: wp.array | None = None) -> torch.Tensor: + """Get visibility for prims in the view. + + Args: + indices: Indices of prims to get visibility for. Defaults to None (all prims). + + Returns: + A tensor of shape ``(M,)`` containing the visibility of each prim (bool). + """ + indices_list = self._resolve_indices(indices) + + visibility = torch.zeros(len(indices_list), dtype=torch.bool, device=self._device) + for idx, prim_idx in enumerate(indices_list): + imageable = UsdGeom.Imageable(self._prims[prim_idx]) + visibility[idx] = imageable.ComputeVisibility() != UsdGeom.Tokens.invisible + return visibility + + # ------------------------------------------------------------------ + # Helpers + # ------------------------------------------------------------------ + + def _resolve_indices(self, indices: wp.array | None): + """Resolve warp indices to an iterable of ints for per-prim USD operations.""" + if indices is None or indices == slice(None): + return self._ALL_INDICES + return indices.numpy() + + @staticmethod + def _to_numpy(data: wp.array | torch.Tensor) -> np.ndarray: + """Convert a ``wp.array`` or ``torch.Tensor`` to a numpy array on CPU.""" + if isinstance(data, wp.array): + return data.numpy() + return data.cpu().numpy() diff --git a/source/isaaclab/isaaclab/sim/views/xform_prim_view.py b/source/isaaclab/isaaclab/sim/views/xform_prim_view.py index 38a786117bcf..ce480fa65594 100644 --- a/source/isaaclab/isaaclab/sim/views/xform_prim_view.py +++ b/source/isaaclab/isaaclab/sim/views/xform_prim_view.py @@ -3,1112 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause -from __future__ import annotations +"""Backward-compatibility alias: ``XformPrimView`` -> :class:`FrameView`.""" -import logging -from collections.abc import Sequence +from .frame_view import FrameView -import numpy as np -import torch -import warp as wp - -import carb -from pxr import Gf, Sdf, Usd, UsdGeom, Vt - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.utils.warp import fabric as fabric_utils - -logger = logging.getLogger(__name__) - - -class XformPrimView: - """Optimized batched interface for reading and writing transforms of multiple USD prims. - - This class provides efficient batch operations for getting and setting poses (position and orientation) - of multiple prims at once using torch tensors. It is designed for scenarios where you need to manipulate - many prims simultaneously, such as in multi-agent simulations or large-scale procedural generation. - - The class supports both world-space and local-space pose operations: - - - **World poses**: Positions and orientations in the global world frame - - **Local poses**: Positions and orientations relative to each prim's parent - - When Fabric is enabled, the class leverages NVIDIA's Fabric API for GPU-accelerated batch operations: - - - Uses `omni:fabric:worldMatrix` and `omni:fabric:localMatrix` attributes for all Boundable prims - - Performs batch matrix decomposition/composition using Warp kernels on GPU - - Achieves performance comparable to Isaac Sim's XFormPrim implementation - - Works for both physics-enabled and non-physics prims (cameras, meshes, etc.). - Note: renderers typically consume USD-authored camera transforms. - - .. warning:: - **Fabric requires CUDA**: Fabric is only supported with on CUDA devices. - Warp's CPU backend for fabric-array writes has known issues, so attempting to use - Fabric with CPU device (``device="cpu"``) will raise a ValueError at initialization. - - .. note:: - **Fabric Support:** - - When Fabric is enabled, this view ensures prims have the required Fabric hierarchy - attributes (``omni:fabric:localMatrix`` and ``omni:fabric:worldMatrix``). On first Fabric - read, USD-authored transforms initialize Fabric state. Fabric writes can optionally - be mirrored back to USD via :attr:`sync_usd_on_fabric_write`. - - For more information, see the `Fabric Hierarchy documentation`_. - - .. _Fabric Hierarchy documentation: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/fabric_hierarchy.html - - .. note:: - **Performance Considerations:** - - * Tensor operations are performed on the specified device (CPU/CUDA) - * USD write operations use ``Sdf.ChangeBlock`` for batched updates - * Fabric operations use GPU-accelerated Warp kernels for maximum performance - * For maximum performance, minimize get/set operations within tight loops - - .. note:: - **Transform Requirements:** - - All prims in the view must be Xformable and have standardized transform operations: - ``[translate, orient, scale]``. Non-standard prims will raise a ValueError during - initialization if :attr:`validate_xform_ops` is True. Please use the function - :func:`isaaclab.sim.utils.standardize_xform_ops` to prepare prims before using this view. - - .. warning:: - This class operates at the USD default time code. Any animation or time-sampled data - will not be affected by write operations. For animated transforms, you need to handle - time-sampled keyframes separately. - """ - - def __init__( - self, - prim_path: str, - device: str = "cpu", - validate_xform_ops: bool = True, - sync_usd_on_fabric_write: bool = False, - stage: Usd.Stage | None = None, - ): - """Initialize the view with matching prims. - - This method searches the USD stage for all prims matching the provided path pattern, - validates that they are Xformable with standard transform operations, and stores - references for efficient batch operations. - - We generally recommend to validate the xform operations, as it ensures that the prims are in a consistent state - and have the standard transform operations (translate, orient, scale in that order). - However, if you are sure that the prims are in a consistent state, you can set this to False to improve - performance. This can save around 45-50% of the time taken to initialize the view. - - Args: - prim_path: USD prim path pattern to match prims. Supports wildcards (``*``) and - regex patterns (e.g., ``"/World/Env_.*/Robot"``). See - :func:`isaaclab.sim.utils.find_matching_prims` for pattern syntax. - device: Device to place the tensors on. Can be ``"cpu"`` or CUDA devices like - ``"cuda:0"``. Defaults to ``"cpu"``. - validate_xform_ops: Whether to validate that the prims have standard xform operations. - Defaults to True. - sync_usd_on_fabric_write: Whether to mirror Fabric transform writes back to USD. - When True, transform updates are synchronized to USD so that USD data readers (e.g., rendering - cameras) can observe these changes. Defaults to False for better performance. - stage: USD stage to search for prims. Defaults to None, in which case the current active stage - from the simulation context is used. - - Raises: - ValueError: If any matched prim is not Xformable or doesn't have standardized - transform operations (translate, orient, scale in that order). - """ - # Store configuration - self._prim_path = prim_path - self._device = device - - # Find and validate matching prims - stage = sim_utils.get_current_stage() if stage is None else stage - self._prims: list[Usd.Prim] = sim_utils.find_matching_prims(prim_path, stage=stage) - - # Validate all prims have standard xform operations - if validate_xform_ops: - for prim in self._prims: - if not sim_utils.validate_standard_xform_ops(prim): - raise ValueError( - f"Prim at path '{prim.GetPath().pathString}' is not a xformable prim with standard transform" - f" operations [translate, orient, scale]. Received type: '{prim.GetTypeName()}'." - " Use sim_utils.standardize_xform_ops() to prepare the prim." - ) - - # Determine if Fabric is supported on the device - self._use_fabric = carb.settings.get_settings().get("/physics/fabricEnabled") - logger.debug(f"Using Fabric for the XFormPrimView over '{self._prim_path}' on device '{self._device}'.") - - # Check for unsupported Fabric + CPU combination - if self._use_fabric and self._device == "cpu": - logger.warning( - "Fabric mode with Warp fabric-array operations is not supported on CPU devices. " - "While Fabric itself can run on both CPU and GPU, our batch Warp kernels for " - "fabric-array operations require CUDA and are not reliable on the CPU backend. " - "To ensure stability, Fabric is being disabled and execution will fall back " - "to standard USD operations on the CPU. This may impact performance." - ) - self._use_fabric = False - - # Create indices buffer - # Since we iterate over the indices, we need to use range instead of torch tensor - self._ALL_INDICES = list(range(len(self._prims))) - - # Some prims (e.g., Cameras) require USD-authored transforms for rendering. - # When enabled, mirror Fabric pose writes to USD for those prims. - self._sync_usd_on_fabric_write = sync_usd_on_fabric_write - - # Fabric batch infrastructure (initialized lazily on first use) - self._fabric_initialized = False - self._fabric_usd_sync_done = False - self._fabric_selection = None - self._fabric_to_view: wp.array | None = None - self._view_to_fabric: wp.array | None = None - self._default_view_indices: wp.array | None = None - self._fabric_hierarchy = None - # Create a valid USD attribute name: namespace:name - # Use "isaaclab" namespace to identify our custom attributes - self._view_index_attr = f"isaaclab:view_index:{abs(hash(self))}" - - """ - Properties. - """ - - @property - def count(self) -> int: - """Number of prims in this view.""" - return len(self._prims) - - @property - def device(self) -> str: - """Device where tensors are allocated (cpu or cuda).""" - return self._device - - @property - def prims(self) -> list[Usd.Prim]: - """List of USD prims being managed by this view.""" - return self._prims - - @property - def prim_paths(self) -> list[str]: - """List of prim paths (as strings) for all prims being managed by this view. - - This property converts each prim to its path string representation. The conversion is - performed lazily on first access and cached for subsequent accesses. - - Note: - For most use cases, prefer using :attr:`prims` directly as it provides direct access - to the USD prim objects without the conversion overhead. This property is mainly useful - for logging, debugging, or when string paths are explicitly required. - """ - # we cache it the first time it is accessed. - # we don't compute it in constructor because it is expensive and we don't need it most of the time. - # users should usually deal with prims directly as they typically need to access the prims directly. - if not hasattr(self, "_prim_paths"): - self._prim_paths = [prim.GetPath().pathString for prim in self._prims] - return self._prim_paths - - """ - Operations - Setters. - """ - - def set_world_poses( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set world-space poses for prims in the view. - - This method sets the position and/or orientation of each prim in world space. - - - When Fabric is enabled, the function writes directly to Fabric's ``omni:fabric:worldMatrix`` - attribute using GPU-accelerated batch operations. - - When Fabric is disabled, the function converts to local space and writes to USD's ``xformOp:translate`` - and ``xformOp:orient`` attributes. - - Args: - positions: World-space positions as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). - Defaults to None, in which case positions are not modified. - orientations: World-space orientations as quaternions (w, x, y, z) with shape (M, 4). - Defaults to None, in which case orientations are not modified. - indices: Indices of prims to set poses for. Defaults to None, in which case poses are set - for all prims in the view. - - Raises: - ValueError: If positions shape is not (M, 3) or orientations shape is not (M, 4). - ValueError: If the number of poses doesn't match the number of indices provided. - """ - if self._use_fabric: - self._set_world_poses_fabric(positions, orientations, indices) - else: - self._set_world_poses_usd(positions, orientations, indices) - - def set_local_poses( - self, - translations: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set local-space poses for prims in the view. - - This method sets the position and/or orientation of each prim in local space (relative to - their parent prims). - - The function writes directly to USD's ``xformOp:translate`` and ``xformOp:orient`` attributes. - - Note: - Even in Fabric mode, local pose operations use USD. This behavior is based on Isaac Sim's design - where Fabric is only used for world pose operations. - - Rationale: - - Local pose writes need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - - Args: - translations: Local-space translations as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). - Defaults to None, in which case translations are not modified. - orientations: Local-space orientations as quaternions (w, x, y, z) with shape (M, 4). - Defaults to None, in which case orientations are not modified. - indices: Indices of prims to set poses for. Defaults to None, in which case poses are set - for all prims in the view. - - Raises: - ValueError: If translations shape is not (M, 3) or orientations shape is not (M, 4). - ValueError: If the number of poses doesn't match the number of indices provided. - """ - if self._use_fabric: - self._set_local_poses_fabric(translations, orientations, indices) - else: - self._set_local_poses_usd(translations, orientations, indices) - - def set_scales(self, scales: torch.Tensor, indices: Sequence[int] | None = None): - """Set scales for prims in the view. - - This method sets the scale of each prim in the view. - - - When Fabric is enabled, the function updates scales in Fabric matrices using GPU-accelerated batch operations. - - When Fabric is disabled, the function writes to USD's ``xformOp:scale`` attributes. - - Args: - scales: Scales as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). - indices: Indices of prims to set scales for. Defaults to None, in which case scales are set - for all prims in the view. - - Raises: - ValueError: If scales shape is not (M, 3). - """ - if self._use_fabric: - self._set_scales_fabric(scales, indices) - else: - self._set_scales_usd(scales, indices) - - def set_visibility(self, visibility: torch.Tensor, indices: Sequence[int] | None = None): - """Set visibility for prims in the view. - - This method sets the visibility of each prim in the view. - - Args: - visibility: Visibility as a boolean tensor of shape (M,) where M is the - number of prims to set (either all prims if indices is None, or the number of indices provided). - indices: Indices of prims to set visibility for. Defaults to None, in which case visibility is set - for all prims in the view. - - Raises: - ValueError: If visibility shape is not (M,). - """ - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Validate inputs - if visibility.shape != (len(indices_list),): - raise ValueError(f"Expected visibility shape ({len(indices_list)},), got {visibility.shape}.") - - # Set visibility for each prim - with Sdf.ChangeBlock(): - for idx, prim_idx in enumerate(indices_list): - # Convert prim to imageable - imageable = UsdGeom.Imageable(self._prims[prim_idx]) - # Set visibility - if visibility[idx]: - imageable.MakeVisible() - else: - imageable.MakeInvisible() - - """ - Operations - Getters. - """ - - def get_world_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get world-space poses for prims in the view. - - This method retrieves the position and orientation of each prim in world space by computing - the full transform hierarchy from the prim to the world root. - - - When Fabric is enabled, the function uses Fabric batch operations with Warp kernels. - - When Fabric is disabled, the function uses USD XformCache. - - Note: - Scale and skew are ignored. The returned poses contain only translation and rotation. - - Args: - indices: Indices of prims to get poses for. Defaults to None, in which case poses are retrieved - for all prims in the view. - - Returns: - A tuple of (positions, orientations) where: - - - positions: Torch tensor of shape (M, 3) containing world-space positions (x, y, z), - where M is the number of prims queried. - - orientations: Torch tensor of shape (M, 4) containing world-space quaternions (w, x, y, z) - """ - if self._use_fabric: - return self._get_world_poses_fabric(indices) - else: - return self._get_world_poses_usd(indices) - - def get_local_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get local-space poses for prims in the view. - - This method retrieves the position and orientation of each prim in local space (relative to - their parent prims). It reads directly from USD's ``xformOp:translate`` and ``xformOp:orient`` attributes. - - Note: - Even in Fabric mode, local pose operations use USD. This behavior is based on Isaac Sim's design - where Fabric is only used for world pose operations. - - Rationale: - - Local pose reads need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - - Note: - Scale is ignored. The returned poses contain only translation and rotation. - - Args: - indices: Indices of prims to get poses for. Defaults to None, in which case poses are retrieved - for all prims in the view. - - Returns: - A tuple of (translations, orientations) where: - - - translations: Torch tensor of shape (M, 3) containing local-space translations (x, y, z), - where M is the number of prims queried. - - orientations: Torch tensor of shape (M, 4) containing local-space quaternions (w, x, y, z) - """ - if self._use_fabric: - return self._get_local_poses_fabric(indices) - else: - return self._get_local_poses_usd(indices) - - def get_scales(self, indices: Sequence[int] | None = None) -> torch.Tensor: - """Get scales for prims in the view. - - This method retrieves the scale of each prim in the view. - - - When Fabric is enabled, the function extracts scales from Fabric matrices using batch operations with - Warp kernels. - - When Fabric is disabled, the function reads from USD's ``xformOp:scale`` attributes. - - Args: - indices: Indices of prims to get scales for. Defaults to None, in which case scales are retrieved - for all prims in the view. - - Returns: - A tensor of shape (M, 3) containing the scales of each prim, where M is the number of prims queried. - """ - if self._use_fabric: - return self._get_scales_fabric(indices) - else: - return self._get_scales_usd(indices) - - def get_visibility(self, indices: Sequence[int] | None = None) -> torch.Tensor: - """Get visibility for prims in the view. - - This method retrieves the visibility of each prim in the view. - - Args: - indices: Indices of prims to get visibility for. Defaults to None, in which case visibility is retrieved - for all prims in the view. - - Returns: - A tensor of shape (M,) containing the visibility of each prim, where M is the number of prims queried. - The tensor is of type bool. - """ - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - # Convert to list if it is a tensor array - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Create buffers - visibility = torch.zeros(len(indices_list), dtype=torch.bool, device=self._device) - - for idx, prim_idx in enumerate(indices_list): - # Get prim - imageable = UsdGeom.Imageable(self._prims[prim_idx]) - # Get visibility - visibility[idx] = imageable.ComputeVisibility() != UsdGeom.Tokens.invisible - - return visibility - - """ - Internal Functions - USD. - """ - - def _set_world_poses_usd( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set world poses to USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - # Convert to list if it is a tensor array - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Validate inputs - if positions is not None: - if positions.shape != (len(indices_list), 3): - raise ValueError( - f"Expected positions shape ({len(indices_list)}, 3), got {positions.shape}. " - "Number of positions must match the number of prims in the view." - ) - positions_array = Vt.Vec3dArray.FromNumpy(positions.cpu().numpy()) - else: - positions_array = None - if orientations is not None: - if orientations.shape != (len(indices_list), 4): - raise ValueError( - f"Expected orientations shape ({len(indices_list)}, 4), got {orientations.shape}. " - "Number of orientations must match the number of prims in the view." - ) - # Vt expects quaternions in xyzw order - orientations_array = Vt.QuatdArray.FromNumpy(math_utils.convert_quat(orientations, to="xyzw").cpu().numpy()) - else: - orientations_array = None - - # Create xform cache instance - xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) - - # Set poses for each prim - # We use Sdf.ChangeBlock to minimize notification overhead. - with Sdf.ChangeBlock(): - for idx, prim_idx in enumerate(indices_list): - # Get prim - prim = self._prims[prim_idx] - # Get parent prim for local space conversion - parent_prim = prim.GetParent() - - # Determine what to set - world_pos = positions_array[idx] if positions_array is not None else None - world_quat = orientations_array[idx] if orientations_array is not None else None - - # Convert world pose to local if we have a valid parent - # Note: We don't use :func:`isaaclab.sim.utils.transforms.convert_world_pose_to_local` - # here since it isn't optimized for batch operations. - if parent_prim.IsValid() and parent_prim.GetPath() != Sdf.Path.absoluteRootPath: - # Get current world pose if we're only setting one component - if positions_array is None or orientations_array is None: - # get prim xform - prim_tf = xform_cache.GetLocalToWorldTransform(prim) - # sanitize quaternion - # this is needed, otherwise the quaternion might be non-normalized - prim_tf.Orthonormalize() - # populate desired world transform - if world_pos is not None: - prim_tf.SetTranslateOnly(world_pos) - if world_quat is not None: - prim_tf.SetRotateOnly(world_quat) - else: - # Both position and orientation are provided, create new transform - prim_tf = Gf.Matrix4d() - prim_tf.SetTranslateOnly(world_pos) - prim_tf.SetRotateOnly(world_quat) - - # Convert to local space - parent_world_tf = xform_cache.GetLocalToWorldTransform(parent_prim) - local_tf = prim_tf * parent_world_tf.GetInverse() - local_pos = local_tf.ExtractTranslation() - local_quat = local_tf.ExtractRotationQuat() - else: - # No parent or parent is root, world == local - local_pos = world_pos - local_quat = world_quat - - # Get or create the standard transform operations - if local_pos is not None: - prim.GetAttribute("xformOp:translate").Set(local_pos) - if local_quat is not None: - prim.GetAttribute("xformOp:orient").Set(local_quat) - - def _set_local_poses_usd( - self, - translations: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set local poses to USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Validate inputs - if translations is not None: - if translations.shape != (len(indices_list), 3): - raise ValueError(f"Expected translations shape ({len(indices_list)}, 3), got {translations.shape}.") - translations_array = Vt.Vec3dArray.FromNumpy(translations.cpu().numpy()) - else: - translations_array = None - if orientations is not None: - if orientations.shape != (len(indices_list), 4): - raise ValueError(f"Expected orientations shape ({len(indices_list)}, 4), got {orientations.shape}.") - orientations_array = Vt.QuatdArray.FromNumpy(math_utils.convert_quat(orientations, to="xyzw").cpu().numpy()) - else: - orientations_array = None - - # Set local poses - with Sdf.ChangeBlock(): - for idx, prim_idx in enumerate(indices_list): - prim = self._prims[prim_idx] - if translations_array is not None: - prim.GetAttribute("xformOp:translate").Set(translations_array[idx]) - if orientations_array is not None: - prim.GetAttribute("xformOp:orient").Set(orientations_array[idx]) - - def _set_scales_usd(self, scales: torch.Tensor, indices: Sequence[int] | None = None): - """Set scales to USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Validate inputs - if scales.shape != (len(indices_list), 3): - raise ValueError(f"Expected scales shape ({len(indices_list)}, 3), got {scales.shape}.") - - scales_array = Vt.Vec3dArray.FromNumpy(scales.cpu().numpy()) - # Set scales for each prim - with Sdf.ChangeBlock(): - for idx, prim_idx in enumerate(indices_list): - prim = self._prims[prim_idx] - prim.GetAttribute("xformOp:scale").Set(scales_array[idx]) - - def _get_world_poses_usd(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get world poses from USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - # Convert to list if it is a tensor array - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Create buffers - positions = Vt.Vec3dArray(len(indices_list)) - orientations = Vt.QuatdArray(len(indices_list)) - # Create xform cache instance - xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) - - # Note: We don't use :func:`isaaclab.sim.utils.transforms.resolve_prim_pose` - # here since it isn't optimized for batch operations. - for idx, prim_idx in enumerate(indices_list): - # Get prim - prim = self._prims[prim_idx] - # get prim xform - prim_tf = xform_cache.GetLocalToWorldTransform(prim) - # sanitize quaternion - # this is needed, otherwise the quaternion might be non-normalized - prim_tf.Orthonormalize() - # extract position and orientation - positions[idx] = prim_tf.ExtractTranslation() - orientations[idx] = prim_tf.ExtractRotationQuat() - - # move to torch tensors - positions = torch.tensor(np.array(positions), dtype=torch.float32, device=self._device) - orientations = torch.tensor(np.array(orientations), dtype=torch.float32, device=self._device) - # underlying data is in xyzw order, convert to wxyz order - orientations = math_utils.convert_quat(orientations, to="wxyz") - - return positions, orientations # type: ignore - - def _get_local_poses_usd(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get local poses from USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Create buffers - translations = Vt.Vec3dArray(len(indices_list)) - orientations = Vt.QuatdArray(len(indices_list)) - - # Create a fresh XformCache to avoid stale cached values - xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) - - for idx, prim_idx in enumerate(indices_list): - prim = self._prims[prim_idx] - prim_tf = xform_cache.GetLocalTransformation(prim)[0] - prim_tf.Orthonormalize() - translations[idx] = prim_tf.ExtractTranslation() - orientations[idx] = prim_tf.ExtractRotationQuat() - - translations = torch.tensor(np.array(translations), dtype=torch.float32, device=self._device) - orientations = torch.tensor(np.array(orientations), dtype=torch.float32, device=self._device) - orientations = math_utils.convert_quat(orientations, to="wxyz") - - return translations, orientations # type: ignore - - def _get_scales_usd(self, indices: Sequence[int] | None = None) -> torch.Tensor: - """Get scales from USD.""" - # Resolve indices - if indices is None or indices == slice(None): - indices_list = self._ALL_INDICES - else: - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - - # Create buffers - scales = Vt.Vec3dArray(len(indices_list)) - - for idx, prim_idx in enumerate(indices_list): - prim = self._prims[prim_idx] - scales[idx] = prim.GetAttribute("xformOp:scale").Get() - - # Convert to tensor - return torch.tensor(np.array(scales), dtype=torch.float32, device=self._device) - - """ - Internal Functions - Fabric. - """ - - def _set_world_poses_fabric( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set world poses using Fabric GPU batch operations. - - Writes directly to Fabric's ``omni:fabric:worldMatrix`` attribute using Warp kernels. - Changes are propagated through Fabric's hierarchy system but remain GPU-resident. - - For workflows mixing Fabric world pose writes with USD local pose queries, note - that local poses read from USD's xformOp:* attributes, which may not immediately - reflect Fabric changes. For best performance and consistency, use Fabric methods - exclusively (get_world_poses/set_world_poses with Fabric enabled). - """ - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Convert torch to warp (if provided), use dummy arrays for None to avoid Warp kernel issues - if positions is not None: - positions_wp = wp.from_torch(positions) - else: - positions_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - if orientations is not None: - orientations_wp = wp.from_torch(orientations) - else: - orientations_wp = wp.zeros((0, 4), dtype=wp.float32).to(self._device) - - # Dummy array for scales (not modifying) - scales_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Batch compose matrices with a single kernel launch - wp.launch( - kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, - orientations_wp, - scales_wp, # dummy array instead of None - False, # broadcast_positions - False, # broadcast_orientations - False, # broadcast_scales - indices_wp, - self._view_to_fabric, - ], - device=self._device, - ) - - # Synchronize to ensure kernel completes - wp.synchronize() - - # Update world transforms within Fabric hierarchy - self._fabric_hierarchy.update_world_xforms() - # Fabric now has authoritative data; skip future USD syncs - self._fabric_usd_sync_done = True - # Mirror to USD for renderer-facing prims when enabled. - if self._sync_usd_on_fabric_write: - self._set_world_poses_usd(positions, orientations, indices) - - # Fabric writes are GPU-resident; local pose operations still use USD. - - def _set_local_poses_fabric( - self, - translations: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set local poses using USD (matches Isaac Sim's design). - - Note: Even in Fabric mode, local pose operations use USD. - This is Isaac Sim's design: the ``usd=False`` parameter only affects world poses. - - Rationale: - - Local pose writes need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - """ - self._set_local_poses_usd(translations, orientations, indices) - - def _set_scales_fabric(self, scales: torch.Tensor, indices: Sequence[int] | None = None): - """Set scales using Fabric GPU batch operations.""" - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Convert torch to warp - scales_wp = wp.from_torch(scales) - - # Dummy arrays for positions and orientations (not modifying) - positions_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - orientations_wp = wp.zeros((0, 4), dtype=wp.float32).to(self._device) - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Batch compose matrices on GPU with a single kernel launch - wp.launch( - kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, # dummy array instead of None - orientations_wp, # dummy array instead of None - scales_wp, - False, # broadcast_positions - False, # broadcast_orientations - False, # broadcast_scales - indices_wp, - self._view_to_fabric, - ], - device=self._device, - ) - - # Synchronize to ensure kernel completes before syncing - wp.synchronize() - - # Update world transforms to propagate changes - self._fabric_hierarchy.update_world_xforms() - # Fabric now has authoritative data; skip future USD syncs - self._fabric_usd_sync_done = True - # Mirror to USD for renderer-facing prims when enabled. - if self._sync_usd_on_fabric_write: - self._set_scales_usd(scales, indices) - - def _get_world_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get world poses from Fabric using GPU batch operations.""" - # Lazy initialization of Fabric infrastructure - if not self._fabric_initialized: - self._initialize_fabric() - # Sync once from USD to ensure reads see the latest authored transforms - if not self._fabric_usd_sync_done: - self._sync_fabric_from_usd_once() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Use pre-allocated buffers for full reads, allocate only for partial reads - use_cached_buffers = indices is None or indices == slice(None) - if use_cached_buffers: - # Full read: Use cached buffers (zero allocation overhead!) - positions_wp = self._fabric_positions_buffer - orientations_wp = self._fabric_orientations_buffer - scales_wp = self._fabric_dummy_buffer - else: - # Partial read: Need to allocate buffers of appropriate size - positions_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) - orientations_wp = wp.zeros((count, 4), dtype=wp.float32).to(self._device) - scales_wp = self._fabric_dummy_buffer # Always use dummy for scales - - # Use cached fabricarray for world matrices - # This eliminates the 0.06-0.30ms variability from creating fabricarray each call - world_matrices = self._fabric_world_matrices - - # Launch GPU kernel to decompose matrices in parallel - wp.launch( - kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, - orientations_wp, - scales_wp, # dummy array instead of None - indices_wp, - self._view_to_fabric, - ], - device=self._device, - ) - - # Return tensors: zero-copy for cached buffers, conversion for partial reads - if use_cached_buffers: - # Zero-copy! The Warp kernel wrote directly into the PyTorch tensors - # We just need to synchronize to ensure the kernel is done - wp.synchronize() - return self._fabric_positions_torch, self._fabric_orientations_torch - else: - # Partial read: Need to convert from Warp to torch - positions = wp.to_torch(positions_wp) - orientations = wp.to_torch(orientations_wp) - return positions, orientations - - def _get_local_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get local poses using USD (matches Isaac Sim's design). - - Note: - Even in Fabric mode, local pose operations use USD's XformCache. - This is Isaac Sim's design: the ``usd=False`` parameter only affects world poses. - - Rationale: - - Local pose computation requires parent transforms which may not be in the view - - USD's XformCache provides efficient hierarchy-aware local transform queries - - Fabric is optimized for world pose operations, not local hierarchies - """ - return self._get_local_poses_usd(indices) - - def _get_scales_fabric(self, indices: Sequence[int] | None = None) -> torch.Tensor: - """Get scales from Fabric using GPU batch operations.""" - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - # Sync once from USD to ensure reads see the latest authored transforms - if not self._fabric_usd_sync_done: - self._sync_fabric_from_usd_once() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Use pre-allocated buffers for full reads, allocate only for partial reads - use_cached_buffers = indices is None or indices == slice(None) - if use_cached_buffers: - # Full read: Use cached buffers (zero allocation overhead!) - scales_wp = self._fabric_scales_buffer - else: - # Partial read: Need to allocate buffer of appropriate size - scales_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) - - # Always use dummy buffers for positions and orientations (not needed for scales) - positions_wp = self._fabric_dummy_buffer - orientations_wp = self._fabric_dummy_buffer - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Launch GPU kernel to decompose matrices in parallel - wp.launch( - kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, # dummy array instead of None - orientations_wp, # dummy array instead of None - scales_wp, - indices_wp, - self._view_to_fabric, - ], - device=self._device, - ) - - # Return tensor: zero-copy for cached buffers, conversion for partial reads - if use_cached_buffers: - # Zero-copy! The Warp kernel wrote directly into the PyTorch tensor - wp.synchronize() - return self._fabric_scales_torch - else: - # Partial read: Need to convert from Warp to torch - return wp.to_torch(scales_wp) - - """ - Internal Functions - Initialization. - """ - - def _initialize_fabric(self) -> None: - """Initialize Fabric batch infrastructure for GPU-accelerated pose queries. - - This method ensures all prims have the required Fabric hierarchy attributes - (``omni:fabric:localMatrix`` and ``omni:fabric:worldMatrix``) and creates the necessary - infrastructure for batch GPU operations using Warp. - - Based on the Fabric Hierarchy documentation, when Fabric Scene Delegate is enabled, - all boundable prims should have these attributes. This method ensures they exist - and are properly synchronized with USD. - """ - import usdrt - from usdrt import Rt - - # Get USDRT (Fabric) stage - stage_id = sim_utils.get_current_stage_id() - fabric_stage = usdrt.Usd.Stage.Attach(stage_id) - - # Step 1: Ensure all prims have Fabric hierarchy attributes - # According to the documentation, these attributes are created automatically - # when Fabric Scene Delegate is enabled, but we ensure they exist - for i in range(self.count): - rt_prim = fabric_stage.GetPrimAtPath(self.prim_paths[i]) - rt_xformable = Rt.Xformable(rt_prim) - - # Create Fabric hierarchy world matrix attribute if it doesn't exist - has_attr = ( - rt_xformable.HasFabricHierarchyWorldMatrixAttr() - if hasattr(rt_xformable, "HasFabricHierarchyWorldMatrixAttr") - else False - ) - if not has_attr: - rt_xformable.CreateFabricHierarchyWorldMatrixAttr() - - # Best-effort USD->Fabric sync; authoritative initialization happens on first read. - rt_xformable.SetWorldXformFromUsd() - - # Create view index attribute for batch operations - rt_prim.CreateAttribute(self._view_index_attr, usdrt.Sdf.ValueTypeNames.UInt, custom=True) - rt_prim.GetAttribute(self._view_index_attr).Set(i) - - # After syncing all prims, update the Fabric hierarchy to ensure world matrices are computed - self._fabric_hierarchy = usdrt.hierarchy.IFabricHierarchy().get_fabric_hierarchy( - fabric_stage.GetFabricId(), fabric_stage.GetStageIdAsStageId() - ) - self._fabric_hierarchy.update_world_xforms() - - # Step 2: Create index arrays for batch operations - self._default_view_indices = wp.zeros((self.count,), dtype=wp.uint32).to(self._device) - wp.launch( - kernel=fabric_utils.arange_k, - dim=self.count, - inputs=[self._default_view_indices], - device=self._device, - ) - wp.synchronize() # Ensure indices are ready - - # Step 3: Create Fabric selection with attribute filtering - # SelectPrims expects device format like "cuda:0" not "cuda" - # - # KNOWN ISSUE: SelectPrims may return prims in a different order than self._prims - # (which comes from USD's find_matching_prims). We create a bidirectional mapping - # (_view_to_fabric and _fabric_to_view) to handle this ordering difference. - # This works correctly for full-view operations but partial indexing still has issues. - fabric_device = self._device - if self._device == "cuda": - logger.warning("Fabric device is not specified, defaulting to 'cuda:0'.") - fabric_device = "cuda:0" - - self._fabric_selection = fabric_stage.SelectPrims( - require_attrs=[ - (usdrt.Sdf.ValueTypeNames.UInt, self._view_index_attr, usdrt.Usd.Access.Read), - (usdrt.Sdf.ValueTypeNames.Matrix4d, "omni:fabric:worldMatrix", usdrt.Usd.Access.ReadWrite), - ], - device=fabric_device, - ) - - # Step 4: Create bidirectional mapping between view and fabric indices - self._view_to_fabric = wp.zeros((self.count,), dtype=wp.uint32).to(self._device) - self._fabric_to_view = wp.fabricarray(self._fabric_selection, self._view_index_attr) - - wp.launch( - kernel=fabric_utils.set_view_to_fabric_array, - dim=self._fabric_to_view.shape[0], - inputs=[self._fabric_to_view, self._view_to_fabric], - device=self._device, - ) - # Synchronize to ensure mapping is ready before any operations - wp.synchronize() - - # Pre-allocate reusable output buffers for read operations - self._fabric_positions_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device) - self._fabric_orientations_torch = torch.zeros((self.count, 4), dtype=torch.float32, device=self._device) - self._fabric_scales_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device) - - # Create Warp views of the PyTorch tensors - self._fabric_positions_buffer = wp.from_torch(self._fabric_positions_torch, dtype=wp.float32) - self._fabric_orientations_buffer = wp.from_torch(self._fabric_orientations_torch, dtype=wp.float32) - self._fabric_scales_buffer = wp.from_torch(self._fabric_scales_torch, dtype=wp.float32) - - # Dummy array for unused outputs (always empty) - self._fabric_dummy_buffer = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - # Cache fabricarray for world matrices to avoid recreation overhead - # Refs: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usdrt_prim_selection.html - # https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/scenegraph_use.html - self._fabric_world_matrices = wp.fabricarray(self._fabric_selection, "omni:fabric:worldMatrix") - - # Cache Fabric stage to avoid expensive get_current_stage() calls - self._fabric_stage = fabric_stage - - self._fabric_initialized = True - # Force a one-time USD->Fabric sync on first read to pick up any USD edits - # made after the view was constructed. - self._fabric_usd_sync_done = False - - def _sync_fabric_from_usd_once(self) -> None: - """Sync Fabric world matrices from USD once, on the first read.""" - # Ensure Fabric is initialized - if not self._fabric_initialized: - self._initialize_fabric() - - # Ensure authored USD transforms are flushed before reading into Fabric. - sim_utils.update_stage() - - # Read authoritative transforms from USD and write once into Fabric. - positions_usd, orientations_usd = self._get_world_poses_usd() - scales_usd = self._get_scales_usd() - - prev_sync = self._sync_usd_on_fabric_write - self._sync_usd_on_fabric_write = False - self._set_world_poses_fabric(positions_usd, orientations_usd) - self._set_scales_fabric(scales_usd) - self._sync_usd_on_fabric_write = prev_sync - - self._fabric_usd_sync_done = True - - def _resolve_indices_wp(self, indices: Sequence[int] | None) -> wp.array: - """Resolve view indices as a Warp array.""" - if indices is None or indices == slice(None): - if self._default_view_indices is None: - raise RuntimeError("Fabric indices are not initialized.") - return self._default_view_indices - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - return wp.array(indices_list, dtype=wp.uint32).to(self._device) +XformPrimView = FrameView diff --git a/source/isaaclab/isaaclab/terrains/__init__.py b/source/isaaclab/isaaclab/terrains/__init__.py index 6f0b50185572..da52a80ff6fa 100644 --- a/source/isaaclab/isaaclab/terrains/__init__.py +++ b/source/isaaclab/isaaclab/terrains/__init__.py @@ -19,11 +19,7 @@ * :meth:`TerrainImporter.import_usd`: spawn a prim as reference to input USD file. """ -from .height_field import * # noqa: F401, F403 -from .sub_terrain_cfg import FlatPatchSamplingCfg, SubTerrainBaseCfg -from .terrain_generator import TerrainGenerator -from .terrain_generator_cfg import TerrainGeneratorCfg -from .terrain_importer import TerrainImporter -from .terrain_importer_cfg import TerrainImporterCfg -from .trimesh import * # noqa: F401, F403 -from .utils import color_meshes_by_height, create_prim_from_mesh + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/terrains/__init__.pyi b/source/isaaclab/isaaclab/terrains/__init__.pyi new file mode 100644 index 000000000000..9bb6cccd1cab --- /dev/null +++ b/source/isaaclab/isaaclab/terrains/__init__.pyi @@ -0,0 +1,70 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "FlatPatchSamplingCfg", + "SubTerrainBaseCfg", + "TerrainGenerator", + "TerrainGeneratorCfg", + "TerrainImporter", + "TerrainImporterCfg", + "color_meshes_by_height", + "create_prim_from_mesh", + "HfDiscreteObstaclesTerrainCfg", + "HfInvertedPyramidSlopedTerrainCfg", + "HfInvertedPyramidStairsTerrainCfg", + "HfPyramidSlopedTerrainCfg", + "HfPyramidStairsTerrainCfg", + "HfRandomUniformTerrainCfg", + "HfSteppingStonesTerrainCfg", + "HfTerrainBaseCfg", + "HfWaveTerrainCfg", + "MeshBoxTerrainCfg", + "MeshFloatingRingTerrainCfg", + "MeshGapTerrainCfg", + "MeshInvertedPyramidStairsTerrainCfg", + "MeshPitTerrainCfg", + "MeshPlaneTerrainCfg", + "MeshPyramidStairsTerrainCfg", + "MeshRailsTerrainCfg", + "MeshRandomGridTerrainCfg", + "MeshRepeatedBoxesTerrainCfg", + "MeshRepeatedCylindersTerrainCfg", + "MeshRepeatedPyramidsTerrainCfg", + "MeshStarTerrainCfg", +] + +from .sub_terrain_cfg import FlatPatchSamplingCfg, SubTerrainBaseCfg +from .terrain_generator import TerrainGenerator +from .terrain_generator_cfg import TerrainGeneratorCfg +from .terrain_importer import TerrainImporter +from .terrain_importer_cfg import TerrainImporterCfg +from .utils import color_meshes_by_height, create_prim_from_mesh +from .height_field import ( + HfDiscreteObstaclesTerrainCfg, + HfInvertedPyramidSlopedTerrainCfg, + HfInvertedPyramidStairsTerrainCfg, + HfPyramidSlopedTerrainCfg, + HfPyramidStairsTerrainCfg, + HfRandomUniformTerrainCfg, + HfSteppingStonesTerrainCfg, + HfTerrainBaseCfg, + HfWaveTerrainCfg, +) +from .trimesh import ( + MeshBoxTerrainCfg, + MeshFloatingRingTerrainCfg, + MeshGapTerrainCfg, + MeshInvertedPyramidStairsTerrainCfg, + MeshPitTerrainCfg, + MeshPlaneTerrainCfg, + MeshPyramidStairsTerrainCfg, + MeshRailsTerrainCfg, + MeshRandomGridTerrainCfg, + MeshRepeatedBoxesTerrainCfg, + MeshRepeatedCylindersTerrainCfg, + MeshRepeatedPyramidsTerrainCfg, + MeshStarTerrainCfg, +) diff --git a/source/isaaclab/isaaclab/terrains/config/__init__.py b/source/isaaclab/isaaclab/terrains/config/__init__.py index 264189e183b4..a555996714ee 100644 --- a/source/isaaclab/isaaclab/terrains/config/__init__.py +++ b/source/isaaclab/isaaclab/terrains/config/__init__.py @@ -5,4 +5,6 @@ """Pre-defined terrain configurations for the terrain generator.""" -from .rough import * # noqa: F401 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/terrains/config/__init__.pyi b/source/isaaclab/isaaclab/terrains/config/__init__.pyi new file mode 100644 index 000000000000..8fd20d1265ea --- /dev/null +++ b/source/isaaclab/isaaclab/terrains/config/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ROUGH_TERRAINS_CFG", +] + +from .rough import ROUGH_TERRAINS_CFG diff --git a/source/isaaclab/isaaclab/terrains/height_field/__init__.py b/source/isaaclab/isaaclab/terrains/height_field/__init__.py index 3bc28ba3ccfe..2f8203f40ee8 100644 --- a/source/isaaclab/isaaclab/terrains/height_field/__init__.py +++ b/source/isaaclab/isaaclab/terrains/height_field/__init__.py @@ -25,14 +25,6 @@ """ -from .hf_terrains_cfg import ( - HfDiscreteObstaclesTerrainCfg, - HfInvertedPyramidSlopedTerrainCfg, - HfInvertedPyramidStairsTerrainCfg, - HfPyramidSlopedTerrainCfg, - HfPyramidStairsTerrainCfg, - HfRandomUniformTerrainCfg, - HfSteppingStonesTerrainCfg, - HfTerrainBaseCfg, - HfWaveTerrainCfg, -) +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/terrains/height_field/__init__.pyi b/source/isaaclab/isaaclab/terrains/height_field/__init__.pyi new file mode 100644 index 000000000000..8d953eb3f2f4 --- /dev/null +++ b/source/isaaclab/isaaclab/terrains/height_field/__init__.pyi @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "HfDiscreteObstaclesTerrainCfg", + "HfInvertedPyramidSlopedTerrainCfg", + "HfInvertedPyramidStairsTerrainCfg", + "HfPyramidSlopedTerrainCfg", + "HfPyramidStairsTerrainCfg", + "HfRandomUniformTerrainCfg", + "HfSteppingStonesTerrainCfg", + "HfTerrainBaseCfg", + "HfWaveTerrainCfg", +] + +from .hf_terrains_cfg import ( + HfDiscreteObstaclesTerrainCfg, + HfInvertedPyramidSlopedTerrainCfg, + HfInvertedPyramidStairsTerrainCfg, + HfPyramidSlopedTerrainCfg, + HfPyramidStairsTerrainCfg, + HfRandomUniformTerrainCfg, + HfSteppingStonesTerrainCfg, + HfTerrainBaseCfg, + HfWaveTerrainCfg, +) diff --git a/source/isaaclab/isaaclab/terrains/height_field/hf_terrains_cfg.py b/source/isaaclab/isaaclab/terrains/height_field/hf_terrains_cfg.py index df1d6dcc21a2..069f87503e65 100644 --- a/source/isaaclab/isaaclab/terrains/height_field/hf_terrains_cfg.py +++ b/source/isaaclab/isaaclab/terrains/height_field/hf_terrains_cfg.py @@ -8,7 +8,6 @@ from isaaclab.utils import configclass from ..sub_terrain_cfg import SubTerrainBaseCfg -from . import hf_terrains @configclass @@ -42,7 +41,7 @@ class HfTerrainBaseCfg(SubTerrainBaseCfg): class HfRandomUniformTerrainCfg(HfTerrainBaseCfg): """Configuration for a random uniform height field terrain.""" - function = hf_terrains.random_uniform_terrain + function: str = "{DIR}.hf_terrains:random_uniform_terrain" noise_range: tuple[float, float] = MISSING """The minimum and maximum height noise (i.e. along z) of the terrain (in m).""" @@ -63,7 +62,7 @@ class HfRandomUniformTerrainCfg(HfTerrainBaseCfg): class HfPyramidSlopedTerrainCfg(HfTerrainBaseCfg): """Configuration for a pyramid sloped height field terrain.""" - function = hf_terrains.pyramid_sloped_terrain + function: str = "{DIR}.hf_terrains:pyramid_sloped_terrain" slope_range: tuple[float, float] = MISSING """The slope of the terrain (in radians).""" @@ -95,7 +94,7 @@ class HfInvertedPyramidSlopedTerrainCfg(HfPyramidSlopedTerrainCfg): class HfPyramidStairsTerrainCfg(HfTerrainBaseCfg): """Configuration for a pyramid stairs height field terrain.""" - function = hf_terrains.pyramid_stairs_terrain + function: str = "{DIR}.hf_terrains:pyramid_stairs_terrain" step_height_range: tuple[float, float] = MISSING """The minimum and maximum height of the steps (in m).""" @@ -130,7 +129,7 @@ class HfInvertedPyramidStairsTerrainCfg(HfPyramidStairsTerrainCfg): class HfDiscreteObstaclesTerrainCfg(HfTerrainBaseCfg): """Configuration for a discrete obstacles height field terrain.""" - function = hf_terrains.discrete_obstacles_terrain + function: str = "{DIR}.hf_terrains:discrete_obstacles_terrain" obstacle_height_mode: str = "choice" """The mode to use for the obstacle height. Defaults to "choice". @@ -155,7 +154,7 @@ class HfDiscreteObstaclesTerrainCfg(HfTerrainBaseCfg): class HfWaveTerrainCfg(HfTerrainBaseCfg): """Configuration for a wave height field terrain.""" - function = hf_terrains.wave_terrain + function: str = "{DIR}.hf_terrains:wave_terrain" amplitude_range: tuple[float, float] = MISSING """The minimum and maximum amplitude of the wave (in m).""" @@ -168,7 +167,7 @@ class HfWaveTerrainCfg(HfTerrainBaseCfg): class HfSteppingStonesTerrainCfg(HfTerrainBaseCfg): """Configuration for a stepping stones height field terrain.""" - function = hf_terrains.stepping_stones_terrain + function: str = "{DIR}.hf_terrains:stepping_stones_terrain" stone_height_max: float = MISSING """The maximum height of the stones (in m).""" diff --git a/source/isaaclab/isaaclab/terrains/sub_terrain_cfg.py b/source/isaaclab/isaaclab/terrains/sub_terrain_cfg.py index 0dab3ea8f3c1..5167afd5eb68 100644 --- a/source/isaaclab/isaaclab/terrains/sub_terrain_cfg.py +++ b/source/isaaclab/isaaclab/terrains/sub_terrain_cfg.py @@ -8,12 +8,14 @@ from collections.abc import Callable from dataclasses import MISSING - -import numpy as np -import trimesh +from typing import TYPE_CHECKING from isaaclab.utils import configclass +if TYPE_CHECKING: + import numpy as np + import trimesh + @configclass class FlatPatchSamplingCfg: diff --git a/source/isaaclab/isaaclab/terrains/terrain_generator_cfg.py b/source/isaaclab/isaaclab/terrains/terrain_generator_cfg.py index 6a3238c7cb4a..605b8116e7dd 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_generator_cfg.py +++ b/source/isaaclab/isaaclab/terrains/terrain_generator_cfg.py @@ -15,19 +15,21 @@ from __future__ import annotations from dataclasses import MISSING -from typing import Literal +from typing import TYPE_CHECKING, Literal from isaaclab.utils import configclass from .sub_terrain_cfg import SubTerrainBaseCfg -from .terrain_generator import TerrainGenerator + +if TYPE_CHECKING: + from .terrain_generator import TerrainGenerator @configclass class TerrainGeneratorCfg: """Configuration for the terrain generator.""" - class_type: type = TerrainGenerator + class_type: type[TerrainGenerator] | str = "{DIR}.terrain_generator:TerrainGenerator" """The class to use for the terrain generator. Defaults to :class:`isaaclab.terrains.terrain_generator.TerrainGenerator`. diff --git a/source/isaaclab/isaaclab/terrains/terrain_importer.py b/source/isaaclab/isaaclab/terrains/terrain_importer.py index e9ddc691b35b..c78f9ae3344e 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_importer.py +++ b/source/isaaclab/isaaclab/terrains/terrain_importer.py @@ -355,17 +355,9 @@ def _compute_env_origins_curriculum(self, num_envs: int, origins: torch.Tensor) def _compute_env_origins_grid(self, num_envs: int, env_spacing: float) -> torch.Tensor: """Compute the origins of the environments in a grid based on configured spacing.""" - # create tensor based on number of environments - env_origins = torch.zeros(num_envs, 3, device=self.device) - # create a grid of origins - num_rows = np.ceil(num_envs / int(np.sqrt(num_envs))) - num_cols = np.ceil(num_envs / num_rows) - ii, jj = torch.meshgrid( - torch.arange(num_rows, device=self.device), torch.arange(num_cols, device=self.device), indexing="ij" - ) - env_origins[:, 0] = -(ii.flatten()[:num_envs] - (num_rows - 1) / 2) * env_spacing - env_origins[:, 1] = (jj.flatten()[:num_envs] - (num_cols - 1) / 2) * env_spacing - env_origins[:, 2] = 0.0 + from isaaclab.cloner import grid_transforms + + env_origins, _ = grid_transforms(num_envs, env_spacing, device=self.device) return env_origins """ diff --git a/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py b/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py index 7b42e06caaf3..279c4c981ebb 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py +++ b/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py @@ -11,17 +11,16 @@ import isaaclab.sim as sim_utils from isaaclab.utils import configclass -from .terrain_importer import TerrainImporter - if TYPE_CHECKING: from .terrain_generator_cfg import TerrainGeneratorCfg + from .terrain_importer import TerrainImporter @configclass class TerrainImporterCfg: """Configuration for the terrain manager.""" - class_type: type = TerrainImporter + class_type: type[TerrainImporter] | str = "{DIR}.terrain_importer:TerrainImporter" """The class to use for the terrain importer. Defaults to :class:`isaaclab.terrains.terrain_importer.TerrainImporter`. diff --git a/source/isaaclab/isaaclab/terrains/trimesh/__init__.py b/source/isaaclab/isaaclab/terrains/trimesh/__init__.py index b27b7a921109..b586b6a18912 100644 --- a/source/isaaclab/isaaclab/terrains/trimesh/__init__.py +++ b/source/isaaclab/isaaclab/terrains/trimesh/__init__.py @@ -12,18 +12,6 @@ efficient than the height-field representation, but it is not as flexible. """ -from .mesh_terrains_cfg import ( - MeshBoxTerrainCfg, - MeshFloatingRingTerrainCfg, - MeshGapTerrainCfg, - MeshInvertedPyramidStairsTerrainCfg, - MeshPitTerrainCfg, - MeshPlaneTerrainCfg, - MeshPyramidStairsTerrainCfg, - MeshRailsTerrainCfg, - MeshRandomGridTerrainCfg, - MeshRepeatedBoxesTerrainCfg, - MeshRepeatedCylindersTerrainCfg, - MeshRepeatedPyramidsTerrainCfg, - MeshStarTerrainCfg, -) +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/terrains/trimesh/__init__.pyi b/source/isaaclab/isaaclab/terrains/trimesh/__init__.pyi new file mode 100644 index 000000000000..9ff71be68f94 --- /dev/null +++ b/source/isaaclab/isaaclab/terrains/trimesh/__init__.pyi @@ -0,0 +1,36 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MeshBoxTerrainCfg", + "MeshFloatingRingTerrainCfg", + "MeshGapTerrainCfg", + "MeshInvertedPyramidStairsTerrainCfg", + "MeshPitTerrainCfg", + "MeshPlaneTerrainCfg", + "MeshPyramidStairsTerrainCfg", + "MeshRailsTerrainCfg", + "MeshRandomGridTerrainCfg", + "MeshRepeatedBoxesTerrainCfg", + "MeshRepeatedCylindersTerrainCfg", + "MeshRepeatedPyramidsTerrainCfg", + "MeshStarTerrainCfg", +] + +from .mesh_terrains_cfg import ( + MeshBoxTerrainCfg, + MeshFloatingRingTerrainCfg, + MeshGapTerrainCfg, + MeshInvertedPyramidStairsTerrainCfg, + MeshPitTerrainCfg, + MeshPlaneTerrainCfg, + MeshPyramidStairsTerrainCfg, + MeshRailsTerrainCfg, + MeshRandomGridTerrainCfg, + MeshRepeatedBoxesTerrainCfg, + MeshRepeatedCylindersTerrainCfg, + MeshRepeatedPyramidsTerrainCfg, + MeshStarTerrainCfg, +) diff --git a/source/isaaclab/isaaclab/terrains/trimesh/mesh_terrains_cfg.py b/source/isaaclab/isaaclab/terrains/trimesh/mesh_terrains_cfg.py index 4247e21486be..1e82a88aee77 100644 --- a/source/isaaclab/isaaclab/terrains/trimesh/mesh_terrains_cfg.py +++ b/source/isaaclab/isaaclab/terrains/trimesh/mesh_terrains_cfg.py @@ -7,8 +7,6 @@ from dataclasses import MISSING from typing import Literal -import isaaclab.terrains.trimesh.mesh_terrains as mesh_terrains -import isaaclab.terrains.trimesh.utils as mesh_utils_terrains from isaaclab.utils import configclass from ..sub_terrain_cfg import SubTerrainBaseCfg @@ -22,14 +20,14 @@ class MeshPlaneTerrainCfg(SubTerrainBaseCfg): """Configuration for a plane mesh terrain.""" - function = mesh_terrains.flat_terrain + function: str = "{DIR}.mesh_terrains:flat_terrain" @configclass class MeshPyramidStairsTerrainCfg(SubTerrainBaseCfg): """Configuration for a pyramid stair mesh terrain.""" - function = mesh_terrains.pyramid_stairs_terrain + function: str = "{DIR}.mesh_terrains:pyramid_stairs_terrain" border_width: float = 0.0 """The width of the border around the terrain (in m). Defaults to 0.0. @@ -63,14 +61,14 @@ class MeshInvertedPyramidStairsTerrainCfg(MeshPyramidStairsTerrainCfg): This is the same as :class:`MeshPyramidStairsTerrainCfg` except that the steps are inverted. """ - function = mesh_terrains.inverted_pyramid_stairs_terrain + function: str = "{DIR}.mesh_terrains:inverted_pyramid_stairs_terrain" @configclass class MeshRandomGridTerrainCfg(SubTerrainBaseCfg): """Configuration for a random grid mesh terrain.""" - function = mesh_terrains.random_grid_terrain + function: str = "{DIR}.mesh_terrains:random_grid_terrain" grid_width: float = MISSING """The width of the grid cells (in m).""" @@ -93,7 +91,7 @@ class MeshRandomGridTerrainCfg(SubTerrainBaseCfg): class MeshRailsTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with box rails as extrusions.""" - function = mesh_terrains.rails_terrain + function: str = "{DIR}.mesh_terrains:rails_terrain" rail_thickness_range: tuple[float, float] = MISSING """The thickness of the inner and outer rails (in m).""" @@ -109,7 +107,7 @@ class MeshRailsTerrainCfg(SubTerrainBaseCfg): class MeshPitTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with a pit that leads out of the pit.""" - function = mesh_terrains.pit_terrain + function: str = "{DIR}.mesh_terrains:pit_terrain" pit_depth_range: tuple[float, float] = MISSING """The minimum and maximum height of the pit (in m).""" @@ -125,7 +123,7 @@ class MeshPitTerrainCfg(SubTerrainBaseCfg): class MeshBoxTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with boxes (similar to a pyramid).""" - function = mesh_terrains.box_terrain + function: str = "{DIR}.mesh_terrains:box_terrain" box_height_range: tuple[float, float] = MISSING """The minimum and maximum height of the box (in m).""" @@ -141,7 +139,7 @@ class MeshBoxTerrainCfg(SubTerrainBaseCfg): class MeshGapTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with a gap around the platform.""" - function = mesh_terrains.gap_terrain + function: str = "{DIR}.mesh_terrains:gap_terrain" gap_width_range: tuple[float, float] = MISSING """The minimum and maximum width of the gap (in m).""" @@ -154,7 +152,7 @@ class MeshGapTerrainCfg(SubTerrainBaseCfg): class MeshFloatingRingTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with a floating ring around the center.""" - function = mesh_terrains.floating_ring_terrain + function: str = "{DIR}.mesh_terrains:floating_ring_terrain" ring_width_range: tuple[float, float] = MISSING """The minimum and maximum width of the ring (in m).""" @@ -173,7 +171,7 @@ class MeshFloatingRingTerrainCfg(SubTerrainBaseCfg): class MeshStarTerrainCfg(SubTerrainBaseCfg): """Configuration for a terrain with a star pattern.""" - function = mesh_terrains.star_terrain + function: str = "{DIR}.mesh_terrains:star_terrain" num_bars: int = MISSING """The number of bars per-side the star. Must be greater than 2.""" @@ -201,7 +199,7 @@ class ObjectCfg: height: float = MISSING """The height (along z) of the object (in m).""" - function = mesh_terrains.repeated_objects_terrain + function: str = "{DIR}.mesh_terrains:repeated_objects_terrain" object_type: Literal["cylinder", "box", "cone"] | callable = MISSING """The type of object to generate. @@ -263,7 +261,7 @@ class ObjectCfg(MeshRepeatedObjectsTerrainCfg.ObjectCfg): degrees: bool = True """Whether the angle is in degrees. Defaults to True.""" - object_type = mesh_utils_terrains.make_cone + object_type: str = "{DIR}.utils:make_cone" object_params_start: ObjectCfg = MISSING """The object curriculum parameters at the start of the curriculum.""" @@ -287,7 +285,7 @@ class ObjectCfg(MeshRepeatedObjectsTerrainCfg.ObjectCfg): degrees: bool = True """Whether the angle is in degrees. Defaults to True.""" - object_type = mesh_utils_terrains.make_box + object_type: str = "{DIR}.utils:make_box" object_params_start: ObjectCfg = MISSING """The box curriculum parameters at the start of the curriculum.""" @@ -311,7 +309,7 @@ class ObjectCfg(MeshRepeatedObjectsTerrainCfg.ObjectCfg): degrees: bool = True """Whether the angle is in degrees. Defaults to True.""" - object_type = mesh_utils_terrains.make_cylinder + object_type: str = "{DIR}.utils:make_cylinder" object_params_start: ObjectCfg = MISSING """The box curriculum parameters at the start of the curriculum.""" diff --git a/source/isaaclab/isaaclab/test/benchmark/__init__.py b/source/isaaclab/isaaclab/test/benchmark/__init__.py new file mode 100644 index 000000000000..77603f9fd45a --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Benchmarking utilities for IsaacLab. + +This package provides benchmarking utilities used across different test modules. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/benchmark/__init__.pyi b/source/isaaclab/isaaclab/test/benchmark/__init__.pyi new file mode 100644 index 000000000000..9e61b1eec69c --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/__init__.pyi @@ -0,0 +1,47 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseIsaacLabBenchmark", + "get_default_output_filename", + "BenchmarkMonitor", + "MethodBenchmarkDefinition", + "MethodBenchmarkRunner", + "MethodBenchmarkRunnerConfig", + "BooleanMeasurement", + "DictMeasurement", + "DictMetadata", + "FloatMetadata", + "IntMetadata", + "ListMeasurement", + "Measurement", + "MetadataBase", + "SingleMeasurement", + "StatisticalMeasurement", + "StringMetadata", + "TestPhase", +] + +from .benchmark_core import BaseIsaacLabBenchmark, get_default_output_filename +from .benchmark_monitor import BenchmarkMonitor +from .method_benchmark import ( + MethodBenchmarkDefinition, + MethodBenchmarkRunner, + MethodBenchmarkRunnerConfig, +) +from .measurements import ( + BooleanMeasurement, + DictMeasurement, + DictMetadata, + FloatMetadata, + IntMetadata, + ListMeasurement, + Measurement, + MetadataBase, + SingleMeasurement, + StatisticalMeasurement, + StringMetadata, + TestPhase, +) diff --git a/source/isaaclab/isaaclab/test/benchmark/backends.py b/source/isaaclab/isaaclab/test/benchmark/backends.py new file mode 100644 index 000000000000..6b6c0acacb10 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/backends.py @@ -0,0 +1,625 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import copy +import json +import logging +import os +import textwrap +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Any + +from .measurements import SingleMeasurement, StatisticalMeasurement, TestPhase, TestPhaseEncoder + +logger = logging.getLogger(__name__) + + +def get_default_output_filename(prefix: str = "benchmark") -> str: + """Generate default output filename with current date and time. + + Args: + prefix: Prefix for the filename (e.g., "articulation_benchmark"). + + Returns: + Filename string with timestamp (without extension). + """ + datetime_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + return f"{prefix}_{datetime_str}" + + +class MetricsBackendInterface(ABC): + """Abstract base class for metrics backends.""" + + @abstractmethod + def add_metrics(self, test_phase: TestPhase) -> None: + """Add metrics from a test phase. + + Args: + test_phase: Test phase containing metrics to add. + """ + pass + + @abstractmethod + def finalize(self, output_path: str, **kwargs) -> None: + """Finalize and write metrics to output. + + Args: + output_path: Path to write output file(s). + **kwargs: Additional backend-specific options. + """ + pass + + +class MetricsBackend: + """Factory for creating metrics backend instances.""" + + _instances: dict[str, MetricsBackendInterface] = {} + + @classmethod + def get_instance(cls, instance_type: str) -> MetricsBackendInterface: + """Get or create a backend instance by type name. + + Args: + instance_type: Type of backend to create ("json", "osmo", or "omniperf"). + + Returns: + Backend instance of the requested type. + + Raises: + ValueError: If the instance_type is not recognized. + """ + if instance_type not in cls._instances: + backend_map = { + "json": JSONFileMetrics, + "osmo": OsmoKPIFile, + "omniperf": OmniPerfKPIFile, + "summary": SummaryMetrics, + } + if instance_type not in backend_map: + raise ValueError(f"Unknown backend type: {instance_type}. Available: {list(backend_map.keys())}") + cls._instances[instance_type] = backend_map[instance_type]() + return cls._instances[instance_type] + + @classmethod + def reset_instances(cls) -> None: + """Reset all cached backend instances. Useful for testing.""" + cls._instances.clear() + + +class JSONFileMetrics(MetricsBackendInterface): + """Write metrics to a JSON file at the end of a session.""" + + def __init__(self) -> None: + self.data: list[TestPhase] = [] + self.test_name = "" + + def add_metrics(self, test_phase: TestPhase) -> None: + """Accumulate a test phase for later serialization. + + Args: + test_phase: Test phase to add. + + Example: + + .. code-block:: python + + backend.add_metrics(test_phase) + """ + self.data.append(copy.deepcopy(test_phase)) + + def finalize(self, output_path: str, output_filename: str, **kwargs) -> None: + """Write metrics data to a JSON file. + + Args: + output_path: Output path in which metrics file will be stored. + output_filename: Output filename. + **kwargs: Additional backend-specific options. + + Example: + + .. code-block:: python + + backend.finalize("/tmp/metrics", "metrics") + """ + if not self.data: + logger.warning("No test data to write. Skipping metrics file generation.") + return + + # Append test name to measurement name as OVAT needs to uniquely identify + for test_phase in self.data: + test_name = test_phase.get_metadata_field("workflow_name") + # Store the test name + if test_name != self.test_name: + if self.test_name: + logger.warning( + f"Nonempty test name {self.test_name} different from name {test_name} provided by test phase." + ) + self.test_name = test_name + logger.info(f"Setting test name to {self.test_name}") + + phase_name = test_phase.get_metadata_field("phase") + for measurement in test_phase.measurements: + measurement.name = f"{test_name} {phase_name} {measurement.name}" + + for metadata in test_phase.metadata: + metadata.name = f"{test_name} {phase_name} {metadata.name}" + + json_data = json.dumps(self.data, indent=4, cls=TestPhaseEncoder) + + metrics_path = os.path.join(output_path, f"{output_filename}.json") + with open(metrics_path, "w") as f: + f.write(json_data) + print(f"Results written to: {metrics_path}") + + self.data.clear() + + +class SummaryMetrics(MetricsBackendInterface): + """Print a human-readable summary and write JSON metrics.""" + + def __init__(self) -> None: + """Initialize internal phase storage and JSON backend.""" + self._phases: list[TestPhase] = [] + self._json_backend = JSONFileMetrics() + self._report_width = 86 + + def add_metrics(self, test_phase: TestPhase) -> None: + """Add metrics from a test phase; store for summary and forward to JSON backend. + + Args: + test_phase: Test phase containing measurements and metadata. + """ + self._phases.append(copy.deepcopy(test_phase)) + self._json_backend.add_metrics(test_phase) + + def finalize(self, output_path: str, output_filename: str, **kwargs) -> None: + """Write JSON output and print human-readable summary to console. + + Args: + output_path: Path to write output file(s). + output_filename: Base filename for the JSON file. + **kwargs: Additional options passed to the JSON backend. + """ + self._json_backend.finalize(output_path, output_filename, **kwargs) + if self._phases: + self._print_summary() + self._phases.clear() + + def _print_summary(self) -> None: + """Format and print the boxed summary report to stdout.""" + phases = self._merge_phases() + benchmark_info = phases.get("benchmark_info") + runtime_phase = phases.get("runtime") + startup_phase = phases.get("startup") + train_phase = phases.get("train") + frametime_phase = phases.get("frametime") + hardware_info = phases.get("hardware_info") + version_info = phases.get("version_info") + + benchmark_meta = self._metadata_map(benchmark_info) + hardware_meta = self._metadata_map(hardware_info) + version_meta = self._metadata_map(version_info) + dev_meta = version_meta.get("dev", {}) if isinstance(version_meta.get("dev"), dict) else {} + + workflow_name = benchmark_meta.get("workflow_name") + timestamp = benchmark_meta.get("timestamp") + task = benchmark_meta.get("task") + seed = benchmark_meta.get("seed") + num_envs = benchmark_meta.get("num_envs") + max_iterations = benchmark_meta.get("max_iterations") + num_cpus = hardware_meta.get("physical_cores") + commit = dev_meta.get("commit_hash_short") or dev_meta.get("commit_hash") + branch = dev_meta.get("branch") + + gpu_name, gpu_total_mem = self._get_gpu_summary(hardware_meta) + + print() + self._print_box_separator() + self._print_box_line("Summary Report".center(self._report_width - 4)) + self._print_box_separator() + self._print_box_kv("workflow_name", workflow_name) + self._print_box_kv("timestamp", timestamp) + self._print_box_kv("task", task) + self._print_box_kv("seed", seed) + self._print_box_kv("num_envs", num_envs) + self._print_box_kv("max_iterations", max_iterations) + self._print_box_kv("num_cpus", num_cpus) + self._print_box_kv("commit", commit) + self._print_box_kv("branch", branch) + self._print_box_kv("gpu_name", gpu_name) + if gpu_total_mem is not None: + self._print_box_kv("gpu_total_memory_gb", gpu_total_mem) + self._print_box_separator() + + if runtime_phase: + runtime_rows = self._summarize_runtime_metrics(runtime_phase.measurements) + self._print_box_line("Phase: runtime") + for row in runtime_rows: + self._print_box_line(row) + self._print_box_separator() + + if startup_phase: + self._print_box_line("Phase: startup") + self._print_optional_measurement(startup_phase, "App Launch Time") + self._print_optional_measurement(startup_phase, "Python Imports Time") + self._print_optional_measurement(startup_phase, "Task Creation and Start Time") + self._print_optional_measurement(startup_phase, "Scene Creation Time") + self._print_optional_measurement(startup_phase, "Simulation Start Time") + self._print_optional_measurement(startup_phase, "Total Start Time (Launch to Train)") + self._print_box_separator() + + if train_phase: + self._print_box_line("Phase: train") + self._print_optional_measurement(train_phase, "Max Rewards", unit_fallback="float") + self._print_optional_measurement(train_phase, "Max Episode Lengths", unit_fallback="float") + self._print_optional_measurement(train_phase, "Last Reward", unit_fallback="float") + self._print_optional_measurement(train_phase, "Last Episode Length", unit_fallback="float") + self._print_optional_measurement(train_phase, "EMA 0.95 Reward", unit_fallback="float") + self._print_optional_measurement(train_phase, "EMA 0.95 Episode Length", unit_fallback="float") + self._print_box_separator() + + if frametime_phase and frametime_phase.measurements: + self._print_box_line("Phase: frametime") + for measurement in frametime_phase.measurements: + label = measurement.name + if isinstance(measurement, StatisticalMeasurement): + unit_str = f" {measurement.unit.strip()}" if (measurement.unit and measurement.unit.strip()) else "" + value = f"{self._format_scalar(measurement.mean)}{unit_str}" + elif isinstance(measurement, SingleMeasurement): + unit_str = f" {measurement.unit.strip()}" if (measurement.unit and measurement.unit.strip()) else "" + value = f"{self._format_scalar(measurement.value)}{unit_str}" + else: + continue + self._print_box_line(f"{label}: {value}") + self._print_box_separator() + + # Render any phases not handled above (e.g. profiling phases from benchmark_startup) + known_phases = { + "benchmark_info", + "runtime", + "startup", + "train", + "frametime", + "hardware_info", + "version_info", + } + for phase_name, phase in phases.items(): + if phase_name in known_phases or not phase.measurements: + continue + self._print_box_line(f"Phase: {phase_name}") + for measurement in phase.measurements: + label = measurement.name + if isinstance(measurement, StatisticalMeasurement): + unit_str = f" {measurement.unit.strip()}" if (measurement.unit and measurement.unit.strip()) else "" + value = f"{self._format_scalar(measurement.mean)}{unit_str}" + elif isinstance(measurement, SingleMeasurement): + unit_str = f" {measurement.unit.strip()}" if (measurement.unit and measurement.unit.strip()) else "" + value = f"{self._format_scalar(measurement.value)}{unit_str}" + else: + continue + self._print_box_line(f"{label}: {value}") + self._print_box_separator() + + if hardware_meta: + self._print_box_line("System:") + self._print_box_kv("cpu_name", hardware_meta.get("cpu_name")) + self._print_box_kv("physical_cores", hardware_meta.get("physical_cores")) + self._print_box_kv("total_ram_gb", hardware_meta.get("total_ram_gb")) + self._print_box_kv("gpu_device_count", hardware_meta.get("gpu_device_count")) + self._print_box_kv("cuda_version", hardware_meta.get("cuda_version")) + self._print_box_separator() + + def _merge_phases(self) -> dict[str, TestPhase]: + """Merge all stored phases by name, combining measurements and metadata. + + Returns: + Dictionary mapping phase name to a single merged TestPhase. + """ + merged: dict[str, TestPhase] = {} + for phase in self._phases: + name = phase.phase_name + if name not in merged: + merged[name] = copy.deepcopy(phase) + else: + merged[name].measurements.extend(phase.measurements) + merged[name].metadata.extend(phase.metadata) + return merged + + def _metadata_map(self, phase: TestPhase | None) -> dict[str, Any]: + """Build a name -> data map from a phase's metadata list. + + Args: + phase: Test phase, or None. + + Returns: + Dictionary of metadata names to their data values. + """ + if not phase: + return {} + metadata: dict[str, Any] = {} + for item in phase.metadata: + if hasattr(item, "data"): + metadata[item.name] = item.data + return metadata + + def _get_gpu_summary(self, hardware_meta: dict[str, Any]) -> tuple[str | None, float | None]: + """Extract GPU name and total memory (GB) from hardware metadata. + + Args: + hardware_meta: Metadata dict from the hardware_info phase. + + Returns: + (gpu_name, total_memory_gb) or (None, None) if not available. + """ + gpu_devices = hardware_meta.get("gpu_devices") + current_device = hardware_meta.get("gpu_current_device", 0) + if isinstance(gpu_devices, dict): + device = gpu_devices.get(str(current_device)) or next(iter(gpu_devices.values()), {}) + name = device.get("name") + total_mem = device.get("total_memory_gb") + return name, total_mem + return None, None + + def _print_optional_measurement(self, phase: TestPhase, name: str, unit_fallback: str | None = None) -> None: + """Print a single measurement line if present in the phase. + + Args: + phase: Test phase to look up the measurement. + name: Measurement name. + unit_fallback: Unit string to use when measurement has no unit. + """ + measurement = self._get_single_measurement(phase, name) + if measurement is None: + return + unit = (measurement.unit or unit_fallback or "").strip() + suffix = f" {unit}" if unit else "" + self._print_box_line(f"{name}: {self._format_scalar(measurement.value)}{suffix}") + + def _get_single_measurement(self, phase: TestPhase, name: str) -> SingleMeasurement | None: + """Return the first SingleMeasurement in the phase with the given name. + + Args: + phase: Test phase to search. + name: Measurement name. + + Returns: + The matching SingleMeasurement, or None. + """ + for measurement in phase.measurements: + if isinstance(measurement, SingleMeasurement) and measurement.name == name: + return measurement + return None + + def _summarize_runtime_metrics(self, measurements: list) -> list[str]: + """Build min/mean/max summary rows from SingleMeasurement runtime metrics. + + Args: + measurements: List of measurements (typically from the runtime phase). + + Returns: + List of formatted lines, grouped by category (Collection, Learning, etc.). + """ + series: dict[str, dict[str, float]] = {} + units: dict[str, str | None] = {} + for measurement in measurements: + if not isinstance(measurement, SingleMeasurement): + continue + name = measurement.name + value = measurement.value + unit = measurement.unit + if not isinstance(value, (int, float)): + continue + if name.startswith("Min "): + base = name[len("Min ") :] + series.setdefault(base, {})["min"] = float(value) + units.setdefault(base, unit) + elif name.startswith("Max "): + base = name[len("Max ") :] + series.setdefault(base, {})["max"] = float(value) + units.setdefault(base, unit) + elif name.startswith("Mean "): + base = name[len("Mean ") :] + series.setdefault(base, {})["mean"] = float(value) + units.setdefault(base, unit) + + category_order = ["Collection", "Learning", "Step Times", "Throughput", "Other"] + categorized: dict[str, list[str]] = {key: [] for key in category_order} + for base, stats in series.items(): + raw_unit = units.get(base) + unit = (raw_unit or "").strip() if isinstance(raw_unit, str) else "" + unit_suffix = f" {unit}" if unit else "" + min_val = self._format_scalar(stats.get("min", 0.0)) + mean_val = self._format_scalar(stats.get("mean", 0.0)) + max_val = self._format_scalar(stats.get("max", 0.0)) + row = f"{base} (min/mean/max): {min_val} / {mean_val} / {max_val}{unit_suffix}" + + if "Collection" in base: + categorized["Collection"].append(row) + elif "Learning" in base: + categorized["Learning"].append(row) + elif "step time" in base.lower(): + categorized["Step Times"].append(row) + elif "FPS" in base or "Throughput" in base: + categorized["Throughput"].append(row) + else: + categorized["Other"].append(row) + + rows: list[str] = [] + for category in category_order: + if not categorized[category]: + continue + rows.append(f"{category}:") + rows.extend(f" {entry}" for entry in categorized[category]) + if not rows: + rows.append("No runtime metrics available.") + return rows + + def _print_box_separator(self) -> None: + """Print a horizontal rule line for the summary box.""" + print("|" + "-" * (self._report_width - 2) + "|") + + def _print_box_line(self, text: str) -> None: + """Print a line of text inside the box, wrapping if needed.""" + inner_width = self._report_width - 4 + if not text: + print(f"| {' ' * inner_width} |") + return + for line in textwrap.wrap(text, width=inner_width, break_long_words=False, break_on_hyphens=False): + print(f"| {line.ljust(inner_width)} |") + + def _print_box_kv(self, key: str, value: Any) -> None: + """Print a key-value line; skip if value is None.""" + if value is None: + return + if isinstance(value, float): + value = self._format_scalar(value) + self._print_box_line(f"{key}: {value}") + + def _format_scalar(self, value: float | int) -> str: + """Format a numeric value for display (two decimal places for floats).""" + if isinstance(value, float): + return f"{value:.2f}" + return str(value) + + +class OsmoKPIFile(MetricsBackendInterface): + """Write per-phase KPI documents for Osmo ingestion. + + Only SingleMeasurement metrics and metadata are written as key-value pairs. + """ + + def __init__(self) -> None: + self._test_phases: list[TestPhase] = [] + + def add_metrics(self, test_phase: TestPhase) -> None: + """Adds provided test_phase to internal list of test_phases. + + Args: + test_phase: Current test phase. + + Example: + + .. code-block:: python + + backend.add_metrics(test_phase) + """ + self._test_phases.append(test_phase) + + def finalize(self, output_path: str, output_filename: str, **kwargs) -> None: + """Write metrics to output file(s). + + Each test phase's SingleMeasurement metrics and metadata are written to an output JSON file, at path + `[output_path]/[output_filename].json`. + + Args: + output_path: Output path in which metrics files will be stored. + output_filename: Output filename. + **kwargs: Additional backend-specific options. + + Example: + + .. code-block:: python + + backend.finalize("/tmp/metrics", "kpis") + """ + for test_phase in self._test_phases: + # Retrieve useful metadata from test_phase + phase_name = test_phase.get_metadata_field("phase") + + osmo_kpis: dict[str, object] = {} + log_statements = [f"{phase_name} KPIs:"] + # Add metadata as KPIs + for metadata in test_phase.metadata: + osmo_kpis[metadata.name] = metadata.data + log_statements.append(f"{metadata.name}: {metadata.data}") + # Add single measurements as KPIs + for measurement in test_phase.measurements: + if isinstance(measurement, SingleMeasurement): + osmo_kpis[measurement.name] = measurement.value + log_statements.append(f"{measurement.name}: {measurement.value} {measurement.unit}") + # Generate the output filename with timestamp + metrics_path = os.path.join(output_path, f"{output_filename}.json") + # Dump key-value pairs (fields) to the JSON document + json_data = json.dumps(osmo_kpis, indent=4) + with open(metrics_path, "w") as f: + f.write(json_data) + print(f"Results written to: {metrics_path}") + + +class OmniPerfKPIFile(MetricsBackendInterface): + """Write KPI metrics for upload to a PostgreSQL database.""" + + def __init__(self) -> None: + self._test_phases: list[TestPhase] = [] + + def add_metrics(self, test_phase: TestPhase) -> None: + """Adds provided test_phase to internal list of test_phases. + + Args: + test_phase: Current test phase. + + Example: + + .. code-block:: python + + backend.add_metrics(test_phase) + """ + self._test_phases.append(test_phase) + + def finalize(self, output_path: str, output_filename: str, **kwargs) -> None: + """Write metrics to output file(s). + + Measurement metrics and metadata are written to an output JSON file, at path + `[output_path]/[output_filename].json`. + + Args: + output_path: Output path in which metrics file will be stored. + output_filename: Output filename. + **kwargs: Additional backend-specific options. + + Example: + + .. code-block:: python + + backend.finalize("/tmp/metrics", "omniperf") + """ + if not self._test_phases: + logger.warning("No test phases to write. Skipping metrics file generation.") + return + + workflow_data: dict[str, object] = {} + + for test_phase in self._test_phases: + # Retrieve useful metadata from test_phase + phase_name = test_phase.get_metadata_field("phase") + + phase_data: dict[str, object] = {} + log_statements = [f"{phase_name} Metrics:"] + # Add metadata as metrics + for metadata in test_phase.metadata: + phase_data[metadata.name] = metadata.data + log_statements.append(f"{metadata.name}: {metadata.data}") + # Add measurements as metrics + for measurement in test_phase.measurements: + if isinstance(measurement, StatisticalMeasurement): + log_statements.append( + f"{measurement.name}: {measurement.mean:.2f} ± {measurement.std:.2f} " + f"{measurement.unit} (n={measurement.n})" + ) + phase_data[f"{measurement.name}_mean"] = measurement.mean + phase_data[f"{measurement.name}_std"] = measurement.std + phase_data[f"{measurement.name}_n"] = measurement.n + elif type(measurement).__name__ == "SingleMeasurement": + log_statements.append(f"{measurement.name}: {measurement.value} {measurement.unit}") + phase_data[measurement.name] = measurement.value + workflow_data[phase_name] = phase_data + + metrics_path = os.path.join(output_path, f"{output_filename}.json") + # Dump key-value pairs (fields) to the JSON document + json_data = json.dumps(workflow_data, indent=4) + with open(metrics_path, "w") as f: + f.write(json_data) + print(f"Results written to: {metrics_path}") diff --git a/source/isaaclab/isaaclab/test/benchmark/benchmark_core.py b/source/isaaclab/isaaclab/test/benchmark/benchmark_core.py new file mode 100644 index 000000000000..7daedbc132a8 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/benchmark_core.py @@ -0,0 +1,307 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import os +import time +from collections.abc import Sequence +from datetime import datetime + +from . import backends +from .backends import get_default_output_filename +from .interfaces import MeasurementDataRecorder +from .measurements import DictMetadata, FloatMetadata, IntMetadata, Measurement, MetadataBase, StringMetadata, TestPhase +from .recorders import CPUInfoRecorder, GPUInfoRecorder, MemoryInfoRecorder, VersionInfoRecorder + +logger = logging.getLogger(__name__) + +# Valid measurement and metadata class names (to support both isaaclab and isaacsim types) +_MEASUREMENT_CLASS_NAMES = { + "Measurement", + "SingleMeasurement", + "StatisticalMeasurement", + "BooleanMeasurement", + "DictMeasurement", + "ListMeasurement", +} +_METADATA_CLASS_NAMES = {"MetadataBase", "StringMetadata", "IntMetadata", "FloatMetadata", "DictMetadata"} + + +def _is_measurement_type(obj: object) -> bool: + """Check if object is a measurement type by class name (supports isaacsim types).""" + return type(obj).__name__ in _MEASUREMENT_CLASS_NAMES + + +def _is_metadata_type(obj: object) -> bool: + """Check if object is a metadata type by class name (supports isaacsim types).""" + return type(obj).__name__ in _METADATA_CLASS_NAMES + + +class BaseIsaacLabBenchmark: + """Base benchmark class for IsaacLab's benchmarks.""" + + def __init__( + self, + benchmark_name: str, + backend_type: str, + output_path: str, + use_recorders: bool = True, + output_prefix: str | None = None, + workflow_metadata: dict | None = None, + frametime_recorders: bool = False, + ): + """Initialize common benchmark state and recorders. + + Args: + benchmark_name: Name of benchmark to use in outputs. + backend_type: Type of backend used to collect and print metrics. + output_path: Path to output directory. + use_recorders: Whether to use recorders to collect metrics. Defaults to True. + output_filename: Filename to use for the output file, defaults to None. + workflow_metadata: Metadata describing benchmark, defaults to None. + frametime_recorders: Whether to use frametime recorders to collect metrics. Defaults to True. + """ + self.benchmark_name = benchmark_name + + # Resolve output path + if not os.path.exists(output_path): + try: + os.makedirs(output_path) + except Exception as e: + raise ValueError(f"Could not create output directory {output_path}: {e}") + self.output_path = output_path + if output_prefix is None: + output_prefix = "benchmark" + logger.warning("No output prefix provided, using default prefix: benchmark") + self.output_prefix = get_default_output_filename(output_prefix) + + # Get metrics backend + logger.info("Using metrics backend = %s", backend_type) + self._metrics = backends.MetricsBackend.get_instance(instance_type=backend_type) + self._phases: dict[str, TestPhase] = {} + + # Generate workflow-level metadata + workflow_name = StringMetadata(name="workflow_name", data=self.benchmark_name) + timestamp = StringMetadata(name="timestamp", data=datetime.now().isoformat()) + self.add_measurement("benchmark_info", metadata=workflow_name) + self.add_measurement("benchmark_info", metadata=timestamp) + if workflow_metadata: + if "metadata" in workflow_metadata: + self.add_measurement("benchmark_info", metadata=self._metadata_from_dict(workflow_metadata)) + else: + logger.warning( + "workflow_metadata provided, but missing expected 'metadata' entry. Metadata will not be read." + ) + + # Whether to use recorders to collect metrics. + self._use_recorders = use_recorders + self._use_frametime_recorders = frametime_recorders + + # Initialize frametime recorders dict (always, even when not using recorders) + self._frametime_recorders: dict[str, MeasurementDataRecorder] = {} + + if self._use_recorders: + # Recorders that need to be updated manually since they don't depend on the kit timeline. + self._manual_recorders: dict[str, MeasurementDataRecorder] = { + "CPUInfo": CPUInfoRecorder(), + "GPUInfo": GPUInfoRecorder(), + "MemoryInfo": MemoryInfoRecorder(), + "VersionInfo": VersionInfoRecorder(), + } + + # "Kit-full" means Isaac Sim (Kit) runtime is available, so benchmark services can + # provide frametime recorders. "Kit-less" means those services are absent; we should + # gracefully skip or fall back while still allowing mixed modes (e.g., kit-full physics + # with kit-less rendering). + # If we're using Kit, then we can use IsaacSim's benchmark services to peek into the frametimes. + if self._use_frametime_recorders: + try: + # Enable the benchmark services extension first + from isaacsim.core.experimental.utils.app import enable_extension + + enable_extension("isaacsim.benchmark.services") + + added_any = False + + # Try individual recorders first so we can collect partial metrics when only some are available. + try: + from isaacsim.benchmark.services.datarecorders.physics_frametime import PhysicsFrametimeRecorder + + self._frametime_recorders["PhysicsFrametime"] = PhysicsFrametimeRecorder() + added_any = True + except (ImportError, Exception) as e: + logger.debug(f"Physics frametime recorder unavailable: {e}") + + try: + from isaacsim.benchmark.services.datarecorders.render_frametime import RenderFrametimeRecorder + + self._frametime_recorders["RenderFrametime"] = RenderFrametimeRecorder() + added_any = True + except (ImportError, Exception) as e: + logger.debug(f"Render frametime recorder unavailable: {e}") + + try: + from isaacsim.benchmark.services.datarecorders.app_frametime import AppFrametimeRecorder + + self._frametime_recorders["AppFrametime"] = AppFrametimeRecorder() + added_any = True + except (ImportError, Exception) as e: + logger.debug(f"App frametime recorder unavailable: {e}") + + try: + from isaacsim.benchmark.services.datarecorders.gpu_frametime import GPUFrametimeRecorder + + self._frametime_recorders["GPUFrametime"] = GPUFrametimeRecorder() + added_any = True + except (ImportError, Exception) as e: + logger.debug(f"GPU frametime recorder unavailable: {e}") + + if not added_any: + # Fallback for Isaac Sim packaging that bundles frametime recorders in a single module. + try: + from isaacsim.benchmark.services.datarecorders.interface import InputContext + from isaacsim.benchmark.services.recorders import IsaacFrameTimeRecorder + + context = InputContext(phase="frametime") + self._frametime_recorders["IsaacFrameTime"] = IsaacFrameTimeRecorder( + context=context, gpu_frametime=False + ) + except ImportError as e: + logger.warning( + "Could not import bundled frametime recorder: " + f"{e}. Frametime measurements will not be available." + ) + except ImportError as e: + logger.warning( + f"Could not import kit recorders: {e}. Kit related measurements will not be available." + ) + + # Start collecting frametime recorders. + for recorder in self._frametime_recorders.values(): + recorder.start_collecting() + + # Set the start time of the benchmark. + logger.info("Starting") + self.benchmark_start_time = time.time() + + @property + def output_file_path(self) -> str: + """Get the full path to the output file.""" + return os.path.join(self.output_path, f"{self.output_prefix}.json") + + def _metadata_from_dict(self, metadata_dict: dict) -> list[MetadataBase]: + """Convert a dictionary with metadata lists into a list of MetadataBase objects. + + Example: + .. code-block:: python + metadata = self._metadata_from_dict({"metadata": [{"name": "gpu", "data": "A10"}]}) + + Args: + metadata_dict: A dictionary with metadata lists. + + Returns: + A list of MetadataBase objects. + """ + metadata: list[MetadataBase] = [] + metadata_mapping = {str: StringMetadata, int: IntMetadata, float: FloatMetadata, dict: DictMetadata} + for meas in metadata_dict["metadata"]: + if "data" in meas: + metadata_type = metadata_mapping.get(type(meas["data"])) + if metadata_type: + curr_meta = metadata_type(name=meas["name"], data=meas["data"]) + metadata.append(curr_meta) + return metadata + + def update_manual_recorders(self) -> None: + """Update manual recorders that don't depend on the kit timeline.""" + + if not self._use_recorders: + logger.warning("Recorders are not enabled. Skipping update of manual recorders.") + return + + for recorder in self._manual_recorders.values(): + recorder.update() + + def add_measurement( + self, + phase_name: str, + measurement: Measurement | Sequence[Measurement] | None = None, + metadata: MetadataBase | Sequence[MetadataBase] | None = None, + ) -> None: + """Add a measurement to the benchmark. + + Args: + phase_name: The name of the phase to add the measurement to. + measurement: The measurement to add. + metadata: The metadata to add. + """ + if phase_name not in self._phases: + self._phases[phase_name] = TestPhase(phase_name=phase_name) + # Add required phase metadata for backends + phase_metadata = StringMetadata(name="phase", data=phase_name) + workflow_metadata = StringMetadata(name="workflow_name", data=self.benchmark_name) + self._phases[phase_name].metadata.extend([phase_metadata, workflow_metadata]) + + if measurement: + if isinstance(measurement, Sequence): + # Check that all the elements are of type Measurement + for m in measurement: + if not _is_measurement_type(m): + raise ValueError(f"Measurement element {m} is not of type Measurement") + self._phases[phase_name].measurements.extend(measurement) + else: + # Check that the element is of type Measurement + if not _is_measurement_type(measurement): + raise ValueError(f"Measurement element {measurement} is not of type Measurement") + self._phases[phase_name].measurements.append(measurement) + if metadata: + if isinstance(metadata, Sequence): + # Check that all the elements are of type MetadataBase + for m in metadata: + if not _is_metadata_type(m): + raise ValueError(f"Metadata element {m} is not of type MetadataBase") + self._phases[phase_name].metadata.extend(metadata) + else: + # Check that the element is of type MetadataBase + if not _is_metadata_type(metadata): + raise ValueError(f"Metadata element {metadata} is not of type MetadataBase") + self._phases[phase_name].metadata.append(metadata) + + def _finalize_impl(self) -> None: + # Stop collecting frametime recorders. + for recorder in self._frametime_recorders.values(): + recorder.stop_collecting() + + # Add measurements and metadata from recorders to the phases. + if self._use_recorders: + for recorder_name, measurement_data in self._manual_recorders.items(): + data = measurement_data.get_data() + # Add measurements to runtime phase if present + if data.measurements: + self.add_measurement("runtime", measurement=data.measurements) + # Add metadata to appropriate phase (even if no measurements) + if data.metadata: + if recorder_name == "VersionInfo": + self.add_measurement("version_info", metadata=data.metadata) + else: + self.add_measurement("hardware_info", metadata=data.metadata) + for recorder_name, measurement_data in self._frametime_recorders.items(): + data = measurement_data.get_data() + # Add measurements to runtime phase if present + if data.measurements: + self.add_measurement("frametime", measurement=data.measurements) + + # Check that there are phases to write. + if not self._phases: + logger.warning("No phases collected.No metrics will be written.") + return + + # Add the phases to the metrics backend. + for phase in self._phases.values(): + self._metrics.add_metrics(phase) + + self._metrics.finalize(self.output_path, self.output_prefix) + self._manual_recorders = None + self._frametime_recorders = None diff --git a/source/isaaclab/isaaclab/test/benchmark/benchmark_monitor.py b/source/isaaclab/isaaclab/test/benchmark/benchmark_monitor.py new file mode 100644 index 000000000000..f4c21ca59453 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/benchmark_monitor.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Background monitoring thread for continuous benchmark recording during blocking operations.""" + +import threading +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .benchmark_core import BaseIsaacLabBenchmark + + +class BenchmarkMonitor: + """Background thread that periodically updates benchmark recorders. + + This utility enables continuous system resource monitoring during blocking + RL training loops (RSL-RL, RL-Games) where `update_manual_recorders()` would + otherwise only be called once after training completes. + + Usage: + with BenchmarkMonitor(benchmark, interval=1.0): + runner.learn(...) # Blocking training call + """ + + def __init__(self, benchmark: "BaseIsaacLabBenchmark", interval: float = 1.0): + """Initialize the benchmark monitor. + + Args: + benchmark: The benchmark instance to monitor. + interval: Time between recorder updates in seconds. Defaults to 1.0. + """ + self._benchmark = benchmark + self._interval = interval + self._stop_event = threading.Event() + self._thread: threading.Thread | None = None + self._exception: Exception | None = None + + def _monitor_loop(self) -> None: + """Background loop that updates recorders at the specified interval.""" + while not self._stop_event.is_set(): + try: + self._benchmark.update_manual_recorders() + except Exception as e: + self._exception = e + # Log but don't crash - monitoring failure shouldn't stop training + print(f"[BenchmarkMonitor] Warning: update failed: {e}") + + # Wait for interval or stop signal + self._stop_event.wait(timeout=self._interval) + + def start(self) -> None: + """Start the monitoring thread.""" + if self._thread is not None and self._thread.is_alive(): + return # Already running + + self._stop_event.clear() + self._exception = None + self._thread = threading.Thread(target=self._monitor_loop, daemon=True) + self._thread.start() + + def stop(self) -> None: + """Stop the monitoring thread and wait for it to finish.""" + self._stop_event.set() + if self._thread is not None: + self._thread.join(timeout=self._interval + 1.0) + self._thread = None + + def __enter__(self) -> "BenchmarkMonitor": + """Start monitoring when entering context.""" + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + """Stop monitoring when exiting context.""" + self.stop() diff --git a/source/isaaclab/isaaclab/test/benchmark/interfaces.py b/source/isaaclab/isaaclab/test/benchmark/interfaces.py new file mode 100644 index 000000000000..bdd79757843b --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/interfaces.py @@ -0,0 +1,53 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from collections.abc import Sequence +from dataclasses import dataclass, field +from pathlib import Path + +from .measurements import Measurement, MetadataBase + + +@dataclass +class MeasurementData: + """Container for recorder measurements, metadata, and artifacts. + + Args: + measurements: Recorded measurement values. Defaults to an empty list. + metadata: Recorded metadata values. Defaults to an empty list. + artefacts: Artifact tuples of (path, label). Defaults to an empty list. + """ + + measurements: Sequence[Measurement] = field(default_factory=lambda: []) + metadata: Sequence[MetadataBase] = field(default_factory=lambda: []) + artefacts: Sequence[tuple[Path, str]] = field(default_factory=lambda: []) # (path, artefact-label) + + +class MeasurementDataRecorder: + """Base class for recording metrics, metadata, and file-based artifacts. + + There are two common recorder styles: instantaneous measurements taken at + a point in time, and sampling-based measurements gathered over a period. + """ + + def __init__(self): + pass + + def update(self) -> None: + pass + + def get_data(self) -> MeasurementData: + """Return measurements, metadata, and artifacts collected so far. + + Returns: + The collected measurement data container. + + Example: + + .. code-block:: python + + data = recorder.get_data() + """ + return MeasurementData() diff --git a/source/isaaclab/isaaclab/test/benchmark/measurements.py b/source/isaaclab/isaaclab/test/benchmark/measurements.py new file mode 100644 index 000000000000..54c14b120618 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/measurements.py @@ -0,0 +1,351 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import json +import logging +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Union, cast + +logger = logging.getLogger(__name__) + +# Type alias for metadata with data attribute (defined after classes below) +_MetadataWithData = Union["StringMetadata", "IntMetadata", "FloatMetadata", "DictMetadata"] + + +@dataclass +class Measurement: + """Base measurement record. + + Args: + name: Measurement name. + """ + + name: str + + +@dataclass +class SingleMeasurement(Measurement): + """Single floating-point measurement. + + Args: + name: Measurement name. + value: Measurement value. + unit: Unit string. + type: Measurement type label. Defaults to "single". + """ + + value: float | int | str + unit: str + type: str = "single" + + +@dataclass +class StatisticalMeasurement(Measurement): + """Statistical measurement. + + Args: + name: Measurement name. + mean: Mean value. + std: Standard deviation value. + n: Number of samples. + unit: Unit string. + type: Measurement type label. Defaults to "statistical". + """ + + mean: float + std: float + n: int + unit: str + type: str = "statistical" + + +@dataclass +class BooleanMeasurement(Measurement): + """Boolean measurement. + + Args: + name: Measurement name. + bvalue: Measurement value. + type: Measurement type label. Defaults to "boolean". + """ + + bvalue: bool + type: str = "boolean" + + +@dataclass +class DictMeasurement(Measurement): + """Dictionary measurement. + + Args: + name: Measurement name. + value: Measurement value. + type: Measurement type label. Defaults to "dict". + """ + + value: dict + type: str = "dict" + + +@dataclass +class ListMeasurement(Measurement): + """List measurement. + + Args: + name: Measurement name. + value: Measurement value. + type: Measurement type label. Defaults to "list". + """ + + value: list + type: str = "list" + + def __repr__(self): + """Return a compact string representation. + + Returns: + String representation of the measurement. + + Example: + + .. code-block:: python + + repr_str = repr(ListMeasurement(name="samples", value=[1, 2, 3])) + """ + return f"{self.__class__.__name__}(name={self.name!r}, length={len(self.value)})" + + +@dataclass +class MetadataBase: + """Base metadata record. + + Args: + name: Metadata name. + """ + + name: str + + +@dataclass +class StringMetadata(MetadataBase): + """String metadata. + + Args: + name: Metadata name. + data: Metadata value. + type: Metadata type label. Defaults to "string". + """ + + data: str + type: str = "string" + + +@dataclass +class IntMetadata(MetadataBase): + """Integer metadata. + + Args: + name: Metadata name. + data: Metadata value. + type: Metadata type label. Defaults to "int". + """ + + data: int + type: str = "int" + + +@dataclass +class FloatMetadata(MetadataBase): + """Float metadata. + + Args: + name: Metadata name. + data: Metadata value. + type: Metadata type label. Defaults to "float". + """ + + data: float + type: str = "float" + + +@dataclass +class DictMetadata(MetadataBase): + """Dictionary metadata. + + Args: + name: Metadata name. + data: Metadata value. + type: Metadata type label. Defaults to "dict". + """ + + data: dict + type: str = "dict" + + +@dataclass +class TestPhase: + """Represent a single test phase with associated metrics and metadata. + + Args: + phase_name: Name of the phase. + measurements: Measurements recorded for the phase. Defaults to an empty list. + metadata: Metadata recorded for the phase. Defaults to an empty list. + """ + + phase_name: str + measurements: list[Measurement] = field(default_factory=list) + metadata: list[_MetadataWithData] = field(default_factory=list) + + def get_metadata_field(self, name: str, default: Any = KeyError) -> Any: + """Get a metadata field's value. + + Args: + name: Field name. Note that fields are named internally like 'Empty_Scene Stage DSSIM Status', however + `name` is case-insensitive, and drops the stage name. In this eg it would be 'stage dssim status'. + default: Default value to return when the field is missing. + + Returns: + Metadata value, or default if provided. + + Raises: + KeyError: If the field is not found and no default is provided. + + Example: + + .. code-block:: python + + status = phase.get_metadata_field("stage dssim status", default=None) + """ + name = name.lower() + for m in self.metadata: + name2 = m.name.replace(self.phase_name, "").strip().lower() + if name == name2: + return cast(Any, m).data + + if default is KeyError: + raise KeyError(name) + return default + + @classmethod + def metadata_from_dict(cls, m: dict) -> list[_MetadataWithData]: + """Build metadata objects from a metadata dictionary. + + Args: + m: Dictionary containing a "metadata" list. + + Returns: + List of metadata objects. + + Example: + + .. code-block:: python + + metadata = TestPhase.metadata_from_dict({"metadata": [{"name": "gpu", "data": "A10"}]}) + """ + metadata: list[_MetadataWithData] = [] + metadata_mapping = {str: StringMetadata, int: IntMetadata, float: FloatMetadata, dict: DictMetadata} + for meas in m["metadata"]: + if "data" in meas: + metadata_type = metadata_mapping.get(type(meas["data"])) + if metadata_type: + curr_meta = metadata_type(name=meas["name"], data=meas["data"]) + metadata.append(curr_meta) + return metadata + + @classmethod + def from_json(cls, m: dict) -> "TestPhase": + """Deserialize measurements and metadata from a JSON structure. + + Args: + m: JSON-compatible dictionary containing phase data. + + Returns: + Deserialized test phase object. + + Example: + + .. code-block:: python + + phase = TestPhase.from_json(phase_dict) + """ + curr_run = TestPhase(m["phase_name"]) + + for meas in m["measurements"]: + if "value" in meas: + if isinstance(meas["value"], float): + curr_meas: Measurement = SingleMeasurement( + name=meas["name"], value=meas["value"], unit=meas["unit"] + ) + curr_run.measurements.append(curr_meas) + elif isinstance(meas["value"], dict): + curr_meas = DictMeasurement(name=meas["name"], value=meas["value"]) + curr_run.measurements.append(curr_meas) + elif isinstance(meas["value"], list): + curr_meas = ListMeasurement(name=meas["name"], value=meas["value"]) + curr_run.measurements.append(curr_meas) + elif "bvalue" in meas: + curr_meas = BooleanMeasurement(name=meas["name"], bvalue=meas["bvalue"]) + curr_run.measurements.append(curr_meas) + + curr_run.metadata = TestPhase.metadata_from_dict(m["metadata"]) + return curr_run + + @classmethod + def aggregate_json_files(cls, json_folder_path: str | Path) -> list["TestPhase"]: + """Aggregate test phases from JSON files in a folder. + + Args: + json_folder_path: Folder containing metrics JSON files. + + Returns: + List of aggregated test phases. + + Example: + + .. code-block:: python + + phases = TestPhase.aggregate_json_files("/tmp/metrics") + """ + # Gather the separate metrics files for each test + test_runs = [] + metric_files = os.listdir(json_folder_path) + for f in metric_files: + metric_path = os.path.join(json_folder_path, f) + if os.path.isfile(metric_path): + if f.startswith("metrics") and f.endswith(".json"): + with open(metric_path) as json_file: + try: + test_run_json_list = json.load(json_file) + for m in test_run_json_list: + run = cls.from_json(m) + test_runs.append(run) + except json.JSONDecodeError: + logger.error( + f'aggregate_json_files, problems parsing field {f} with content "{json_file.read()}"' + ) + return test_runs + + +class TestPhaseEncoder(json.JSONEncoder): + """JSON encoder for test phases and measurement objects.""" + + def default(self, o: object) -> dict: + """Serialize objects by exposing their dictionary representation. + + Args: + o: Object to serialize. + + Returns: + Dictionary representation of the object. + + Example: + + .. code-block:: python + + json.dumps(phase, cls=TestPhaseEncoder) + """ + return o.__dict__ diff --git a/source/isaaclab/isaaclab/test/benchmark/method_benchmark.py b/source/isaaclab/isaaclab/test/benchmark/method_benchmark.py new file mode 100644 index 000000000000..3c20edfdb1a7 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/method_benchmark.py @@ -0,0 +1,477 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Method-level benchmarking framework for IsaacLab. + +This module provides a framework for benchmarking individual methods with support for: +- Multiple input modes (torch_list, torch_tensor, etc.) +- Automatic hardware/version info collection via recorders +- Multiple output backends (JSON, Osmo, OmniPerf) +- Statistical measurements (mean, std, n) + +Example usage: + +.. code-block:: python + + from isaaclab.test.benchmark import ( + MethodBenchmarkRunner, + MethodBenchmarkRunnerConfig, + MethodBenchmarkDefinition, + ) + + # Define benchmarks + BENCHMARKS = [ + MethodBenchmarkDefinition( + name="write_root_state_to_sim", + method_name="write_root_state_to_sim", + input_generators={ + "torch_list": gen_root_state_torch_list, + "torch_tensor": gen_root_state_torch_tensor, + }, + category="root_state", + ), + ] + + # Configure and run + config = MethodBenchmarkRunnerConfig( + num_iterations=1000, + warmup_steps=10, + num_instances=4096, + device="cuda:0", + ) + runner = MethodBenchmarkRunner("articulation_benchmark", config, "json", ".") + runner.run_benchmarks(BENCHMARKS, articulation) + runner.finalize() +""" + +from __future__ import annotations + +import contextlib +import logging +import time +from collections.abc import Callable +from dataclasses import dataclass, field + +import numpy as np +import torch + +from .benchmark_core import BaseIsaacLabBenchmark +from .measurements import StatisticalMeasurement + +logger = logging.getLogger(__name__) + + +@dataclass +class MethodBenchmarkRunnerConfig: + """Configuration for MethodBenchmarkRunner. + + Attributes: + num_iterations: Number of timing iterations per method. + warmup_steps: Number of warmup iterations before timing. + num_instances: Number of environment instances. + num_bodies: Number of bodies per instance. + num_joints: Number of joints per instance. + device: Device to run benchmarks on. + mode: Which input modes to run ("all" or specific mode name). + """ + + num_iterations: int = 1000 + warmup_steps: int = 10 + num_instances: int = 4096 + num_bodies: int = 12 + num_joints: int = 11 + device: str = "cuda:0" + mode: str | list[str] = "all" + + +@dataclass +class MethodBenchmarkDefinition: + """Definition of a method benchmark. + + Attributes: + name: Display name for the benchmark. + method_name: Name of the method to benchmark on the target object. + input_generators: Dict mapping mode names to input generator functions. + category: Category for grouping results into phases. + dependencies: List of method names that must be called first. + """ + + name: str + method_name: str + input_generators: dict[str, Callable] + category: str = "default" + dependencies: list[str] = field(default_factory=list) + + +class MethodBenchmarkRunner(BaseIsaacLabBenchmark): + """Runner for method-level benchmarks using the new benchmark tooling. + + This class extends BaseIsaacLabBenchmark to provide method-level benchmarking + with automatic hardware/version info collection, multiple backend support, + and organized output by category phases. + """ + + def __init__( + self, + benchmark_name: str, + config: MethodBenchmarkRunnerConfig, + backend_type: str = "json", + output_path: str = ".", + use_recorders: bool = True, + ): + """Initialize the method benchmark runner. + + Args: + benchmark_name: Name of the benchmark (used in output files). + config: Benchmark configuration. + backend_type: Output backend type ("json", "osmo", "omni_perf"). + output_path: Directory to write output files. + use_recorders: Whether to collect hardware/version info. + """ + self._config = config + + # Build workflow metadata from config + workflow_metadata = { + "metadata": [ + {"name": "num_iterations", "data": config.num_iterations}, + {"name": "warmup_steps", "data": config.warmup_steps}, + {"name": "num_instances", "data": config.num_instances}, + {"name": "num_bodies", "data": config.num_bodies}, + {"name": "num_joints", "data": config.num_joints}, + {"name": "device", "data": config.device}, + ] + } + + super().__init__( + benchmark_name=benchmark_name, + backend_type=backend_type, + output_path=output_path, + use_recorders=use_recorders, + output_prefix=benchmark_name, + workflow_metadata=workflow_metadata, + ) + + # Determine which modes to run + if isinstance(config.mode, str): + if config.mode == "all": + self._modes_to_run = None # Run all available + else: + self._modes_to_run = [config.mode] + else: + self._modes_to_run = config.mode + + @property + def config(self) -> MethodBenchmarkRunnerConfig: + """Return the benchmark configuration.""" + return self._config + + def run_benchmarks( + self, + benchmarks: list[MethodBenchmarkDefinition], + target_object: object, + dependencies: dict[str, list[str]] | None = None, + ) -> None: + """Run all defined benchmarks on the target object. + + Args: + benchmarks: List of benchmark definitions to run. + target_object: Object containing the methods to benchmark. + dependencies: Optional dict mapping method names to their dependencies. + """ + if dependencies is None: + dependencies = {} + + print(f"\nBenchmarking {len(benchmarks)} methods...") + print(f"Config: {self._config.num_iterations} iterations, {self._config.warmup_steps} warmup steps") + print( + f" {self._config.num_instances} instances, {self._config.num_bodies} bodies, " + f"{self._config.num_joints} joints" + ) + print(f"Device: {self._config.device}") + print(f"Modes: {self._modes_to_run if self._modes_to_run else 'All available'}") + print("-" * 80) + + for i, benchmark in enumerate(benchmarks): + method = getattr(target_object, benchmark.method_name, None) + + # Determine which modes to run for this benchmark + available_modes = list(benchmark.input_generators.keys()) + current_modes = self._modes_to_run if self._modes_to_run is not None else available_modes + current_modes = [m for m in current_modes if m in available_modes] + + # Get dependencies for this method + method_deps = benchmark.dependencies or dependencies.get(benchmark.method_name, []) + + for mode in current_modes: + # Update manual recorders + self.update_manual_recorders() + + generator = benchmark.input_generators[mode] + bench_name = f"{benchmark.name}_{mode}" + print(f"[{i + 1}/{len(benchmarks)}] [{mode.upper()}] {benchmark.name}...", end=" ", flush=True) + + result = self._benchmark_method( + method=method, + method_name=bench_name, + generator=generator, + dependencies=method_deps, + ) + + if result is None: + print("SKIPPED (method not found)") + elif result.get("skipped"): + print(f"SKIPPED ({result.get('skip_reason', 'unknown')})") + else: + mean = result["mean"] + std = result["std"] + print(f"{mean:.2f} +/- {std:.2f} us") + + # Add measurement to mode-based phase (torch_list, torch_tensor, etc.) + measurement = StatisticalMeasurement( + name=benchmark.name, + mean=mean, + std=std, + n=result["n"], + unit="us", + ) + self.add_measurement(mode, measurement=measurement) + + def _benchmark_method( + self, + method: Callable | None, + method_name: str, + generator: Callable, + dependencies: list[str], + ) -> dict | None: + """Benchmark a single method. + + Args: + method: The method to benchmark (or None if not found). + method_name: Name of the method for reporting. + generator: Function that generates input arguments. + dependencies: List of dependency method names to call first. + + Returns: + Dict with timing results, or None if method not found. + """ + if method is None: + return None + + # Try to call the method once to check for NotImplementedError + try: + inputs = generator(self._config) + _ = method(**inputs) + except NotImplementedError as e: + return {"skipped": True, "skip_reason": f"NotImplementedError: {e}"} + except Exception as e: + return {"skipped": True, "skip_reason": f"Error: {type(e).__name__}: {e}"} + + # Warmup phase + for _ in range(self._config.warmup_steps): + try: + inputs = generator(self._config) + _ = method(**inputs) + except Exception: + pass + # Sync GPU + if self._config.device.startswith("cuda"): + self._sync_device() + + # Timing phase + times = [] + for _ in range(self._config.num_iterations): + inputs = generator(self._config) + + # Sync before timing + if self._config.device.startswith("cuda"): + self._sync_device() + + # Time the method + start_time = time.perf_counter() + try: + _ = method(**inputs) + except Exception: + continue + + # Sync after to ensure kernel completion + if self._config.device.startswith("cuda"): + self._sync_device() + + end_time = time.perf_counter() + times.append((end_time - start_time) * 1e6) # Convert to microseconds + + if not times: + return {"skipped": True, "skip_reason": "No successful iterations"} + + return { + "mean": float(np.mean(times)), + "std": float(np.std(times)), + "n": len(times), + } + + def _sync_device(self) -> None: + """Synchronize GPU device.""" + try: + import warp as wp + + wp.synchronize() + except ImportError: + pass + + if torch.cuda.is_available(): + torch.cuda.synchronize() + + def run_property_benchmarks( + self, + target_data: object, + properties: list[str], + gen_mock_data: Callable, + dependencies: dict[str, list[str]] | None = None, + category: str = "property", + ) -> None: + """Run benchmarks for data class properties. + + This is a convenience method for benchmarking properties on data classes + where the test involves generating mock data and accessing properties. + + Args: + target_data: Data object containing the properties to benchmark. + properties: List of property names to benchmark. + gen_mock_data: Function that generates/updates mock data. + dependencies: Optional dict mapping property names to their dependencies. + category: Category name for grouping results. + """ + if dependencies is None: + dependencies = {} + + # Update manual recorders at start + self.update_manual_recorders() + + print(f"\nBenchmarking {len(properties)} properties...") + print(f"Config: {self._config.num_iterations} iterations, {self._config.warmup_steps} warmup steps") + print( + f" {self._config.num_instances} instances, {self._config.num_bodies} bodies, " + f"{self._config.num_joints} joints" + ) + print("-" * 80) + + for i, prop_name in enumerate(properties): + print(f"[{i + 1}/{len(properties)}] [DEFAULT] {prop_name}...", end=" ", flush=True) + + # Get dependencies for this property + prop_deps = dependencies.get(prop_name, []) + + result = self._benchmark_property( + target_data=target_data, + prop_name=prop_name, + gen_mock_data=gen_mock_data, + dependencies=prop_deps, + ) + + if result is None: + print("SKIPPED (property not found)") + elif result.get("skipped"): + print(f"SKIPPED ({result.get('skip_reason', 'unknown')})") + else: + mean = result["mean"] + std = result["std"] + print(f"{mean:.2f} +/- {std:.2f} us") + + # Add measurement + measurement = StatisticalMeasurement( + name=prop_name, + mean=mean, + std=std, + n=result["n"], + unit="us", + ) + self.add_measurement(category, measurement=measurement) + + def _benchmark_property( + self, + target_data: object, + prop_name: str, + gen_mock_data: Callable, + dependencies: list[str], + ) -> dict | None: + """Benchmark a single property access. + + Args: + target_data: Data object containing the property. + prop_name: Name of the property to benchmark. + gen_mock_data: Function that generates/updates mock data. + dependencies: List of property names to access first. + + Returns: + Dict with timing results, or None if property not found. + """ + # Check if property exists + if not hasattr(target_data, prop_name): + return None + + # Try to access the property once to check for errors + try: + gen_mock_data(self._config) + _ = getattr(target_data, prop_name) + except NotImplementedError as e: + return {"skipped": True, "skip_reason": f"NotImplementedError: {e}"} + except Exception as e: + return {"skipped": True, "skip_reason": f"Error: {type(e).__name__}: {e}"} + + # Warmup phase + for _ in range(self._config.warmup_steps): + try: + gen_mock_data(self._config) + # Access dependencies first + for dep in dependencies: + with contextlib.suppress(Exception): + _ = getattr(target_data, dep) + _ = getattr(target_data, prop_name) + except Exception: + pass + # Sync GPU + if self._config.device.startswith("cuda"): + self._sync_device() + + # Timing phase + times = [] + for _ in range(self._config.num_iterations): + gen_mock_data(self._config) + + # Access dependencies first (not timed) + for dep in dependencies: + with contextlib.suppress(Exception): + _ = getattr(target_data, dep) + + # Sync before timing + if self._config.device.startswith("cuda"): + self._sync_device() + + # Time the property access + start_time = time.perf_counter() + try: + _ = getattr(target_data, prop_name) + except Exception: + continue + + # Sync after + if self._config.device.startswith("cuda"): + self._sync_device() + + end_time = time.perf_counter() + times.append((end_time - start_time) * 1e6) # microseconds + + if not times: + return {"skipped": True, "skip_reason": "No successful iterations"} + + return { + "mean": float(np.mean(times)), + "std": float(np.std(times)), + "n": len(times), + } + + def finalize(self) -> None: + """Finalize the benchmark and write results.""" + self._finalize_impl() diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.py b/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.pyi b/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.pyi new file mode 100644 index 000000000000..c2f9a3c97e06 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "CPUInfoRecorder", + "GPUInfoRecorder", + "MemoryInfoRecorder", + "VersionInfoRecorder", +] + +from .record_cpu_info import CPUInfoRecorder +from .record_gpu_info import GPUInfoRecorder +from .record_memory_info import MemoryInfoRecorder +from .record_version_info import VersionInfoRecorder diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/record_cpu_info.py b/source/isaaclab/isaaclab/test/benchmark/recorders/record_cpu_info.py new file mode 100644 index 000000000000..159dbf45f3d5 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/record_cpu_info.py @@ -0,0 +1,81 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import contextlib +import math +import os +import platform + +import psutil + +from isaaclab.test.benchmark.interfaces import MeasurementData, MeasurementDataRecorder +from isaaclab.test.benchmark.measurements import IntMetadata, SingleMeasurement, StringMetadata + + +class CPUInfoRecorder(MeasurementDataRecorder): + def __init__(self): + # Empty dictionaries to store hardware and runtime information + self._cpu_hardware_info = {} + self._cpu_runtime_info = {} + # Welford's algorithm for computing the mean and standard deviation + self._mean = 0 + self._std = 0 + self._n = 0 + self._m2 = 0 + # CPU usage + self._process = psutil.Process(os.getpid()) + self._get_hardware_info() + + def _get_hardware_info(self) -> None: + # CPU info + self._cpu_hardware_info["physical_cores"] = os.cpu_count() + self._cpu_hardware_info["name"] = platform.processor() or "Unknown" + with contextlib.suppress(Exception): + with open("/proc/cpuinfo") as f: + cpuinfo = f.read() + for line in cpuinfo.split("\n"): + if "model name" in line: + self._cpu_hardware_info["name"] = line.split(":")[1].strip() + break + + def _get_runtime_info(self) -> None: + process_cpu_percent = self._process.cpu_percent(interval=None) + # Welford's algorithm for computing the mean and standard deviation + self._n += 1 + delta = process_cpu_percent - self._mean + self._mean += delta / self._n + delta2 = process_cpu_percent - self._mean + self._m2 += delta * delta2 + if self._n > 1: + self._std = math.sqrt(self._m2 / (self._n - 1)) + self._cpu_runtime_info["mean"] = self._mean + self._cpu_runtime_info["std"] = self._std + self._cpu_runtime_info["n"] = self._n + + def update(self) -> None: + self._get_runtime_info() + + def get_initial_data(self) -> dict: + return { + "cpu_metadata": self._cpu_hardware_info, + } + + def get_runtime_data(self) -> dict: + return { + "cpu_utilization": self._cpu_runtime_info, + } + + def get_data(self) -> MeasurementData: + return MeasurementData( + measurements=[ + SingleMeasurement(name="CPU Utilization", value=self._cpu_runtime_info["mean"], unit="%"), + SingleMeasurement(name="CPU Utilization std", value=self._cpu_runtime_info["std"], unit="%"), + SingleMeasurement(name="CPU Utilization n", value=self._cpu_runtime_info["n"], unit=""), + ], + metadata=[ + StringMetadata(name="cpu_name", data=self._cpu_hardware_info["name"]), + IntMetadata(name="physical_cores", data=self._cpu_hardware_info["physical_cores"]), + ], + ) diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/record_gpu_info.py b/source/isaaclab/isaaclab/test/benchmark/recorders/record_gpu_info.py new file mode 100644 index 000000000000..8ef734b53885 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/record_gpu_info.py @@ -0,0 +1,303 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import contextlib +import math +import subprocess + +import torch + +from isaaclab.test.benchmark.interfaces import MeasurementData, MeasurementDataRecorder +from isaaclab.test.benchmark.measurements import ( + DictMetadata, + IntMetadata, + SingleMeasurement, + StringMetadata, +) + + +class GPUInfoRecorder(MeasurementDataRecorder): + def __init__(self): + # Hardware and runtime information + self._gpu_hardware_info = {} + self._gpu_runtime_info = {} + self._device_count = 0 + + # Per-device Welford stats for memory (bytes) + self._mem_mean = [] + self._mem_std = [] + self._mem_n = [] + self._mem_m2 = [] + + # Per-device Welford stats for utilization (%) + self._util_mean = [] + self._util_std = [] + self._util_n = [] + self._util_m2 = [] + + # pynvml device handles (one per GPU) + self._handles = [] + self._nvml_available = False + + self._get_hardware_info() + + def _get_hardware_info(self) -> None: + if not torch.cuda.is_available(): + self._gpu_hardware_info["available"] = False + return + + self._gpu_hardware_info["available"] = True + self._device_count = torch.cuda.device_count() + self._gpu_hardware_info["device_count"] = self._device_count + self._gpu_hardware_info["current_device"] = torch.cuda.current_device() + + # Collect info for all devices + self._gpu_hardware_info["devices"] = [] + for i in range(self._device_count): + gpu_props = torch.cuda.get_device_properties(i) + device_info = { + "index": i, + "name": gpu_props.name, + "total_memory_gb": round(gpu_props.total_memory / (1024**3), 2), + "compute_capability": f"{gpu_props.major}.{gpu_props.minor}", + "multi_processor_count": gpu_props.multi_processor_count, + } + self._gpu_hardware_info["devices"].append(device_info) + + # Initialize Welford stats for this device + self._mem_mean.append(0) + self._mem_std.append(0) + self._mem_n.append(0) + self._mem_m2.append(0) + self._util_mean.append(0) + self._util_std.append(0) + self._util_n.append(0) + self._util_m2.append(0) + + # CUDA version + with contextlib.suppress(Exception): + import torch.version as torch_version + + cuda_version = getattr(torch_version, "cuda", None) + self._gpu_hardware_info["cuda_version"] = cuda_version if cuda_version else "Unknown" + + # Initialize pynvml for GPU utilization monitoring (all devices) + with contextlib.suppress(Exception): + import pynvml + + pynvml.nvmlInit() + for i in range(self._device_count): + handle = pynvml.nvmlDeviceGetHandleByIndex(i) + self._handles.append(handle) + self._nvml_available = True + + # Check if nvidia-smi is available as fallback + self._nvidia_smi_available = False + if not self._nvml_available: + with contextlib.suppress(Exception): + result = subprocess.run( + ["nvidia-smi", "--query-gpu=utilization.gpu", "--format=csv,noheader,nounits"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + self._nvidia_smi_available = True + + def _get_runtime_info(self) -> None: + if not torch.cuda.is_available(): + return + + # Initialize runtime info structure if needed + if "devices" not in self._gpu_runtime_info: + self._gpu_runtime_info["devices"] = [{} for _ in range(self._device_count)] + + # Query nvidia-smi once for all GPUs if needed (more efficient than per-device calls) + nvidia_smi_data = None + if self._nvidia_smi_available: + with contextlib.suppress(Exception): + result = subprocess.run( + ["nvidia-smi", "--query-gpu=memory.used,utilization.gpu", "--format=csv,noheader,nounits"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + nvidia_smi_data = [] + for line in result.stdout.strip().split("\n"): + parts = line.split(",") + if len(parts) >= 2: + nvidia_smi_data.append( + { + "memory_used_mb": float(parts[0].strip()), + "utilization": float(parts[1].strip()), + } + ) + + for i in range(self._device_count): + # GPU memory usage per device + memory_bytes = None + + # Try pynvml first + if self._nvml_available and i < len(self._handles): + with contextlib.suppress(Exception): + import pynvml + + mem_info = pynvml.nvmlDeviceGetMemoryInfo(self._handles[i]) + memory_bytes = mem_info.used + + # Fall back to nvidia-smi + elif nvidia_smi_data and i < len(nvidia_smi_data): + memory_bytes = nvidia_smi_data[i]["memory_used_mb"] * 1024 * 1024 # MB to bytes + + # Last resort: PyTorch memory_allocated (only tracks PyTorch tensors) + if memory_bytes is None: + memory_bytes = torch.cuda.memory_allocated(i) + + self._mem_n[i] += 1 + delta = memory_bytes - self._mem_mean[i] + self._mem_mean[i] += delta / self._mem_n[i] + delta2 = memory_bytes - self._mem_mean[i] + self._mem_m2[i] += delta * delta2 + if self._mem_n[i] > 1: + self._mem_std[i] = math.sqrt(self._mem_m2[i] / (self._mem_n[i] - 1)) + + self._gpu_runtime_info["devices"][i]["memory_used_mean_bytes"] = self._mem_mean[i] + self._gpu_runtime_info["devices"][i]["memory_used_std_bytes"] = self._mem_std[i] + self._gpu_runtime_info["devices"][i]["memory_n"] = self._mem_n[i] + + # GPU utilization from pynvml or nvidia-smi fallback + gpu_util = None + + if self._nvml_available and i < len(self._handles): + with contextlib.suppress(Exception): + import pynvml + + util = pynvml.nvmlDeviceGetUtilizationRates(self._handles[i]) + gpu_util = util.gpu + + elif nvidia_smi_data and i < len(nvidia_smi_data): + gpu_util = nvidia_smi_data[i]["utilization"] + + if gpu_util is not None: + self._util_n[i] += 1 + delta = gpu_util - self._util_mean[i] + self._util_mean[i] += delta / self._util_n[i] + delta2 = gpu_util - self._util_mean[i] + self._util_m2[i] += delta * delta2 + if self._util_n[i] > 1: + self._util_std[i] = math.sqrt(self._util_m2[i] / (self._util_n[i] - 1)) + + self._gpu_runtime_info["devices"][i]["utilization_mean_percent"] = self._util_mean[i] + self._gpu_runtime_info["devices"][i]["utilization_std_percent"] = self._util_std[i] + self._gpu_runtime_info["devices"][i]["utilization_n"] = self._util_n[i] + + def update(self) -> None: + self._get_runtime_info() + + def get_initial_data(self) -> dict: + return { + "gpu_metadata": self._gpu_hardware_info, + } + + def get_runtime_data(self) -> dict: + return { + "gpu_utilization": self._gpu_runtime_info, + } + + def _bytes_to_gb(self, bytes_value: float) -> float: + """Convert bytes to gigabytes, rounded to 2 decimal places.""" + return round(bytes_value / (1024**3), 2) + + def get_data(self) -> MeasurementData: + measurements = [] + metadata = [] + + if not self._gpu_hardware_info.get("available", False): + return MeasurementData(measurements=measurements, metadata=metadata) + + # Global metadata + metadata.append(IntMetadata(name="gpu_device_count", data=self._device_count)) + metadata.append(IntMetadata(name="gpu_current_device", data=self._gpu_hardware_info["current_device"])) + metadata.append( + StringMetadata(name="cuda_version", data=self._gpu_hardware_info.get("cuda_version", "Unknown")) + ) + + # Per-device hardware info as a dict + devices_data = {} + for i in range(self._device_count): + device_hw = ( + self._gpu_hardware_info.get("devices", [{}])[i] + if i < len(self._gpu_hardware_info.get("devices", [])) + else {} + ) + + device_data = { + "name": device_hw.get("name", "Unknown"), + "total_memory_gb": device_hw.get("total_memory_gb", 0), + "compute_capability": device_hw.get("compute_capability", "Unknown"), + "multi_processor_count": device_hw.get("multi_processor_count", 0), + } + + devices_data[str(i)] = device_data + + metadata.append(DictMetadata(name="gpu_devices", data=devices_data)) + + # Runtime measurements - GPU memory and utilization + for i in range(self._device_count): + device_runtime = self._gpu_runtime_info.get("devices", [{}] * self._device_count) + if i < len(device_runtime): + runtime = device_runtime[i] + prefix = f"GPU {i} " if self._device_count > 1 else "GPU " + + # Memory used + if "memory_used_mean_bytes" in runtime: + measurements.append( + SingleMeasurement( + name=f"{prefix}Memory Used", + value=self._bytes_to_gb(runtime["memory_used_mean_bytes"]), + unit="GB", + ) + ) + measurements.append( + SingleMeasurement( + name=f"{prefix}Memory Used std", + value=self._bytes_to_gb(runtime["memory_used_std_bytes"]), + unit="GB", + ) + ) + measurements.append( + SingleMeasurement( + name=f"{prefix}Memory Used n", + value=runtime["memory_n"], + unit="", + ) + ) + + # GPU Utilization + if "utilization_mean_percent" in runtime: + measurements.append( + SingleMeasurement( + name=f"{prefix}Utilization", + value=round(runtime["utilization_mean_percent"], 2), + unit="%", + ) + ) + measurements.append( + SingleMeasurement( + name=f"{prefix}Utilization std", + value=round(runtime["utilization_std_percent"], 2), + unit="%", + ) + ) + measurements.append( + SingleMeasurement( + name=f"{prefix}Utilization n", + value=runtime["utilization_n"], + unit="", + ) + ) + + return MeasurementData(measurements=measurements, metadata=metadata) diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/record_memory_info.py b/source/isaaclab/isaaclab/test/benchmark/recorders/record_memory_info.py new file mode 100644 index 000000000000..8ad0f54304e3 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/record_memory_info.py @@ -0,0 +1,160 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math +import os + +import psutil + +from isaaclab.test.benchmark.interfaces import MeasurementData, MeasurementDataRecorder +from isaaclab.test.benchmark.measurements import FloatMetadata, SingleMeasurement + + +class MemoryInfoRecorder(MeasurementDataRecorder): + def __init__(self): + # Empty dictionaries to store hardware and runtime information + self._memory_hardware_info = {} + self._memory_runtime_info = {} + + # Welford's algorithm stats for RSS (Resident Set Size) + self._rss_mean = 0 + self._rss_m2 = 0 + self._rss_n = 0 + + # Welford's algorithm stats for VMS (Virtual Memory Size) + self._vms_mean = 0 + self._vms_m2 = 0 + self._vms_n = 0 + + # Welford's algorithm stats for USS (Unique Set Size) + self._uss_mean = 0 + self._uss_m2 = 0 + self._uss_n = 0 + + # Process handle + self._process = psutil.Process(os.getpid()) + self._get_hardware_info() + + def _get_hardware_info(self) -> None: + mem = psutil.virtual_memory() + self._memory_hardware_info["total_ram_gb"] = round(mem.total / (1024**3), 2) + + def _update_welford(self, value: float, mean: float, m2: float, n: int) -> tuple[float, float, int, float]: + """Update Welford's online algorithm for mean and variance. + + Returns: + Tuple of (new_mean, new_m2, new_n, std) + """ + n += 1 + delta = value - mean + mean += delta / n + delta2 = value - mean + m2 += delta * delta2 + std = math.sqrt(m2 / (n - 1)) if n > 1 else 0 + return mean, m2, n, std + + def _get_runtime_info(self) -> None: + mem_info = self._process.memory_info() + + # RSS (Resident Set Size) - physical memory used + self._rss_mean, self._rss_m2, self._rss_n, rss_std = self._update_welford( + mem_info.rss, self._rss_mean, self._rss_m2, self._rss_n + ) + self._memory_runtime_info["rss_mean"] = self._rss_mean + self._memory_runtime_info["rss_std"] = rss_std + self._memory_runtime_info["rss_n"] = self._rss_n + + # VMS (Virtual Memory Size) - total virtual memory + self._vms_mean, self._vms_m2, self._vms_n, vms_std = self._update_welford( + mem_info.vms, self._vms_mean, self._vms_m2, self._vms_n + ) + self._memory_runtime_info["vms_mean"] = self._vms_mean + self._memory_runtime_info["vms_std"] = vms_std + self._memory_runtime_info["vms_n"] = self._vms_n + + # USS (Unique Set Size) - memory unique to process (not shared) + try: + uss = self._process.memory_full_info().uss + self._uss_mean, self._uss_m2, self._uss_n, uss_std = self._update_welford( + uss, self._uss_mean, self._uss_m2, self._uss_n + ) + self._memory_runtime_info["uss_mean"] = self._uss_mean + self._memory_runtime_info["uss_std"] = uss_std + self._memory_runtime_info["uss_n"] = self._uss_n + except (psutil.AccessDenied, AttributeError): + # USS may not be available on all platforms + pass + + def update(self) -> None: + self._get_runtime_info() + + def get_initial_data(self) -> dict: + return { + "memory_metadata": self._memory_hardware_info, + } + + def get_runtime_data(self) -> dict: + return { + "memory_utilization": self._memory_runtime_info, + } + + def _bytes_to_gb(self, bytes_value: float) -> float: + """Convert bytes to gigabytes, rounded to 2 decimal places.""" + return round(bytes_value / (1024**3), 2) + + def get_data(self) -> MeasurementData: + measurements = [ + # RSS (Resident Set Size) + SingleMeasurement( + name="System Memory RSS", + value=self._bytes_to_gb(self._memory_runtime_info.get("rss_mean", 0)), + unit="GB", + ), + SingleMeasurement( + name="System Memory RSS std", + value=self._bytes_to_gb(self._memory_runtime_info.get("rss_std", 0)), + unit="GB", + ), + SingleMeasurement(name="System Memory RSS n", value=self._memory_runtime_info.get("rss_n", 0), unit=""), + # VMS (Virtual Memory Size) + SingleMeasurement( + name="System Memory VMS", + value=self._bytes_to_gb(self._memory_runtime_info.get("vms_mean", 0)), + unit="GB", + ), + SingleMeasurement( + name="System Memory VMS std", + value=self._bytes_to_gb(self._memory_runtime_info.get("vms_std", 0)), + unit="GB", + ), + SingleMeasurement(name="System Memory VMS n", value=self._memory_runtime_info.get("vms_n", 0), unit=""), + ] + + # USS (Unique Set Size) - only if available + if "uss_mean" in self._memory_runtime_info: + measurements.extend( + [ + SingleMeasurement( + name="System Memory USS", + value=self._bytes_to_gb(self._memory_runtime_info.get("uss_mean", 0)), + unit="GB", + ), + SingleMeasurement( + name="System Memory USS std", + value=self._bytes_to_gb(self._memory_runtime_info.get("uss_std", 0)), + unit="GB", + ), + SingleMeasurement( + name="System Memory USS n", value=self._memory_runtime_info.get("uss_n", 0), unit="" + ), + ] + ) + + return MeasurementData( + measurements=measurements, + metadata=[ + FloatMetadata(name="total_ram_gb", data=self._memory_hardware_info["total_ram_gb"]), + ], + ) diff --git a/source/isaaclab/isaaclab/test/benchmark/recorders/record_version_info.py b/source/isaaclab/isaaclab/test/benchmark/recorders/record_version_info.py new file mode 100644 index 000000000000..a4396371b3d3 --- /dev/null +++ b/source/isaaclab/isaaclab/test/benchmark/recorders/record_version_info.py @@ -0,0 +1,187 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import importlib.metadata +import os +import subprocess + +from isaaclab.test.benchmark.interfaces import MeasurementData, MeasurementDataRecorder +from isaaclab.test.benchmark.measurements import DictMetadata, StringMetadata + +# Path to the repository root. +_REPO_ROOT = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 6)) + + +class VersionInfoRecorder(MeasurementDataRecorder): + def __init__(self): + self._version_info = {} + self._dev_info = {} + self._get_version_info() + self._get_git_info() + + def _get_version(self, module_name: str, version_attr: str = "__version__") -> str | None: + """Attempt to get version from a module. + + Args: + module_name: Name of the module to import. + version_attr: Attribute name containing the version. + + Returns: + Version string or None if not available. + """ + try: + module = __import__(module_name) + # Handle nested attributes like "config.version" + for attr in version_attr.split("."): + module = getattr(module, attr) + return str(module) + except Exception: + return None + + def _get_pkg_version(self, pip_name: str) -> str | None: + """Get version via importlib.metadata (pip package name, no module import).""" + try: + return importlib.metadata.version(pip_name) + except Exception: + return None + + def _record(self, key: str, version: str | None) -> None: + """Store a version entry only if version is non-empty.""" + if version: + self._version_info[key] = version + + def _get_version_info(self) -> None: + # isaaclab + self._record("isaaclab", self._get_version("isaaclab")) + + # warp - try config.version first, then __version__ + version = self._get_version("warp", "config.version") or self._get_version("warp") + self._record("warp", version) + + # isaacsim + self._record("isaacsim", self._get_version("isaacsim")) + + # kit (from omni.kit if available) + version = self._get_version("omni.kit", "app.get_app().get_build_version") + if not version: + version = self._get_version("carb", "settings.get_settings().get('/app/version')") + self._record("kit", version) + + # torch + self._record("torch", self._get_version("torch")) + + # numpy + self._record("numpy", self._get_version("numpy")) + + # IsaacLab sub-packages + self._record("isaaclab_newton", self._get_pkg_version("isaaclab_newton")) + self._record("isaaclab_physx", self._get_pkg_version("isaaclab_physx")) + self._record("isaaclab_ov", self._get_pkg_version("isaaclab_ov")) + self._record("isaaclab_tasks", self._get_pkg_version("isaaclab_tasks")) + self._record("isaaclab_rl", self._get_pkg_version("isaaclab_rl")) + + # Renderers & physics engines + self._record("ovrtx", self._get_pkg_version("ovrtx")) + self._record("newton", self._get_pkg_version("newton")) + self._record("mujoco", self._get_pkg_version("mujoco")) + self._record("mujoco_warp", self._get_pkg_version("mujoco-warp")) + + # RL frameworks + self._record("rl_games", self._get_pkg_version("rl_games")) + self._record("rsl_rl", self._get_pkg_version("rsl-rl-lib")) + self._record("stable_baselines3", self._get_pkg_version("stable_baselines3")) + self._record("skrl", self._get_pkg_version("skrl")) + + # Key dependencies + self._record("gymnasium", self._get_pkg_version("gymnasium")) + self._record("cuda_bindings", self._get_pkg_version("cuda-bindings")) + self._record("usd_core", self._get_pkg_version("usd-core")) + + # Release version from root VERSION file + version_file = os.path.join(_REPO_ROOT, "VERSION") + try: + with open(version_file) as f: + self._record("isaaclab_release", f.read().strip()) + except Exception: + pass + + def _get_git_info(self) -> None: + """Get git repository information.""" + script_dir = os.path.dirname(os.path.abspath(__file__)) + + try: + # Get full commit hash + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + cwd=script_dir, + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + self._dev_info["commit_hash"] = result.stdout.strip() + self._dev_info["commit_hash_short"] = result.stdout.strip()[:8] + + # Get branch name + result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + cwd=script_dir, + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + self._dev_info["branch"] = result.stdout.strip() + + # Get commit date + result = subprocess.run( + ["git", "log", "-1", "--format=%ci"], + cwd=script_dir, + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + self._dev_info["commit_date"] = result.stdout.strip() + + # Check if working directory is dirty + result = subprocess.run( + ["git", "status", "--porcelain"], + cwd=script_dir, + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + self._dev_info["dirty"] = len(result.stdout.strip()) > 0 + + except Exception: + pass + + def update(self) -> None: + """No-op for version info as it doesn't change during runtime.""" + pass + + def get_initial_data(self) -> dict: + return { + "version_metadata": self._version_info, + "dev": self._dev_info, + } + + def get_runtime_data(self) -> dict: + return {} + + def get_data(self) -> MeasurementData: + metadata = [] + + # Add version metadata + for package, version in self._version_info.items(): + metadata.append(StringMetadata(name=f"{package}_version", data=version)) + + # Add dev/git info as a dict metadata entry + if self._dev_info: + metadata.append(DictMetadata(name="dev", data=self._dev_info)) + + return MeasurementData(measurements=[], metadata=metadata) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py new file mode 100644 index 000000000000..40ef92ec8e0a --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock interfaces for IsaacLab sensors and assets. + +This module provides mock implementations of sensor and asset classes for unit testing +without requiring the full Isaac Sim simulation environment. + +Example usage: + + .. code-block:: python + + from isaaclab.test.mock_interfaces.sensors import MockContactSensor + from isaaclab.test.mock_interfaces.assets import MockArticulation + + # Create mock sensor + sensor = MockContactSensor(num_instances=4, num_bodies=4, device="cpu") + sensor.data.set_mock_data(net_forces_w=torch.randn(4, 4, 3)) + + # Create mock articulation + robot = MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + joint_names=["joint_" + str(i) for i in range(12)], + device="cpu" + ) + +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.pyi b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.pyi new file mode 100644 index 000000000000..176171a508a4 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.pyi @@ -0,0 +1,66 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockArticulation", + "MockArticulationData", + "MockRigidObject", + "MockRigidObjectData", + "MockRigidObjectCollection", + "MockRigidObjectCollectionData", + "create_mock_articulation", + "create_mock_humanoid", + "create_mock_quadruped", + "create_mock_rigid_object", + "create_mock_rigid_object_collection", + "MockContactSensor", + "MockContactSensorData", + "MockFrameTransformer", + "MockFrameTransformerData", + "MockImu", + "MockImuData", + "create_mock_contact_sensor", + "create_mock_foot_contact_sensor", + "create_mock_frame_transformer", + "create_mock_imu", + "MockArticulationBuilder", + "MockSensorBuilder", + "MockWrenchComposer", + "patch_articulation", + "patch_sensor", +] + +from .assets import ( + MockArticulation, + MockArticulationData, + MockRigidObject, + MockRigidObjectData, + MockRigidObjectCollection, + MockRigidObjectCollectionData, + create_mock_articulation, + create_mock_humanoid, + create_mock_quadruped, + create_mock_rigid_object, + create_mock_rigid_object_collection, +) +from .sensors import ( + MockContactSensor, + MockContactSensorData, + MockFrameTransformer, + MockFrameTransformerData, + MockImu, + MockImuData, + create_mock_contact_sensor, + create_mock_foot_contact_sensor, + create_mock_frame_transformer, + create_mock_imu, +) +from .utils import ( + MockArticulationBuilder, + MockSensorBuilder, + MockWrenchComposer, + patch_articulation, + patch_sensor, +) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py new file mode 100644 index 000000000000..7c1403a81586 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock asset interfaces for testing without Isaac Sim.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.pyi b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.pyi new file mode 100644 index 000000000000..2beeef9e032b --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.pyi @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockArticulation", + "MockArticulationData", + "MockRigidObject", + "MockRigidObjectData", + "MockRigidObjectCollection", + "MockRigidObjectCollectionData", + "create_mock_articulation", + "create_mock_humanoid", + "create_mock_quadruped", + "create_mock_rigid_object", + "create_mock_rigid_object_collection", +] + +from .mock_articulation import MockArticulation, MockArticulationData +from .mock_rigid_object import MockRigidObject, MockRigidObjectData +from .mock_rigid_object_collection import MockRigidObjectCollection, MockRigidObjectCollectionData +from .factories import ( + create_mock_articulation, + create_mock_humanoid, + create_mock_quadruped, + create_mock_rigid_object, + create_mock_rigid_object_collection, +) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py new file mode 100644 index 000000000000..c135c80005f2 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py @@ -0,0 +1,209 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating pre-configured mock assets.""" + +from __future__ import annotations + +import torch + +from .mock_articulation import MockArticulation +from .mock_rigid_object import MockRigidObject +from .mock_rigid_object_collection import MockRigidObjectCollection + + +def create_mock_articulation( + num_instances: int = 1, + num_joints: int = 1, + num_bodies: int = 2, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", +) -> MockArticulation: + """Create a mock articulation with default configuration. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance. + """ + return MockArticulation( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=is_fixed_base, + device=device, + ) + + +def create_mock_quadruped( + num_instances: int = 1, + device: str = "cpu", +) -> MockArticulation: + """Create a mock quadruped robot articulation. + + Creates a quadruped with 12 joints (3 per leg) and 13 bodies. + + Args: + num_instances: Number of robot instances. + device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance for a quadruped. + """ + leg_names = ["FL", "FR", "RL", "RR"] + joint_suffixes = ["hip", "thigh", "calf"] + + joint_names = [f"{leg}_{suffix}" for leg in leg_names for suffix in joint_suffixes] + body_names = ["base"] + [f"{leg}_{part}" for leg in leg_names for part in ["hip", "thigh", "calf"]] + + robot = MockArticulation( + num_instances=num_instances, + num_joints=12, + num_bodies=13, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=False, + device=device, + ) + + # Set reasonable default joint limits for a quadruped + joint_pos_limits = torch.zeros(num_instances, 12, 2, device=device) + joint_pos_limits[..., 0] = -1.57 # Lower limit + joint_pos_limits[..., 1] = 1.57 # Upper limit + robot.data.set_joint_pos_limits(joint_pos_limits) + + return robot + + +def create_mock_humanoid( + num_instances: int = 1, + device: str = "cpu", +) -> MockArticulation: + """Create a mock humanoid robot articulation. + + Creates a humanoid with 21 joints and 22 bodies. + + Args: + num_instances: Number of robot instances. + device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance for a humanoid. + """ + # Simplified humanoid joint structure + joint_names = [ + # Torso + "torso_yaw", + "torso_pitch", + "torso_roll", + # Left arm + "L_shoulder_pitch", + "L_shoulder_roll", + "L_shoulder_yaw", + "L_elbow", + # Right arm + "R_shoulder_pitch", + "R_shoulder_roll", + "R_shoulder_yaw", + "R_elbow", + # Left leg + "L_hip_yaw", + "L_hip_roll", + "L_hip_pitch", + "L_knee", + "L_ankle_pitch", + # Right leg + "R_hip_yaw", + "R_hip_roll", + "R_hip_pitch", + "R_knee", + "R_ankle_pitch", + ] + + body_names = [ + "pelvis", + "torso", + "L_upper_arm", + "L_lower_arm", + "L_hand", + "R_upper_arm", + "R_lower_arm", + "R_hand", + "L_thigh", + "L_shin", + "L_foot", + "R_thigh", + "R_shin", + "R_foot", + "head", + ] + + return MockArticulation( + num_instances=num_instances, + num_joints=21, + num_bodies=len(body_names), + joint_names=joint_names, + body_names=body_names, + is_fixed_base=False, + device=device, + ) + + +def create_mock_rigid_object( + num_instances: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", +) -> MockRigidObject: + """Create a mock rigid object with default configuration. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies. + device: Device for tensor allocation. + + Returns: + Configured MockRigidObject instance. + """ + return MockRigidObject( + num_instances=num_instances, + body_names=body_names, + device=device, + ) + + +def create_mock_rigid_object_collection( + num_instances: int = 1, + num_bodies: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", +) -> MockRigidObjectCollection: + """Create a mock rigid object collection with default configuration. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. + + Returns: + Configured MockRigidObjectCollection instance. + """ + return MockRigidObjectCollection( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=body_names, + device=device, + ) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py new file mode 100644 index 000000000000..b7a29a55b389 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py @@ -0,0 +1,2569 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock articulation asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.assets.__init__.py (which needs omni.timeline) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "assets" / "articulation" / "base_articulation_data.py" + _spec = importlib.util.spec_from_file_location("_base_articulation_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseArticulationData = _mod.BaseArticulationData + + +class MockArticulationData(BaseArticulationData): + """Mock data container for articulation asset. + + This class mimics the interface of BaseArticulationData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_joints: int, + num_bodies: int, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + fixed_tendon_names: list[str] | None = None, + spatial_tendon_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock articulation data. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + num_fixed_tendons: Number of fixed tendons. + num_spatial_tendons: Number of spatial tendons. + fixed_tendon_names: Names of fixed tendons. + spatial_tendon_names: Names of spatial tendons. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_joints = num_joints + self._num_bodies = num_bodies + self._num_fixed_tendons = num_fixed_tendons + self._num_spatial_tendons = num_spatial_tendons + + # Names + self.joint_names = joint_names or [f"joint_{i}" for i in range(num_joints)] + self.body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self.fixed_tendon_names = fixed_tendon_names or [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + self.spatial_tendon_names = spatial_tendon_names or [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + # Call base class init (sets self.device) + super().__init__(root_view=None, device=device) + self._create_buffers() + + # -- Internal storage for mock data -- + # Default states + self._default_root_pose: wp.array | None = None + self._default_root_vel: wp.array | None = None + self._default_joint_pos: wp.array | None = None + self._default_joint_vel: wp.array | None = None + + # Joint commands + self._joint_pos_target: wp.array | None = None + self._joint_vel_target: wp.array | None = None + self._joint_effort_target: wp.array | None = None + self._computed_torque: wp.array | None = None + self._applied_torque: wp.array | None = None + + # Joint properties + self._joint_stiffness: wp.array | None = None + self._joint_damping: wp.array | None = None + self._joint_armature: wp.array | None = None + self._joint_friction_coeff: wp.array | None = None + self._joint_dynamic_friction_coeff: wp.array | None = None + self._joint_viscous_friction_coeff: wp.array | None = None + self._joint_pos_limits: wp.array | None = None + self._joint_vel_limits: wp.array | None = None + self._joint_effort_limits: wp.array | None = None + self._soft_joint_pos_limits: wp.array | None = None + self._soft_joint_vel_limits: wp.array | None = None + self._gear_ratio: wp.array | None = None + + # Joint state + self._joint_pos: wp.array | None = None + self._joint_vel: wp.array | None = None + self._joint_acc: wp.array | None = None + + # Root state (link frame) + self._root_link_pose_w: wp.array | None = None + self._root_link_vel_w: wp.array | None = None + + # Root state (CoM frame) + self._root_com_pose_w: wp.array | None = None + self._root_com_vel_w: wp.array | None = None + + # Body state (link frame) + self._body_link_pose_w: wp.array | None = None + self._body_link_vel_w: wp.array | None = None + + # Body state (CoM frame) + self._body_com_pose_w: wp.array | None = None + self._body_com_vel_w: wp.array | None = None + self._body_com_acc_w: wp.array | None = None + self._body_com_pose_b: wp.array | None = None + + # Body properties + self._body_mass: wp.array | None = None + self._body_inertia: wp.array | None = None + + # Tendon properties (fixed) + self._fixed_tendon_stiffness: wp.array | None = None + self._fixed_tendon_damping: wp.array | None = None + self._fixed_tendon_limit_stiffness: wp.array | None = None + self._fixed_tendon_rest_length: wp.array | None = None + self._fixed_tendon_offset: wp.array | None = None + self._fixed_tendon_pos_limits: wp.array | None = None + + # Tendon properties (spatial) + self._spatial_tendon_stiffness: wp.array | None = None + self._spatial_tendon_damping: wp.array | None = None + self._spatial_tendon_limit_stiffness: wp.array | None = None + self._spatial_tendon_offset: wp.array | None = None + + # ProxyArray caches + self._default_root_pose_ta: ProxyArray | None = None + self._default_root_vel_ta: ProxyArray | None = None + self._default_joint_pos_ta: ProxyArray | None = None + self._default_joint_vel_ta: ProxyArray | None = None + self._joint_pos_target_ta: ProxyArray | None = None + self._joint_vel_target_ta: ProxyArray | None = None + self._joint_effort_target_ta: ProxyArray | None = None + self._computed_torque_ta: ProxyArray | None = None + self._applied_torque_ta: ProxyArray | None = None + self._joint_stiffness_ta: ProxyArray | None = None + self._joint_damping_ta: ProxyArray | None = None + self._joint_armature_ta: ProxyArray | None = None + self._joint_friction_coeff_ta: ProxyArray | None = None + self._joint_dynamic_friction_coeff_ta: ProxyArray | None = None + self._joint_viscous_friction_coeff_ta: ProxyArray | None = None + self._joint_pos_limits_ta: ProxyArray | None = None + self._joint_vel_limits_ta: ProxyArray | None = None + self._joint_effort_limits_ta: ProxyArray | None = None + self._soft_joint_pos_limits_ta: ProxyArray | None = None + self._soft_joint_vel_limits_ta: ProxyArray | None = None + self._gear_ratio_ta: ProxyArray | None = None + self._joint_pos_ta: ProxyArray | None = None + self._joint_vel_ta: ProxyArray | None = None + self._joint_acc_ta: ProxyArray | None = None + self._root_link_pose_w_ta: ProxyArray | None = None + self._root_link_vel_w_ta: ProxyArray | None = None + self._root_com_pose_w_ta: ProxyArray | None = None + self._root_com_vel_w_ta: ProxyArray | None = None + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + self._fixed_tendon_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_damping_ta: ProxyArray | None = None + self._fixed_tendon_limit_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_rest_length_ta: ProxyArray | None = None + self._fixed_tendon_offset_ta: ProxyArray | None = None + self._fixed_tendon_pos_limits_ta: ProxyArray | None = None + self._spatial_tendon_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_damping_ta: ProxyArray | None = None + self._spatial_tendon_limit_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_offset_ta: ProxyArray | None = None + + # -- Helper for identity quaternion -- + def _identity_quat(self, *shape: int) -> wp.array: + """Create identity quaternion array (w, x, y, z) = (1, 0, 0, 0).""" + np_arr = np.zeros((*shape, 4), dtype=np.float32) + np_arr[..., 0] = 1.0 + return wp.array(np_arr, dtype=wp.float32, device=self.device) + + # -- Default state properties -- + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose. dtype=wp.transformf, shape: (N,).""" + if self._default_root_pose is None: + pose_np = np.zeros((self._num_instances, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._default_root_pose = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._default_root_pose_ta = None + if self._default_root_pose_ta is None: + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + return self._default_root_pose_ta + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._default_root_vel is None: + self._default_root_vel = wp.zeros((self._num_instances, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._default_root_vel_ta = None + if self._default_root_vel_ta is None: + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + return self._default_root_vel_ta + + @property + def default_root_state(self) -> ProxyArray: + """Default root state [pose(7), vel(6)]. Shape: (N, 13).""" + pose_t = self.default_root_pose.torch + vel_t = self.default_root_vel.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1))) + + @property + def default_joint_pos(self) -> ProxyArray: + """Default joint positions. Shape: (N, num_joints).""" + if self._default_joint_pos is None: + self._default_joint_pos = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._default_joint_pos_ta = None + if self._default_joint_pos_ta is None: + self._default_joint_pos_ta = ProxyArray(self._default_joint_pos) + return self._default_joint_pos_ta + + @property + def default_joint_vel(self) -> ProxyArray: + """Default joint velocities. Shape: (N, num_joints).""" + if self._default_joint_vel is None: + self._default_joint_vel = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._default_joint_vel_ta = None + if self._default_joint_vel_ta is None: + self._default_joint_vel_ta = ProxyArray(self._default_joint_vel) + return self._default_joint_vel_ta + + # -- Joint command properties -- + + @property + def joint_pos_target(self) -> ProxyArray: + """Joint position targets. Shape: (N, num_joints).""" + if self._joint_pos_target is None: + self._joint_pos_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_pos_target_ta = None + if self._joint_pos_target_ta is None: + self._joint_pos_target_ta = ProxyArray(self._joint_pos_target) + return self._joint_pos_target_ta + + @property + def joint_vel_target(self) -> ProxyArray: + """Joint velocity targets. Shape: (N, num_joints).""" + if self._joint_vel_target is None: + self._joint_vel_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_vel_target_ta = None + if self._joint_vel_target_ta is None: + self._joint_vel_target_ta = ProxyArray(self._joint_vel_target) + return self._joint_vel_target_ta + + @property + def joint_effort_target(self) -> ProxyArray: + """Joint effort targets. Shape: (N, num_joints).""" + if self._joint_effort_target is None: + self._joint_effort_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_effort_target_ta = None + if self._joint_effort_target_ta is None: + self._joint_effort_target_ta = ProxyArray(self._joint_effort_target) + return self._joint_effort_target_ta + + @property + def computed_torque(self) -> ProxyArray: + """Computed torques before clipping. Shape: (N, num_joints).""" + if self._computed_torque is None: + self._computed_torque = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._computed_torque_ta = None + if self._computed_torque_ta is None: + self._computed_torque_ta = ProxyArray(self._computed_torque) + return self._computed_torque_ta + + @property + def applied_torque(self) -> ProxyArray: + """Applied torques after clipping. Shape: (N, num_joints).""" + if self._applied_torque is None: + self._applied_torque = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._applied_torque_ta = None + if self._applied_torque_ta is None: + self._applied_torque_ta = ProxyArray(self._applied_torque) + return self._applied_torque_ta + + # -- Joint properties -- + + @property + def joint_stiffness(self) -> ProxyArray: + """Joint stiffness. Shape: (N, num_joints).""" + if self._joint_stiffness is None: + self._joint_stiffness = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_stiffness_ta = None + if self._joint_stiffness_ta is None: + self._joint_stiffness_ta = ProxyArray(self._joint_stiffness) + return self._joint_stiffness_ta + + @property + def joint_damping(self) -> ProxyArray: + """Joint damping. Shape: (N, num_joints).""" + if self._joint_damping is None: + self._joint_damping = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_damping_ta = None + if self._joint_damping_ta is None: + self._joint_damping_ta = ProxyArray(self._joint_damping) + return self._joint_damping_ta + + @property + def joint_armature(self) -> ProxyArray: + """Joint armature. Shape: (N, num_joints).""" + if self._joint_armature is None: + self._joint_armature = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_armature_ta = None + if self._joint_armature_ta is None: + self._joint_armature_ta = ProxyArray(self._joint_armature) + return self._joint_armature_ta + + @property + def joint_friction_coeff(self) -> ProxyArray: + """Joint static friction coefficient. Shape: (N, num_joints).""" + if self._joint_friction_coeff is None: + self._joint_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_friction_coeff_ta = None + if self._joint_friction_coeff_ta is None: + self._joint_friction_coeff_ta = ProxyArray(self._joint_friction_coeff) + return self._joint_friction_coeff_ta + + @property + def joint_dynamic_friction_coeff(self) -> ProxyArray: + """Joint dynamic friction coefficient. Shape: (N, num_joints).""" + if self._joint_dynamic_friction_coeff is None: + self._joint_dynamic_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_dynamic_friction_coeff_ta = None + if self._joint_dynamic_friction_coeff_ta is None: + self._joint_dynamic_friction_coeff_ta = ProxyArray(self._joint_dynamic_friction_coeff) + return self._joint_dynamic_friction_coeff_ta + + @property + def joint_viscous_friction_coeff(self) -> ProxyArray: + """Joint viscous friction coefficient. Shape: (N, num_joints).""" + if self._joint_viscous_friction_coeff is None: + self._joint_viscous_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_viscous_friction_coeff_ta = None + if self._joint_viscous_friction_coeff_ta is None: + self._joint_viscous_friction_coeff_ta = ProxyArray(self._joint_viscous_friction_coeff) + return self._joint_viscous_friction_coeff_ta + + @property + def joint_pos_limits(self) -> ProxyArray: + """Joint position limits [lower, upper]. dtype=wp.vec2f, shape: (N, num_joints).""" + if self._joint_pos_limits is None: + np_limits = np.zeros((self._num_instances, self._num_joints, 2), dtype=np.float32) + np_limits[..., 0] = -float("inf") + np_limits[..., 1] = float("inf") + self._joint_pos_limits = wp.array(np_limits, dtype=wp.float32, device=self.device).view(wp.vec2f) + self._joint_pos_limits_ta = None + if self._joint_pos_limits_ta is None: + self._joint_pos_limits_ta = ProxyArray(self._joint_pos_limits) + return self._joint_pos_limits_ta + + @property + def joint_vel_limits(self) -> ProxyArray: + """Joint velocity limits. Shape: (N, num_joints).""" + if self._joint_vel_limits is None: + self._joint_vel_limits = wp.full( + (self._num_instances, self._num_joints), dtype=wp.float32, value=float("inf"), device=self.device + ) + self._joint_vel_limits_ta = None + if self._joint_vel_limits_ta is None: + self._joint_vel_limits_ta = ProxyArray(self._joint_vel_limits) + return self._joint_vel_limits_ta + + @property + def joint_effort_limits(self) -> ProxyArray: + """Joint effort limits. Shape: (N, num_joints).""" + if self._joint_effort_limits is None: + self._joint_effort_limits = wp.full( + (self._num_instances, self._num_joints), dtype=wp.float32, value=float("inf"), device=self.device + ) + self._joint_effort_limits_ta = None + if self._joint_effort_limits_ta is None: + self._joint_effort_limits_ta = ProxyArray(self._joint_effort_limits) + return self._joint_effort_limits_ta + + @property + def soft_joint_pos_limits(self) -> ProxyArray: + """Soft joint position limits. Shape: (N, num_joints, 2).""" + if self._soft_joint_pos_limits is None: + self._soft_joint_pos_limits = wp.clone(self.joint_pos_limits.warp, self.device) + self._soft_joint_pos_limits_ta = None + if self._soft_joint_pos_limits_ta is None: + self._soft_joint_pos_limits_ta = ProxyArray(self._soft_joint_pos_limits) + return self._soft_joint_pos_limits_ta + + @property + def soft_joint_vel_limits(self) -> ProxyArray: + """Soft joint velocity limits. Shape: (N, num_joints).""" + if self._soft_joint_vel_limits is None: + self._soft_joint_vel_limits = wp.clone(self.joint_vel_limits.warp, self.device) + self._soft_joint_vel_limits_ta = None + if self._soft_joint_vel_limits_ta is None: + self._soft_joint_vel_limits_ta = ProxyArray(self._soft_joint_vel_limits) + return self._soft_joint_vel_limits_ta + + @property + def gear_ratio(self) -> ProxyArray: + """Gear ratio. Shape: (N, num_joints).""" + if self._gear_ratio is None: + self._gear_ratio = wp.ones((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._gear_ratio_ta = None + if self._gear_ratio_ta is None: + self._gear_ratio_ta = ProxyArray(self._gear_ratio) + return self._gear_ratio_ta + + # -- Joint state properties -- + + @property + def joint_pos(self) -> ProxyArray: + """Joint positions. Shape: (N, num_joints).""" + if self._joint_pos is None: + self._joint_pos = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_pos_ta = None + if self._joint_pos_ta is None: + self._joint_pos_ta = ProxyArray(self._joint_pos) + return self._joint_pos_ta + + @property + def joint_vel(self) -> ProxyArray: + """Joint velocities. Shape: (N, num_joints).""" + if self._joint_vel is None: + self._joint_vel = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_vel_ta = None + if self._joint_vel_ta is None: + self._joint_vel_ta = ProxyArray(self._joint_vel) + return self._joint_vel_ta + + @property + def joint_acc(self) -> ProxyArray: + """Joint accelerations. Shape: (N, num_joints).""" + if self._joint_acc is None: + self._joint_acc = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_acc_ta = None + if self._joint_acc_ta is None: + self._joint_acc_ta = ProxyArray(self._joint_acc) + return self._joint_acc_ta + + # -- Root state properties (link frame) -- + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose in world frame. dtype=wp.transformf, shape: (N,).""" + if self._root_link_pose_w is None: + pose_np = np.zeros((self._num_instances, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._root_link_pose_w = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._root_link_pose_w_ta = None + if self._root_link_pose_w_ta is None: + self._root_link_pose_w_ta = ProxyArray(self._root_link_pose_w) + return self._root_link_pose_w_ta + + @property + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity in world frame. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._root_link_vel_w is None: + self._root_link_vel_w = wp.zeros((self._num_instances, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._root_link_vel_w_ta = None + if self._root_link_vel_w_ta is None: + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w) + return self._root_link_vel_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Root link state in world frame. Shape: (N, 13).""" + pose_t = self.root_link_pose_w.torch + vel_t = self.root_link_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in world frame. Shape: (N,), dtype=wp.vec3f.""" + t = self.root_link_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation in world frame (x, y, z, w). Shape: (N,), dtype=wp.quatf.""" + t = self.root_link_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root link linear velocity in world frame. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_link_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in world frame. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_link_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Root state properties (CoM frame) -- + + @property + def root_com_pose_w(self) -> ProxyArray: + """Root CoM pose in world frame. dtype=wp.transformf, shape: (N,).""" + if self._root_com_pose_w is None: + self._root_com_pose_w = wp.clone(self.root_link_pose_w.warp, self.device) + self._root_com_pose_w_ta = None + if self._root_com_pose_w_ta is None: + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w) + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root CoM velocity in world frame. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._root_com_vel_w is None: + self._root_com_vel_w = wp.clone(self.root_link_vel_w.warp, self.device) + self._root_com_vel_w_ta = None + if self._root_com_vel_w_ta is None: + self._root_com_vel_w_ta = ProxyArray(self._root_com_vel_w) + return self._root_com_vel_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Root CoM state in world frame. Shape: (N, 13).""" + pose_t = self.root_com_pose_w.torch + vel_t = self.root_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + @property + def root_state_w(self) -> ProxyArray: + """Root state (link pose + CoM velocity). Shape: (N, 13).""" + pose_t = self.root_link_pose_w.torch + vel_t = self.root_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def root_com_pos_w(self) -> ProxyArray: + """Root CoM position in world frame. Shape: (N,), dtype=wp.vec3f.""" + t = self.root_com_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root CoM orientation in world frame. Shape: (N,), dtype=wp.quatf.""" + t = self.root_com_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root CoM linear velocity in world frame. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_com_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root CoM angular velocity in world frame. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_com_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link poses in world frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_link_pose_w is None: + pose_np = np.zeros((self._num_instances, self._num_bodies, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_link_pose_w = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_link_pose_w_ta = None + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocities in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_link_vel_w is None: + self._body_link_vel_w = wp.zeros( + (self._num_instances, self._num_bodies, 6), dtype=wp.float32, device=self.device + ).view(wp.spatial_vectorf) + self._body_link_vel_w_ta = None + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w) + return self._body_link_vel_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Body link states in world frame. Shape: (N, num_bodies, 13).""" + pose_t = self.body_link_pose_w.torch + vel_t = self.body_link_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_link_pos_w(self) -> ProxyArray: + """Body link positions. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_link_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_link_quat_w(self) -> ProxyArray: + """Body link orientations. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_link_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Body link linear velocities in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Body link angular velocities in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body CoM poses in world frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_com_pose_w is None: + self._body_com_pose_w = wp.clone(self.body_link_pose_w.warp, self.device) + self._body_com_pose_w_ta = None + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body CoM velocities in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_com_vel_w is None: + self._body_com_vel_w = wp.clone(self.body_link_vel_w.warp, self.device) + self._body_com_vel_w_ta = None + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(self._body_com_vel_w) + return self._body_com_vel_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Body CoM states in world frame. Shape: (N, num_bodies, 13).""" + pose_t = self.body_com_pose_w.torch + vel_t = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + @property + def body_state_w(self) -> ProxyArray: + """Body states (link pose + CoM velocity). Shape: (N, num_bodies, 13).""" + pose_t = self.body_link_pose_w.torch + vel_t = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose_t, vel_t], dim=-1).contiguous())) + + @property + def body_com_acc_w(self) -> ProxyArray: + """Body CoM accelerations in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_com_acc_w is None: + self._body_com_acc_w = wp.zeros( + (self._num_instances, self._num_bodies, 6), dtype=wp.float32, device=self.device + ).view(wp.spatial_vectorf) + self._body_com_acc_w_ta = None + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Body CoM poses in body frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_com_pose_b is None: + pose_np = np.zeros((self._num_instances, self._num_bodies, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_com_pose_b = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_com_pose_b_ta = None + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b) + return self._body_com_pose_b_ta + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_com_pos_w(self) -> ProxyArray: + """Body CoM positions. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_com_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_w(self) -> ProxyArray: + """Body CoM orientations. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_com_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Body CoM linear velocities in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Body CoM angular velocities in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Body CoM linear accelerations in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + a = self.body_com_acc_w.warp + return ProxyArray(wp.array(ptr=a.ptr, shape=a.shape, dtype=wp.vec3f, strides=a.strides, device=self.device)) + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Body CoM angular accelerations in world frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + a = self.body_com_acc_w.warp + return ProxyArray( + wp.array(ptr=a.ptr + 3 * 4, shape=a.shape, dtype=wp.vec3f, strides=a.strides, device=self.device) + ) + + @property + def body_com_pos_b(self) -> ProxyArray: + """Body CoM positions in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_com_pose_b.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_b(self) -> ProxyArray: + """Body CoM orientations in body frame. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_com_pose_b.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + # -- Body properties -- + + @property + def body_mass(self) -> ProxyArray: + """Body masses. Shape: (N, num_bodies).""" + if self._body_mass is None: + self._body_mass = wp.ones((self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device) + self._body_mass_ta = None + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Body inertias (flattened 3x3). Shape: (N, num_bodies, 9).""" + if self._body_inertia is None: + np_inertia = np.zeros((self._num_instances, self._num_bodies, 9), dtype=np.float32) + np_inertia[..., 0] = 1.0 # Ixx + np_inertia[..., 4] = 1.0 # Iyy + np_inertia[..., 8] = 1.0 # Izz + self._body_inertia = wp.array(np_inertia, dtype=wp.float32, device=self.device) + self._body_inertia_ta = None + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> ProxyArray: + """Gravity projection on base. Shape: (N,), dtype=wp.vec3f.""" + np_gravity = np.zeros((self._num_instances, 3), dtype=np.float32) + np_gravity[:, 2] = -1.0 + return ProxyArray(wp.array(np_gravity, dtype=wp.float32, device=self.device).view(wp.vec3f)) + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading in world frame. Shape: (N,).""" + return ProxyArray(wp.zeros((self._num_instances,), dtype=wp.float32, device=self.device)) + + @property + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_link_lin_vel_w.warp, self.device)) + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_link_ang_vel_w.warp, self.device)) + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root CoM linear velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_com_lin_vel_w.warp, self.device)) + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root CoM angular velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_com_ang_vel_w.warp, self.device)) + + # com_pos_b and com_quat_b are inherited from BaseArticulationData + # (they delegate to body_com_pos_b and body_com_quat_b respectively) + + # -- Fixed tendon properties -- + + @property + def fixed_tendon_stiffness(self) -> ProxyArray: + """Fixed tendon stiffness. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_stiffness is None: + self._fixed_tendon_stiffness = wp.zeros( + (self._num_instances, self._num_fixed_tendons), dtype=wp.float32, device=self.device + ) + self._fixed_tendon_stiffness_ta = None + if self._fixed_tendon_stiffness_ta is None: + self._fixed_tendon_stiffness_ta = ProxyArray(self._fixed_tendon_stiffness) + return self._fixed_tendon_stiffness_ta + + @property + def fixed_tendon_damping(self) -> ProxyArray: + """Fixed tendon damping. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_damping is None: + self._fixed_tendon_damping = wp.zeros( + (self._num_instances, self._num_fixed_tendons), dtype=wp.float32, device=self.device + ) + self._fixed_tendon_damping_ta = None + if self._fixed_tendon_damping_ta is None: + self._fixed_tendon_damping_ta = ProxyArray(self._fixed_tendon_damping) + return self._fixed_tendon_damping_ta + + @property + def fixed_tendon_limit_stiffness(self) -> ProxyArray: + """Fixed tendon limit stiffness. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_limit_stiffness is None: + self._fixed_tendon_limit_stiffness = wp.zeros( + (self._num_instances, self._num_fixed_tendons), dtype=wp.float32, device=self.device + ) + self._fixed_tendon_limit_stiffness_ta = None + if self._fixed_tendon_limit_stiffness_ta is None: + self._fixed_tendon_limit_stiffness_ta = ProxyArray(self._fixed_tendon_limit_stiffness) + return self._fixed_tendon_limit_stiffness_ta + + @property + def fixed_tendon_rest_length(self) -> ProxyArray: + """Fixed tendon rest length. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_rest_length is None: + self._fixed_tendon_rest_length = wp.zeros( + (self._num_instances, self._num_fixed_tendons), dtype=wp.float32, device=self.device + ) + self._fixed_tendon_rest_length_ta = None + if self._fixed_tendon_rest_length_ta is None: + self._fixed_tendon_rest_length_ta = ProxyArray(self._fixed_tendon_rest_length) + return self._fixed_tendon_rest_length_ta + + @property + def fixed_tendon_offset(self) -> ProxyArray: + """Fixed tendon offset. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_offset is None: + self._fixed_tendon_offset = wp.zeros( + (self._num_instances, self._num_fixed_tendons), dtype=wp.float32, device=self.device + ) + self._fixed_tendon_offset_ta = None + if self._fixed_tendon_offset_ta is None: + self._fixed_tendon_offset_ta = ProxyArray(self._fixed_tendon_offset) + return self._fixed_tendon_offset_ta + + @property + def fixed_tendon_pos_limits(self) -> ProxyArray: + """Fixed tendon position limits. dtype=wp.vec2f, shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0, 2), dtype=wp.float32, device=self.device)) + if self._fixed_tendon_pos_limits is None: + np_limits = np.zeros((self._num_instances, self._num_fixed_tendons, 2), dtype=np.float32) + np_limits[..., 0] = -float("inf") + np_limits[..., 1] = float("inf") + self._fixed_tendon_pos_limits = wp.array(np_limits, dtype=wp.float32, device=self.device).view(wp.vec2f) + self._fixed_tendon_pos_limits_ta = None + if self._fixed_tendon_pos_limits_ta is None: + self._fixed_tendon_pos_limits_ta = ProxyArray(self._fixed_tendon_pos_limits) + return self._fixed_tendon_pos_limits_ta + + # -- Spatial tendon properties -- + + @property + def spatial_tendon_stiffness(self) -> ProxyArray: + """Spatial tendon stiffness. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._spatial_tendon_stiffness is None: + self._spatial_tendon_stiffness = wp.zeros( + (self._num_instances, self._num_spatial_tendons), dtype=wp.float32, device=self.device + ) + self._spatial_tendon_stiffness_ta = None + if self._spatial_tendon_stiffness_ta is None: + self._spatial_tendon_stiffness_ta = ProxyArray(self._spatial_tendon_stiffness) + return self._spatial_tendon_stiffness_ta + + @property + def spatial_tendon_damping(self) -> ProxyArray: + """Spatial tendon damping. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._spatial_tendon_damping is None: + self._spatial_tendon_damping = wp.zeros( + (self._num_instances, self._num_spatial_tendons), dtype=wp.float32, device=self.device + ) + self._spatial_tendon_damping_ta = None + if self._spatial_tendon_damping_ta is None: + self._spatial_tendon_damping_ta = ProxyArray(self._spatial_tendon_damping) + return self._spatial_tendon_damping_ta + + @property + def spatial_tendon_limit_stiffness(self) -> ProxyArray: + """Spatial tendon limit stiffness. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._spatial_tendon_limit_stiffness is None: + self._spatial_tendon_limit_stiffness = wp.zeros( + (self._num_instances, self._num_spatial_tendons), dtype=wp.float32, device=self.device + ) + self._spatial_tendon_limit_stiffness_ta = None + if self._spatial_tendon_limit_stiffness_ta is None: + self._spatial_tendon_limit_stiffness_ta = ProxyArray(self._spatial_tendon_limit_stiffness) + return self._spatial_tendon_limit_stiffness_ta + + @property + def spatial_tendon_offset(self) -> ProxyArray: + """Spatial tendon offset. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return ProxyArray(wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device)) + if self._spatial_tendon_offset is None: + self._spatial_tendon_offset = wp.zeros( + (self._num_instances, self._num_spatial_tendons), dtype=wp.float32, device=self.device + ) + self._spatial_tendon_offset_ta = None + if self._spatial_tendon_offset_ta is None: + self._spatial_tendon_offset_ta = ProxyArray(self._spatial_tendon_offset) + return self._spatial_tendon_offset_ta + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Internal helper -- + + def _identity_quat_np(self, *shape: int) -> np.ndarray: + """Create identity quaternion numpy array (w, x, y, z) = (1, 0, 0, 0).""" + quat = np.zeros((*shape, 4), dtype=np.float32) + quat[..., 0] = 1.0 + return quat + + # -- Setters -- + + def set_default_root_pose(self, value: torch.Tensor) -> None: + self._default_root_pose = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._default_root_pose_ta = None + + def set_default_root_vel(self, value: torch.Tensor) -> None: + self._default_root_vel = wp.from_torch(value.to(self.device).contiguous()) + self._default_root_vel_ta = None + + def set_default_joint_pos(self, value: torch.Tensor) -> None: + self._default_joint_pos = wp.from_torch(value.to(self.device).contiguous()) + self._default_joint_pos_ta = None + + def set_default_joint_vel(self, value: torch.Tensor) -> None: + self._default_joint_vel = wp.from_torch(value.to(self.device).contiguous()) + self._default_joint_vel_ta = None + + def set_joint_pos_target(self, value: torch.Tensor) -> None: + self._joint_pos_target = wp.from_torch(value.to(self.device).contiguous()) + self._joint_pos_target_ta = None + + def set_joint_vel_target(self, value: torch.Tensor) -> None: + self._joint_vel_target = wp.from_torch(value.to(self.device).contiguous()) + self._joint_vel_target_ta = None + + def set_joint_effort_target(self, value: torch.Tensor) -> None: + self._joint_effort_target = wp.from_torch(value.to(self.device).contiguous()) + self._joint_effort_target_ta = None + + def set_computed_torque(self, value: torch.Tensor) -> None: + self._computed_torque = wp.from_torch(value.to(self.device).contiguous()) + self._computed_torque_ta = None + + def set_applied_torque(self, value: torch.Tensor) -> None: + self._applied_torque = wp.from_torch(value.to(self.device).contiguous()) + self._applied_torque_ta = None + + def set_joint_stiffness(self, value: torch.Tensor) -> None: + self._joint_stiffness = wp.from_torch(value.to(self.device).contiguous()) + self._joint_stiffness_ta = None + + def set_joint_damping(self, value: torch.Tensor) -> None: + self._joint_damping = wp.from_torch(value.to(self.device).contiguous()) + self._joint_damping_ta = None + + def set_joint_armature(self, value: torch.Tensor) -> None: + self._joint_armature = wp.from_torch(value.to(self.device).contiguous()) + self._joint_armature_ta = None + + def set_joint_friction_coeff(self, value: torch.Tensor) -> None: + self._joint_friction_coeff = wp.from_torch(value.to(self.device).contiguous()) + self._joint_friction_coeff_ta = None + + def set_joint_pos_limits(self, value: torch.Tensor) -> None: + self._joint_pos_limits = wp.from_torch(value.to(self.device).contiguous()) + self._joint_pos_limits_ta = None + + def set_joint_vel_limits(self, value: torch.Tensor) -> None: + self._joint_vel_limits = wp.from_torch(value.to(self.device).contiguous()) + self._joint_vel_limits_ta = None + + def set_joint_effort_limits(self, value: torch.Tensor) -> None: + self._joint_effort_limits = wp.from_torch(value.to(self.device).contiguous()) + self._joint_effort_limits_ta = None + + def set_soft_joint_pos_limits(self, value: torch.Tensor) -> None: + self._soft_joint_pos_limits = wp.from_torch(value.to(self.device).contiguous()) + self._soft_joint_pos_limits_ta = None + + def set_soft_joint_vel_limits(self, value: torch.Tensor) -> None: + self._soft_joint_vel_limits = wp.from_torch(value.to(self.device).contiguous()) + self._soft_joint_vel_limits_ta = None + + def set_gear_ratio(self, value: torch.Tensor) -> None: + self._gear_ratio = wp.from_torch(value.to(self.device).contiguous()) + self._gear_ratio_ta = None + + def set_joint_pos(self, value: torch.Tensor) -> None: + self._joint_pos = wp.from_torch(value.to(self.device).contiguous()) + self._joint_pos_ta = None + + def set_joint_vel(self, value: torch.Tensor) -> None: + self._joint_vel = wp.from_torch(value.to(self.device).contiguous()) + self._joint_vel_ta = None + + def set_joint_acc(self, value: torch.Tensor) -> None: + self._joint_acc = wp.from_torch(value.to(self.device).contiguous()) + self._joint_acc_ta = None + + def set_root_link_pose_w(self, value: torch.Tensor) -> None: + self._root_link_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._root_link_pose_w_ta = None + + def set_root_link_vel_w(self, value: torch.Tensor) -> None: + self._root_link_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._root_link_vel_w_ta = None + + def set_root_com_pose_w(self, value: torch.Tensor) -> None: + self._root_com_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._root_com_pose_w_ta = None + + def set_root_com_vel_w(self, value: torch.Tensor) -> None: + self._root_com_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._root_com_vel_w_ta = None + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_link_pose_w_ta = None + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_link_vel_w_ta = None + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_w_ta = None + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_com_vel_w_ta = None + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_com_acc_w_ta = None + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_b_ta = None + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = wp.from_torch(value.to(self.device).contiguous()) + self._body_mass_ta = None + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = wp.from_torch(value.to(self.device).contiguous()) + self._body_inertia_ta = None + + def set_fixed_tendon_stiffness(self, value: torch.Tensor) -> None: + self._fixed_tendon_stiffness = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_stiffness_ta = None + + def set_fixed_tendon_damping(self, value: torch.Tensor) -> None: + self._fixed_tendon_damping = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_damping_ta = None + + def set_fixed_tendon_limit_stiffness(self, value: torch.Tensor) -> None: + self._fixed_tendon_limit_stiffness = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_limit_stiffness_ta = None + + def set_fixed_tendon_rest_length(self, value: torch.Tensor) -> None: + self._fixed_tendon_rest_length = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_rest_length_ta = None + + def set_fixed_tendon_offset(self, value: torch.Tensor) -> None: + self._fixed_tendon_offset = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_offset_ta = None + + def set_fixed_tendon_pos_limits(self, value: torch.Tensor) -> None: + self._fixed_tendon_pos_limits = wp.from_torch(value.to(self.device).contiguous()) + self._fixed_tendon_pos_limits_ta = None + + def set_spatial_tendon_stiffness(self, value: torch.Tensor) -> None: + self._spatial_tendon_stiffness = wp.from_torch(value.to(self.device).contiguous()) + self._spatial_tendon_stiffness_ta = None + + def set_spatial_tendon_damping(self, value: torch.Tensor) -> None: + self._spatial_tendon_damping = wp.from_torch(value.to(self.device).contiguous()) + self._spatial_tendon_damping_ta = None + + def set_spatial_tendon_limit_stiffness(self, value: torch.Tensor) -> None: + self._spatial_tendon_limit_stiffness = wp.from_torch(value.to(self.device).contiguous()) + self._spatial_tendon_limit_stiffness_ta = None + + def set_spatial_tendon_offset(self, value: torch.Tensor) -> None: + self._spatial_tendon_offset = wp.from_torch(value.to(self.device).contiguous()) + self._spatial_tendon_offset_ta = None + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data. + + Accepts any property name as a keyword argument with a tensor value. + """ + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockArticulation: + """Mock articulation asset for testing without Isaac Sim. + + This class mimics the interface of BaseArticulation for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_joints: int, + num_bodies: int, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + fixed_tendon_names: list[str] | None = None, + spatial_tendon_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock articulation. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + num_fixed_tendons: Number of fixed tendons. + num_spatial_tendons: Number of spatial tendons. + fixed_tendon_names: Names of fixed tendons. + spatial_tendon_names: Names of spatial tendons. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_joints = num_joints + self._num_bodies = num_bodies + self._is_fixed_base = is_fixed_base + self._num_fixed_tendons = num_fixed_tendons + self._num_spatial_tendons = num_spatial_tendons + self._device = device + + self._joint_names = joint_names or [f"joint_{i}" for i in range(num_joints)] + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self._fixed_tendon_names = fixed_tendon_names or [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + self._spatial_tendon_names = spatial_tendon_names or [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + self._data = MockArticulationData( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=self._joint_names, + body_names=self._body_names, + num_fixed_tendons=num_fixed_tendons, + num_spatial_tendons=num_spatial_tendons, + fixed_tendon_names=self._fixed_tendon_names, + spatial_tendon_names=self._spatial_tendon_names, + device=device, + ) + + # Actuators (empty dict for mock) + self.actuators: dict = {} + + # -- Properties -- + + @property + def data(self) -> MockArticulationData: + """Data container for the articulation.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of articulation instances.""" + return self._num_instances + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation has a fixed base.""" + return self._is_fixed_base + + @property + def num_joints(self) -> int: + """Number of joints.""" + return self._num_joints + + @property + def num_bodies(self) -> int: + """Number of bodies.""" + return self._num_bodies + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons.""" + return self._num_fixed_tendons + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons.""" + return self._num_spatial_tendons + + @property + def joint_names(self) -> list[str]: + """Ordered joint names.""" + return self._joint_names + + @property + def body_names(self) -> list[str]: + """Ordered body names.""" + return self._body_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered fixed tendon names.""" + return self._fixed_tendon_names + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered spatial tendon names.""" + return self._spatial_tendon_names + + @property + def root_view(self) -> None: + """Returns None (no physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + # -- Core methods -- + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Reset articulation state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update articulation data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns.""" + return self._find_by_regex(self._body_names, name_keys, preserve_order) + + def find_joints( + self, + name_keys: str | Sequence[str], + joint_subset: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find joints by name regex patterns.""" + names = self._joint_names + if joint_subset is not None: + subset_indices = [self._joint_names.index(n) for n in joint_subset] + names = joint_subset + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + indices = [subset_indices[i] for i in indices] + return indices, matched_names + return self._find_by_regex(names, name_keys, preserve_order) + + def find_fixed_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find fixed tendons by name regex patterns.""" + names = self._fixed_tendon_names + if tendon_subsets is not None: + subset_indices = [self._fixed_tendon_names.index(n) for n in tendon_subsets] + names = tendon_subsets + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + indices = [subset_indices[i] for i in indices] + return indices, matched_names + return self._find_by_regex(names, name_keys, preserve_order) + + def find_spatial_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find spatial tendons by name regex patterns.""" + names = self._spatial_tendon_names + if tendon_subsets is not None: + subset_indices = [self._spatial_tendon_names.index(n) for n in tendon_subsets] + names = tendon_subsets + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + indices = [subset_indices[i] for i in indices] + return indices, matched_names + return self._find_by_regex(names, name_keys, preserve_order) + + def _find_by_regex( + self, names: list[str], name_keys: str | Sequence[str], preserve_order: bool + ) -> tuple[list[int], list[str]]: + """Find items by regex patterns.""" + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + # -- State writer methods (no-op for mock) -- + + def write_root_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root state to simulation.""" + pass + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM state to simulation.""" + pass + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link state to simulation.""" + pass + + def write_root_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root pose to simulation.""" + pass + + def write_root_link_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link pose to simulation.""" + pass + + def write_root_com_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM pose to simulation.""" + pass + + def write_root_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root velocity to simulation.""" + pass + + def write_root_com_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM velocity to simulation.""" + pass + + def write_root_link_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link velocity to simulation.""" + pass + + def write_joint_state_to_sim( + self, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint state to simulation.""" + pass + + def write_joint_position_to_sim( + self, + position: torch.Tensor | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint positions to simulation.""" + pass + + def write_joint_velocity_to_sim( + self, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint velocities to simulation.""" + pass + + def write_joint_stiffness_to_sim( + self, + stiffness: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint stiffness to simulation.""" + pass + + def write_joint_damping_to_sim( + self, + damping: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint damping to simulation.""" + pass + + def write_joint_position_limit_to_sim( + self, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint position limits to simulation.""" + pass + + def write_joint_velocity_limit_to_sim( + self, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint velocity limits to simulation.""" + pass + + def write_joint_effort_limit_to_sim( + self, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint effort limits to simulation.""" + pass + + def write_joint_armature_to_sim( + self, + armature: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint armature to simulation.""" + pass + + def write_joint_friction_coefficient_to_sim( + self, + coeff: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint friction coefficient to simulation.""" + pass + + def write_joint_friction_to_sim( + self, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint friction to simulation.""" + pass + + def write_joint_limits_to_sim( + self, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write joint limits to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor | wp.array, + torques: torch.Tensor | wp.array, + positions: torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + is_global: bool = False, + ) -> None: + """Set external forces and torques.""" + pass + + def set_joint_position_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint position targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_pos_target is None: + self._data._joint_pos_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self._device + ) + pos_target = wp.to_torch(self._data._joint_pos_target) + pos_target[env_ids, joint_ids] = target.to(self._device) + self._data._joint_pos_target = wp.from_torch(pos_target) + self._data._joint_pos_target_ta = None + + def set_joint_velocity_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint velocity targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_vel_target is None: + self._data._joint_vel_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self._device + ) + vel_target = wp.to_torch(self._data._joint_vel_target) + vel_target[env_ids, joint_ids] = target.to(self._device) + self._data._joint_vel_target = wp.from_torch(vel_target) + self._data._joint_vel_target_ta = None + + def set_joint_effort_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint effort targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_effort_target is None: + self._data._joint_effort_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self._device + ) + effort_target = wp.to_torch(self._data._joint_effort_target) + effort_target[env_ids, joint_ids] = target.to(self._device) + self._data._joint_effort_target = wp.from_torch(effort_target) + self._data._joint_effort_target_ta = None + + # -- Tendon methods (fixed) -- + + def set_fixed_tendon_stiffness( + self, + stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon stiffness.""" + pass + + def set_fixed_tendon_damping( + self, + damping: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon damping.""" + pass + + def set_fixed_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit stiffness.""" + pass + + def set_fixed_tendon_position_limit( + self, + limit: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon position limit.""" + pass + + def set_fixed_tendon_limit( + self, + limit: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit (alias for set_fixed_tendon_position_limit).""" + pass + + def set_fixed_tendon_rest_length( + self, + rest_length: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon rest length.""" + pass + + def set_fixed_tendon_offset( + self, + offset: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon offset.""" + pass + + def write_fixed_tendon_properties_to_sim( + self, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write fixed tendon properties to simulation.""" + pass + + # -- Tendon methods (spatial) -- + + def set_spatial_tendon_stiffness( + self, + stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon stiffness.""" + pass + + def set_spatial_tendon_damping( + self, + damping: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon damping.""" + pass + + def set_spatial_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon limit stiffness.""" + pass + + def set_spatial_tendon_offset( + self, + offset: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon offset.""" + pass + + def write_spatial_tendon_properties_to_sim( + self, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write spatial tendon properties to simulation.""" + pass + + # -- Shape validation helpers -- + + _DTYPE_TO_TORCH_TRAILING_DIMS: dict[type, tuple[int, ...]] = { + wp.float32: (), + wp.int32: (), + wp.vec2f: (2,), + wp.vec3f: (3,), + wp.vec4f: (4,), + wp.transformf: (7,), + wp.spatial_vectorf: (6,), + } + + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" + ) -> None: + if __debug__: + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + offset = self._DTYPE_TO_TORCH_TRAILING_DIMS.get(dtype) + if offset is None: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), ( + f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + ) + + def _resolve_n_envs(self, env_ids): + if env_ids is None: + return self._num_instances + if isinstance(env_ids, (torch.Tensor, wp.array)): + return env_ids.shape[0] + return len(env_ids) + + def _resolve_n_items(self, item_ids, total): + if item_ids is None or item_ids == slice(None): + return total + if isinstance(item_ids, (torch.Tensor, wp.array)): + return item_ids.shape[0] + return len(item_ids) + + # -- Index/Mask methods -- + + # Root pose/velocity + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + # Joint write methods + + def write_joint_position_to_sim_index( + self, + *, + position: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(position, (n_e, n_j), wp.float32, "position") + + def write_joint_position_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(position, (self._num_instances, self._num_joints), wp.float32, "position") + + def write_joint_velocity_to_sim_index( + self, + *, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(velocity, (n_e, n_j), wp.float32, "velocity") + + def write_joint_velocity_to_sim_mask( + self, + *, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(velocity, (self._num_instances, self._num_joints), wp.float32, "velocity") + + def write_joint_stiffness_to_sim_index( + self, + *, + stiffness: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(stiffness, (n_e, n_j), wp.float32, "stiffness") + + def write_joint_stiffness_to_sim_mask( + self, + *, + stiffness: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(stiffness, (self._num_instances, self._num_joints), wp.float32, "stiffness") + + def write_joint_damping_to_sim_index( + self, + *, + damping: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(damping, (n_e, n_j), wp.float32, "damping") + + def write_joint_damping_to_sim_mask( + self, + *, + damping: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(damping, (self._num_instances, self._num_joints), wp.float32, "damping") + + def write_joint_position_limit_to_sim_index( + self, + *, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + warn_limit_violation: bool = True, + ) -> None: + if isinstance(limits, (int, float)): + raise ValueError("Float scalars are not supported for position limits (vec2f dtype)") + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(limits, (n_e, n_j), wp.vec2f, "limits") + + def write_joint_position_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | float | wp.array, + warn_limit_violation: bool = True, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + if isinstance(limits, (int, float)): + raise ValueError("Float scalars are not supported for position limits (vec2f dtype)") + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.vec2f, "limits") + + def write_joint_velocity_limit_to_sim_index( + self, + *, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(limits, (n_e, n_j), wp.float32, "limits") + + def write_joint_velocity_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.float32, "limits") + + def write_joint_effort_limit_to_sim_index( + self, + *, + limits: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(limits, (n_e, n_j), wp.float32, "limits") + + def write_joint_effort_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.float32, "limits") + + def write_joint_armature_to_sim_index( + self, + *, + armature: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(armature, (n_e, n_j), wp.float32, "armature") + + def write_joint_armature_to_sim_mask( + self, + *, + armature: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(armature, (self._num_instances, self._num_joints), wp.float32, "armature") + + def write_joint_friction_coefficient_to_sim_index( + self, + *, + joint_friction_coeff: torch.Tensor | float | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(joint_friction_coeff, (n_e, n_j), wp.float32, "joint_friction_coeff") + + def write_joint_friction_coefficient_to_sim_mask( + self, + *, + joint_friction_coeff: torch.Tensor | float | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + joint_friction_coeff, (self._num_instances, self._num_joints), wp.float32, "joint_friction_coeff" + ) + + # Body setter methods + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(masses, (n_e, n_b), wp.float32, "masses") + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(masses, (self._num_instances, self._num_bodies), wp.float32, "masses") + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(coms, (n_e, n_b), wp.transformf, "coms") + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(coms, (self._num_instances, self._num_bodies), wp.transformf, "coms") + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(inertias, (n_e, n_b, 9), wp.float32, "inertias") + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(inertias, (self._num_instances, self._num_bodies, 9), wp.float32, "inertias") + + # Joint target methods + + def set_joint_position_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(target, (n_e, n_j), wp.float32, "target") + + def set_joint_position_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(target, (self._num_instances, self._num_joints), wp.float32, "target") + + def set_joint_velocity_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(target, (n_e, n_j), wp.float32, "target") + + def set_joint_velocity_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(target, (self._num_instances, self._num_joints), wp.float32, "target") + + def set_joint_effort_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_j = self._resolve_n_items(joint_ids, self._num_joints) + self.assert_shape_and_dtype(target, (n_e, n_j), wp.float32, "target") + + def set_joint_effort_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(target, (self._num_instances, self._num_joints), wp.float32, "target") + + # Fixed tendon methods + + def set_fixed_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(stiffness, (n_e, n_t), wp.float32, "stiffness") + + def set_fixed_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(stiffness, (self._num_instances, self._num_fixed_tendons), wp.float32, "stiffness") + + def set_fixed_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(damping, (n_e, n_t), wp.float32, "damping") + + def set_fixed_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(damping, (self._num_instances, self._num_fixed_tendons), wp.float32, "damping") + + def set_fixed_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(limit_stiffness, (n_e, n_t), wp.float32, "limit_stiffness") + + def set_fixed_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + limit_stiffness, (self._num_instances, self._num_fixed_tendons), wp.float32, "limit_stiffness" + ) + + def set_fixed_tendon_position_limit_index( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(limit, (n_e, n_t), wp.float32, "limit") + + def set_fixed_tendon_position_limit_mask( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(limit, (self._num_instances, self._num_fixed_tendons), wp.float32, "limit") + + def set_fixed_tendon_rest_length_index( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(rest_length, (n_e, n_t), wp.float32, "rest_length") + + def set_fixed_tendon_rest_length_mask( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + rest_length, (self._num_instances, self._num_fixed_tendons), wp.float32, "rest_length" + ) + + def set_fixed_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(fixed_tendon_ids, self._num_fixed_tendons) + self.assert_shape_and_dtype(offset, (n_e, n_t), wp.float32, "offset") + + def set_fixed_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(offset, (self._num_instances, self._num_fixed_tendons), wp.float32, "offset") + + def write_fixed_tendon_properties_to_sim_index( + self, + *, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + pass + + def write_fixed_tendon_properties_to_sim_mask( + self, + *, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + pass + + # Spatial tendon methods + + def set_spatial_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(spatial_tendon_ids, self._num_spatial_tendons) + self.assert_shape_and_dtype(stiffness, (n_e, n_t), wp.float32, "stiffness") + + def set_spatial_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + stiffness, (self._num_instances, self._num_spatial_tendons), wp.float32, "stiffness" + ) + + def set_spatial_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(spatial_tendon_ids, self._num_spatial_tendons) + self.assert_shape_and_dtype(damping, (n_e, n_t), wp.float32, "damping") + + def set_spatial_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(damping, (self._num_instances, self._num_spatial_tendons), wp.float32, "damping") + + def set_spatial_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(spatial_tendon_ids, self._num_spatial_tendons) + self.assert_shape_and_dtype(limit_stiffness, (n_e, n_t), wp.float32, "limit_stiffness") + + def set_spatial_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + limit_stiffness, (self._num_instances, self._num_spatial_tendons), wp.float32, "limit_stiffness" + ) + + def set_spatial_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_t = self._resolve_n_items(spatial_tendon_ids, self._num_spatial_tendons) + self.assert_shape_and_dtype(offset, (n_e, n_t), wp.float32, "offset") + + def set_spatial_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(offset, (self._num_instances, self._num_spatial_tendons), wp.float32, "offset") + + def write_spatial_tendon_properties_to_sim_index( + self, + *, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + pass + + def write_spatial_tendon_properties_to_sim_mask( + self, + *, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py new file mode 100644 index 000000000000..32f5f2bc3017 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py @@ -0,0 +1,1043 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock rigid object asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.assets.__init__.py (which needs omni.timeline) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "assets" / "rigid_object" / "base_rigid_object_data.py" + _spec = importlib.util.spec_from_file_location("_base_rigid_object_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseRigidObjectData = _mod.BaseRigidObjectData + + +class MockRigidObjectData(BaseRigidObjectData): + """Mock data container for rigid object asset. + + This class inherits from BaseRigidObjectData to get shorthand and deprecated properties + for free (e.g., root_pose_w, body_pos_w, com_pos_b, default_mass, etc.). + All tensor properties return zero warp arrays with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object data. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies (single body for rigid object). + device: Device for tensor allocation. + """ + super().__init__(root_view=None, device=device) + self._create_buffers() + + self._num_instances = num_instances + self._num_bodies = 1 # Rigid object always has 1 body + + self.body_names = body_names or ["body"] + + # Default states + self._default_root_pose: wp.array | None = None + self._default_root_vel: wp.array | None = None + + # Root state (link frame) + self._root_link_pose_w: wp.array | None = None + self._root_link_vel_w: wp.array | None = None + + # Root state (CoM frame) + self._root_com_pose_w: wp.array | None = None + self._root_com_vel_w: wp.array | None = None + + # Body state (link frame) + self._body_link_pose_w: wp.array | None = None + self._body_link_vel_w: wp.array | None = None + + # Body state (CoM frame) + self._body_com_pose_w: wp.array | None = None + self._body_com_vel_w: wp.array | None = None + self._body_com_acc_w: wp.array | None = None + self._body_com_pose_b: wp.array | None = None + + # Body properties + self._body_mass: wp.array | None = None + self._body_inertia: wp.array | None = None + + # ProxyArray caches + self._default_root_pose_ta: ProxyArray | None = None + self._default_root_vel_ta: ProxyArray | None = None + self._root_link_pose_w_ta: ProxyArray | None = None + self._root_link_vel_w_ta: ProxyArray | None = None + self._root_com_pose_w_ta: ProxyArray | None = None + self._root_com_vel_w_ta: ProxyArray | None = None + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + + def _identity_quat(self, *shape: int) -> wp.array: + """Create identity quaternion warp array (w, x, y, z) = (1, 0, 0, 0).""" + quat_np = np.zeros((*shape, 4), dtype=np.float32) + quat_np[..., 0] = 1.0 + return wp.array(quat_np, dtype=wp.float32, device=self.device) + + # -- Default state properties -- + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose. dtype=wp.transformf, shape: (N,).""" + if self._default_root_pose is None: + pose_np = np.zeros((self._num_instances, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._default_root_pose = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._default_root_pose_ta = None + if self._default_root_pose_ta is None: + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + return self._default_root_pose_ta + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._default_root_vel is None: + self._default_root_vel = wp.zeros((self._num_instances, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._default_root_vel_ta = None + if self._default_root_vel_ta is None: + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + return self._default_root_vel_ta + + @property + def default_root_state(self) -> ProxyArray: + """Default root state. Shape: (N, 13).""" + pose = self.default_root_pose.torch + vel = self.default_root_vel.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # -- Root state properties (link frame) -- + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose in world frame. dtype=wp.transformf, shape: (N,).""" + if self._root_link_pose_w is None: + pose_np = np.zeros((self._num_instances, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._root_link_pose_w = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._root_link_pose_w_ta = None + if self._root_link_pose_w_ta is None: + self._root_link_pose_w_ta = ProxyArray(self._root_link_pose_w) + return self._root_link_pose_w_ta + + @property + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity in world frame. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._root_link_vel_w is None: + self._root_link_vel_w = wp.zeros((self._num_instances, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._root_link_vel_w_ta = None + if self._root_link_vel_w_ta is None: + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w) + return self._root_link_vel_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Root link state in world frame. Shape: (N, 13).""" + pose = self.root_link_pose_w.torch + vel = self.root_link_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position. Shape: (N,), dtype=wp.vec3f.""" + t = self.root_link_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + # Sliced properties (convert through torch for simplicity in mock) + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation. Shape: (N,), dtype=wp.quatf.""" + t = self.root_link_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root link linear velocity. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_link_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_link_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Root state properties (CoM frame) -- + + @property + def root_com_pose_w(self) -> ProxyArray: + """Root CoM pose in world frame. dtype=wp.transformf, shape: (N,).""" + if self._root_com_pose_w is None: + self._root_com_pose_w = wp.clone(self.root_link_pose_w.warp, self.device) + self._root_com_pose_w_ta = None + if self._root_com_pose_w_ta is None: + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w) + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root CoM velocity in world frame. dtype=wp.spatial_vectorf, shape: (N,).""" + if self._root_com_vel_w is None: + self._root_com_vel_w = wp.clone(self.root_link_vel_w.warp, self.device) + self._root_com_vel_w_ta = None + if self._root_com_vel_w_ta is None: + self._root_com_vel_w_ta = ProxyArray(self._root_com_vel_w) + return self._root_com_vel_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Root CoM state in world frame. Shape: (N, 13).""" + pose = self.root_com_pose_w.torch + vel = self.root_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + @property + def root_state_w(self) -> ProxyArray: + """Root state (link pose + CoM velocity). Shape: (N, 13).""" + pose = self.root_link_pose_w.torch + vel = self.root_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def root_com_pos_w(self) -> ProxyArray: + """Root CoM position. Shape: (N,), dtype=wp.vec3f.""" + t = self.root_com_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + # Sliced properties (convert through torch for simplicity in mock) + @property + def root_com_quat_w(self) -> ProxyArray: + """Root CoM orientation. Shape: (N,), dtype=wp.quatf.""" + t = self.root_com_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root CoM linear velocity. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_com_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root CoM angular velocity. Shape: (N,), dtype=wp.vec3f.""" + v = self.root_com_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose in world frame. dtype=wp.transformf, shape: (N, 1).""" + if self._body_link_pose_w is None: + pose_np = np.zeros((self._num_instances, 1, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_link_pose_w = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_link_pose_w_ta = None + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity in world frame. dtype=wp.spatial_vectorf, shape: (N, 1).""" + if self._body_link_vel_w is None: + self._body_link_vel_w = wp.zeros((self._num_instances, 1, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._body_link_vel_w_ta = None + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w) + return self._body_link_vel_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Body link state in world frame. Shape: (N, 1, 13).""" + pose = self.body_link_pose_w.torch + vel = self.body_link_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_link_pos_w(self) -> ProxyArray: + """Body link position. Shape: (N, 1), dtype=wp.vec3f.""" + t = self.body_link_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + # Sliced properties (convert through torch for simplicity in mock) + @property + def body_link_quat_w(self) -> ProxyArray: + """Body link orientation. Shape: (N, 1), dtype=wp.quatf.""" + t = self.body_link_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Body link linear velocity. Shape: (N, 1), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Body link angular velocity. Shape: (N, 1), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body CoM pose in world frame. dtype=wp.transformf, shape: (N, 1).""" + if self._body_com_pose_w is None: + self._body_com_pose_w = wp.clone(self.body_link_pose_w.warp, self.device) + self._body_com_pose_w_ta = None + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body CoM velocity in world frame. dtype=wp.spatial_vectorf, shape: (N, 1).""" + if self._body_com_vel_w is None: + self._body_com_vel_w = wp.clone(self.body_link_vel_w.warp, self.device) + self._body_com_vel_w_ta = None + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(self._body_com_vel_w) + return self._body_com_vel_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Body CoM state in world frame. Shape: (N, 1, 13).""" + pose = self.body_com_pose_w.torch + vel = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + @property + def body_state_w(self) -> ProxyArray: + """Body state (link pose + CoM vel). Shape: (N, 1, 13).""" + pose = self.body_link_pose_w.torch + vel = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + @property + def body_com_acc_w(self) -> ProxyArray: + """Body CoM acceleration in world frame. dtype=wp.spatial_vectorf, shape: (N, 1).""" + if self._body_com_acc_w is None: + self._body_com_acc_w = wp.zeros((self._num_instances, 1, 6), dtype=wp.float32, device=self.device).view( + wp.spatial_vectorf + ) + self._body_com_acc_w_ta = None + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Body CoM pose in body frame. dtype=wp.transformf, shape: (N, 1).""" + if self._body_com_pose_b is None: + pose_np = np.zeros((self._num_instances, 1, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_com_pose_b = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_com_pose_b_ta = None + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b) + return self._body_com_pose_b_ta + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_com_pos_w(self) -> ProxyArray: + """Body CoM position. Shape: (N, 1), dtype=wp.vec3f.""" + t = self.body_com_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_w(self) -> ProxyArray: + """Body CoM orientation. Shape: (N, 1), dtype=wp.quatf.""" + t = self.body_com_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Body CoM linear velocity. Shape: (N, 1), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Body CoM angular velocity. Shape: (N, 1), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Body CoM linear acceleration. Shape: (N, 1), dtype=wp.vec3f.""" + a = self.body_com_acc_w.warp + return ProxyArray(wp.array(ptr=a.ptr, shape=a.shape, dtype=wp.vec3f, strides=a.strides, device=self.device)) + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Body CoM angular acceleration. Shape: (N, 1), dtype=wp.vec3f.""" + a = self.body_com_acc_w.warp + return ProxyArray( + wp.array(ptr=a.ptr + 3 * 4, shape=a.shape, dtype=wp.vec3f, strides=a.strides, device=self.device) + ) + + @property + def body_com_pos_b(self) -> ProxyArray: + """Body CoM position in body frame. Shape: (N, 1), dtype=wp.vec3f.""" + t = self.body_com_pose_b.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_b(self) -> ProxyArray: + """Body CoM orientation in body frame. Shape: (N, 1), dtype=wp.quatf.""" + t = self.body_com_pose_b.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + # -- Body properties -- + + @property + def body_mass(self) -> ProxyArray: + """Body mass. Shape: (N, 1).""" + if self._body_mass is None: + self._body_mass = wp.ones((self._num_instances, 1), dtype=wp.float32, device=self.device) + self._body_mass_ta = None + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Body inertia (flattened 3x3). Shape: (N, 1, 9).""" + if self._body_inertia is None: + inertia_np = np.zeros((self._num_instances, 1, 9), dtype=np.float32) + inertia_np[..., 0] = 1.0 # Ixx + inertia_np[..., 4] = 1.0 # Iyy + inertia_np[..., 8] = 1.0 # Izz + self._body_inertia = wp.array(inertia_np, dtype=wp.float32, device=self.device) + self._body_inertia_ta = None + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> ProxyArray: + """Gravity projection on body. Shape: (N,), dtype=wp.vec3f.""" + gravity_np = np.zeros((self._num_instances, 3), dtype=np.float32) + gravity_np[:, 2] = -1.0 + return ProxyArray(wp.array(gravity_np, dtype=wp.float32, device=self.device).view(wp.vec3f)) + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading in world frame. Shape: (N,).""" + return ProxyArray(wp.zeros((self._num_instances,), dtype=wp.float32, device=self.device)) + + @property + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_link_lin_vel_w.warp, self.device)) + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_link_ang_vel_w.warp, self.device)) + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root CoM linear velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_com_lin_vel_w.warp, self.device)) + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root CoM angular velocity in body frame. Shape: (N,), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.root_com_ang_vel_w.warp, self.device)) + + # -- Shorthand properties (root_pose_w, body_pos_w, com_pos_b, etc.) -- + + # com_pos_b and com_quat_b are inherited from BaseRigidObjectData + # (they delegate to body_com_pos_b and body_com_quat_b respectively) + + # Inherited from BaseRigidObjectData + + # -- Deprecated properties (default_mass, default_inertia) -- + # Inherited from BaseRigidObjectData + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Setters -- + + def set_default_root_pose(self, value: torch.Tensor) -> None: + self._default_root_pose = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._default_root_pose_ta = None + + def set_default_root_vel(self, value: torch.Tensor) -> None: + self._default_root_vel = wp.from_torch(value.to(self.device).contiguous()) + self._default_root_vel_ta = None + + def set_root_link_pose_w(self, value: torch.Tensor) -> None: + self._root_link_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._root_link_pose_w_ta = None + + def set_root_link_vel_w(self, value: torch.Tensor) -> None: + self._root_link_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._root_link_vel_w_ta = None + + def set_root_com_pose_w(self, value: torch.Tensor) -> None: + self._root_com_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._root_com_pose_w_ta = None + + def set_root_com_vel_w(self, value: torch.Tensor) -> None: + self._root_com_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._root_com_vel_w_ta = None + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_link_pose_w_ta = None + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_link_vel_w_ta = None + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_w_ta = None + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_com_vel_w_ta = None + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = wp.from_torch(value.to(self.device).contiguous()) + self._body_com_acc_w_ta = None + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_b_ta = None + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = wp.from_torch(value.to(self.device).contiguous()) + self._body_mass_ta = None + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = wp.from_torch(value.to(self.device).contiguous()) + self._body_inertia_ta = None + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data.""" + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockRigidObject: + """Mock rigid object asset for testing without Isaac Sim. + + This class mimics the interface of BaseRigidObject for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies (single body for rigid object). + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_bodies = 1 + self._device = device + self._body_names = body_names or ["body"] + + self._data = MockRigidObjectData( + num_instances=num_instances, + body_names=self._body_names, + device=device, + ) + + # -- Properties -- + + @property + def data(self) -> MockRigidObjectData: + """Data container for the rigid object.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of rigid object instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of bodies (always 1 for rigid object).""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Body names.""" + return self._body_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def root_view(self) -> None: + """Returns None (no physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + # -- Core methods -- + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Reset rigid object state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update rigid object data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns.""" + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + # -- State writer methods (no-op for mock) -- + + def write_root_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root state to simulation.""" + pass + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM state to simulation.""" + pass + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link state to simulation.""" + pass + + def write_root_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root pose to simulation.""" + pass + + def write_root_link_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link pose to simulation.""" + pass + + def write_root_com_pose_to_sim( + self, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM pose to simulation.""" + pass + + def write_root_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root velocity to simulation.""" + pass + + def write_root_com_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root CoM velocity to simulation.""" + pass + + def write_root_link_velocity_to_sim( + self, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write root link velocity to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor | wp.array, + torques: torch.Tensor | wp.array, + positions: torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + is_global: bool = False, + ) -> None: + """Set external forces and torques.""" + pass + + # -- Shape validation helpers -- + + _DTYPE_TO_TORCH_TRAILING_DIMS: dict[type, tuple[int, ...]] = { + wp.float32: (), + wp.int32: (), + wp.vec2f: (2,), + wp.vec3f: (3,), + wp.vec4f: (4,), + wp.transformf: (7,), + wp.spatial_vectorf: (6,), + } + + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" + ) -> None: + if __debug__: + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + offset = self._DTYPE_TO_TORCH_TRAILING_DIMS.get(dtype) + if offset is None: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), ( + f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + ) + + def _resolve_n_envs(self, env_ids): + if env_ids is None: + return self._num_instances + if isinstance(env_ids, (torch.Tensor, wp.array)): + return env_ids.shape[0] + return len(env_ids) + + def _resolve_n_items(self, item_ids, total): + if item_ids is None or item_ids == slice(None): + return total + if isinstance(item_ids, (torch.Tensor, wp.array)): + return item_ids.shape[0] + return len(item_ids) + + # -- Index/Mask methods -- + + # Root pose writers + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + + # Root velocity writers + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n = self._resolve_n_envs(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + + # Body property setters + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(masses, (n_e, n_b), wp.float32, "masses") + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(masses, (self._num_instances, self._num_bodies), wp.float32, "masses") + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(coms, (n_e, n_b), wp.transformf, "coms") + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(coms, (self._num_instances, self._num_bodies), wp.transformf, "coms") + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(inertias, (n_e, n_b, 9), wp.float32, "inertias") + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(inertias, (self._num_instances, self._num_bodies, 9), wp.float32, "inertias") diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py new file mode 100644 index 000000000000..0af998188a89 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py @@ -0,0 +1,1022 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock rigid object collection asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.assets.__init__.py (which needs omni.timeline) + import importlib.util + from pathlib import Path + + _file = ( + Path(__file__).resolve().parents[3] + / "assets" + / "rigid_object_collection" + / "base_rigid_object_collection_data.py" + ) + _spec = importlib.util.spec_from_file_location("_base_rigid_object_collection_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseRigidObjectCollectionData = _mod.BaseRigidObjectCollectionData + + +class MockRigidObjectCollectionData(BaseRigidObjectCollectionData): + """Mock data container for rigid object collection asset. + + This class inherits from BaseRigidObjectCollectionData to get shorthand and deprecated properties + for free (e.g., body_pose_w, body_pos_w, com_pos_b, default_mass, etc.). + All tensor properties return zero warp arrays with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object collection data. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. + """ + super().__init__(root_view=None, num_objects=num_bodies, device=device) + self._create_buffers() + + self._num_instances = num_instances + self._num_bodies = num_bodies + + self.body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + + # Default states + self._default_body_pose: wp.array | None = None + self._default_body_vel: wp.array | None = None + + # Body state (link frame) + self._body_link_pose_w: wp.array | None = None + self._body_link_vel_w: wp.array | None = None + + # Body state (CoM frame) + self._body_com_pose_w: wp.array | None = None + self._body_com_vel_w: wp.array | None = None + self._body_com_acc_w: wp.array | None = None + self._body_com_pose_b: wp.array | None = None + + # Body properties + self._body_mass: wp.array | None = None + self._body_inertia: wp.array | None = None + + # ProxyArray caches + self._default_body_pose_ta: ProxyArray | None = None + self._default_body_vel_ta: ProxyArray | None = None + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + + def _identity_quat(self, *shape: int) -> wp.array: + """Create identity quaternion warp array (w, x, y, z) = (1, 0, 0, 0).""" + quat_np = np.zeros((*shape, 4), dtype=np.float32) + quat_np[..., 0] = 1.0 + return wp.array(quat_np, dtype=wp.float32, device=self.device) + + # -- Default state properties -- + + @property + def default_body_pose(self) -> ProxyArray: + """Default body poses. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._default_body_pose is None: + pose_np = np.zeros((self._num_instances, self._num_bodies, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout + self._default_body_pose = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._default_body_pose_ta = None + if self._default_body_pose_ta is None: + self._default_body_pose_ta = ProxyArray(self._default_body_pose) + return self._default_body_pose_ta + + @property + def default_body_vel(self) -> ProxyArray: + """Default body velocities. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._default_body_vel is None: + self._default_body_vel = wp.zeros( + (self._num_instances, self._num_bodies, 6), dtype=wp.float32, device=self.device + ).view(wp.spatial_vectorf) + self._default_body_vel_ta = None + if self._default_body_vel_ta is None: + self._default_body_vel_ta = ProxyArray(self._default_body_vel) + return self._default_body_vel_ta + + @property + def default_body_state(self) -> ProxyArray: + """Default body states. Shape: (N, num_bodies, 13).""" + pose = self.default_body_pose.torch + vel = self.default_body_vel.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link poses in world frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_link_pose_w is None: + pose_np = np.zeros((self._num_instances, self._num_bodies, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_link_pose_w = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_link_pose_w_ta = None + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocities in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_link_vel_w is None: + self._body_link_vel_w = wp.zeros( + (self._num_instances, self._num_bodies, 6), dtype=wp.float32, device=self.device + ).view(wp.spatial_vectorf) + self._body_link_vel_w_ta = None + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w) + return self._body_link_vel_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Body link states in world frame. Shape: (N, num_bodies, 13).""" + pose = self.body_link_pose_w.torch + vel = self.body_link_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_link_pos_w(self) -> ProxyArray: + """Body link positions. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_link_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_link_quat_w(self) -> ProxyArray: + """Body link orientations. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_link_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Body link linear velocities. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Body link angular velocities. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_link_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body CoM poses in world frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_com_pose_w is None: + self._body_com_pose_w = wp.clone(self.body_link_pose_w.warp, self.device) + self._body_com_pose_w_ta = None + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body CoM velocities in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_com_vel_w is None: + self._body_com_vel_w = wp.clone(self.body_link_vel_w.warp, self.device) + self._body_com_vel_w_ta = None + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(self._body_com_vel_w) + return self._body_com_vel_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Body CoM states in world frame. Shape: (N, num_bodies, 13).""" + pose = self.body_com_pose_w.torch + vel = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + @property + def body_com_acc_w(self) -> ProxyArray: + """Body CoM accelerations in world frame. dtype=wp.spatial_vectorf, shape: (N, num_bodies).""" + if self._body_com_acc_w is None: + self._body_com_acc_w = wp.zeros( + (self._num_instances, self._num_bodies, 6), dtype=wp.float32, device=self.device + ).view(wp.spatial_vectorf) + self._body_com_acc_w_ta = None + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Body CoM poses in body frame. dtype=wp.transformf, shape: (N, num_bodies).""" + if self._body_com_pose_b is None: + pose_np = np.zeros((self._num_instances, self._num_bodies, 7), dtype=np.float32) + pose_np[..., 6] = 1.0 # identity quat qw=1, transformf layout: (px,py,pz,qx,qy,qz,qw) + self._body_com_pose_b = wp.array(pose_np, dtype=wp.float32, device=self.device).view(wp.transformf) + self._body_com_pose_b_ta = None + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b) + return self._body_com_pose_b_ta + + # Sliced properties (zero-copy pointer arithmetic on transformf) + @property + def body_com_pos_w(self) -> ProxyArray: + """Body CoM positions. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_com_pose_w.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_w(self) -> ProxyArray: + """Body CoM orientations. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_com_pose_w.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Body CoM linear velocities. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Body CoM angular velocities. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_vel_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Body CoM linear accelerations. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_acc_w.warp + return ProxyArray(wp.array(ptr=v.ptr, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device)) + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Body CoM angular accelerations. Shape: (N, num_bodies), dtype=wp.vec3f.""" + v = self.body_com_acc_w.warp + return ProxyArray( + wp.array(ptr=v.ptr + 3 * 4, shape=v.shape, dtype=wp.vec3f, strides=v.strides, device=self.device) + ) + + @property + def body_com_pos_b(self) -> ProxyArray: + """Body CoM positions in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + t = self.body_com_pose_b.warp + return ProxyArray(wp.array(ptr=t.ptr, shape=t.shape, dtype=wp.vec3f, strides=t.strides, device=self.device)) + + @property + def body_com_quat_b(self) -> ProxyArray: + """Body CoM orientations in body frame. Shape: (N, num_bodies), dtype=wp.quatf.""" + t = self.body_com_pose_b.warp + return ProxyArray( + wp.array(ptr=t.ptr + 3 * 4, shape=t.shape, dtype=wp.quatf, strides=t.strides, device=self.device) + ) + + # -- Body properties -- + + @property + def body_mass(self) -> ProxyArray: + """Body masses. Shape: (N, num_bodies).""" + if self._body_mass is None: + self._body_mass = wp.ones((self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device) + self._body_mass_ta = None + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Body inertias (flattened 3x3). Shape: (N, num_bodies, 9).""" + if self._body_inertia is None: + inertia_np = np.zeros((self._num_instances, self._num_bodies, 9), dtype=np.float32) + inertia_np[..., 0] = 1.0 # Ixx + inertia_np[..., 4] = 1.0 # Iyy + inertia_np[..., 8] = 1.0 # Izz + self._body_inertia = wp.array(inertia_np, dtype=wp.float32, device=self.device) + self._body_inertia_ta = None + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> ProxyArray: + """Gravity projection on bodies. Shape: (N, num_bodies), dtype=wp.vec3f.""" + gravity_np = np.zeros((self._num_instances, self._num_bodies, 3), dtype=np.float32) + gravity_np[..., 2] = -1.0 + return ProxyArray(wp.array(gravity_np, dtype=wp.float32, device=self.device).view(wp.vec3f)) + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading per body. Shape: (N, num_bodies).""" + return ProxyArray(wp.zeros((self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device)) + + @property + def body_link_lin_vel_b(self) -> ProxyArray: + """Body link linear velocities in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.body_link_lin_vel_w.warp, self.device)) + + @property + def body_link_ang_vel_b(self) -> ProxyArray: + """Body link angular velocities in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.body_link_ang_vel_w.warp, self.device)) + + @property + def body_com_lin_vel_b(self) -> ProxyArray: + """Body CoM linear velocities in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.body_com_lin_vel_w.warp, self.device)) + + @property + def body_com_ang_vel_b(self) -> ProxyArray: + """Body CoM angular velocities in body frame. Shape: (N, num_bodies), dtype=wp.vec3f.""" + return ProxyArray(wp.clone(self.body_com_ang_vel_w.warp, self.device)) + + # -- Body state (abstract) -- + + @property + def body_state_w(self) -> ProxyArray: + """Body states (link pose + CoM velocity). Shape: (N, num_bodies, 13).""" + pose = self.body_link_pose_w.torch + vel = self.body_com_vel_w.torch + return ProxyArray(wp.from_torch(torch.cat([pose, vel], dim=-1))) + + # -- Shorthand properties (body_pose_w, body_pos_w, com_pos_b, etc.) -- + # Inherited from BaseRigidObjectCollectionData + + # -- Deprecated properties (default_mass, default_inertia, default_object_*, object_*, etc.) -- + # Inherited from BaseRigidObjectCollectionData + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Setters -- + + def set_default_body_pose(self, value: torch.Tensor) -> None: + self._default_body_pose = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._default_body_pose_ta = None + + def set_default_body_vel(self, value: torch.Tensor) -> None: + self._default_body_vel = wp.from_torch(value.to(self.device).contiguous()).view(wp.spatial_vectorf) + self._default_body_vel_ta = None + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_link_pose_w_ta = None + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.spatial_vectorf) + self._body_link_vel_w_ta = None + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_w_ta = None + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.spatial_vectorf) + self._body_com_vel_w_ta = None + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = wp.from_torch(value.to(self.device).contiguous()).view(wp.spatial_vectorf) + self._body_com_acc_w_ta = None + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = wp.from_torch(value.to(self.device).contiguous()).view(wp.transformf) + self._body_com_pose_b_ta = None + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = wp.from_torch(value.to(self.device).contiguous()) + self._body_mass_ta = None + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = wp.from_torch(value.to(self.device).contiguous()) + self._body_inertia_ta = None + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data.""" + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockRigidObjectCollection: + """Mock rigid object collection asset for testing without Isaac Sim. + + This class mimics the interface of BaseRigidObjectCollection for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object collection. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._device = device + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + + self._data = MockRigidObjectCollectionData( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=self._body_names, + device=device, + ) + + # -- Properties -- + + @property + def data(self) -> MockRigidObjectCollectionData: + """Data container for the rigid object collection.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of environment instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of bodies in the collection.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Body names.""" + return self._body_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def root_view(self) -> None: + """Returns None (no physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def num_objects(self) -> int: + return self.num_bodies + + @property + def object_names(self) -> list[str]: + return self.body_names + + # -- Core methods -- + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + object_ids: slice | torch.Tensor | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Reset rigid object collection state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update rigid object collection data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str]]: + """Find bodies by name regex patterns. + + Returns: + Tuple of (body_mask, body_names). + """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + # Create body mask + body_mask = torch.zeros(self._num_bodies, dtype=torch.bool, device=self._device) + body_mask[matched_indices] = True + + return body_mask, matched_names + + def find_objects(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + return self.find_bodies(name_keys, preserve_order) + + # -- State writer methods (no-op for mock) -- + + def write_body_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body states to simulation.""" + pass + + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM states to simulation.""" + pass + + def write_body_link_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link states to simulation.""" + pass + + def write_body_pose_to_sim( + self, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body poses to simulation.""" + pass + + def write_body_link_pose_to_sim( + self, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link poses to simulation.""" + pass + + def write_body_com_pose_to_sim( + self, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM poses to simulation.""" + pass + + def write_body_velocity_to_sim( + self, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body velocities to simulation.""" + pass + + def write_body_com_velocity_to_sim( + self, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM velocities to simulation.""" + pass + + def write_body_link_velocity_to_sim( + self, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link velocities to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor | wp.array, + torques: torch.Tensor | wp.array, + positions: torch.Tensor | wp.array | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + is_global: bool = False, + ) -> None: + """Set external forces and torques.""" + pass + + # -- Shape validation helpers -- + + _DTYPE_TO_TORCH_TRAILING_DIMS: dict[type, tuple[int, ...]] = { + wp.float32: (), + wp.int32: (), + wp.vec2f: (2,), + wp.vec3f: (3,), + wp.vec4f: (4,), + wp.transformf: (7,), + wp.spatial_vectorf: (6,), + } + + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" + ) -> None: + if __debug__: + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, float): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + offset = self._DTYPE_TO_TORCH_TRAILING_DIMS.get(dtype) + if offset is None: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), ( + f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + ) + + def _resolve_n_envs(self, env_ids): + if env_ids is None: + return self._num_instances + if isinstance(env_ids, (torch.Tensor, wp.array)): + return env_ids.shape[0] + return len(env_ids) + + def _resolve_n_items(self, item_ids, total): + if item_ids is None or item_ids == slice(None): + return total + if isinstance(item_ids, (torch.Tensor, wp.array)): + return item_ids.shape[0] + return len(item_ids) + + # -- Index/Mask methods -- + + # Body pose writers + + def write_body_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_poses, (n_e, n_b), wp.transformf, "body_poses") + + def write_body_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(body_poses, (self._num_instances, self._num_bodies), wp.transformf, "body_poses") + + def write_body_link_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_poses, (n_e, n_b), wp.transformf, "body_poses") + + def write_body_link_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(body_poses, (self._num_instances, self._num_bodies), wp.transformf, "body_poses") + + def write_body_com_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_poses, (n_e, n_b), wp.transformf, "body_poses") + + def write_body_com_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(body_poses, (self._num_instances, self._num_bodies), wp.transformf, "body_poses") + + # Body velocity writers + + def write_body_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_velocities, (n_e, n_b), wp.spatial_vectorf, "body_velocities") + + def write_body_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + body_velocities, (self._num_instances, self._num_bodies), wp.spatial_vectorf, "body_velocities" + ) + + def write_body_com_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_velocities, (n_e, n_b), wp.spatial_vectorf, "body_velocities") + + def write_body_com_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + body_velocities, (self._num_instances, self._num_bodies), wp.spatial_vectorf, "body_velocities" + ) + + def write_body_link_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(body_velocities, (n_e, n_b), wp.spatial_vectorf, "body_velocities") + + def write_body_link_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype( + body_velocities, (self._num_instances, self._num_bodies), wp.spatial_vectorf, "body_velocities" + ) + + # Body property setters + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(masses, (n_e, n_b), wp.float32, "masses") + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(masses, (self._num_instances, self._num_bodies), wp.float32, "masses") + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(coms, (n_e, n_b), wp.transformf, "coms") + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(coms, (self._num_instances, self._num_bodies), wp.transformf, "coms") + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + n_e = self._resolve_n_envs(env_ids) + n_b = self._resolve_n_items(body_ids, self._num_bodies) + self.assert_shape_and_dtype(inertias, (n_e, n_b, 9), wp.float32, "inertias") + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + self.assert_shape_and_dtype(inertias, (self._num_instances, self._num_bodies, 9), wp.float32, "inertias") + + # -- Deprecated object_* methods (no-op for mock) -- + + def write_object_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_com_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_link_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_link_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_com_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_com_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass + + def write_object_link_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py new file mode 100644 index 000000000000..3c8c36c3e7d6 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock sensor interfaces for testing without Isaac Sim.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.pyi b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.pyi new file mode 100644 index 000000000000..61ea41691655 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.pyi @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockContactSensor", + "MockContactSensorData", + "MockFrameTransformer", + "MockFrameTransformerData", + "MockImu", + "MockImuData", + "MockPva", + "MockPvaData", + "create_mock_contact_sensor", + "create_mock_foot_contact_sensor", + "create_mock_frame_transformer", + "create_mock_imu", + "create_mock_pva", +] + +from .mock_contact_sensor import MockContactSensor, MockContactSensorData +from .mock_frame_transformer import MockFrameTransformer, MockFrameTransformerData +from .mock_imu import MockImu, MockImuData +from .mock_pva import MockPva, MockPvaData +from .factories import ( + create_mock_contact_sensor, + create_mock_foot_contact_sensor, + create_mock_frame_transformer, + create_mock_imu, + create_mock_pva, +) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py new file mode 100644 index 000000000000..b16ca53f1128 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py @@ -0,0 +1,138 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating pre-configured mock sensors.""" + +from __future__ import annotations + +import torch + +from .mock_contact_sensor import MockContactSensor +from .mock_frame_transformer import MockFrameTransformer +from .mock_imu import MockImu +from .mock_pva import MockPva + + +def create_mock_imu( + num_instances: int = 1, + device: str = "cpu", +) -> MockImu: + """Create a mock IMU sensor with default configuration. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + + Returns: + Configured MockImu instance. + """ + return MockImu(num_instances=num_instances, device=device) + + +def create_mock_pva( + num_instances: int = 1, + device: str = "cpu", + gravity: tuple[float, float, float] = (0.0, 0.0, -1.0), +) -> MockPva: + """Create a mock PVA sensor with default configuration. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + gravity: Default gravity direction in body frame. + + Returns: + Configured MockPva instance. + """ + pva = MockPva(num_instances=num_instances, device=device) + pva.data.set_projected_gravity_b(torch.tensor([gravity], device=device).expand(num_instances, -1).clone()) + return pva + + +def create_mock_contact_sensor( + num_instances: int = 1, + num_bodies: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, +) -> MockContactSensor: + """Create a mock contact sensor with default configuration. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + body_names: Names of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + + Returns: + Configured MockContactSensor instance. + """ + return MockContactSensor( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=body_names, + device=device, + history_length=history_length, + num_filter_bodies=num_filter_bodies, + ) + + +def create_mock_foot_contact_sensor( + num_instances: int = 1, + num_feet: int = 4, + foot_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, +) -> MockContactSensor: + """Create a mock foot contact sensor for quadruped robots. + + Args: + num_instances: Number of environment instances. + num_feet: Number of feet (default 4 for quadruped). + foot_names: Names of feet. Defaults to FL, FR, RL, RR. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + + Returns: + Configured MockContactSensor instance for foot contacts. + """ + if foot_names is None: + foot_names = ["FL_foot", "FR_foot", "RL_foot", "RR_foot"][:num_feet] + + return MockContactSensor( + num_instances=num_instances, + num_bodies=num_feet, + body_names=foot_names, + device=device, + history_length=history_length, + ) + + +def create_mock_frame_transformer( + num_instances: int = 1, + num_target_frames: int = 1, + target_frame_names: list[str] | None = None, + device: str = "cpu", +) -> MockFrameTransformer: + """Create a mock frame transformer sensor with default configuration. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. + + Returns: + Configured MockFrameTransformer instance. + """ + return MockFrameTransformer( + num_instances=num_instances, + num_target_frames=num_target_frames, + target_frame_names=target_frame_names, + device=device, + ) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py new file mode 100644 index 000000000000..4c6c9ad8c0b1 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py @@ -0,0 +1,546 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock contact sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.sensors.contact_sensor.base_contact_sensor_data import BaseContactSensorData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.sensors.__init__.py (which needs omni) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "sensors" / "contact_sensor" / "base_contact_sensor_data.py" + _spec = importlib.util.spec_from_file_location("_base_contact_sensor_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseContactSensorData = _mod.BaseContactSensorData + + +class MockContactSensorData(BaseContactSensorData): + """Mock data container for contact sensor. + + This class mimics the interface of BaseContactSensorData for testing purposes. + All tensor properties return zero warp arrays with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, + ): + """Initialize mock contact sensor data. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self.device = device + self._history_length = history_length + self._num_filter_bodies = num_filter_bodies + + # Internal storage for mock data + self._pos_w: wp.array | None = None + self._quat_w: wp.array | None = None + self._net_forces_w: wp.array | None = None + self._net_forces_w_history: wp.array | None = None + self._force_matrix_w: wp.array | None = None + self._force_matrix_w_history: torch.Tensor | None = None # 5D, exceeds warp's 4D limit + self._contact_pos_w: wp.array | None = None + self._friction_forces_w: wp.array | None = None + self._last_air_time: wp.array | None = None + self._current_air_time: wp.array | None = None + self._last_contact_time: wp.array | None = None + self._current_contact_time: wp.array | None = None + + # ProxyArray caches + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._net_forces_w_ta: ProxyArray | None = None + self._net_forces_w_history_ta: ProxyArray | None = None + self._force_matrix_w_ta: ProxyArray | None = None + self._contact_pos_w_ta: ProxyArray | None = None + self._friction_forces_w_ta: ProxyArray | None = None + self._last_air_time_ta: ProxyArray | None = None + self._current_air_time_ta: ProxyArray | None = None + self._last_contact_time_ta: ProxyArray | None = None + self._current_contact_time_ta: ProxyArray | None = None + + # -- Properties -- + + @property + def pos_w(self) -> ProxyArray | None: + """Position of sensor origins in world frame. Shape: (N, B, 3).""" + if self._pos_w is None: + self._pos_w = wp.zeros( + shape=(self._num_instances, self._num_bodies, 3), dtype=wp.float32, device=self.device + ) + self._pos_w_ta = None + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray | None: + """Orientation (w, x, y, z) in world frame. Shape: (N, B, 4).""" + if self._quat_w is None: + # Default to identity quaternion + quat_np = np.zeros((self._num_instances, self._num_bodies, 4), dtype=np.float32) + quat_np[..., 0] = 1.0 + self._quat_w = wp.array(quat_np, dtype=wp.float32, device=self.device) + self._quat_w_ta = None + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def pose_w(self) -> ProxyArray | None: + """Pose in world frame (pos + quat). Shape: (N, B, 7).""" + pos_t = self.pos_w.torch + quat_t = self.quat_w.torch + pose_t = torch.cat([pos_t, quat_t], dim=-1) + return ProxyArray(wp.from_torch(pose_t.contiguous(), dtype=wp.float32)) + + @property + def net_forces_w(self) -> ProxyArray: + """Net normal contact forces in world frame. Shape: (N, B, 3).""" + if self._net_forces_w is None: + self._net_forces_w = wp.zeros( + shape=(self._num_instances, self._num_bodies, 3), dtype=wp.float32, device=self.device + ) + self._net_forces_w_ta = None + if self._net_forces_w_ta is None: + self._net_forces_w_ta = ProxyArray(self._net_forces_w) + return self._net_forces_w_ta + + @property + def net_forces_w_history(self) -> ProxyArray | None: + """History of net forces. Shape: (N, T, B, 3).""" + if self._history_length == 0: + return None + if self._net_forces_w_history is None: + self._net_forces_w_history = wp.zeros( + shape=(self._num_instances, self._history_length, self._num_bodies, 3), + dtype=wp.float32, + device=self.device, + ) + self._net_forces_w_history_ta = None + if self._net_forces_w_history_ta is None: + self._net_forces_w_history_ta = ProxyArray(self._net_forces_w_history) + return self._net_forces_w_history_ta + + @property + def force_matrix_w(self) -> ProxyArray | None: + """Filtered contact forces. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._force_matrix_w is None: + self._force_matrix_w = wp.zeros( + shape=(self._num_instances, self._num_bodies, self._num_filter_bodies, 3), + dtype=wp.float32, + device=self.device, + ) + self._force_matrix_w_ta = None + if self._force_matrix_w_ta is None: + self._force_matrix_w_ta = ProxyArray(self._force_matrix_w) + return self._force_matrix_w_ta + + @property + def force_matrix_w_history(self) -> torch.Tensor | None: + """History of filtered forces. Shape: (N, T, B, M, 3). + + Note: Returns torch.Tensor instead of wp.array due to 5D shape limitation in warp (max 4D). + """ + if self._history_length == 0 or self._num_filter_bodies == 0: + return None + if self._force_matrix_w_history is None: + return torch.zeros( + (self._num_instances, self._history_length, self._num_bodies, self._num_filter_bodies, 3), + dtype=torch.float32, + device=self.device, + ) + return self._force_matrix_w_history + + @property + def contact_pos_w(self) -> ProxyArray | None: + """Contact point positions in world frame. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._contact_pos_w is None: + self._contact_pos_w = wp.zeros( + shape=(self._num_instances, self._num_bodies, self._num_filter_bodies, 3), + dtype=wp.float32, + device=self.device, + ) + self._contact_pos_w_ta = None + if self._contact_pos_w_ta is None: + self._contact_pos_w_ta = ProxyArray(self._contact_pos_w) + return self._contact_pos_w_ta + + @property + def friction_forces_w(self) -> ProxyArray | None: + """Friction forces in world frame. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._friction_forces_w is None: + self._friction_forces_w = wp.zeros( + shape=(self._num_instances, self._num_bodies, self._num_filter_bodies, 3), + dtype=wp.float32, + device=self.device, + ) + self._friction_forces_w_ta = None + if self._friction_forces_w_ta is None: + self._friction_forces_w_ta = ProxyArray(self._friction_forces_w) + return self._friction_forces_w_ta + + @property + def last_air_time(self) -> ProxyArray: + """Time in air before last contact. Shape: (N, B).""" + if self._last_air_time is None: + self._last_air_time = wp.zeros( + shape=(self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device + ) + self._last_air_time_ta = None + if self._last_air_time_ta is None: + self._last_air_time_ta = ProxyArray(self._last_air_time) + return self._last_air_time_ta + + @property + def current_air_time(self) -> ProxyArray: + """Current time in air. Shape: (N, B).""" + if self._current_air_time is None: + self._current_air_time = wp.zeros( + shape=(self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device + ) + self._current_air_time_ta = None + if self._current_air_time_ta is None: + self._current_air_time_ta = ProxyArray(self._current_air_time) + return self._current_air_time_ta + + @property + def last_contact_time(self) -> ProxyArray: + """Time in contact before last detach. Shape: (N, B).""" + if self._last_contact_time is None: + self._last_contact_time = wp.zeros( + shape=(self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device + ) + self._last_contact_time_ta = None + if self._last_contact_time_ta is None: + self._last_contact_time_ta = ProxyArray(self._last_contact_time) + return self._last_contact_time_ta + + @property + def current_contact_time(self) -> ProxyArray: + """Current time in contact. Shape: (N, B).""" + if self._current_contact_time is None: + self._current_contact_time = wp.zeros( + shape=(self._num_instances, self._num_bodies), dtype=wp.float32, device=self.device + ) + self._current_contact_time_ta = None + if self._current_contact_time_ta is None: + self._current_contact_time_ta = ProxyArray(self._current_contact_time) + return self._current_contact_time_ta + + # -- Setters -- + + def set_pos_w(self, value: torch.Tensor) -> None: + """Set position in world frame.""" + self._pos_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._pos_w_ta = None + + def set_quat_w(self, value: torch.Tensor) -> None: + """Set orientation in world frame.""" + self._quat_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._quat_w_ta = None + + def set_net_forces_w(self, value: torch.Tensor) -> None: + """Set net contact forces.""" + self._net_forces_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._net_forces_w_ta = None + + def set_net_forces_w_history(self, value: torch.Tensor) -> None: + """Set net forces history.""" + self._net_forces_w_history = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._net_forces_w_history_ta = None + + def set_force_matrix_w(self, value: torch.Tensor) -> None: + """Set filtered contact forces.""" + self._force_matrix_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._force_matrix_w_ta = None + + def set_force_matrix_w_history(self, value: torch.Tensor) -> None: + """Set filtered forces history. + + Note: Stores as torch.Tensor instead of wp.array due to 5D shape limitation in warp (max 4D). + """ + self._force_matrix_w_history = value.to(self.device).contiguous() + + def set_contact_pos_w(self, value: torch.Tensor) -> None: + """Set contact point positions.""" + self._contact_pos_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._contact_pos_w_ta = None + + def set_friction_forces_w(self, value: torch.Tensor) -> None: + """Set friction forces.""" + self._friction_forces_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._friction_forces_w_ta = None + + def set_last_air_time(self, value: torch.Tensor) -> None: + """Set last air time.""" + self._last_air_time = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._last_air_time_ta = None + + def set_current_air_time(self, value: torch.Tensor) -> None: + """Set current air time.""" + self._current_air_time = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._current_air_time_ta = None + + def set_last_contact_time(self, value: torch.Tensor) -> None: + """Set last contact time.""" + self._last_contact_time = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._last_contact_time_ta = None + + def set_current_contact_time(self, value: torch.Tensor) -> None: + """Set current contact time.""" + self._current_contact_time = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._current_contact_time_ta = None + + def set_mock_data( + self, + pos_w: torch.Tensor | None = None, + quat_w: torch.Tensor | None = None, + net_forces_w: torch.Tensor | None = None, + net_forces_w_history: torch.Tensor | None = None, + force_matrix_w: torch.Tensor | None = None, + force_matrix_w_history: torch.Tensor | None = None, + contact_pos_w: torch.Tensor | None = None, + friction_forces_w: torch.Tensor | None = None, + last_air_time: torch.Tensor | None = None, + current_air_time: torch.Tensor | None = None, + last_contact_time: torch.Tensor | None = None, + current_contact_time: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + pos_w: Position in world frame. Shape: (N, B, 3). + quat_w: Orientation in world frame. Shape: (N, B, 4). + net_forces_w: Net contact forces. Shape: (N, B, 3). + net_forces_w_history: History of net forces. Shape: (N, T, B, 3). + force_matrix_w: Filtered contact forces. Shape: (N, B, M, 3). + force_matrix_w_history: History of filtered forces. Shape: (N, T, B, M, 3). + contact_pos_w: Contact point positions. Shape: (N, B, M, 3). + friction_forces_w: Friction forces. Shape: (N, B, M, 3). + last_air_time: Time in air before last contact. Shape: (N, B). + current_air_time: Current time in air. Shape: (N, B). + last_contact_time: Time in contact before last detach. Shape: (N, B). + current_contact_time: Current time in contact. Shape: (N, B). + """ + if pos_w is not None: + self.set_pos_w(pos_w) + if quat_w is not None: + self.set_quat_w(quat_w) + if net_forces_w is not None: + self.set_net_forces_w(net_forces_w) + if net_forces_w_history is not None: + self.set_net_forces_w_history(net_forces_w_history) + if force_matrix_w is not None: + self.set_force_matrix_w(force_matrix_w) + if force_matrix_w_history is not None: + self.set_force_matrix_w_history(force_matrix_w_history) + if contact_pos_w is not None: + self.set_contact_pos_w(contact_pos_w) + if friction_forces_w is not None: + self.set_friction_forces_w(friction_forces_w) + if last_air_time is not None: + self.set_last_air_time(last_air_time) + if current_air_time is not None: + self.set_current_air_time(current_air_time) + if last_contact_time is not None: + self.set_last_contact_time(last_contact_time) + if current_contact_time is not None: + self.set_current_contact_time(current_contact_time) + + +class MockContactSensor: + """Mock contact sensor for testing without Isaac Sim. + + This class mimics the interface of BaseContactSensor for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, + ): + """Initialize mock contact sensor. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + body_names: Names of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self._device = device + self._data = MockContactSensorData(num_instances, num_bodies, device, history_length, num_filter_bodies) + + # -- Properties -- + + @property + def data(self) -> MockContactSensorData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def num_sensors(self) -> int: + """Number of sensors (primary property).""" + return self._num_bodies + + @property + def num_bodies(self) -> int: + """Number of bodies with contact sensors (deprecated, use num_sensors).""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors.""" + return self._body_names + + @property + def contact_view(self) -> None: + """Returns None (no PhysX view in mock).""" + return None + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def find_sensors(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find sensors by name regex patterns (primary method). + + Args: + name_keys: Regex pattern(s) to match sensor/body names. + preserve_order: If True, preserve order of name_keys in output. + + Returns: + Tuple of (sensor_indices, sensor_names) matching the patterns. + """ + return self.find_bodies(name_keys, preserve_order) + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns (deprecated, use find_sensors). + + Args: + name_keys: Regex pattern(s) to match body names. + preserve_order: If True, preserve order of name_keys in output. + + Returns: + Tuple of (body_indices, body_names) matching the patterns. + """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> wp.array: + """Check which bodies established contact within dt seconds. + + Args: + dt: Time window to check for first contact. + abs_tol: Absolute tolerance for contact time comparison. + + Returns: + Boolean warp array of shape (N, B) indicating first contact. + """ + result = self._data.current_contact_time.torch < (dt + abs_tol) + return wp.from_torch(result) + + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> wp.array: + """Check which bodies broke contact within dt seconds. + + Args: + dt: Time window to check for first air. + abs_tol: Absolute tolerance for air time comparison. + + Returns: + Boolean warp array of shape (N, B) indicating first air. + """ + result = self._data.current_air_time.torch < (dt + abs_tol) + return wp.from_torch(result) + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + # No-op for mock - data persists until explicitly changed + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + # No-op for mock - data is set explicitly + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py new file mode 100644 index 000000000000..5d4b9cad6153 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py @@ -0,0 +1,350 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock frame transformer sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.sensors.frame_transformer.base_frame_transformer_data import BaseFrameTransformerData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.sensors.__init__.py (which needs omni) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "sensors" / "frame_transformer" / "base_frame_transformer_data.py" + _spec = importlib.util.spec_from_file_location("_base_frame_transformer_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseFrameTransformerData = _mod.BaseFrameTransformerData + + +class MockFrameTransformerData(BaseFrameTransformerData): + """Mock data container for frame transformer sensor. + + This class mimics the interface of BaseFrameTransformerData for testing purposes. + All tensor properties return zero warp arrays with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_target_frames: int, + target_frame_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock frame transformer data. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_target_frames = num_target_frames + self._target_frame_names = target_frame_names or [f"frame_{i}" for i in range(num_target_frames)] + self.device = device + + # Internal storage for mock data + self._source_pos_w: wp.array | None = None + self._source_quat_w: wp.array | None = None + self._target_pos_w: wp.array | None = None + self._target_quat_w: wp.array | None = None + self._target_pos_source: wp.array | None = None + self._target_quat_source: wp.array | None = None + + # ProxyArray caches + self._source_pos_w_ta: ProxyArray | None = None + self._source_quat_w_ta: ProxyArray | None = None + self._target_pos_w_ta: ProxyArray | None = None + self._target_quat_w_ta: ProxyArray | None = None + self._target_pos_source_ta: ProxyArray | None = None + self._target_quat_source_ta: ProxyArray | None = None + + # -- Properties -- + + @property + def target_frame_names(self) -> list[str]: + """Names of target frames.""" + return self._target_frame_names + + @property + def source_pos_w(self) -> ProxyArray: + """Position of source frame in world frame. Shape: (N, 3).""" + if self._source_pos_w is None: + self._source_pos_w = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._source_pos_w_ta = None + if self._source_pos_w_ta is None: + self._source_pos_w_ta = ProxyArray(self._source_pos_w) + return self._source_pos_w_ta + + @property + def source_quat_w(self) -> ProxyArray: + """Orientation (w, x, y, z) of source frame in world frame. Shape: (N, 4).""" + if self._source_quat_w is None: + quat_np = np.zeros((self._num_instances, 4), dtype=np.float32) + quat_np[:, 0] = 1.0 + self._source_quat_w = wp.array(quat_np, dtype=wp.float32, device=self.device) + self._source_quat_w_ta = None + if self._source_quat_w_ta is None: + self._source_quat_w_ta = ProxyArray(self._source_quat_w) + return self._source_quat_w_ta + + @property + def source_pose_w(self) -> ProxyArray: + """Pose of source frame in world frame. Shape: (N, 7).""" + pos_t = self.source_pos_w.torch + quat_t = self.source_quat_w.torch + pose_t = torch.cat([pos_t, quat_t], dim=-1) + return ProxyArray(wp.from_torch(pose_t.contiguous(), dtype=wp.float32)) + + @property + def target_pos_w(self) -> ProxyArray: + """Position of target frames in world frame. Shape: (N, M, 3).""" + if self._target_pos_w is None: + self._target_pos_w = wp.zeros( + shape=(self._num_instances, self._num_target_frames, 3), dtype=wp.float32, device=self.device + ) + self._target_pos_w_ta = None + if self._target_pos_w_ta is None: + self._target_pos_w_ta = ProxyArray(self._target_pos_w) + return self._target_pos_w_ta + + @property + def target_quat_w(self) -> ProxyArray: + """Orientation (w, x, y, z) of target frames in world frame. Shape: (N, M, 4).""" + if self._target_quat_w is None: + quat_np = np.zeros((self._num_instances, self._num_target_frames, 4), dtype=np.float32) + quat_np[..., 0] = 1.0 + self._target_quat_w = wp.array(quat_np, dtype=wp.float32, device=self.device) + self._target_quat_w_ta = None + if self._target_quat_w_ta is None: + self._target_quat_w_ta = ProxyArray(self._target_quat_w) + return self._target_quat_w_ta + + @property + def target_pose_w(self) -> ProxyArray: + """Pose of target frames in world frame. Shape: (N, M, 7).""" + pos_t = self.target_pos_w.torch + quat_t = self.target_quat_w.torch + pose_t = torch.cat([pos_t, quat_t], dim=-1) + return ProxyArray(wp.from_torch(pose_t.contiguous(), dtype=wp.float32)) + + @property + def target_pos_source(self) -> ProxyArray: + """Position of target frames relative to source frame. Shape: (N, M, 3).""" + if self._target_pos_source is None: + self._target_pos_source = wp.zeros( + shape=(self._num_instances, self._num_target_frames, 3), dtype=wp.float32, device=self.device + ) + self._target_pos_source_ta = None + if self._target_pos_source_ta is None: + self._target_pos_source_ta = ProxyArray(self._target_pos_source) + return self._target_pos_source_ta + + @property + def target_quat_source(self) -> ProxyArray: + """Orientation (w, x, y, z) of target frames relative to source frame. Shape: (N, M, 4).""" + if self._target_quat_source is None: + quat_np = np.zeros((self._num_instances, self._num_target_frames, 4), dtype=np.float32) + quat_np[..., 0] = 1.0 + self._target_quat_source = wp.array(quat_np, dtype=wp.float32, device=self.device) + self._target_quat_source_ta = None + if self._target_quat_source_ta is None: + self._target_quat_source_ta = ProxyArray(self._target_quat_source) + return self._target_quat_source_ta + + @property + def target_pose_source(self) -> ProxyArray: + """Pose of target frames relative to source frame. Shape: (N, M, 7).""" + pos_t = self.target_pos_source.torch + quat_t = self.target_quat_source.torch + pose_t = torch.cat([pos_t, quat_t], dim=-1) + return ProxyArray(wp.from_torch(pose_t.contiguous(), dtype=wp.float32)) + + # -- Setters -- + + def set_source_pos_w(self, value: torch.Tensor) -> None: + """Set source position in world frame.""" + self._source_pos_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._source_pos_w_ta = None + + def set_source_quat_w(self, value: torch.Tensor) -> None: + """Set source orientation in world frame.""" + self._source_quat_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._source_quat_w_ta = None + + def set_target_pos_w(self, value: torch.Tensor) -> None: + """Set target positions in world frame.""" + self._target_pos_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._target_pos_w_ta = None + + def set_target_quat_w(self, value: torch.Tensor) -> None: + """Set target orientations in world frame.""" + self._target_quat_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._target_quat_w_ta = None + + def set_target_pos_source(self, value: torch.Tensor) -> None: + """Set target positions relative to source.""" + self._target_pos_source = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._target_pos_source_ta = None + + def set_target_quat_source(self, value: torch.Tensor) -> None: + """Set target orientations relative to source.""" + self._target_quat_source = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._target_quat_source_ta = None + + def set_mock_data( + self, + source_pos_w: torch.Tensor | None = None, + source_quat_w: torch.Tensor | None = None, + target_pos_w: torch.Tensor | None = None, + target_quat_w: torch.Tensor | None = None, + target_pos_source: torch.Tensor | None = None, + target_quat_source: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + source_pos_w: Source position in world frame. Shape: (N, 3). + source_quat_w: Source orientation in world frame. Shape: (N, 4). + target_pos_w: Target positions in world frame. Shape: (N, M, 3). + target_quat_w: Target orientations in world frame. Shape: (N, M, 4). + target_pos_source: Target positions relative to source. Shape: (N, M, 3). + target_quat_source: Target orientations relative to source. Shape: (N, M, 4). + """ + if source_pos_w is not None: + self.set_source_pos_w(source_pos_w) + if source_quat_w is not None: + self.set_source_quat_w(source_quat_w) + if target_pos_w is not None: + self.set_target_pos_w(target_pos_w) + if target_quat_w is not None: + self.set_target_quat_w(target_quat_w) + if target_pos_source is not None: + self.set_target_pos_source(target_pos_source) + if target_quat_source is not None: + self.set_target_quat_source(target_quat_source) + + +class MockFrameTransformer: + """Mock frame transformer sensor for testing without Isaac Sim. + + This class mimics the interface of BaseFrameTransformer for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_target_frames: int, + target_frame_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock frame transformer sensor. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_target_frames = num_target_frames + self._target_frame_names = target_frame_names or [f"frame_{i}" for i in range(num_target_frames)] + self._device = device + self._data = MockFrameTransformerData(num_instances, num_target_frames, self._target_frame_names, device) + + # -- Properties -- + + @property + def data(self) -> MockFrameTransformerData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of target bodies being tracked.""" + return self._num_target_frames + + @property + def body_names(self) -> list[str]: + """Names of target bodies being tracked.""" + return self._target_frame_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find target frames by name regex patterns. + + Args: + name_keys: Regex pattern(s) to match frame names. + preserve_order: If True, preserve order of name_keys in output. + + Returns: + Tuple of (frame_indices, frame_names) matching the patterns. + """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._target_frame_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._target_frame_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + # No-op for mock - data persists until explicitly changed + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + # No-op for mock - data is set explicitly + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py new file mode 100644 index 000000000000..f32f0a5f567a --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py @@ -0,0 +1,162 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock IMU sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +from collections.abc import Sequence + +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.sensors.imu.base_imu_data import BaseImuData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.sensors.__init__.py (which needs omni) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "sensors" / "imu" / "base_imu_data.py" + _spec = importlib.util.spec_from_file_location("_base_imu_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BaseImuData = _mod.BaseImuData + + +class MockImuData(BaseImuData): + """Mock data container for IMU sensor. + + This class mimics the interface of BaseImuData for testing purposes. + The IMU only provides angular velocity and linear acceleration. + """ + + def __init__(self, num_instances: int, device: str = "cpu"): + """Initialize mock IMU data. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self.device = device + + self._ang_vel_b: wp.array | None = None + self._lin_acc_b: wp.array | None = None + + # ProxyArray caches + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None + + # -- Properties -- + + @property + def ang_vel_b(self) -> ProxyArray: + """Angular velocity in IMU body frame [rad/s]. Shape: (N, 3).""" + if self._ang_vel_b is None: + self._ang_vel_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._ang_vel_b_ta = None + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration in IMU body frame [m/s^2]. Shape: (N, 3).""" + if self._lin_acc_b is None: + self._lin_acc_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._lin_acc_b_ta = None + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + # -- Setters -- + + def set_ang_vel_b(self, value: torch.Tensor) -> None: + """Set angular velocity in body frame.""" + self._ang_vel_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._ang_vel_b_ta = None + + def set_lin_acc_b(self, value: torch.Tensor) -> None: + """Set linear acceleration in body frame.""" + self._lin_acc_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._lin_acc_b_ta = None + + def set_mock_data( + self, + ang_vel_b: torch.Tensor | None = None, + lin_acc_b: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + ang_vel_b: Angular velocity in body frame [rad/s]. Shape: (N, 3). + lin_acc_b: Linear acceleration in body frame [m/s^2]. Shape: (N, 3). + """ + if ang_vel_b is not None: + self.set_ang_vel_b(ang_vel_b) + if lin_acc_b is not None: + self.set_lin_acc_b(lin_acc_b) + + +class MockImu: + """Mock IMU sensor for testing without Isaac Sim. + + This class mimics the interface of BaseImu for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + device: str = "cpu", + ): + """Initialize mock IMU sensor. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._device = device + self._data = MockImuData(num_instances, device) + + # -- Properties -- + + @property + def data(self) -> MockImuData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_pva.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_pva.py new file mode 100644 index 000000000000..0fb24b551b1f --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_pva.py @@ -0,0 +1,283 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock PVA sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +try: + from isaaclab.sensors.pva.base_pva_data import BasePvaData +except (ImportError, ModuleNotFoundError): + # Direct import bypassing isaaclab.sensors.__init__.py (which needs omni) + import importlib.util + from pathlib import Path + + _file = Path(__file__).resolve().parents[3] / "sensors" / "pva" / "base_pva_data.py" + _spec = importlib.util.spec_from_file_location("_base_pva_data", str(_file)) + _mod = importlib.util.module_from_spec(_spec) + _spec.loader.exec_module(_mod) + BasePvaData = _mod.BasePvaData + + +class MockPvaData(BasePvaData): + """Mock data container for PVA sensor. + + This class mimics the interface of BasePvaData for testing purposes. + All tensor properties return zero warp arrays with correct shapes if not explicitly set. + """ + + def __init__(self, num_instances: int, device: str = "cpu"): + """Initialize mock PVA data. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self.device = device + + # Internal storage for mock data + self._pos_w: wp.array | None = None + self._quat_w: wp.array | None = None + self._projected_gravity_b: wp.array | None = None + self._lin_vel_b: wp.array | None = None + self._ang_vel_b: wp.array | None = None + self._lin_acc_b: wp.array | None = None + self._ang_acc_b: wp.array | None = None + + # ProxyArray caches + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._projected_gravity_b_ta: ProxyArray | None = None + self._lin_vel_b_ta: ProxyArray | None = None + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None + self._ang_acc_b_ta: ProxyArray | None = None + + # -- Properties -- + + @property + def pos_w(self) -> ProxyArray: + """Position of sensor origin in world frame. Shape: (N, 3).""" + if self._pos_w is None: + self._pos_w = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._pos_w_ta = None + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) in world frame. Shape: (N, 4).""" + if self._quat_w is None: + # Default to identity quaternion (x, y, z, w) = (0, 0, 0, 1) + quat_np = np.zeros((self._num_instances, 4), dtype=np.float32) + quat_np[:, 3] = 1.0 # w component is at index 3 in XYZW format + self._quat_w = wp.array(quat_np, dtype=wp.float32, device=self.device) + self._quat_w_ta = None + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def pose_w(self) -> ProxyArray: + """Pose in world frame (pos + quat). Shape: (N, 7).""" + pos_t = self.pos_w.torch + quat_t = self.quat_w.torch + pose_t = torch.cat([pos_t, quat_t], dim=-1) + return ProxyArray(wp.from_torch(pose_t.contiguous(), dtype=wp.float32)) + + @property + def projected_gravity_b(self) -> ProxyArray: + """Gravity direction in PVA body frame. Shape: (N, 3).""" + if self._projected_gravity_b is None: + # Default gravity pointing down in body frame + gravity_np = np.zeros((self._num_instances, 3), dtype=np.float32) + gravity_np[:, 2] = -1.0 + self._projected_gravity_b = wp.array(gravity_np, dtype=wp.float32, device=self.device) + self._projected_gravity_b_ta = None + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b) + return self._projected_gravity_b_ta + + @property + def lin_vel_b(self) -> ProxyArray: + """Linear velocity in PVA body frame. Shape: (N, 3).""" + if self._lin_vel_b is None: + self._lin_vel_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._lin_vel_b_ta = None + if self._lin_vel_b_ta is None: + self._lin_vel_b_ta = ProxyArray(self._lin_vel_b) + return self._lin_vel_b_ta + + @property + def ang_vel_b(self) -> ProxyArray: + """Angular velocity in PVA body frame. Shape: (N, 3).""" + if self._ang_vel_b is None: + self._ang_vel_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._ang_vel_b_ta = None + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration in PVA body frame. Shape: (N, 3).""" + if self._lin_acc_b is None: + self._lin_acc_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._lin_acc_b_ta = None + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + @property + def ang_acc_b(self) -> ProxyArray: + """Angular acceleration in PVA body frame. Shape: (N, 3).""" + if self._ang_acc_b is None: + self._ang_acc_b = wp.zeros(shape=(self._num_instances, 3), dtype=wp.float32, device=self.device) + self._ang_acc_b_ta = None + if self._ang_acc_b_ta is None: + self._ang_acc_b_ta = ProxyArray(self._ang_acc_b) + return self._ang_acc_b_ta + + # -- Setters -- + + def set_pos_w(self, value: torch.Tensor) -> None: + """Set position in world frame.""" + self._pos_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._pos_w_ta = None + + def set_quat_w(self, value: torch.Tensor) -> None: + """Set orientation in world frame.""" + self._quat_w = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._quat_w_ta = None + + def set_projected_gravity_b(self, value: torch.Tensor) -> None: + """Set projected gravity in body frame.""" + self._projected_gravity_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._projected_gravity_b_ta = None + + def set_lin_vel_b(self, value: torch.Tensor) -> None: + """Set linear velocity in body frame.""" + self._lin_vel_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._lin_vel_b_ta = None + + def set_ang_vel_b(self, value: torch.Tensor) -> None: + """Set angular velocity in body frame.""" + self._ang_vel_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._ang_vel_b_ta = None + + def set_lin_acc_b(self, value: torch.Tensor) -> None: + """Set linear acceleration in body frame.""" + self._lin_acc_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._lin_acc_b_ta = None + + def set_ang_acc_b(self, value: torch.Tensor) -> None: + """Set angular acceleration in body frame.""" + self._ang_acc_b = wp.from_torch(value.to(self.device).contiguous(), dtype=wp.float32) + self._ang_acc_b_ta = None + + def set_mock_data( + self, + pos_w: torch.Tensor | None = None, + quat_w: torch.Tensor | None = None, + projected_gravity_b: torch.Tensor | None = None, + lin_vel_b: torch.Tensor | None = None, + ang_vel_b: torch.Tensor | None = None, + lin_acc_b: torch.Tensor | None = None, + ang_acc_b: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + pos_w: Position in world frame. Shape: (N, 3). + quat_w: Orientation (x, y, z, w) in world frame. Shape: (N, 4). + projected_gravity_b: Gravity direction in body frame. Shape: (N, 3). + lin_vel_b: Linear velocity in body frame. Shape: (N, 3). + ang_vel_b: Angular velocity in body frame. Shape: (N, 3). + lin_acc_b: Linear acceleration in body frame. Shape: (N, 3). + ang_acc_b: Angular acceleration in body frame. Shape: (N, 3). + """ + if pos_w is not None: + self.set_pos_w(pos_w) + if quat_w is not None: + self.set_quat_w(quat_w) + if projected_gravity_b is not None: + self.set_projected_gravity_b(projected_gravity_b) + if lin_vel_b is not None: + self.set_lin_vel_b(lin_vel_b) + if ang_vel_b is not None: + self.set_ang_vel_b(ang_vel_b) + if lin_acc_b is not None: + self.set_lin_acc_b(lin_acc_b) + if ang_acc_b is not None: + self.set_ang_acc_b(ang_acc_b) + + +class MockPva: + """Mock PVA sensor for testing without Isaac Sim. + + This class mimics the interface of BasePva for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + device: str = "cpu", + ): + """Initialize mock PVA sensor. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._device = device + self._data = MockPvaData(num_instances, device) + + # -- Properties -- + + @property + def data(self) -> MockPvaData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py new file mode 100644 index 000000000000..df94a8cbf74f --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for creating and using mock interfaces.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.pyi b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.pyi new file mode 100644 index 000000000000..0bc81a9cf886 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockArticulationBuilder", + "MockSensorBuilder", + "MockWrenchComposer", + "patch_articulation", + "patch_sensor", +] + +from .mock_generator import MockArticulationBuilder, MockSensorBuilder +from .mock_wrench_composer import MockWrenchComposer +from .patching import patch_articulation, patch_sensor diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py new file mode 100644 index 000000000000..830751b58c40 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py @@ -0,0 +1,357 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for generating custom mock objects.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from ..assets import MockArticulation + from ..sensors import MockContactSensor, MockFrameTransformer, MockImu, MockPva + + +class MockArticulationBuilder: + """Builder class for creating custom MockArticulation instances. + + Example: + >>> robot = ( + ... MockArticulationBuilder() + ... .with_joints(["hip", "knee", "ankle"], default_pos=[0.0, 0.5, -0.5]) + ... .with_bodies(["base", "thigh", "shin", "foot"]) + ... .with_fixed_base(True) + ... .with_num_instances(8) + ... .build() + ... ) + """ + + def __init__(self): + """Initialize the builder with default values.""" + self._num_instances = 1 + self._joint_names: list[str] = [] + self._body_names: list[str] = [] + self._is_fixed_base = False + self._device = "cpu" + self._default_joint_pos: list[float] | None = None + self._default_joint_vel: list[float] | None = None + self._joint_pos_limits: tuple[float, float] | None = None + self._num_fixed_tendons = 0 + self._num_spatial_tendons = 0 + self._fixed_tendon_names: list[str] | None = None + self._spatial_tendon_names: list[str] | None = None + + def with_num_instances(self, num_instances: int) -> MockArticulationBuilder: + """Set the number of articulation instances. + + Args: + num_instances: Number of instances. + + Returns: + Self for method chaining. + """ + self._num_instances = num_instances + return self + + def with_joints( + self, + joint_names: list[str], + default_pos: list[float] | None = None, + default_vel: list[float] | None = None, + ) -> MockArticulationBuilder: + """Set joint configuration. + + Args: + joint_names: Names of the joints. + default_pos: Default joint positions. + default_vel: Default joint velocities. + + Returns: + Self for method chaining. + """ + self._joint_names = joint_names + self._default_joint_pos = default_pos + self._default_joint_vel = default_vel + return self + + def with_bodies(self, body_names: list[str]) -> MockArticulationBuilder: + """Set body configuration. + + Args: + body_names: Names of the bodies. + + Returns: + Self for method chaining. + """ + self._body_names = body_names + return self + + def with_fixed_base(self, is_fixed: bool) -> MockArticulationBuilder: + """Set whether the articulation has a fixed base. + + Args: + is_fixed: True for fixed base, False for floating base. + + Returns: + Self for method chaining. + """ + self._is_fixed_base = is_fixed + return self + + def with_device(self, device: str) -> MockArticulationBuilder: + """Set the device for tensor allocation. + + Args: + device: Device string (e.g., "cpu", "cuda:0"). + + Returns: + Self for method chaining. + """ + self._device = device + return self + + def with_joint_limits(self, lower: float, upper: float) -> MockArticulationBuilder: + """Set uniform joint position limits for all joints. + + Args: + lower: Lower joint limit. + upper: Upper joint limit. + + Returns: + Self for method chaining. + """ + self._joint_pos_limits = (lower, upper) + return self + + def with_fixed_tendons(self, tendon_names: list[str]) -> MockArticulationBuilder: + """Set fixed tendon configuration. + + Args: + tendon_names: Names of fixed tendons. + + Returns: + Self for method chaining. + """ + self._fixed_tendon_names = tendon_names + self._num_fixed_tendons = len(tendon_names) + return self + + def with_spatial_tendons(self, tendon_names: list[str]) -> MockArticulationBuilder: + """Set spatial tendon configuration. + + Args: + tendon_names: Names of spatial tendons. + + Returns: + Self for method chaining. + """ + self._spatial_tendon_names = tendon_names + self._num_spatial_tendons = len(tendon_names) + return self + + def build(self) -> MockArticulation: + """Build the MockArticulation instance. + + Returns: + Configured MockArticulation instance. + """ + from ..assets import MockArticulation + + num_joints = len(self._joint_names) if self._joint_names else 1 + num_bodies = len(self._body_names) if self._body_names else num_joints + 1 + + robot = MockArticulation( + num_instances=self._num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=self._joint_names or None, + body_names=self._body_names or None, + is_fixed_base=self._is_fixed_base, + num_fixed_tendons=self._num_fixed_tendons, + num_spatial_tendons=self._num_spatial_tendons, + fixed_tendon_names=self._fixed_tendon_names, + spatial_tendon_names=self._spatial_tendon_names, + device=self._device, + ) + + # Set default joint positions + if self._default_joint_pos is not None: + default_pos = torch.tensor( + [self._default_joint_pos] * self._num_instances, + device=self._device, + ) + robot.data.set_default_joint_pos(default_pos) + robot.data.set_joint_pos(default_pos) + + # Set default joint velocities + if self._default_joint_vel is not None: + default_vel = torch.tensor( + [self._default_joint_vel] * self._num_instances, + device=self._device, + ) + robot.data.set_default_joint_vel(default_vel) + robot.data.set_joint_vel(default_vel) + + # Set joint limits + if self._joint_pos_limits is not None: + limits = torch.zeros(self._num_instances, num_joints, 2, device=self._device) + limits[..., 0] = self._joint_pos_limits[0] + limits[..., 1] = self._joint_pos_limits[1] + robot.data.set_joint_pos_limits(limits) + + return robot + + +class MockSensorBuilder: + """Builder class for creating custom mock sensor instances. + + Example: + >>> sensor = ( + ... MockSensorBuilder("contact") + ... .with_num_instances(4) + ... .with_bodies(["FL_foot", "FR_foot", "RL_foot", "RR_foot"]) + ... .with_device("cuda") + ... .build() + ... ) + """ + + def __init__(self, sensor_type: str): + """Initialize the builder. + + Args: + sensor_type: Type of sensor ("contact", "imu", "pva", or "frame_transformer"). + """ + if sensor_type not in ("contact", "imu", "pva", "frame_transformer"): + raise ValueError(f"Unknown sensor type: {sensor_type}") + self._sensor_type = sensor_type + self._num_instances = 1 + self._device = "cpu" + + # Contact sensor specific + self._body_names: list[str] = [] + self._history_length = 0 + self._num_filter_bodies = 0 + + # Frame transformer specific + self._target_frame_names: list[str] = [] + + def with_num_instances(self, num_instances: int) -> MockSensorBuilder: + """Set the number of sensor instances. + + Args: + num_instances: Number of instances. + + Returns: + Self for method chaining. + """ + self._num_instances = num_instances + return self + + def with_device(self, device: str) -> MockSensorBuilder: + """Set the device for tensor allocation. + + Args: + device: Device string. + + Returns: + Self for method chaining. + """ + self._device = device + return self + + def with_bodies(self, body_names: list[str]) -> MockSensorBuilder: + """Set body names (for contact sensor). + + Args: + body_names: Names of bodies with contact sensors. + + Returns: + Self for method chaining. + """ + self._body_names = body_names + return self + + def with_history_length(self, length: int) -> MockSensorBuilder: + """Set history buffer length (for contact sensor). + + Args: + length: Length of history buffer. + + Returns: + Self for method chaining. + """ + self._history_length = length + return self + + def with_filter_bodies(self, num_filter_bodies: int) -> MockSensorBuilder: + """Set number of filter bodies (for contact sensor). + + Args: + num_filter_bodies: Number of filter bodies for force matrix. + + Returns: + Self for method chaining. + """ + self._num_filter_bodies = num_filter_bodies + return self + + def with_target_frames(self, frame_names: list[str]) -> MockSensorBuilder: + """Set target frame names (for frame transformer). + + Args: + frame_names: Names of target frames. + + Returns: + Self for method chaining. + """ + self._target_frame_names = frame_names + return self + + def build(self) -> MockContactSensor | MockImu | MockPva | MockFrameTransformer: + """Build the mock sensor instance. + + Returns: + Configured mock sensor instance. + """ + if self._sensor_type == "contact": + from ..sensors import MockContactSensor + + num_bodies = len(self._body_names) if self._body_names else 1 + return MockContactSensor( + num_instances=self._num_instances, + num_bodies=num_bodies, + body_names=self._body_names or None, + device=self._device, + history_length=self._history_length, + num_filter_bodies=self._num_filter_bodies, + ) + elif self._sensor_type == "imu": + from ..sensors import MockImu + + return MockImu( + num_instances=self._num_instances, + device=self._device, + ) + elif self._sensor_type == "pva": + from ..sensors import MockPva + + return MockPva( + num_instances=self._num_instances, + device=self._device, + ) + elif self._sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + num_frames = len(self._target_frame_names) if self._target_frame_names else 1 + return MockFrameTransformer( + num_instances=self._num_instances, + num_target_frames=num_frames, + target_frame_names=self._target_frame_names or None, + device=self._device, + ) + else: + raise ValueError(f"Unknown sensor type: {self._sensor_type}") diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py new file mode 100644 index 000000000000..a20dbeb01274 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py @@ -0,0 +1,388 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock WrenchComposer for testing and benchmarking. + +This module provides a mock implementation of the WrenchComposer class that can be used +in testing and benchmarking without requiring the full simulation environment. +""" + +from __future__ import annotations + +import warnings +from typing import TYPE_CHECKING + +import torch +import warp as wp + +if TYPE_CHECKING: + from isaaclab.assets import BaseArticulation, BaseRigidObject, BaseRigidObjectCollection + + +class MockWrenchComposer: + """Mock WrenchComposer matching the dual-buffer API for testing. + + This class provides a mock implementation of WrenchComposer that matches the real interface + but does not launch Warp kernels. It can be used for testing and benchmarking asset classes + without requiring the full simulation environment. + + The mock maintains the 5 input buffers and 2 output buffers matching the real WrenchComposer, + and sets the active flag when forces/torques are added. The ``compose_to_body_frame()`` method + simply copies the local buffers to the output buffers (since mock assets typically use identity + transforms). + """ + + def __init__(self, asset: BaseArticulation | BaseRigidObject | BaseRigidObjectCollection) -> None: + """Initialize the mock wrench composer. + + Args: + asset: Asset to use (Articulation, RigidObject, or RigidObjectCollection). + """ + self.num_envs = asset.num_instances + if hasattr(asset, "num_bodies"): + self.num_bodies = asset.num_bodies + else: + raise ValueError(f"Unsupported asset type: {asset.__class__.__name__}") + self.device = asset.device + self._asset = asset + + # -- Tracking flags -- + self._active: bool = False + self._dirty: bool = False + + shape = (self.num_envs, self.num_bodies) + + # -- 5 input buffers -- + self._global_force_w = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + self._global_torque_w = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + self._global_force_at_com_w = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + self._local_force_b = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + self._local_torque_b = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + + # -- 2 output buffers -- + self._out_force_b = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + self._out_torque_b = wp.zeros(shape, dtype=wp.vec3f, device=self.device) + + # Create index arrays + self._ALL_ENV_INDICES_WP = wp.from_torch( + torch.arange(self.num_envs, dtype=torch.int32, device=self.device), dtype=wp.int32 + ) + self._ALL_BODY_INDICES_WP = wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 + ) + self._ALL_ENV_INDICES_TORCH = wp.to_torch(self._ALL_ENV_INDICES_WP) + self._ALL_BODY_INDICES_TORCH = wp.to_torch(self._ALL_BODY_INDICES_WP) + + # ------------------------------------------------------------------ + # Properties + # ------------------------------------------------------------------ + + @property + def active(self) -> bool: + """Whether any forces or torques have been written since the last full reset.""" + return self._active + + # -- Input buffer accessors (read-only) -- + + @property + def global_force_w(self) -> wp.array: + """Positional global forces buffer. Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``.""" + return self._global_force_w + + @property + def global_torque_w(self) -> wp.array: + """Global torques buffer (about world origin). Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``.""" + return self._global_torque_w + + @property + def global_force_at_com_w(self) -> wp.array: + """Global forces at CoM buffer (no positional torque). Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``.""" + return self._global_force_at_com_w + + @property + def local_force_b(self) -> wp.array: + """Body-frame forces buffer. Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``.""" + return self._local_force_b + + @property + def local_torque_b(self) -> wp.array: + """Body-frame torques buffer. Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``.""" + return self._local_torque_b + + # -- Output buffer accessors -- + + @property + def out_force_b(self) -> wp.array: + """Composed force in the body (link) frame. Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. + + Triggers composition from input buffers if dirty. + """ + self._ensure_composed() + return self._out_force_b + + @property + def out_torque_b(self) -> wp.array: + """Composed torque in the body (link) frame. Shape ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. + + Triggers composition from input buffers if dirty. + """ + self._ensure_composed() + return self._out_torque_b + + # -- Legacy composed_force / composed_torque properties for backward compat -- + + @property + def composed_force(self) -> wp.array: + """Composed force at the body's link frame. + + .. deprecated:: 4.5.33 + Use :attr:`out_force_b` instead. + """ + warnings.warn( + "The property 'composed_force' is deprecated. Use 'out_force_b' instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.out_force_b + + @property + def composed_torque(self) -> wp.array: + """Composed torque at the body's link frame. + + .. deprecated:: 4.5.33 + Use :attr:`out_torque_b` instead. + """ + warnings.warn( + "The property 'composed_torque' is deprecated. Use 'out_torque_b' instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.out_torque_b + + # ------------------------------------------------------------------ + # Composition + # ------------------------------------------------------------------ + + def compose_to_body_frame(self): + """Mock composition: sums all input buffers to output assuming identity transforms. + + Under identity transforms (no rotation), global-frame values equal body-frame values, + so all five input buffers are summed directly into the two output buffers. + """ + # Zero output buffers + self._out_force_b.zero_() + self._out_torque_b.zero_() + + # Use torch views for the accumulation + out_force_torch = wp.to_torch(self._out_force_b) + out_torque_torch = wp.to_torch(self._out_torque_b) + + # Sum all force contributions (identity: no rotation needed) + out_force_torch.add_(wp.to_torch(self._local_force_b)) + out_force_torch.add_(wp.to_torch(self._global_force_w)) + out_force_torch.add_(wp.to_torch(self._global_force_at_com_w)) + + # Sum all torque contributions + out_torque_torch.add_(wp.to_torch(self._local_torque_b)) + out_torque_torch.add_(wp.to_torch(self._global_torque_w)) + + self._dirty = False + + # ------------------------------------------------------------------ + # Buffer merging + # ------------------------------------------------------------------ + + def add_raw_buffers_from(self, other: MockWrenchComposer): + """Element-wise add another composer's five input buffers into this one. + + Args: + other: Another :class:`MockWrenchComposer` whose input buffers will be added into this one. + """ + # Use torch views for element-wise addition + wp.to_torch(self._global_force_w).add_(wp.to_torch(other._global_force_w)) + wp.to_torch(self._global_torque_w).add_(wp.to_torch(other._global_torque_w)) + wp.to_torch(self._global_force_at_com_w).add_(wp.to_torch(other._global_force_at_com_w)) + wp.to_torch(self._local_force_b).add_(wp.to_torch(other._local_force_b)) + wp.to_torch(self._local_torque_b).add_(wp.to_torch(other._local_torque_b)) + + if other._active: + self._active = True + self._dirty = True + + # ------------------------------------------------------------------ + # Add / Set methods + # ------------------------------------------------------------------ + + def add_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Add forces and torques (deprecated, use add_forces_and_torques_index). + + Args: + forces: Forces. (num_envs, num_bodies, 3). Defaults to None. + torques: Torques. (num_envs, num_bodies, 3). Defaults to None. + positions: Positions. (num_envs, num_bodies, 3). Defaults to None. + body_ids: Body ids. Defaults to None (all bodies). + env_ids: Environment ids. Defaults to None (all environments). + is_global: Whether the forces and torques are applied in the global frame. Defaults to False. + """ + self.add_forces_and_torques_index( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + env_ids=env_ids, + is_global=is_global, + ) + + def set_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Set forces and torques (deprecated, use set_forces_and_torques_index). + + Args: + forces: Forces. (num_envs, num_bodies, 3). Defaults to None. + torques: Torques. (num_envs, num_bodies, 3). Defaults to None. + positions: Positions. (num_envs, num_bodies, 3). Defaults to None. + body_ids: Body ids. Defaults to None (all bodies). + env_ids: Environment ids. Defaults to None (all environments). + is_global: Whether the forces and torques are applied in the global frame. Defaults to False. + """ + self.set_forces_and_torques_index( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + env_ids=env_ids, + is_global=is_global, + ) + + # -- Index/Mask method variants -- + + def add_forces_and_torques_index( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Add forces and torques by index (mock - sets active/dirty flags).""" + if forces is not None or torques is not None: + self._active = True + self._dirty = True + + def add_forces_and_torques_mask( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_mask: wp.array | torch.Tensor | None = None, + env_mask: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Add forces and torques by mask (mock - sets active/dirty flags).""" + if forces is not None or torques is not None: + self._active = True + self._dirty = True + + def set_forces_and_torques_index( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Set forces and torques by index (mock - sets active/dirty flags).""" + if forces is not None or torques is not None: + self._active = True + self._dirty = True + + def set_forces_and_torques_mask( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_mask: wp.array | torch.Tensor | None = None, + env_mask: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Set forces and torques by mask (mock - sets active/dirty flags).""" + if forces is not None or torques is not None: + self._active = True + self._dirty = True + + # ------------------------------------------------------------------ + # Reset + # ------------------------------------------------------------------ + + def reset(self, env_ids: wp.array | torch.Tensor | None = None, env_mask: wp.array | None = None) -> None: + """Reset all 7 buffers (5 input + 2 output) and clear all flags. + + Args: + env_ids: Environment ids to reset. Defaults to None (all environments). + env_mask: Environment mask to reset. Defaults to None (all environments). + """ + if env_ids is None and env_mask is None: + # Full reset: zero all 7 buffers and clear flags + self._global_force_w.zero_() + self._global_torque_w.zero_() + self._global_force_at_com_w.zero_() + self._local_force_b.zero_() + self._local_torque_b.zero_() + self._out_force_b.zero_() + self._out_torque_b.zero_() + self._active = False + self._dirty = False + else: + # For partial reset, just zero the specified environments across all 7 buffers + if isinstance(env_ids, torch.Tensor): + indices = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + elif isinstance(env_ids, list): + indices = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + indices = env_ids + + # Zero all 7 buffers for the specified environments + # Use torch views for the indexing operation + for buf in [ + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + self._out_force_b, + self._out_torque_b, + ]: + buf_torch = wp.to_torch(buf) + if isinstance(env_ids, torch.Tensor): + buf_torch[env_ids.long()] = 0.0 + else: + idx_torch = wp.to_torch(indices).long() + buf_torch[idx_torch] = 0.0 + + # ------------------------------------------------------------------ + # Internal helpers + # ------------------------------------------------------------------ + + def _ensure_composed(self): + """Compose input buffers into output buffers if dirty.""" + if self._dirty: + self.compose_to_body_frame() diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py new file mode 100644 index 000000000000..c330e932f070 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py @@ -0,0 +1,274 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for patching real classes with mock implementations in tests.""" + +from __future__ import annotations + +import functools +from collections.abc import Callable, Generator +from contextlib import contextmanager +from typing import Any, TypeVar +from unittest.mock import patch + +F = TypeVar("F", bound=Callable[..., Any]) + + +@contextmanager +def patch_articulation( + target: str, + num_instances: int = 1, + num_joints: int = 12, + num_bodies: int = 13, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", + **kwargs: Any, +) -> Generator[Any, None, None]: + """Context manager for patching an articulation class with MockArticulation. + + Args: + target: The target to patch (e.g., "my_module.Articulation"). + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for MockArticulation. + + Yields: + The mock articulation class. + + Example: + >>> with patch_articulation("my_module.robot", num_joints=12) as MockRobot: + ... robot = MockRobot() + ... robot.data.set_joint_pos(torch.zeros(1, 12)) + ... result = my_function_using_robot(robot) + """ + from ..assets import MockArticulation + + def create_mock(*args: Any, **create_kwargs: Any) -> MockArticulation: + # Merge configuration with any runtime kwargs + return MockArticulation( + num_instances=create_kwargs.get("num_instances", num_instances), + num_joints=create_kwargs.get("num_joints", num_joints), + num_bodies=create_kwargs.get("num_bodies", num_bodies), + joint_names=create_kwargs.get("joint_names", joint_names), + body_names=create_kwargs.get("body_names", body_names), + is_fixed_base=create_kwargs.get("is_fixed_base", is_fixed_base), + device=create_kwargs.get("device", device), + **{k: v for k, v in kwargs.items() if k not in create_kwargs}, + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_sensor( + target: str, + sensor_type: str, + num_instances: int = 1, + device: str = "cpu", + **kwargs: Any, +) -> Generator[Any, None, None]: + """Context manager for patching a sensor class with a mock sensor. + + Args: + target: The target to patch (e.g., "my_module.ContactSensor"). + sensor_type: Type of sensor ("contact", "imu", "pva", or "frame_transformer"). + num_instances: Number of sensor instances. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for the mock sensor. + + Yields: + The mock sensor class. + + Example: + >>> with patch_sensor("my_env.ContactSensor", "contact", num_bodies=4) as MockSensor: + ... sensor = MockSensor() + ... sensor.data.set_net_forces_w(torch.randn(1, 4, 3)) + ... result = my_function(sensor) + """ + if sensor_type == "contact": + from ..sensors import MockContactSensor + + def create_mock(*args: Any, **create_kwargs: Any) -> MockContactSensor: + return MockContactSensor( + num_instances=create_kwargs.get("num_instances", num_instances), + num_bodies=create_kwargs.get("num_bodies", kwargs.get("num_bodies", 1)), + body_names=create_kwargs.get("body_names", kwargs.get("body_names")), + device=create_kwargs.get("device", device), + history_length=create_kwargs.get("history_length", kwargs.get("history_length", 0)), + num_filter_bodies=create_kwargs.get("num_filter_bodies", kwargs.get("num_filter_bodies", 0)), + ) + + elif sensor_type == "imu": + from ..sensors import MockImu + + def create_mock(*args: Any, **create_kwargs: Any) -> MockImu: + return MockImu( + num_instances=create_kwargs.get("num_instances", num_instances), + device=create_kwargs.get("device", device), + ) + + elif sensor_type == "pva": + from ..sensors import MockPva + + def create_mock(*args: Any, **create_kwargs: Any) -> MockPva: + return MockPva( + num_instances=create_kwargs.get("num_instances", num_instances), + device=create_kwargs.get("device", device), + ) + + elif sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + def create_mock(*args: Any, **create_kwargs: Any) -> MockFrameTransformer: + return MockFrameTransformer( + num_instances=create_kwargs.get("num_instances", num_instances), + num_target_frames=create_kwargs.get("num_target_frames", kwargs.get("num_target_frames", 1)), + target_frame_names=create_kwargs.get("target_frame_names", kwargs.get("target_frame_names")), + device=create_kwargs.get("device", device), + ) + + else: + raise ValueError(f"Unknown sensor type: {sensor_type}") + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +def mock_articulation( + num_instances: int = 1, + num_joints: int = 12, + num_bodies: int = 13, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", + **kwargs: Any, +) -> Callable[[F], F]: + """Decorator for injecting a MockArticulation into a test function. + + The mock articulation is passed as the first argument to the decorated function. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for MockArticulation. + + Returns: + Decorator function. + + Example: + >>> @mock_articulation(num_joints=12, num_bodies=13) + ... def test_my_function(mock_robot): + ... mock_robot.data.set_joint_pos(torch.zeros(1, 12)) + ... result = compute_something(mock_robot) + ... assert result.shape == (1, 12) + """ + from ..assets import MockArticulation + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args: Any, **wrap_kwargs: Any) -> Any: + mock = MockArticulation( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=is_fixed_base, + device=device, + **kwargs, + ) + return func(mock, *args, **wrap_kwargs) + + return wrapper # type: ignore + + return decorator + + +def mock_sensor( + sensor_type: str, + num_instances: int = 1, + device: str = "cpu", + **kwargs: Any, +) -> Callable[[F], F]: + """Decorator for injecting a mock sensor into a test function. + + The mock sensor is passed as the first argument to the decorated function. + + Args: + sensor_type: Type of sensor ("contact", "imu", "pva", or "frame_transformer"). + num_instances: Number of sensor instances. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for the mock sensor. + + Returns: + Decorator function. + + Example: + >>> @mock_sensor("contact", num_instances=4, num_bodies=4) + ... def test_contact_reward(mock_contact): + ... mock_contact.data.set_net_forces_w(torch.randn(4, 4, 3)) + ... reward = compute_contact_reward(mock_contact) + ... assert reward.shape == (4,) + """ + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args: Any, **wrap_kwargs: Any) -> Any: + if sensor_type == "contact": + from ..sensors import MockContactSensor + + mock = MockContactSensor( + num_instances=num_instances, + num_bodies=kwargs.get("num_bodies", 1), + body_names=kwargs.get("body_names"), + device=device, + history_length=kwargs.get("history_length", 0), + num_filter_bodies=kwargs.get("num_filter_bodies", 0), + ) + elif sensor_type == "imu": + from ..sensors import MockImu + + mock = MockImu( + num_instances=num_instances, + device=device, + ) + elif sensor_type == "pva": + from ..sensors import MockPva + + mock = MockPva( + num_instances=num_instances, + device=device, + ) + elif sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + mock = MockFrameTransformer( + num_instances=num_instances, + num_target_frames=kwargs.get("num_target_frames", 1), + target_frame_names=kwargs.get("target_frame_names"), + device=device, + ) + else: + raise ValueError(f"Unknown sensor type: {sensor_type}") + + return func(mock, *args, **wrap_kwargs) + + return wrapper # type: ignore + + return decorator diff --git a/source/isaaclab/isaaclab/ui/widgets/__init__.py b/source/isaaclab/isaaclab/ui/widgets/__init__.py index cfd6de31fa2b..e14e0f6d52c5 100644 --- a/source/isaaclab/isaaclab/ui/widgets/__init__.py +++ b/source/isaaclab/isaaclab/ui/widgets/__init__.py @@ -3,7 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -from .image_plot import ImagePlot -from .line_plot import LiveLinePlot -from .manager_live_visualizer import ManagerLiveVisualizer -from .ui_visualizer_base import UiVisualizerBase +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/ui/widgets/__init__.pyi b/source/isaaclab/isaaclab/ui/widgets/__init__.pyi new file mode 100644 index 000000000000..09a383f0c7e4 --- /dev/null +++ b/source/isaaclab/isaaclab/ui/widgets/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ImagePlot", + "LiveLinePlot", + "ManagerLiveVisualizer", + "UiVisualizerBase", +] + +from .image_plot import ImagePlot +from .line_plot import LiveLinePlot +from .manager_live_visualizer import ManagerLiveVisualizer +from .ui_visualizer_base import UiVisualizerBase diff --git a/source/isaaclab/isaaclab/ui/widgets/manager_live_visualizer.py b/source/isaaclab/isaaclab/ui/widgets/manager_live_visualizer.py index 0b20b10dabce..cc0a2186c89e 100644 --- a/source/isaaclab/isaaclab/ui/widgets/manager_live_visualizer.py +++ b/source/isaaclab/isaaclab/ui/widgets/manager_live_visualizer.py @@ -12,8 +12,6 @@ import numpy -import omni.kit.app - from isaaclab.managers import ManagerBase from isaaclab.sim import SimulationContext from isaaclab.utils import configclass @@ -196,6 +194,8 @@ def _set_debug_vis_impl(self, debug_vis: bool): if debug_vis: # if enabled create a subscriber for the post update event if it doesn't exist if not hasattr(self, "_debug_vis_handle") or self._debug_vis_handle is None: + import omni.kit.app + app_interface = omni.kit.app.get_app_interface() self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) diff --git a/source/isaaclab/isaaclab/ui/xr_widgets/__init__.py b/source/isaaclab/isaaclab/ui/xr_widgets/__init__.py index e0b341c8d2d5..e14e0f6d52c5 100644 --- a/source/isaaclab/isaaclab/ui/xr_widgets/__init__.py +++ b/source/isaaclab/isaaclab/ui/xr_widgets/__init__.py @@ -2,6 +2,7 @@ # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause -from .instruction_widget import hide_instruction, show_instruction, update_instruction -from .scene_visualization import DataCollector, TriggerType, VisualizationManager, XRVisualization -from .teleop_visualization_manager import TeleopVisualizationManager + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/ui/xr_widgets/__init__.pyi b/source/isaaclab/isaaclab/ui/xr_widgets/__init__.pyi new file mode 100644 index 000000000000..65f05633095e --- /dev/null +++ b/source/isaaclab/isaaclab/ui/xr_widgets/__init__.pyi @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "hide_instruction", + "show_instruction", + "update_instruction", + "DataCollector", + "TriggerType", + "VisualizationManager", + "XRVisualization", + "TeleopVisualizationManager", +] + +from .instruction_widget import hide_instruction, show_instruction, update_instruction +from .scene_visualization import DataCollector, TriggerType, VisualizationManager, XRVisualization +from .teleop_visualization_manager import TeleopVisualizationManager diff --git a/source/isaaclab/isaaclab/ui/xr_widgets/instruction_widget.py b/source/isaaclab/isaaclab/ui/xr_widgets/instruction_widget.py index 7d6fe00d7f6e..96a69fce563e 100644 --- a/source/isaaclab/isaaclab/ui/xr_widgets/instruction_widget.py +++ b/source/isaaclab/isaaclab/ui/xr_widgets/instruction_widget.py @@ -3,20 +3,26 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import asyncio import functools import textwrap -from typing import Any, TypeAlias +from typing import TYPE_CHECKING, Any import omni.kit.commands import omni.ui as ui -from omni.kit.xr.scene_view.utils import UiContainer, WidgetComponent -from omni.kit.xr.scene_view.utils.spatial_source import SpatialSource -from pxr import Gf +import omni.usd import isaaclab.sim as sim_utils -Vec3Type: TypeAlias = Gf.Vec3f | Gf.Vec3d +if TYPE_CHECKING: + from typing import TypeAlias + + from omni.kit.scene_view.xr_utils import UiContainer + from pxr import Gf + + Vec3Type: TypeAlias = Gf.Vec3f | Gf.Vec3d camera_facing_widget_container = {} camera_facing_widget_timers = {} @@ -122,7 +128,7 @@ def compute_widget_dimensions( def show_instruction( text: str, prim_path_source: str | None = None, - translation: Gf.Vec3d = Gf.Vec3d(0, 0, 0), + translation: Vec3Type = (0, 0, 0), display_duration: float | None = 5.0, max_width: float = 2.5, min_width: float = 1.0, # Prevent widget from being too narrow. @@ -151,6 +157,15 @@ def show_instruction( Returns: UiContainer | None: The container that owns the instruction widget, or ``None`` if creation failed. """ + + try: + import carb + from omni.kit.scene_view.xr import XRSceneView + from omni.kit.scene_view.xr_utils import SpatialSource, UiContainer, WidgetComponent + except Exception as e: + print(f"Failed to import XR widget dependencies: {e}") + return None + global camera_facing_widget_container, camera_facing_widget_timers # Check if widget exists and has different text @@ -167,12 +182,6 @@ def show_instruction( container.root.clear() del camera_facing_widget_container[target_prim_path] - # Obtain stage handle - stage = sim_utils.get_current_stage() - # Clean up existing widget - if stage.GetPrimAtPath(target_prim_path).IsValid(): - sim_utils.delete_prim(target_prim_path) - width, height, wrapped_text = compute_widget_dimensions(text, font_size, max_width, min_width) # Create the widget component. @@ -184,18 +193,66 @@ def show_instruction( widget_args=[wrapped_text, {"font_size": font_size, "color": text_color}, width], ) - copied_prim = omni.kit.commands.execute( - "CopyPrim", - path_from=prim_path_source, - path_to=target_prim_path, - exclusive_select=False, - copy_to_introducing_layer=False, - ) + # Copy source to target_prim_path so the widget has a controlled transform. Prefer CopyFabricPrim + # when Fabric is on (preserves transform for XR); else CopyPrim. Clear target on Fabric first so + # CopyFabricPrim creates at target_prim_path (it otherwise uses get_stage_next_free_path). + copy_succeeded = False + if prim_path_source: + try: + use_fabric = carb.settings.get_settings().get_as_bool("/app/useFabricSceneDelegate") + except Exception: + use_fabric = False + if use_fabric: + try: + import usdrt + + rt_stage = usdrt.Usd.Stage.Attach(omni.usd.get_context().get_stage_id()) + if rt_stage.GetPrimAtPath(usdrt.Sdf.Path(target_prim_path)): + omni.kit.commands.execute("DeleteFabricPrims", paths=[target_prim_path]) + success, _ = omni.kit.commands.execute( + "CopyFabricPrim", + path_from=prim_path_source, + path_to=target_prim_path, + exclusive_select=False, + ) + copy_succeeded = success + except Exception: + pass + if not copy_succeeded: + # Obtain stage handle + stage = sim_utils.get_current_stage() + # Clean up existing widget + existing = stage.GetPrimAtPath(target_prim_path) + if existing and existing.IsValid(): + sim_utils.delete_prim(target_prim_path) + + success, _ = omni.kit.commands.execute( + "CopyPrim", + path_from=prim_path_source, + path_to=target_prim_path, + exclusive_select=False, + copy_to_introducing_layer=False, + ) + copy_succeeded = success space_stack = [] - if copied_prim is not None: + if copy_succeeded: space_stack.append(SpatialSource.new_prim_path_source(target_prim_path)) + # Unselect the copied instruction prim so it is not left in selection (CopyFabricPrim/CopyPrim + # select it). Otherwise on env reset the manipulator can receive invalid prim paths and raise + # "Accessed invalid null prim". Only unselect our path so the user's selection is preserved. + if copy_succeeded: + try: + sel = omni.usd.get_context().get_selection() + for source_type in ( + omni.usd.Selection.SourceType.FABRIC, + omni.usd.Selection.SourceType.USD, + ): + sel.set_prim_path_selected(target_prim_path, False, False, False, True, source_type) + except Exception: + pass + space_stack.extend( [ SpatialSource.new_translation_source(translation), @@ -205,6 +262,7 @@ def show_instruction( # Create the UI container with the widget. container = UiContainer( + XRSceneView, widget_component, space_stack=space_stack, ) diff --git a/source/isaaclab/isaaclab/utils/__init__.py b/source/isaaclab/isaaclab/utils/__init__.py index 1295715857f4..fe57e45acd63 100644 --- a/source/isaaclab/isaaclab/utils/__init__.py +++ b/source/isaaclab/isaaclab/utils/__init__.py @@ -5,15 +5,8 @@ """Sub-package containing utilities for common operations and helper functions.""" -from .array import * -from .buffers import * from .configclass import configclass -from .dict import * -from .interpolation import * -from .logger import * -from .mesh import * -from .modifiers import * -from .string import * -from .timer import Timer -from .types import * -from .version import * + +import lazy_loader as lazy + +__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__) diff --git a/source/isaaclab/isaaclab/utils/__init__.pyi b/source/isaaclab/isaaclab/utils/__init__.pyi new file mode 100644 index 000000000000..e9f5e201ca21 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/__init__.pyi @@ -0,0 +1,110 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Timer", + "TensorData", + "TENSOR_TYPES", + "TENSOR_TYPE_CONVERSIONS", + "convert_to_torch", + "CircularBuffer", + "DelayBuffer", + "TimestampedBuffer", + "TimestampedBufferWarp", + "class_to_dict", + "update_class_from_dict", + "dict_to_md5_hash", + "convert_dict_to_backend", + "update_dict", + "replace_slices_with_strings", + "replace_strings_with_slices", + "print_dict", + "LinearInterpolation", + "configure_logging", + "ColoredFormatter", + "RateLimitFilter", + "create_trimesh_from_geom_mesh", + "create_trimesh_from_geom_shape", + "convert_faces_to_triangles", + "PRIMITIVE_MESH_TYPES", + "ModifierCfg", + "ModifierBase", + "DigitalFilter", + "DigitalFilterCfg", + "Integrator", + "IntegratorCfg", + "bias", + "clip", + "scale", + "to_camel_case", + "to_snake_case", + "string_to_slice", + "is_lambda_expression", + "callable_to_string", + "string_to_callable", + "ResolvableString", + "resolve_matching_names", + "clear_resolve_matching_names_cache", + "resolve_matching_names_values", + "find_unique_string_name", + "find_root_prim_path_from_regex", + "ArticulationActions", + "has_kit", + "get_isaac_sim_version", + "compare_versions", + "configclass", + "resolve_cfg_presets", + "checked_apply", +] + +from .timer import Timer +from .array import TensorData, TENSOR_TYPES, TENSOR_TYPE_CONVERSIONS, convert_to_torch +from .buffers import CircularBuffer, DelayBuffer, TimestampedBuffer, TimestampedBufferWarp +from .dict import ( + class_to_dict, + update_class_from_dict, + dict_to_md5_hash, + convert_dict_to_backend, + update_dict, + replace_slices_with_strings, + replace_strings_with_slices, + print_dict, +) +from .interpolation import LinearInterpolation +from .logger import configure_logging, ColoredFormatter, RateLimitFilter +from .mesh import ( + create_trimesh_from_geom_mesh, + create_trimesh_from_geom_shape, + convert_faces_to_triangles, + PRIMITIVE_MESH_TYPES, +) +from .modifiers import ( + ModifierCfg, + ModifierBase, + DigitalFilter, + DigitalFilterCfg, + Integrator, + IntegratorCfg, + bias, + clip, + scale, +) +from .string import ( + to_camel_case, + to_snake_case, + string_to_slice, + is_lambda_expression, + callable_to_string, + string_to_callable, + ResolvableString, + resolve_matching_names, + clear_resolve_matching_names_cache, + resolve_matching_names_values, + find_unique_string_name, + find_root_prim_path_from_regex, +) +from .types import ArticulationActions +from .version import has_kit, get_isaac_sim_version, compare_versions +from .configclass import checked_apply, configclass, resolve_cfg_presets diff --git a/source/isaaclab/isaaclab/utils/assets.py b/source/isaaclab/isaaclab/utils/assets.py index 22c5141587f6..79ccdefced4a 100644 --- a/source/isaaclab/isaaclab/utils/assets.py +++ b/source/isaaclab/isaaclab/utils/assets.py @@ -13,30 +13,42 @@ .. _Omniverse Nucleus: https://docs.omniverse.nvidia.com/nucleus/latest/overview/overview.html """ -import asyncio import io import logging import os +import posixpath +import re import tempfile -import time from typing import Literal +from urllib.parse import urlparse -import carb -import omni.client - -# import logger logger = logging.getLogger(__name__) -NUCLEUS_ASSET_ROOT_DIR = carb.settings.get_settings().get("/persistent/isaac/asset_root/cloud") +_UDIM_RE = re.compile(r"", re.IGNORECASE) + + +def _parse_kit_asset_root() -> str: + """Parse ``persistent.isaac.asset_root.cloud`` from ``apps/isaaclab.python.kit``.""" + _ISAACLAB_ROOT = os.path.join(os.path.dirname(__file__), *([".."] * 4)) + kit_path = os.path.normpath(os.path.join(_ISAACLAB_ROOT, "apps", "isaaclab.python.kit")) + with open(kit_path) as f: + for line in reversed(f.readlines()): # read from the last line since it's the last setting defined + m = re.match(r'\s*persistent\.isaac\.asset_root\.cloud\s*=\s*"([^"]*)"', line) + if m: + return m.group(1) + return "" + + +NUCLEUS_ASSET_ROOT_DIR: str = _parse_kit_asset_root() """Path to the root directory on the Nucleus Server.""" -NVIDIA_NUCLEUS_DIR = f"{NUCLEUS_ASSET_ROOT_DIR}/NVIDIA" +NVIDIA_NUCLEUS_DIR: str = f"{NUCLEUS_ASSET_ROOT_DIR}/NVIDIA" """Path to the root directory on the NVIDIA Nucleus Server.""" -ISAAC_NUCLEUS_DIR = f"{NUCLEUS_ASSET_ROOT_DIR}/Isaac" +ISAAC_NUCLEUS_DIR: str = f"{NUCLEUS_ASSET_ROOT_DIR}/Isaac" """Path to the ``Isaac`` directory on the NVIDIA Nucleus Server.""" -ISAACLAB_NUCLEUS_DIR = f"{ISAAC_NUCLEUS_DIR}/IsaacLab" +ISAACLAB_NUCLEUS_DIR: str = f"{ISAAC_NUCLEUS_DIR}/IsaacLab" """Path to the ``Isaac/IsaacLab`` directory on the NVIDIA Nucleus Server.""" @@ -55,14 +67,16 @@ def check_file_path(path: str) -> Literal[0, 1, 2]: """ if os.path.isfile(path): return 1 - # we need to convert backslash to forward slash on Windows for omni.client API - elif omni.client.stat(path.replace(os.sep, "/"))[0] == omni.client.Result.OK: + + import omni.client # noqa: PLC0415 + + if omni.client.stat(path.replace(os.sep, "/"))[0] == omni.client.Result.OK: return 2 else: return 0 -def retrieve_file_path(path: str, download_dir: str | None = None, force_download: bool = True) -> str: +def retrieve_file_path(path: str, download_dir: str | None = None, force_download: bool = False) -> str: """Retrieves the path to a file on the Nucleus Server or locally. If the file exists locally, then the absolute path to the file is returned. @@ -74,7 +88,7 @@ def retrieve_file_path(path: str, download_dir: str | None = None, force_downloa download_dir: The directory where the file should be downloaded. Defaults to None, in which case the file is downloaded to the system's temporary directory. force_download: Whether to force download the file from the Nucleus Server. This will overwrite - the local file if it exists. Defaults to True. + the local file if it exists. Defaults to False. Returns: The path to the file on the local machine. @@ -89,6 +103,8 @@ def retrieve_file_path(path: str, download_dir: str | None = None, force_downloa if file_status == 1: return os.path.abspath(path) elif file_status == 2: + import omni.client # noqa: PLC0415 + # resolve download directory if download_dir is None: download_dir = tempfile.gettempdir() @@ -97,16 +113,52 @@ def retrieve_file_path(path: str, download_dir: str | None = None, force_downloa # create download directory if it does not exist if not os.path.exists(download_dir): os.makedirs(download_dir) - # download file in temp directory using os - file_name = os.path.basename(omni.client.break_url(path.replace(os.sep, "/")).path) - target_path = os.path.join(download_dir, file_name) - # check if file already exists locally - if not os.path.isfile(target_path) or force_download: - # copy file to local machine - result = omni.client.copy(path.replace(os.sep, "/"), target_path, omni.client.CopyBehavior.OVERWRITE) - if result != omni.client.Result.OK and force_download: - raise RuntimeError(f"Unable to copy file: '{path}'. Is the Nucleus Server running?") - return os.path.abspath(target_path) + # recursive download: mirror remote tree under download_dir + remote_url = path.replace(os.sep, "/") + to_visit = [remote_url] + visited = set() + local_root = None + + while to_visit: + cur_url = to_visit.pop() + if cur_url in visited: + continue + visited.add(cur_url) + + # UDIM textures use a placeholder (e.g. texture..png) that does not + # correspond to a real file. Expand to individual tile URLs by probing tile numbers + # starting at 1001; UDIM tiles are contiguous so stop at the first missing tile. + if _UDIM_RE.search(cur_url): + for tile in range(1001, 1101): + tile_url = _UDIM_RE.sub(str(tile), cur_url) + if omni.client.stat(tile_url.replace(os.sep, "/"))[0] == omni.client.Result.OK: + if tile_url not in visited: + to_visit.append(tile_url) + else: + break + continue + + cur_rel = urlparse(cur_url).path.lstrip("/") + target_path = os.path.join(download_dir, cur_rel) + os.makedirs(os.path.dirname(target_path), exist_ok=True) + + if not os.path.isfile(target_path) or force_download: + result = omni.client.copy(cur_url, target_path, omni.client.CopyBehavior.OVERWRITE) + if result != omni.client.Result.OK and force_download: + raise RuntimeError(f"Unable to copy file: '{cur_url}'. Is the Nucleus Server running?") + + if local_root is None: + local_root = target_path + + # recurse into USD dependencies (sublayers, references, payloads, textures, etc.) + suffix = os.path.splitext(target_path)[1].lower() + if suffix in {".usd", ".usda", ".usdc", ".usdz"}: + for ref in _find_usd_dependencies(target_path): + ref_url = _resolve_reference_url(cur_url, ref) + if ref_url and ref_url not in visited: + to_visit.append(ref_url) + + return os.path.abspath(local_root) else: raise FileNotFoundError(f"Unable to find the file: {path}") @@ -129,83 +181,72 @@ def read_file(path: str) -> io.BytesIO: with open(path, "rb") as f: return io.BytesIO(f.read()) elif file_status == 2: + import omni.client # noqa: PLC0415 + file_content = omni.client.read_file(path.replace(os.sep, "/"))[2] return io.BytesIO(memoryview(file_content).tobytes()) else: raise FileNotFoundError(f"Unable to find the file: {path}") -""" -Nucleus Connection. -""" - - -def check_usd_path_with_timeout(usd_path: str, timeout: float = 300, log_interval: float = 30) -> bool: - """Checks whether the given USD file path is available on the NVIDIA Nucleus server. - - This function synchronously runs an asynchronous USD path availability check, - logging progress periodically until it completes. The file is available on the server - if the HTTP status code is 200. Otherwise, the file is not available on the server. +def _find_usd_dependencies(local_usd_path: str) -> set[str]: + """Use UsdUtils to collect all asset dependencies from a USD file. - This is useful for checking server responsiveness before attempting to load a remote - asset. It will block execution until the check completes or times out. + This uses :func:`UsdUtils.ComputeAllDependencies` — the same approach as + ``isaacsim.storage.native`` — to discover sublayers, references, payloads, + and non-layer assets (textures, etc.) without maintaining a hardcoded list + of file extensions. Args: - usd_path: The remote USD file path to check. - timeout: Maximum time (in seconds) to wait for the server check. - log_interval: Interval (in seconds) at which progress is logged. + local_usd_path: Path to a local USD file. Returns: - Whether the given USD path is available on the server. + Set of asset path strings as they appear in the USD layer (unresolved). """ - start_time = time.time() - loop = asyncio.get_event_loop() + from pxr import Sdf, UsdUtils # noqa: PLC0415 - coroutine = _is_usd_path_available(usd_path, timeout) - task = asyncio.ensure_future(coroutine) + try: + layer = Sdf.Layer.FindOrOpen(local_usd_path) + except Exception: + logger.warning("Failed to open USD layer: %s", local_usd_path, exc_info=True) + return set() - next_log_time = start_time + log_interval + if layer is None: + return set() - first_log = True - while not task.done(): - now = time.time() - if now >= next_log_time: - elapsed = int(now - start_time) - if first_log: - logger.warning(f"Checking server availability for USD path: {usd_path} (timeout: {timeout}s)") - first_log = False - logger.warning(f"Waiting for server response... ({elapsed}s elapsed)") - next_log_time += log_interval - loop.run_until_complete(asyncio.sleep(0.1)) # Yield to allow async work + # Collect every asset path referenced from this layer. + # UsdUtils.ModifyAssetPaths walks sublayers, references, payloads, + # variant selections, and attribute values — exactly the set we need. + refs: set[str] = set() - return task.result() + def _collect(path: str) -> str: + if path: + refs.add(path) + return path # return unchanged — we are only reading, not modifying + UsdUtils.ModifyAssetPaths(layer, _collect) -""" -Helper functions. -""" + return refs -async def _is_usd_path_available(usd_path: str, timeout: float) -> bool: - """Checks whether the given USD path is available on the Omniverse Nucleus server. +def _resolve_reference_url(base_url: str, ref: str) -> str: + """Resolve a USD asset reference against a base URL (http/local).""" + ref = ref.strip() + if not ref: + return ref - This function is a asynchronous routine to check the availability of the given USD path on - the Omniverse Nucleus server. It will return True if the USD path is available on the server, - False otherwise. + parsed_ref = urlparse(ref) + if parsed_ref.scheme: + return ref - Args: - usd_path: The remote or local USD file path to check. - timeout: Timeout in seconds for the async stat call. + base = urlparse(base_url) + if base.scheme == "": + base_dir = os.path.dirname(base_url) + return os.path.normpath(os.path.join(base_dir, ref)) - Returns: - Whether the given USD path is available on the server. - """ - try: - result, _ = await asyncio.wait_for(omni.client.stat_async(usd_path), timeout=timeout) - return result == omni.client.Result.OK - except asyncio.TimeoutError: - logger.warning(f"Timed out after {timeout}s while checking for USD: {usd_path}") - return False - except Exception as ex: - logger.warning(f"Exception during USD file check: {type(ex).__name__}: {ex}") - return False + base_dir = posixpath.dirname(base.path) + if ref.startswith("/"): + new_path = posixpath.normpath(ref) + else: + new_path = posixpath.normpath(posixpath.join(base_dir, ref)) + return f"{base.scheme}://{base.netloc}{new_path}" diff --git a/source/isaaclab/isaaclab/utils/backend_utils.py b/source/isaaclab/isaaclab/utils/backend_utils.py new file mode 100644 index 000000000000..9f69a66aa04d --- /dev/null +++ b/source/isaaclab/isaaclab/utils/backend_utils.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import importlib +import logging + +logger = logging.getLogger(__name__) + + +class FactoryBase: + """A generic factory class that dynamically loads backends.""" + + def __init_subclass__(cls, **kwargs): + """Initializes a new factory subclass.""" + super().__init_subclass__(**kwargs) + cls._registry = {} + # Determine the module subpath for dynamic loading. + # e.g., if factory is in 'isaaclab.assets.articulation.articulation', + # the subpath becomes 'assets.articulation'. + module_parts = cls.__module__.split(".") + if module_parts[0] != "isaaclab": + raise ImportError(f"Factory class {cls.__name__} must be defined within the 'isaaclab' package.") + # The subpath is what comes between 'isaaclab' and the final module name. + cls._module_subpath = ".".join(module_parts[1:-1]) + + @classmethod + def register(cls, name: str, sub_class) -> None: + """Register a new implementation class.""" + if name in cls._registry and cls._registry[name] is not sub_class: + raise ValueError(f"Backend {name!r} already registered with a different class for factory {cls.__name__}.") + cls._registry[name] = sub_class + logger.info(f"Registered backend {name!r} for factory {cls.__name__}.") + + @classmethod + def _get_backend(cls, *args, **kwargs) -> str: + """Return active backend name for this factory. + + Falls back to ``"physx"`` for backward compatibility when no simulation + context is initialized yet. + """ + # Import lazily to avoid import cycles at module load time. + from isaaclab.sim.simulation_context import SimulationContext + + manager_name = SimulationContext.instance().physics_manager.__name__.lower() + if manager_name.startswith("newton"): + return "newton" + if manager_name.startswith("ovphysx"): + return "ovphysx" + if manager_name.startswith("physx"): + return "physx" + else: + raise ValueError(f"Unknown physics manager: {manager_name}") + + @classmethod + def _get_module_name(cls, backend: str) -> str: + """Return module path that hosts backend implementation for a given backend key.""" + return f"isaaclab_{backend}.{cls._module_subpath}" + + def __new__(cls, *args, **kwargs): + """Create a new instance of an implementation based on the backend.""" + backend = cls._get_backend(*args, **kwargs) + + if cls == FactoryBase: + raise TypeError("FactoryBase cannot be instantiated directly. Please subclass it.") + + # If backend is not in registry, try to import it and register the class. + # This is done to only import the module once. + if backend not in cls._registry: + # Construct the module name from the backend and the determined subpath. + module_name = cls._get_module_name(backend) + try: + module = importlib.import_module(module_name) + class_name = getattr(cls, "_backend_class_names", {}).get(backend, cls.__name__) + module_class = getattr(module, class_name) + # Manually register the class + cls.register(backend, module_class) + + except ImportError as e: + raise ValueError( + f"Could not import module for backend {backend!r} for factory {cls.__name__}. " + f"Attempted to import from '{module_name}'.\n" + f"Original error: {e}" + ) from e + + # Now check registry again. The import should have registered the class. + try: + impl = cls._registry[backend] + except KeyError: + available = list(cls.get_registry_keys()) + raise ValueError( + f"Unknown backend {backend!r} for {cls.__name__}. " + f"A module was found at '{module_name}', but it did not contain a class with the name {class_name!r}.\n" + f"Currently available backends: {available}." + ) from None + # Return an instance of the chosen class. + return impl(*args, **kwargs) + + @classmethod + def get_registry_keys(cls) -> list[str]: + """Returns a list of registered backend names.""" + return list(cls._registry.keys()) diff --git a/source/isaaclab/isaaclab/utils/buffers/__init__.py b/source/isaaclab/isaaclab/utils/buffers/__init__.py index 64da4f6e6ae9..31d62c46d8d1 100644 --- a/source/isaaclab/isaaclab/utils/buffers/__init__.py +++ b/source/isaaclab/isaaclab/utils/buffers/__init__.py @@ -5,6 +5,6 @@ """Sub-module containing different buffers.""" -from .circular_buffer import CircularBuffer -from .delay_buffer import DelayBuffer -from .timestamped_buffer import TimestampedBuffer +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/buffers/__init__.pyi b/source/isaaclab/isaaclab/utils/buffers/__init__.pyi new file mode 100644 index 000000000000..9078587d30c1 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/buffers/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "CircularBuffer", + "DelayBuffer", + "TimestampedBuffer", + "TimestampedBufferWarp", +] + +from .circular_buffer import CircularBuffer +from .delay_buffer import DelayBuffer +from .timestamped_buffer import TimestampedBuffer +from .timestamped_buffer_warp import TimestampedBufferWarp diff --git a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py index c5c9fe9ff6ad..c72907b176b3 100644 --- a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py @@ -11,10 +11,11 @@ class CircularBuffer: """Circular buffer for storing a history of batched tensor data. - This class implements a circular buffer for storing a history of batched tensor data. The buffer is - initialized with a maximum length and a batch size. The data is stored in a circular fashion, and the - data can be retrieved in a LIFO (Last-In-First-Out) fashion. The buffer is designed to be used in - multi-environment settings, where each environment has its own data. + This class stores a history of batched tensor data with the oldest entry at + index 0 and the most recent entry at index ``max_len - 1`` of the internal + buffer. The public indexing API remains LIFO (last-in-first-out), while the + ordered internal layout keeps ``buffer`` retrieval cheap and makes the + implementation compatible with tracing-based export flows. The shape of the appended data is expected to be (batch_size, ...), where the first dimension is the batch dimension. Correspondingly, the shape of the ring buffer is (max_len, batch_size, ...). @@ -42,8 +43,6 @@ def __init__(self, max_len: int, batch_size: int, device: str): self._max_len = torch.full((batch_size,), max_len, dtype=torch.int, device=device) # number of data pushes passed since the last call to :meth:`reset` self._num_pushes = torch.zeros(batch_size, dtype=torch.long, device=device) - # the pointer to the current head of the circular buffer (-1 means not initialized) - self._pointer: int = -1 # the actual buffer for data storage # note: this is initialized on the first call to :meth:`append` self._buffer: torch.Tensor = None # type: ignore @@ -80,14 +79,11 @@ def current_length(self) -> torch.Tensor: def buffer(self) -> torch.Tensor: """Complete circular buffer with most recent entry at the end and oldest entry at the beginning. - The shape of the buffer is (batch_size, max_length, ...). - - Note: - The oldest entry is at the beginning of dimension 1. + Returns: + Complete circular buffer with most recent entry at the end and oldest entry at the beginning of + dimension 1. The shape is [batch_size, max_length, data.shape[1:]]. """ - buf = self._buffer.clone() - buf = torch.roll(buf, shifts=self.max_length - self._pointer - 1, dims=0) - return torch.transpose(buf, dim0=0, dim1=1) + return torch.transpose(self._buffer, dim0=0, dim1=1) """ Operations. @@ -99,15 +95,17 @@ def reset(self, batch_ids: Sequence[int] | None = None): Args: batch_ids: Elements to reset in the batch dimension. Default is None, which resets all the batch indices. """ - # resolve all indices + batch_ids_resolved: Sequence[int] | slice if batch_ids is None: - batch_ids = slice(None) + batch_ids_resolved = slice(None) + else: + batch_ids_resolved = batch_ids # reset the number of pushes for the specified batch indices - self._num_pushes[batch_ids] = 0 + self._num_pushes[batch_ids_resolved] = 0 if self._buffer is not None: - # set buffer at batch_id reset indices to 0.0 so that the buffer() - # getter returns the cleared circular buffer after reset. - self._buffer[:, batch_ids, :] = 0.0 + # set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns + # the cleared circular buffer after reset. + self._buffer[:, batch_ids_resolved] = 0.0 def append(self, data: torch.Tensor): """Append the data to the circular buffer. @@ -125,21 +123,19 @@ def append(self, data: torch.Tensor): # move the data to the device data = data.to(self._device) - # at the first call, initialize the buffer size - if self._buffer is None: - self._pointer = -1 - self._buffer = torch.empty((self.max_length, *data.shape), dtype=data.dtype, device=self._device) - # move the head to the next slot - self._pointer = (self._pointer + 1) % self.max_length - # add the new data to the last layer - self._buffer[self._pointer] = data - # Check for batches with zero pushes and initialize all values in batch to first append is_first_push = self._num_pushes == 0 + if self._buffer is None: + self._buffer = data.unsqueeze(0).expand(self.max_length, *data.shape).clone() if torch.any(is_first_push): self._buffer[:, is_first_push] = data[is_first_push] # increment number of number of pushes for all batches + self._append(data) self._num_pushes += 1 + def _append(self, data: torch.Tensor): + self._buffer = torch.roll(self._buffer, shifts=-1, dims=0) + self._buffer[-1] = data + def __getitem__(self, key: torch.Tensor) -> torch.Tensor: """Retrieve the data from the circular buffer in last-in-first-out (LIFO) fashion. @@ -160,13 +156,14 @@ def __getitem__(self, key: torch.Tensor) -> torch.Tensor: # check the batch size if len(key) != self.batch_size: raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}") - # check if the buffer is empty - if torch.any(self._num_pushes == 0) or self._buffer is None: - raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.") - - # admissible lag - valid_keys = torch.minimum(key, self._num_pushes - 1) - # the index in the circular buffer (pointer points to the last+1 index) - index_in_buffer = torch.remainder(self._pointer - valid_keys, self.max_length) + if self._buffer is None: + raise RuntimeError("The buffer is empty. Please append data before retrieving.") + + # admissible lag — clamp to [0, ..] so batches with _num_pushes == 0 + # return the zeroed-out slot instead of indexing out of bounds. + valid_keys = torch.clamp(torch.minimum(key, self._num_pushes - 1), min=0) + # The buffer is stored oldest->newest along dimension 0, so the most + # recent item lives at the last index. + index_in_buffer = (self.max_length - 1 - valid_keys).to(dtype=torch.long) # return output return self._buffer[index_in_buffer, self._ALL_INDICES] diff --git a/source/isaaclab/isaaclab/utils/buffers/timestamped_buffer_warp.py b/source/isaaclab/isaaclab/utils/buffers/timestamped_buffer_warp.py new file mode 100644 index 000000000000..0398c04acf86 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/buffers/timestamped_buffer_warp.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +class TimestampedBufferWarp: + """A buffer class containing data and its timestamp. + + This class is a simple data container that stores a tensor and its timestamp. The timestamp is used to + track the last update of the buffer. The timestamp is set to -1.0 by default, indicating that the buffer + has not been updated yet. The timestamp should be updated whenever the data in the buffer is updated. This + way the buffer can be used to check whether the data is outdated and needs to be refreshed. + + The buffer is useful for creating lazy buffers that only update the data when it is outdated. This can be + useful when the data is expensive to compute or retrieve. For example usage, refer to the data classes in + the :mod:`isaaclab.assets` module. + """ + + def __init__(self, shape: tuple, device: str, dtype: type) -> None: + """Initializes the timestamped buffer. + + .. note:: Unlike the :class:`TimestampedBuffer` class in the :mod:`isaaclab.utils.buffers` module, + this class allocates the memory on init. Ideally, users should avoid to overwrite the data after + initialization and should use data.assign(...) whenever possible. + + Args: + shape: The shape of the data. + device: The device used for the data. + dtype: The data type of the data. + """ + self.data = wp.zeros(shape, dtype=dtype, device=device) + self.timestamp = -1.0 diff --git a/source/isaaclab/isaaclab/utils/configclass.py b/source/isaaclab/isaaclab/utils/configclass.py index e46280a61ccf..59605f2797cc 100644 --- a/source/isaaclab/isaaclab/utils/configclass.py +++ b/source/isaaclab/isaaclab/utils/configclass.py @@ -5,7 +5,9 @@ """Sub-module that provides a wrapper around the Python 3.7 onwards ``dataclasses`` module.""" +import dataclasses import inspect +import re import types from collections.abc import Callable from copy import deepcopy @@ -13,6 +15,10 @@ from typing import Any, ClassVar from .dict import class_to_dict, update_class_from_dict +from .string import ResolvableString + +_CALLABLE_STR_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_\\.]*:[A-Za-z_][A-Za-z0-9_]*$") +_CALLABLE_STR_WITH_DIR_RE = re.compile(r"^\{DIR\}(?:\.[A-Za-z_][A-Za-z0-9_]*)*:[A-Za-z_][A-Za-z0-9_]*$") _CONFIGCLASS_METHODS = ["to_dict", "from_dict", "replace", "copy", "validate"] """List of class methods added at runtime to dataclass.""" @@ -84,6 +90,11 @@ class EnvCfg: .. _dataclass: https://docs.python.org/3/library/dataclasses.html """ + # snapshot field names declared in *this* class body before configclass + # merges parent fields — used by _field_module_dir to resolve {DIR} correctly. + _own_ann = set(cls.__dict__.get("__annotations__", {}).keys()) + _own_body = {k for k in cls.__dict__ if not k.startswith("__")} + cls.__configclass_own_fields__ = frozenset(_own_ann | _own_body) # add type annotations _add_annotation_types(cls) # add field factory @@ -174,6 +185,71 @@ def _copy_class(obj: object) -> object: return replace(obj) +def _field_module_dir(obj: Any, key: str | None = None) -> str | None: + """Return module parent package path for an object or one of its declared fields.""" + cls = type(obj) + if key is not None: + # Use nearest declaration in MRO (subclass override wins). + # We prefer __configclass_own_fields__ (the snapshot taken before + # _process_mutable_types copies parent fields into every subclass's + # __dict__) so that {DIR} resolves relative to the class that + # *originally* declared the field, not the subclass that inherited it. + for mro_cls in cls.__mro__: + if mro_cls is object: + continue + own_fields = getattr(mro_cls, "__configclass_own_fields__", None) + if own_fields is not None: + if key in own_fields: + cls = mro_cls + break + elif key in mro_cls.__dict__: + cls = mro_cls + break + module_name = getattr(cls, "__module__", "") + return module_name.rsplit(".", 1)[0] if "." in module_name else (module_name or None) + + +def _wrap_resolvable_strings(value: Any, module_dir: str | None = None, _seen: set[int] | None = None) -> Any: + """Recursively wrap callable-like strings with :class:`ResolvableString`.""" + if isinstance(value, str) and (_CALLABLE_STR_RE.match(value) or _CALLABLE_STR_WITH_DIR_RE.match(value)): + if "{DIR}" in value: + if module_dir is None: + raise ValueError(f"Cannot resolve '{{DIR}}' in '{value}' because no module context is available.") + value = value.replace("{DIR}", module_dir) + return ResolvableString(value) + is_dataclass_instance = hasattr(value, "__dataclass_fields__") and hasattr(value, "__dict__") + is_container = isinstance(value, (list, tuple, dict)) + if is_dataclass_instance or is_container: + if _seen is None: + _seen = set() + value_id = id(value) + if value_id in _seen: + return value + _seen.add(value_id) + if isinstance(value, list): + wrapped = [_wrap_resolvable_strings(item, module_dir=module_dir, _seen=_seen) for item in value] + if len(wrapped) == len(value) and all(new_item is old_item for new_item, old_item in zip(wrapped, value)): + return value + return wrapped + if isinstance(value, tuple): + wrapped = tuple(_wrap_resolvable_strings(item, module_dir=module_dir, _seen=_seen) for item in value) + if len(wrapped) == len(value) and all(new_item is old_item for new_item, old_item in zip(wrapped, value)): + return value + return wrapped + if isinstance(value, dict): + wrapped = { + key: _wrap_resolvable_strings(item, module_dir=module_dir, _seen=_seen) for key, item in value.items() + } + if len(wrapped) == len(value) and all(wrapped[key] is value[key] for key in value): + return value + return wrapped + if is_dataclass_instance: + for key, item in value.__dict__.items(): + nested_module_dir = _field_module_dir(value, key) + setattr(value, key, _wrap_resolvable_strings(item, module_dir=nested_module_dir, _seen=_seen)) + return value + + """ Private helper functions. """ @@ -247,7 +323,16 @@ def _validate(obj: object, prefix: str = "") -> list[str]: """Check the validity of configclass object. This function checks if the object is a valid configclass object. A valid configclass object contains no MISSING - entries. + entries. Additionally, if the top-level object defines a ``_validate_config`` method, it is called to perform + domain-specific validation. + + Subclasses can define ``validate_config(self)`` to add custom checks:: + + @configclass + class MyEnvCfg(ManagerBasedEnvCfg): + def validate_config(self): + if self.some_field == "bad": + raise ValueError("some_field cannot be 'bad'.") Args: obj: The object to check. @@ -297,6 +382,11 @@ def _validate(obj: object, prefix: str = "") -> list[str]: f"Missing values detected in object {obj.__class__.__name__} for the following" f" fields:\n{formatted_message}\n" ) + # invoke custom validation hook if defined on the object + if prefix == "": + custom_validate = getattr(obj, "validate_config", None) + if callable(custom_validate): + custom_validate() return missing_fields @@ -399,7 +489,8 @@ def _custom_post_init(obj): ann = obj.__class__.__dict__.get(key) # duplicate data members that are mutable if not callable(value) and not isinstance(ann, property): - setattr(obj, key, deepcopy(value)) + copied_value = deepcopy(value) + setattr(obj, key, _wrap_resolvable_strings(copied_value, module_dir=_field_module_dir(obj, key))) def _combined_function(f1: Callable, f2: Callable) -> Callable: @@ -500,3 +591,80 @@ def _wrap(): return deepcopy(f) return _wrap + + +def resolve_cfg_presets(cfg: object) -> object: + """Recursively replace preset-wrapper fields with their *default* preset. + + Task configs may use two preset-selector patterns to support multiple physics backends + (PhysX / Newton) or observation modes. Both patterns produce wrapper objects that are + **not** valid as the concrete cfg that downstream managers / scene builders expect. + This function resolves them in-place so the config can be used without a Hydra CLI + override (e.g. in unit tests or when creating environments directly). + + Supported patterns: + + * **New style** (``PresetCfg`` subclass): a configclass whose MRO contains a class named + ``PresetCfg``. The active variant is stored in the ``default`` attribute. + * **Old style** (``presets`` dict): a configclass that has a ``presets: dict[str, Cfg]`` + attribute with a ``"default"`` key. + + Args: + cfg: Any configclass instance (or any object; non-configclasses are returned as-is). + + Returns: + The same ``cfg`` object, modified in-place with preset wrappers replaced. + """ + if not hasattr(cfg, "__dataclass_fields__"): + return cfg + for field_name in list(cfg.__dataclass_fields__): + value = getattr(cfg, field_name, None) + if value is None or not hasattr(value, "__dataclass_fields__"): + continue + # New-style PresetCfg: class hierarchy contains a class named "PresetCfg". + if any(cls.__name__ == "PresetCfg" for cls in type(value).__mro__): + resolved = value.default + setattr(cfg, field_name, resolved) + resolve_cfg_presets(resolved) + # Old-style preset: configclass with a ``presets`` dict that has a ``"default"`` key. + elif isinstance(getattr(value, "presets", None), dict) and "default" in value.presets: + resolved = value.presets["default"] + setattr(cfg, field_name, resolved) + resolve_cfg_presets(resolved) + else: + resolve_cfg_presets(value) + return cfg + + +def checked_apply(src: Any, target: Any) -> None: + """Forward every declared field on ``src`` (a dataclass) onto ``target``. + + Used by Isaac Lab configclasses that mirror an upstream/external dataclass + (for example, Newton's ``ShapeConfig``): declare the overridable fields + once on the wrapper, then forward them to the upstream object via this + helper instead of writing ``setattr`` lines per field. + + Raises :class:`AttributeError` if ``target`` is missing a field declared + on ``src``. The two structures must match — the check guards against + silent no-ops when the upstream API drifts (the bug class PR #5289 fixed + for Newton ``ShapeConfig.contact_margin`` → ``margin``). + + Args: + src: Dataclass instance whose declared fields will be forwarded. + Field names live here; this is the single source of truth. + target: Object to receive the field values. Must already expose + an attribute for every declared field on ``src``. + + Raises: + AttributeError: If ``target`` does not already have an attribute + matching one of ``src``'s declared field names. + """ + if not hasattr(src, "__dataclass_fields__"): + raise TypeError(f"checked_apply: src must be a dataclass, got {type(src).__name__}") + for f in dataclasses.fields(src): + if not hasattr(target, f.name): + target_path = f"{type(target).__module__}.{type(target).__name__}" + raise AttributeError( + f"{target_path} has no attribute `{f.name}`. {type(src).__name__} is out of sync with target." + ) + setattr(target, f.name, getattr(src, f.name)) diff --git a/source/isaaclab/isaaclab/utils/datasets/__init__.py b/source/isaaclab/isaaclab/utils/datasets/__init__.py index fce0fa308fa7..c656bdd684ef 100644 --- a/source/isaaclab/isaaclab/utils/datasets/__init__.py +++ b/source/isaaclab/isaaclab/utils/datasets/__init__.py @@ -12,6 +12,6 @@ Submodule for datasets classes and methods. """ -from .dataset_file_handler_base import DatasetFileHandlerBase -from .episode_data import EpisodeData -from .hdf5_dataset_file_handler import HDF5DatasetFileHandler +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/datasets/__init__.pyi b/source/isaaclab/isaaclab/utils/datasets/__init__.pyi new file mode 100644 index 000000000000..bc7764a443f2 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/datasets/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "DatasetFileHandlerBase", + "EpisodeData", + "HDF5DatasetFileHandler", +] + +from .dataset_file_handler_base import DatasetFileHandlerBase +from .episode_data import EpisodeData +from .hdf5_dataset_file_handler import HDF5DatasetFileHandler diff --git a/source/isaaclab/isaaclab/utils/datasets/episode_data.py b/source/isaaclab/isaaclab/utils/datasets/episode_data.py index 55df8ebbcbe7..dcb4791457f5 100644 --- a/source/isaaclab/isaaclab/utils/datasets/episode_data.py +++ b/source/isaaclab/isaaclab/utils/datasets/episode_data.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2024-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - from __future__ import annotations import torch @@ -90,7 +85,7 @@ def is_empty(self): """Check if the episode data is empty.""" return not bool(self._data) - def add(self, key: str, value: torch.Tensor | dict): + def add(self, key: str, value: torch.Tensor | dict, clone: bool = True): """Add a key-value pair to the dataset. The key can be nested by using the "/" character. @@ -99,13 +94,15 @@ def add(self, key: str, value: torch.Tensor | dict): Args: key: The key name. value: The corresponding value of tensor type or of dict type. + clone: Whether to clone the tensor value before storing it in the episode data. """ # check datatype if isinstance(value, dict): for sub_key, sub_value in value.items(): - self.add(f"{key}/{sub_key}", sub_value) + self.add(f"{key}/{sub_key}", sub_value, clone=clone) return + stored = value.clone() if (clone and isinstance(value, torch.Tensor)) else value sub_keys = key.split("/") current_dataset_pointer = self._data for sub_key_index in range(len(sub_keys)): @@ -113,9 +110,9 @@ def add(self, key: str, value: torch.Tensor | dict): # Add value to the final dict layer # Use lists to prevent slow tensor copy during concatenation if sub_keys[sub_key_index] not in current_dataset_pointer: - current_dataset_pointer[sub_keys[sub_key_index]] = [value.clone()] + current_dataset_pointer[sub_keys[sub_key_index]] = [stored] else: - current_dataset_pointer[sub_keys[sub_key_index]].append(value.clone()) + current_dataset_pointer[sub_keys[sub_key_index]].append(stored) break # key index if sub_keys[sub_key_index] not in current_dataset_pointer: diff --git a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py index 46aeead2fd93..1977527b2cdf 100644 --- a/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py +++ b/source/isaaclab/isaaclab/utils/datasets/hdf5_dataset_file_handler.py @@ -3,10 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2024-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations import json import os @@ -16,9 +13,34 @@ import numpy as np import torch +from isaaclab.utils.math import convert_quat + from .dataset_file_handler_base import DatasetFileHandlerBase from .episode_data import EpisodeData +# Current dataset format version +# Version 1: XYZW quaternion format (current) +# Version 0 (or missing): Legacy WXYZ quaternion format +DATASET_FORMAT_VERSION = 1 + + +def convert_pose_quat_wxyz_to_xyzw(pose: np.ndarray) -> np.ndarray: + """Convert pose quaternion from WXYZ format to XYZW format. + + The pose is expected to have shape (..., 7) where the first 3 elements are position + and the last 4 elements are the quaternion. + + Args: + pose: Pose array with shape (..., 7) where quaternion is in WXYZ format. + + Returns: + Pose array with shape (..., 7) where quaternion is in XYZW format. + """ + position = pose[..., :3] + quat_wxyz = pose[..., 3:7] + quat_xyzw = convert_quat(quat_wxyz, to="xyzw") + return np.concatenate([position, quat_xyzw], axis=-1) + class HDF5DatasetFileHandler(DatasetFileHandlerBase): """HDF5 dataset file handler for storing and loading episode data.""" @@ -49,6 +71,9 @@ def create(self, file_path: str, env_name: str = None): os.makedirs(dir_path) self._hdf5_file_stream = h5py.File(file_path, "w") + # Set the dataset format version + self._hdf5_file_stream.attrs["format_version"] = DATASET_FORMAT_VERSION + # set up a data group in the file self._hdf5_data_group = self._hdf5_file_stream.create_group("data") self._hdf5_data_group.attrs["total"] = 0 @@ -105,24 +130,67 @@ def demo_count(self) -> int: Operations. """ - def load_episode(self, episode_name: str, device: str) -> EpisodeData | None: - """Load episode data from the file.""" + def get_format_version(self) -> int: + """Get the dataset format version. + + Returns: + The format version number. Returns 0 for legacy datasets without version info. + """ + self._raise_if_not_initialized() + if "format_version" in self._hdf5_file_stream.attrs: + return int(self._hdf5_file_stream.attrs["format_version"]) + return 0 # Legacy format + + def is_legacy_quaternion_format(self) -> bool: + """Check if the dataset uses the legacy WXYZ quaternion format. + + Returns: + True if the dataset uses WXYZ format (version 0), False if it uses XYZW format. + """ + return self.get_format_version() < DATASET_FORMAT_VERSION + + def load_episode( + self, episode_name: str, device: str, convert_legacy_quat: bool | None = None + ) -> EpisodeData | None: + """Load episode data from the file. + + Args: + episode_name: Name of the episode to load. + device: Device to load tensors to. + convert_legacy_quat: If True, convert quaternions from legacy WXYZ to XYZW format. + If None (default), auto-detect based on dataset version. + + Returns: + The loaded episode data, or None if the episode doesn't exist. + """ self._raise_if_not_initialized() if episode_name not in self._hdf5_data_group: return None + + # Auto-detect if conversion is needed + if convert_legacy_quat is None: + convert_legacy_quat = self.is_legacy_quaternion_format() + episode = EpisodeData() h5_episode_group = self._hdf5_data_group[episode_name] - def load_dataset_helper(group): + def load_dataset_helper(group, path=""): """Helper method to load dataset that contains recursive dict objects.""" data = {} for key in group: + current_path = f"{path}/{key}" if path else key if isinstance(group[key], h5py.Group): - data[key] = load_dataset_helper(group[key]) + data[key] = load_dataset_helper(group[key], current_path) else: # Converting group[key] to numpy array greatly improves the performance # when converting to torch tensor - data[key] = torch.tensor(np.array(group[key]), device=device) + np_data = np.array(group[key]) + + # Convert legacy quaternions if needed + if convert_legacy_quat and key == "root_pose" and np_data.shape[-1] == 7: + np_data = convert_pose_quat_wxyz_to_xyzw(np_data) + + data[key] = torch.tensor(np_data, device=device) return data episode.data = load_dataset_helper(h5_episode_group) @@ -137,7 +205,7 @@ def load_dataset_helper(group): return episode - def write_episode(self, episode: EpisodeData, demo_id: int | None = None): + def write_episode(self, episode: EpisodeData, demo_id: int | None = None, dataset_compression: bool = True): """Add an episode to the dataset. Args: @@ -178,7 +246,10 @@ def create_dataset_helper(group, key, value): for sub_key, sub_value in value.items(): create_dataset_helper(key_group, sub_key, sub_value) else: - group.create_dataset(key, data=value.cpu().numpy(), compression="gzip") + if dataset_compression: + group.create_dataset(key, data=value.cpu().numpy(), compression="gzip", compression_opts=2) + else: + group.create_dataset(key, data=value.cpu().numpy()) for key, value in episode.data.items(): create_dataset_helper(h5_episode_group, key, value) @@ -207,3 +278,67 @@ def _raise_if_not_initialized(self): """Raise an error if the dataset file handler is not initialized.""" if self._hdf5_file_stream is None: raise RuntimeError("HDF5 dataset file stream is not initialized") + + @staticmethod + def convert_dataset_to_xyzw(input_path: str, output_path: str | None = None) -> str: + """Convert a legacy dataset from WXYZ to XYZW quaternion format. + + This method reads a dataset file, converts all quaternions from the legacy WXYZ format + to the current XYZW format, and writes the result to a new file. + + Args: + input_path: Path to the input dataset file (legacy WXYZ format). + output_path: Path for the output dataset file. If None, appends '_xyzw' to input filename. + + Returns: + Path to the converted dataset file. + + Raises: + FileNotFoundError: If the input file does not exist. + ValueError: If the dataset is already in XYZW format. + """ + if not os.path.exists(input_path): + raise FileNotFoundError(f"Input dataset file not found: {input_path}") + + # Generate output path if not provided + if output_path is None: + base, ext = os.path.splitext(input_path) + output_path = f"{base}_xyzw{ext}" + + def convert_group_quaternions(src_group, dst_group): + """Recursively copy and convert quaternions in groups.""" + # Copy attributes + for attr_name, attr_value in src_group.attrs.items(): + dst_group.attrs[attr_name] = attr_value + + # Process items + for key in src_group: + if isinstance(src_group[key], h5py.Group): + # Recursively handle groups + dst_subgroup = dst_group.create_group(key) + convert_group_quaternions(src_group[key], dst_subgroup) + else: + # Handle datasets + data = np.array(src_group[key]) + + # Convert root_pose quaternions + if key == "root_pose" and data.shape[-1] == 7: + data = convert_pose_quat_wxyz_to_xyzw(data) + + # Preserve compression settings if possible + compression = src_group[key].compression + dst_group.create_dataset(key, data=data, compression=compression) + + with h5py.File(input_path, "r") as src_file: + # Check if already converted + if "format_version" in src_file.attrs and src_file.attrs["format_version"] >= DATASET_FORMAT_VERSION: + raise ValueError(f"Dataset is already in XYZW format (version {src_file.attrs['format_version']})") + + with h5py.File(output_path, "w") as dst_file: + # Set the new format version + dst_file.attrs["format_version"] = DATASET_FORMAT_VERSION + + # Copy and convert all data + convert_group_quaternions(src_file, dst_file) + + return output_path diff --git a/source/isaaclab/isaaclab/utils/dict.py b/source/isaaclab/isaaclab/utils/dict.py index de2062d66979..6036931234e9 100644 --- a/source/isaaclab/isaaclab/utils/dict.py +++ b/source/isaaclab/isaaclab/utils/dict.py @@ -14,7 +14,7 @@ import torch from .array import TENSOR_TYPE_CONVERSIONS, TENSOR_TYPES -from .string import callable_to_string, string_to_callable, string_to_slice +from .string import ResolvableString, callable_to_string, string_to_slice """ Dictionary <-> Class operations. @@ -58,8 +58,12 @@ def class_to_dict(obj: object) -> dict[str, Any]: # disregard builtin attributes if key.startswith("__"): continue + # Keep lazy callable references as strings; don't force callable introspection. + if isinstance(value, ResolvableString): + data[key] = str(value) # check if attribute is callable -- function - if callable(value): + # check if attribute is callable -- function + elif callable(value): data[key] = callable_to_string(value) # check if attribute is a dictionary elif hasattr(value, "__dict__") or isinstance(value, dict): @@ -140,10 +144,16 @@ def update_class_from_dict(obj, data: dict[str, Any], _ns: str = "") -> None: if not set_obj: continue - # -- 3) callable attribute → resolve string -------------- + # -- 3) callable attribute → keep string lazily resolvable -------------- elif callable(obj_mem): - # update function name - value = string_to_callable(value) + if isinstance(value, str): + if not isinstance(value, ResolvableString): + value = ResolvableString(value) + elif not callable(value): + raise ValueError( + f"[Config]: Incorrect type under namespace: {key_ns}." + f" Expected callable or callable-string, Received: {type(value)}." + ) # -- 4) simple scalar / explicit None --------------------- elif value is None or isinstance(value, type(obj_mem)): diff --git a/source/isaaclab/isaaclab/utils/interpolation/__init__.py b/source/isaaclab/isaaclab/utils/interpolation/__init__.py index 25f6be5f0014..b4a78cd919a3 100644 --- a/source/isaaclab/isaaclab/utils/interpolation/__init__.py +++ b/source/isaaclab/isaaclab/utils/interpolation/__init__.py @@ -7,4 +7,6 @@ Submodule for different interpolation methods. """ -from .linear_interpolation import LinearInterpolation +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/interpolation/__init__.pyi b/source/isaaclab/isaaclab/utils/interpolation/__init__.pyi new file mode 100644 index 000000000000..d4d27446c9f0 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/interpolation/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "LinearInterpolation", +] + +from .linear_interpolation import LinearInterpolation diff --git a/source/isaaclab/isaaclab/utils/io/__init__.py b/source/isaaclab/isaaclab/utils/io/__init__.py index 9a8b16ed157a..b747c128f640 100644 --- a/source/isaaclab/isaaclab/utils/io/__init__.py +++ b/source/isaaclab/isaaclab/utils/io/__init__.py @@ -7,5 +7,6 @@ Submodules for files IO operations. """ -from .torchscript import load_torchscript_model -from .yaml import dump_yaml, load_yaml +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/io/__init__.pyi b/source/isaaclab/isaaclab/utils/io/__init__.pyi new file mode 100644 index 000000000000..3d61292f55a7 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/io/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "load_torchscript_model", + "dump_yaml", + "load_yaml", +] + +from .torchscript import load_torchscript_model +from .yaml import dump_yaml, load_yaml diff --git a/source/isaaclab/isaaclab/utils/leapp/__init__.py b/source/isaaclab/isaaclab/utils/leapp/__init__.py new file mode 100644 index 000000000000..f39b0e4d7eea --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for LEAPP export annotation and proxy-based policy tracing.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/leapp/__init__.pyi b/source/isaaclab/isaaclab/utils/leapp/__init__.pyi new file mode 100644 index 000000000000..e2b8f497b5f3 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/__init__.pyi @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ExportPatcher", + "InputKindEnum", + "LeappTensorSemantics", + "OutputKindEnum", + "POSE6_ELEMENT_NAMES", + "POSE7_ELEMENT_NAMES", + "QUAT_XYZW_ELEMENT_NAMES", + "WRENCH6_ELEMENT_NAMES", + "XYZ_ELEMENT_NAMES", + "body_names_resolver", + "body_pose6_resolver", + "body_pose_resolver", + "body_quat_resolver", + "body_wrench_resolver", + "body_xyz_resolver", + "build_command_connection", + "build_state_connection", + "build_write_connection", + "joint_names_resolver", + "leapp_tensor_semantics", + "patch_env_for_export", + "resolve_leapp_element_names", + "target_frame_pose_resolver", + "target_frame_quat_resolver", + "target_frame_xyz_resolver", +] + +from .export_annotator import ExportPatcher, patch_env_for_export +from .leapp_semantics import ( + InputKindEnum, + OutputKindEnum, + POSE6_ELEMENT_NAMES, + POSE7_ELEMENT_NAMES, + QUAT_XYZW_ELEMENT_NAMES, + WRENCH6_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + LeappTensorSemantics, + body_names_resolver, + body_pose6_resolver, + body_pose_resolver, + body_quat_resolver, + body_wrench_resolver, + body_xyz_resolver, + joint_names_resolver, + leapp_tensor_semantics, + resolve_leapp_element_names, + target_frame_pose_resolver, + target_frame_quat_resolver, + target_frame_xyz_resolver, +) +from .utils import ( + build_command_connection, + build_state_connection, + build_write_connection, +) diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py new file mode 100644 index 000000000000..fb335811535a --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -0,0 +1,708 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Export annotations for Isaac Lab policies using proxy-based patching. + +Observation and action annotation share a unified dedup cache so that a +state property (e.g. ``joint_pos``) read by both an observation term and +an action term resolves to one LEAPP input edge. + +- Observation term functions see an ``_EnvProxy`` whose scene returns + ``_EntityProxy`` objects with annotating data proxies. + +- Action terms have their ``_asset`` attribute replaced with an + _ArticulationWriteProxy that intercepts ``_leapp_semantics``-decorated + write methods **and** routes ``.data`` reads through the same annotating + data proxy used by observations. + +Cache lifecycle (assuming single-env play-mode export): + + compute() clear cache → obs terms populate cache + policy inference TracedTensors propagate through NN + process_action() register_buffer for raw_actions + apply_action() [tracing] reuse cached TracedTensors for state reads, + capture write outputs, call output_tensors(), + then clear cache + apply_action() [decim.] clear cache → fresh reads for simulation + ... + compute() clear cache → fresh reads for next obs +""" + +from __future__ import annotations + +import inspect +import logging +from collections.abc import Callable +from contextlib import suppress +from typing import TYPE_CHECKING, Any + +import torch +from leapp import annotate +from leapp.utils.tensor_description import TensorSemantics + +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.managers import ManagerTermBase + +from .leapp_semantics import select_element_names +from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy +from .utils import ( + TracedProxyArray, + build_command_connection, + build_write_connection, +) + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +VARIABLE_IMPEDANCE_MODES = frozenset({"variable", "variable_kp"}) + + +# ══════════════════════════════════════════════════════════════════ +# ExportPatcher +# ══════════════════════════════════════════════════════════════════ + + +class ExportPatcher: + """Unified patcher that annotates observation inputs and action outputs for LEAPP export. + + Observation-side property semantics are resolved lazily inside + ``_DataProxy`` by combining: + + - the concrete runtime getter from the backend data class + - the nearest ``_leapp_semantics`` metadata found while walking the MRO + + This lets backends override property implementations without duplicating + decorators from the abstract API. + + - The observation proxy chain (``_EnvProxy`` → ``_SceneProxy`` → + ``_EntityProxy`` → ``_DataProxy``) for state reads + by observation term functions. + - The ``_ArticulationWriteProxy`` on each action term, which intercepts + target writes **and** routes ``.data`` reads through the same + ``_DataProxy`` / cache. + + """ + + def __init__(self, export_method: str, required_obs_groups: set[str] | None = None): + """Initialize the export patcher. + + Args: + export_method: LEAPP export backend passed to + :func:`annotate.output_tensors`. + required_obs_groups: Observation groups that should be patched, or + ``None`` to patch all groups. + """ + self.task_name: str | None = None + self.export_method = export_method + self.required_obs_groups = required_obs_groups + self._annotated_tensor_cache: dict[tuple[int, str], TracedProxyArray] = {} + self._data_property_resolution_cache: dict[tuple[type, str], tuple[Callable, object] | None] = {} + self._write_method_resolution_cache: dict[ + tuple[type, str], tuple[Callable, object, inspect.Signature] | None + ] = {} + self._action_output_cache: list[TensorSemantics] = [] + self._captured_write_term_names: set[str] = set() + self._fallback_term_names: set[str] = set() + self._pending_action_output_export: bool = False + self._uses_last_action_state: bool = False + self._action_term_scene_keys: dict[str, str] = {} + + def setup(self, env): + """Patch the environment in place for LEAPP-aware export. + + Args: + env: Wrapped manager-based environment whose unwrapped instance + should be patched. + """ + unwrapped = env.env.unwrapped + task_name = str(unwrapped.spec.id) + self.task_name = task_name + + proxy_env = _EnvProxy( + unwrapped, + task_name, + self._data_property_resolution_cache, + self._annotated_tensor_cache, + ) + + self._disable_training_managers(unwrapped) + self._patch_observation_manager(unwrapped.observation_manager, proxy_env) + self._patch_history_buffers(unwrapped.observation_manager) + self._patch_action_manager( + unwrapped.action_manager, + self._annotated_tensor_cache, + ) + + # ── Disable training-only managers ───────────────────────────── + + @staticmethod + def _disable_training_managers(unwrapped): + """Replace training-only manager methods with no-ops. + + During export the curriculum, reward, termination, and recorder + managers serve no purpose. Disabling them avoids side-effect + crashes (e.g. ADR curriculum terms accessing nullified noise + configs) and removes unnecessary computation. + + Args: + unwrapped: Unwrapped environment whose training-only managers + should be disabled. + """ + num_envs = unwrapped.num_envs + device = unwrapped.device + _zero_reward = torch.zeros(num_envs, device=device) + _no_termination = torch.zeros(num_envs, dtype=torch.bool, device=device) + + def _noop_curriculum(env_ids=None): + return None + + def _zero_reward_compute(dt): + return _zero_reward + + def _no_termination_compute(): + return _no_termination + + def _noop(*args, **kwargs): + return None + + if hasattr(unwrapped, "curriculum_manager"): + unwrapped.curriculum_manager.compute = _noop_curriculum + + if hasattr(unwrapped, "reward_manager"): + unwrapped.reward_manager.compute = _zero_reward_compute + + if hasattr(unwrapped, "termination_manager"): + unwrapped.termination_manager.compute = _no_termination_compute + + if hasattr(unwrapped, "recorder_manager"): + rm = unwrapped.recorder_manager + + rm.record_pre_step = _noop + rm.record_post_step = _noop + rm.record_pre_reset = _noop + rm.record_post_reset = _noop + rm.record_post_physics_decimation_step = _noop + + @staticmethod + def _resolve_scene_entity_key(scene, entity: Any) -> str | None: + """Return the scene dictionary key for an entity. + + Args: + scene: Scene object that stores entity dictionaries. + entity: Entity instance to locate. + + Returns: + The scene key for ``entity`` if found, otherwise ``None``. + """ + for attr_value in vars(scene).values(): + if not isinstance(attr_value, dict): + continue + for key, candidate in attr_value.items(): + if candidate is entity: + return key + return None + + # ── Observation manager patches ─────────────────────────────── + + def _patch_history_buffers(self, obs_manager): + """Patch history-enabled observation buffers to export as LEAPP state. + + Args: + obs_manager: Observation manager whose history buffers should be + wrapped. + """ + history_buffers = getattr(obs_manager, "_group_obs_term_history_buffer", {}) + term_names_by_group = getattr(obs_manager, "_group_obs_term_names", {}) + + for group_name, term_cfgs in obs_manager._group_obs_term_cfgs.items(): + if self.required_obs_groups is not None and group_name not in self.required_obs_groups: + continue + group_buffers = history_buffers.get(group_name, {}) + group_term_names = term_names_by_group.get(group_name, []) + + for index, term_cfg in enumerate(term_cfgs): + history_length = getattr(term_cfg, "history_length", 0) or 0 + if history_length <= 0: + continue + + if index >= len(group_term_names): + continue + + term_name = group_term_names[index] + circular_buffer = group_buffers.get(term_name) + if circular_buffer is None: + continue + + state_name = f"h_{group_name}_{term_name}" + self._patch_history_buffer_append(circular_buffer, state_name) + + def _patch_history_buffer_append(self, circular_buffer, state_name: str): + """Wrap ``_append`` so history buffers become explicit LEAPP state. + + Args: + circular_buffer: Circular buffer instance to patch. + state_name: LEAPP state tensor name for the buffer contents. + """ + if hasattr(circular_buffer, "_leapp_original_append"): + return + + task_name = self.task_name + original_append = circular_buffer._append + + def patched_append(data: torch.Tensor): + """Annotate history buffer updates as LEAPP state transitions. + + Args: + data: New observation slice appended to the buffer. + + Returns: + ``None``. + """ + if circular_buffer._buffer is not None: + circular_buffer._buffer = annotate.state_tensors(task_name, {state_name: circular_buffer._buffer}) + + original_append(data) + + if circular_buffer._buffer is not None: + circular_buffer._buffer = annotate.update_state(task_name, {state_name: circular_buffer._buffer}) + + circular_buffer._leapp_original_append = original_append + circular_buffer._append = patched_append + + def _patch_observation_manager(self, obs_manager, proxy_env): + """Patch observation terms to use annotating proxies and disable noise. + + Args: + obs_manager: Observation manager instance to patch. + proxy_env: Proxy environment routed into observation terms. + """ + for group_name, term_cfgs in obs_manager._group_obs_term_cfgs.items(): + if self.required_obs_groups is not None and group_name not in self.required_obs_groups: + continue + for term_cfg in term_cfgs: + original_func = term_cfg.func + func_name = getattr(original_func, "__name__", None) + + if func_name == "last_action": + self._uses_last_action_state = True + term_cfg.func = self._wrap_last_action(original_func) + elif func_name == "generated_commands": + term_cfg.func = self._wrap_generated_commands(original_func, term_cfg) + else: + term_cfg.func = self._wrap_with_proxy(original_func, proxy_env) + + term_cfg.noise = None + + original_compute = obs_manager.compute + cache = self._annotated_tensor_cache + + def patched_compute(*args, **kwargs): + """Clear the tensor dedup cache once per full observation pass.""" + cache.clear() + return original_compute(*args, **kwargs) + + obs_manager.compute = patched_compute + + # ── Action manager patches ──────────────────────────────────── + + def _patch_action_manager(self, action_manager, cache): + """Patch action terms with write/read proxies and manager hooks. + + Args: + action_manager: Action manager instance to patch. + cache: Shared tensor dedup cache for annotated state reads. + """ + assert self.task_name is not None + scene = action_manager._env.scene + for term_name, term in action_manager._terms.items(): + asset = getattr(term, "_asset", None) + if isinstance(asset, BaseArticulation): + real_asset: BaseArticulation = asset + scene_key = self._resolve_scene_entity_key(scene, real_asset) or "ego" + data_proxy = _DataProxy( + real_asset.data, + scene_key, + self.task_name, + self._data_property_resolution_cache, + cache, + input_name_resolver=lambda prop_name, k=scene_key: f"{k}_{prop_name}", + ) + term._asset = _ArticulationWriteProxy( + real_asset=real_asset, + entity_name=scene_key, + term_name=term_name, + output_cache=self._action_output_cache, + method_resolution_cache=self._write_method_resolution_cache, + captured_write_term_names=self._captured_write_term_names, + data_proxy=data_proxy, + ) + self._action_term_scene_keys[term_name] = scene_key + + self._patch_action_manager_methods(action_manager) + + def _patch_action_manager_methods(self, action_manager): + """Patch ``process_action`` and ``apply_action`` on the action manager instance. + + ``process_action`` registers raw_action buffers for LEAPP tracing and + preserves the action tensor clone. + + ``apply_action`` coordinates the cache and output lifecycle: + + - **Tracing pass** (first ``apply_action`` after ``process_action``): + The cache still holds TracedTensors populated by ``compute_group``. + Action terms that read state (e.g. ``RelativeJointPositionAction`` + reading ``joint_pos``) get those TracedTensors from the cache, + keeping the LEAPP graph connected. After ``output_tensors()`` the + cache is cleared so subsequent decimation sub-steps read fresh values. + + - **Non-tracing passes** (remaining decimation sub-steps and all + subsequent iterations): The cache is cleared **before** running + action terms so every ``.data`` read returns the current simulator + value, preserving simulation correctness. + + Args: + action_manager: Action manager whose instance methods should be + wrapped. + """ + original_process = action_manager.process_action + original_apply = action_manager.apply_action + task_name = self.task_name + cache = self._annotated_tensor_cache + + def patched_process_action(action: torch.Tensor): + """Register raw_action buffers, call real process_action, preserve action clone.""" + original_process(action) + action_manager._action = action.clone() + self._pending_action_output_export = True + + def patched_apply_action(): + """Coordinate cache lifecycle and LEAPP output annotation.""" + if not self._pending_action_output_export: + cache.clear() + return original_apply() + + # Tracing pass: cache still holds TracedTensors from compute_group. + self._action_output_cache.clear() + self._captured_write_term_names.clear() + original_apply() + + self._action_output_cache.extend(self._collect_action_outputs(action_manager)) + self._action_output_cache.extend(self._collect_processed_action_fallbacks(action_manager)) + if self._uses_last_action_state: + annotate.update_state(task_name, {"last_action": action_manager._action}) + fallback_terms = self._fallback_term_names + static_values = self._collect_action_static_outputs(action_manager, fallback_terms) + annotate.output_tensors( + task_name, + self._action_output_cache, + static_outputs=static_values or None, + export_with=self.export_method, + ) + self._pending_action_output_export = False + self._action_output_cache.clear() + cache.clear() + return None + + action_manager.process_action = patched_process_action + action_manager.apply_action = patched_apply_action + + # ── Observation term wrappers ───────────────────────────────── + + @staticmethod + def _wrap_with_proxy(original_func, proxy_env): + """Wrap a term function so it receives the proxy env. + + Args: + original_func: Original observation term function or manager term. + proxy_env: Proxy environment routed into the wrapped callable. + + Returns: + Wrapped callable that substitutes ``proxy_env`` for the real env. + """ + + if isinstance(original_func, ManagerTermBase): + return _ManagerTermProxy(original_func, proxy_env) + + def wrapped(*args, **kwargs): + """Invoke the original function with the proxy environment. + + Args: + *args: Original positional arguments. + **kwargs: Original keyword arguments. + + Returns: + Result of the wrapped observation term. + """ + if args: + args = (proxy_env, *args[1:]) + else: + args = (proxy_env,) + return original_func(*args, **kwargs) + + wrapped.__name__ = getattr(original_func, "__name__", "unknown") + return wrapped + + def _wrap_last_action(self, original_func): + """Wrap ``last_action`` as a LEAPP state tensor. + + ``last_action`` is feedback state, not a regular dangling input. We + therefore register it through ``annotate.state_tensors(...)`` on the + observation side and update it through ``annotate.update_state(...)`` + after the traced action pass. + + Args: + original_func: Original ``last_action`` observation term. + + Returns: + Wrapped callable that exports ``last_action`` as LEAPP state. + """ + task_name = self.task_name + + def wrapped(env, action_name=None, **kwargs): + """Run the wrapped ``last_action`` term and annotate its output. + + Args: + env: Environment passed by the observation manager. + action_name: Optional action term name. + **kwargs: Additional keyword arguments for the term. + + Returns: + Annotated last-action tensor. + """ + result = original_func(env, action_name, **kwargs) + return annotate.state_tensors(task_name, {"last_action": result}) + + wrapped.__name__ = original_func.__name__ + return wrapped + + def _wrap_generated_commands(self, original_func, term_cfg): + """Wrap the ``generated_commands`` observation term to annotate its output as a LEAPP input. + + Resolves command semantics (kind, element_names) from the command manager + configuration when available. + + Args: + original_func: Original ``generated_commands`` observation term. + term_cfg: Observation term config used to resolve the command name. + + Returns: + Wrapped callable that exports generated commands as LEAPP inputs. + """ + task_name = self.task_name + command_name_from_cfg = term_cfg.params.get("command_name") + + def wrapped(env, command_name=None, **kwargs): + """Run the wrapped command term and annotate its output. + + Args: + env: Environment passed by the observation manager. + command_name: Optional command term name override. + **kwargs: Additional keyword arguments for the term. + + Returns: + Annotated command tensor. + """ + result = original_func(env, command_name, **kwargs) + leapp_input_name = command_name or command_name_from_cfg or "commands" + command_cfg = None + with suppress(AttributeError, KeyError): + command_cfg = env.command_manager.get_term(leapp_input_name).cfg + sem = TensorSemantics( + name=leapp_input_name, + ref=result, + kind=getattr(command_cfg, "cmd_kind", None), + element_names=getattr(command_cfg, "element_names", None), + extra=build_command_connection(leapp_input_name), + ) + return annotate.input_tensors(task_name, sem) + + wrapped.__name__ = original_func.__name__ + return wrapped + + # ── Output collection ───────────────────────────────────────── + + def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: + """Collect non-writer action tensors that should be exported. + + Args: + action_manager: Action manager whose terms should be inspected. + + Returns: + Exportable tensor semantics for dynamic action outputs such as OSC + gains. + """ + tensors: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + osc = getattr(term, "_osc", None) + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in VARIABLE_IMPEDANCE_MODES: + asset = getattr(term, "_asset", None) + real_asset = getattr(asset, "_real_asset", asset) + joint_ids = getattr(term, "_joint_ids", None) + joint_names = getattr(real_asset, "joint_names", None) if real_asset else None + scene_key = self._action_term_scene_keys.get(term_name, "ego") + tensors.append( + TensorSemantics( + name=f"{term_name}_kp_gains", + ref=torch.diagonal(osc._motion_p_gains_task, dim1=-2, dim2=-1), + kind="kp", + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim_index"), + ) + ) + tensors.append( + TensorSemantics( + name=f"{term_name}_kd_gains", + ref=torch.diagonal(osc._motion_d_gains_task, dim1=-2, dim2=-1), + kind="kd", + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_damping_to_sim_index"), + ) + ) + return tensors + + def _collect_processed_action_fallbacks(self, action_manager) -> list[TensorSemantics]: + """Fallback: use ``term.processed_actions`` for terms that produced no write outputs. + + When an action term does not call any ``_leapp_semantics``-decorated write method + (e.g. ``PreTrainedPolicyAction`` which delegates writes to a nested sub-policy), + we fall back to capturing ``term.processed_actions`` as the output tensor. + + Args: + action_manager: Action manager whose terms should be inspected. + + Returns: + Fallback tensor semantics built from ``processed_actions``. + """ + logger = logging.getLogger(__name__) + fallback_terms: set[str] = set() + tensors: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + if term_name in self._captured_write_term_names: + continue + processed = getattr(term, "processed_actions", None) + if processed is None: + continue + if isinstance(processed, torch.Tensor): + logger.warning( + "Action term '%s' did not write to any asset directly. Falling back to processed_actions as the" + " export output.\nIf you wish to add semantic data to this policy, you need to manually annotate it" + " with output_tensors.", + term_name, + ) + tensors.append( + TensorSemantics( + name=term_name, + ref=processed.clone(), + kind=None, + element_names=None, + ) + ) + fallback_terms.add(term_name) + self._fallback_term_names = fallback_terms + return tensors + + def _collect_action_static_outputs( + self, action_manager, skip_terms: set[str] | None = None + ) -> list[TensorSemantics]: + """Collect static kp/kd gain values from action terms for export metadata. + + Terms in ``skip_terms`` are excluded — these are terms that fell back + to ``processed_actions`` and whose static gains (kp/kd) belong to a + lower abstraction level that is not part of the exported policy. + + Args: + action_manager: Action manager whose terms should be inspected. + skip_terms: Action term names whose static outputs should be + skipped. + + Returns: + Static tensor semantics for action gains exported as metadata. + """ + static_values: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + if skip_terms and term_name in skip_terms: + continue + osc = getattr(term, "_osc", None) + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in VARIABLE_IMPEDANCE_MODES: + continue + asset = getattr(term, "_asset", None) + real_asset = getattr(asset, "_real_asset", asset) + if real_asset and hasattr(real_asset, "data"): + data = real_asset.data + joint_ids = getattr(term, "_joint_ids", None) + joint_names = getattr(real_asset, "joint_names", None) + scene_key = self._action_term_scene_keys.get(term_name, "ego") + if hasattr(data, "default_joint_stiffness") and data.default_joint_stiffness is not None: + gains = data.default_joint_stiffness.torch + static_values.append( + TensorSemantics( + name=f"{term_name}_kp_gains", + ref=gains[:, joint_ids] if joint_ids else gains, + kind="kp", + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim_index"), + ) + ) + if hasattr(data, "default_joint_damping") and data.default_joint_damping is not None: + gains = data.default_joint_damping.torch + static_values.append( + TensorSemantics( + name=f"{term_name}_kd_gains", + ref=gains[:, joint_ids] if joint_ids else gains, + kind="kd", + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_damping_to_sim_index"), + ) + ) + return static_values + + +# ══════════════════════════════════════════════════════════════════ +# Public entry point +# ══════════════════════════════════════════════════════════════════ + + +def patch_env_for_export( + env: ManagerBasedEnv, + export_method: str, + required_obs_groups: set[str] | None = None, +) -> None: + """Patch the env's observation and action managers for LEAPP export. + + This is a thin public entry point around ``ExportPatcher``. It mutates + the provided env instance in-place so that: + + - Observation terms route through proxy objects that annotate tensor + reads from **any** scene entity data class (articulations, rigid + objects, sensors, etc.). + - Action terms route through proxy objects that annotate both data + reads **and** ``Articulation`` write methods. + + Data properties are resolved lazily through proxies — no hardcoded + class list is required. To produce LEAPP input annotations, the + accessed data property getter must carry ``_leapp_semantics``. + Likewise, action-side write methods must be annotated to produce + semantic LEAPP outputs. Undecorated reads and writes are forwarded + as normal runtime access, but they do not gain semantic annotation + metadata through this patching path. + + State reads are deduplicated across observation and action paths via a + shared cache, so a property like ``joint_pos`` that is read by both an + observation term and a relative-position action term appears as a single + LEAPP input edge. + + The underlying env, scene, assets, and tensors remain shared with the rest + of the pipeline; only the manager call paths are redirected. + + Args: + env: Manager-based environment to patch in place. + export_method: LEAPP export backend passed to + :func:`annotate.output_tensors`. + required_obs_groups: Observation groups that should be patched, or + ``None`` to patch all groups. + """ + patcher = ExportPatcher(export_method, required_obs_groups=required_obs_groups) + patcher.setup(env) diff --git a/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py new file mode 100644 index 000000000000..340291de16ad --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py @@ -0,0 +1,145 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""LEAPP semantic metadata helpers for raw tensor-producing functions.""" + +from __future__ import annotations + +from collections.abc import Callable +from contextlib import suppress +from dataclasses import dataclass +from typing import Any + +try: + from leapp import InputKindEnum, OutputKindEnum +except ImportError: + + class _LeappEnumSentinel: + """Stand-in when leapp is not installed. + + Any attribute access returns ``None`` so that + ``@leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE)`` + silently stores ``kind=None`` instead of crashing at import time. + The real enum values are only needed at export time, when leapp + *is* guaranteed to be available. + """ + + def __getattr__(self, name: str): + return None + + InputKindEnum = _LeappEnumSentinel() # type: ignore[assignment,misc] + OutputKindEnum = _LeappEnumSentinel() # type: ignore[assignment,misc] + + +@dataclass(frozen=True) +class LeappTensorSemantics: + """Semantic metadata attached directly to a raw tensor-producing function.""" + + kind: Any = None + element_names: list[str] | list[list[str]] | None = None + element_names_resolver: Callable | None = None + const: bool = False + + +XYZ_ELEMENT_NAMES: list[str] = ["x", "y", "z"] +QUAT_XYZW_ELEMENT_NAMES: list[str] = ["qx", "qy", "qz", "qw"] +POSE7_ELEMENT_NAMES: list[str] = ["x", "y", "z", "qx", "qy", "qz", "qw"] +POSE6_ELEMENT_NAMES: list[str] = ["x", "y", "z", "angular_x", "angular_y", "angular_z"] +WRENCH6_ELEMENT_NAMES: list[str] = ["fx", "fy", "fz", "tx", "ty", "tz"] + + +def select_element_names(names: list[str] | None, indices: Any = None) -> list[str] | None: + """Select element names using optional runtime indices.""" + if names is None: + return None + if indices is None or indices == slice(None): + return list(names) + if isinstance(indices, slice): + return list(names[indices]) + with suppress(AttributeError): + indices = indices.tolist() + if isinstance(indices, (list, tuple)): + return [names[int(index)] for index in indices] + if isinstance(indices, int): + return [names[indices]] + return None + + +def leapp_tensor_semantics( + *, + kind: Any = None, + element_names: list[str] | list[list[str]] | None = None, + element_names_resolver: Callable | None = None, + const: bool = False, +) -> Callable: + """Attach LEAPP semantic metadata to a raw tensor-producing function.""" + + semantics = LeappTensorSemantics( + kind=kind, + element_names=element_names, + element_names_resolver=element_names_resolver, + const=const, + ) + + def _apply(func: Callable) -> Callable: + func._leapp_semantics = semantics + return func + + return _apply + + +def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_self) -> list | None: + """Resolve element names from attached semantics and a tensor-producing object.""" + if semantics is None: + return None + if semantics.element_names is not None: + return semantics.element_names + if semantics.element_names_resolver is not None: + return semantics.element_names_resolver(data_self) + return None + + +# ── Predefined element-name resolvers ───────────────────────────── + + +def joint_names_resolver(data_self) -> list[str] | None: + """Resolve joint element names from the data object at trace time.""" + return select_element_names( + getattr(data_self, "joint_names", getattr(data_self, "_joint_names", None)), + getattr(data_self, "_joint_ids", None), + ) + + +def body_names_resolver(data_self) -> list[str] | None: + """Resolve body element names from the data object at trace time.""" + return select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + + +def _compound_resolver(outer_fn: Callable, inner_names: list[str]) -> Callable: + """Build a 2D resolver: ``[outer_names, inner_constant_names]``.""" + + def resolver(data_self) -> list | None: + outer = outer_fn(data_self) + return [outer, inner_names] if outer else None + + return resolver + + +def _target_frame_names(data_self) -> list[str] | None: + names = getattr(data_self, "target_frame_names", None) + return list(names) if names is not None else None + + +body_xyz_resolver = _compound_resolver(body_names_resolver, XYZ_ELEMENT_NAMES) +body_pose_resolver = _compound_resolver(body_names_resolver, POSE7_ELEMENT_NAMES) +body_pose6_resolver = _compound_resolver(body_names_resolver, POSE6_ELEMENT_NAMES) +body_quat_resolver = _compound_resolver(body_names_resolver, QUAT_XYZW_ELEMENT_NAMES) +body_wrench_resolver = _compound_resolver(body_names_resolver, WRENCH6_ELEMENT_NAMES) +target_frame_xyz_resolver = _compound_resolver(_target_frame_names, XYZ_ELEMENT_NAMES) +target_frame_quat_resolver = _compound_resolver(_target_frame_names, QUAT_XYZW_ELEMENT_NAMES) +target_frame_pose_resolver = _compound_resolver(_target_frame_names, POSE7_ELEMENT_NAMES) diff --git a/source/isaaclab/isaaclab/utils/leapp/proxy.py b/source/isaaclab/isaaclab/utils/leapp/proxy.py new file mode 100644 index 000000000000..49ef86e7265f --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/proxy.py @@ -0,0 +1,521 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import inspect +from collections.abc import Callable +from typing import Any, cast + +import torch +from leapp.utils.tensor_description import TensorSemantics + +from isaaclab.managers import ManagerTermBase +from isaaclab.utils.warp.proxy_array import ProxyArray + +from .leapp_semantics import resolve_leapp_element_names +from .utils import TracedProxyArray, build_write_connection + + +def _resolve_annotated_property( + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + real_data: Any, + name: str, +) -> tuple[Callable, Any] | None: + """Resolve a concrete property getter and inherited semantics metadata. + + The execution getter always comes from the concrete runtime class. Semantic + metadata is resolved independently by walking the MRO until a property + definition with ``_leapp_semantics`` is found. This mirrors the output-side + export path, where semantics are authored on the base API while concrete + backends provide the runtime implementation. + """ + cache_key = (type(real_data), name) + if cache_key in property_resolution_cache: + return property_resolution_cache[cache_key] + + execution_prop = getattr(type(real_data), name, None) + if not isinstance(execution_prop, property) or execution_prop.fget is None: + property_resolution_cache[cache_key] = None + return None + + semantics_meta = None + for data_cls in type(real_data).__mro__: + prop = data_cls.__dict__.get(name) + if not isinstance(prop, property) or prop.fget is None: + continue + candidate = getattr(prop.fget, "_leapp_semantics", None) + if candidate is None: + continue + if getattr(candidate, "const", False): + property_resolution_cache[cache_key] = None + return None + semantics_meta = candidate + break + + if semantics_meta is None: + property_resolution_cache[cache_key] = None + return None + + resolution = (execution_prop.fget, semantics_meta) + property_resolution_cache[cache_key] = resolution + return resolution + + +def _resolve_annotated_method( + method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], + real_asset: Any, + name: str, +) -> tuple[Callable, Any, inspect.Signature] | None: + """Resolve a concrete bound method and inherited semantics metadata.""" + cache_key = (type(real_asset), name) + if cache_key in method_resolution_cache: + return method_resolution_cache[cache_key] + + original_method = getattr(real_asset, name, None) + if not callable(original_method): + method_resolution_cache[cache_key] = None + return None + + for asset_cls in type(real_asset).__mro__: + candidate = asset_cls.__dict__.get(name) + if not callable(candidate): + continue + semantics_meta = getattr(candidate, "_leapp_semantics", None) + if semantics_meta is None: + continue + resolution = (original_method, semantics_meta, inspect.signature(candidate)) + method_resolution_cache[cache_key] = resolution + return resolution + + method_resolution_cache[cache_key] = None + return None + + +class _WriteJointNameContext: + """Resolve runtime joint-name subsets for lazy write interception.""" + + __slots__ = ("joint_names", "_joint_ids") + + def __init__(self, joint_names: list[str], joint_ids): + self.joint_names = joint_names + self._joint_ids = joint_ids + + +def _unique_output_name(term_name: str, method_name: str, output_cache: list[TensorSemantics]) -> str: + """Return a stable, unique output name for an action write entry.""" + existing = {t.name for t in output_cache} + candidate = term_name + if candidate in existing: + candidate = f"{term_name}_{method_name}" + suffix = 2 + while candidate in existing: + candidate = f"{term_name}_{method_name}_{suffix}" + suffix += 1 + return candidate + + +class _DataProxy: + """Proxy around a real data object that intercepts tensor-returning property reads. + + The real data object may be any scene entity data class (``ArticulationData``, + ``RigidObjectData``, sensor data classes, etc.). The proxy resolves property + semantics lazily on first access by walking the runtime class MRO. This lets + concrete backend overrides reuse semantic metadata authored on abstract base + properties without copying decorators onto every implementation. + + When a semantic property returns a :class:`~isaaclab.utils.warp.ProxyArray`, + the result is wrapped in a ``TracedProxyArray`` and cached for + deduplication. Non-proxy results and ordinary attributes are forwarded + transparently. + + All other attribute access is forwarded transparently to the real object. + """ + + def __init__( + self, + real_data: Any, + entity_name: str, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + input_name_resolver: Callable, + ): + object.__setattr__(self, "_real_data", real_data) + object.__setattr__(self, "_entity_name", entity_name) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_input_name_resolver", input_name_resolver) + + def __getattr__(self, name): + """Intercept semantic property reads; forward everything else.""" + real_data = object.__getattribute__(self, "_real_data") + resolution = _resolve_annotated_property( + object.__getattribute__(self, "_property_resolution_cache"), real_data, name + ) + if resolution is None: + return getattr(real_data, name) + + cache = object.__getattribute__(self, "_cache") + cache_key = (id(real_data), name) + if cache_key in cache: + return cache[cache_key] + + execution_fget, semantics_meta = resolution + result = execution_fget(real_data) + if not isinstance(result, ProxyArray): + return result + + input_name = object.__getattribute__(self, "_input_name_resolver")(name) + traced = TracedProxyArray( + result, + input_name=input_name, + semantics_meta=semantics_meta, + real_data=real_data, + entity_name=object.__getattribute__(self, "_entity_name"), + property_name=name, + task_name=object.__getattribute__(self, "_task_name"), + ) + cache[cache_key] = traced + return traced + + +class _EntityProxy: + """Proxy around a real scene entity that returns a ``_DataProxy`` for ``.data``. + + All other attribute access is forwarded transparently to the real asset. + """ + + def __init__(self, real_entity: Any, data_proxy: _DataProxy): + object.__setattr__(self, "_real_entity", real_entity) + object.__setattr__(self, "_data_proxy", data_proxy) + + @property + def data(self): + """Return the annotating data proxy instead of the real data object.""" + return object.__getattribute__(self, "_data_proxy") + + def __getattr__(self, name): + """Forward all non-data attribute access to the real scene entity.""" + return getattr(object.__getattribute__(self, "_real_entity"), name) + + +class _EntityMappingProxy: + """Proxy around a mapping of scene entities that lazily wraps data-producing entries.""" + + def __init__( + self, + real_mapping, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): + object.__setattr__(self, "_real_mapping", real_mapping) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_proxied", {}) + + def __getitem__(self, key): + """Return a proxied entity when it has a ``.data`` attribute.""" + proxied = object.__getattribute__(self, "_proxied") + if key in proxied: + return proxied[key] + real_mapping = object.__getattribute__(self, "_real_mapping") + entity = real_mapping[key] + data = getattr(entity, "data", None) + if data is None: + return entity + data_proxy = _DataProxy( + data, + key, + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), + object.__getattribute__(self, "_cache"), + input_name_resolver=lambda prop_name: f"{key}_{prop_name}", + ) + proxy = _EntityProxy(entity, data_proxy) + proxied[key] = proxy + return proxy + + def get(self, key, default=None): + """Return a proxied entity when present, default otherwise.""" + real_mapping = object.__getattribute__(self, "_real_mapping") + if key not in real_mapping: + return default + return self[key] + + def __iter__(self): + return iter(object.__getattribute__(self, "_real_mapping")) + + def __len__(self): + return len(object.__getattribute__(self, "_real_mapping")) + + def __getattr__(self, name): + """Forward all other mapping access to the real mapping.""" + return getattr(object.__getattribute__(self, "_real_mapping"), name) + + +class _SceneProxy: + """Proxy around the real InteractiveScene. + + When an observation term looks up a scene entity by name, this proxy lazily + wraps any entity that has a ``.data`` attribute. All tensor-returning + properties on the data object are intercepted for LEAPP annotation. This + covers articulations, rigid objects, and sensors through both + ``scene["name"]`` and ``scene.sensors["name"]`` access paths. + """ + + def __init__( + self, + real_scene, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): + object.__setattr__(self, "_real_scene", real_scene) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_proxied", {}) + object.__setattr__(self, "_sensor_mapping_proxy", None) + + def _maybe_proxy_entity(self, key: str, entity: Any): + """Return a proxy for any entity that has a ``.data`` attribute.""" + proxied = object.__getattribute__(self, "_proxied") + if key in proxied: + return proxied[key] + + data = getattr(entity, "data", None) + if data is None: + return entity + + cache = object.__getattribute__(self, "_cache") + data_proxy = _DataProxy( + data, + key, + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), + cache, + input_name_resolver=lambda prop_name, k=key: f"{k}_{prop_name}", + ) + proxy = _EntityProxy(entity, data_proxy) + proxied[key] = proxy + return proxy + + def __getitem__(self, key): + """Return a proxied entity when it exposes annotated data getters.""" + real_scene = object.__getattribute__(self, "_real_scene") + entity = real_scene[key] + return self._maybe_proxy_entity(key, entity) + + @property + def sensors(self): + """Return a mapping proxy for scene sensors.""" + sensor_mapping_proxy = object.__getattribute__(self, "_sensor_mapping_proxy") + if sensor_mapping_proxy is None: + real_scene = object.__getattribute__(self, "_real_scene") + sensor_mapping_proxy = _EntityMappingProxy( + real_scene.sensors, + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), + object.__getattribute__(self, "_cache"), + ) + object.__setattr__(self, "_sensor_mapping_proxy", sensor_mapping_proxy) + return sensor_mapping_proxy + + def __getattr__(self, name): + """Forward all other scene access to the real scene.""" + return getattr(object.__getattribute__(self, "_real_scene"), name) + + +class _EnvProxy: + """Proxy around the real env that returns a _SceneProxy for ``.scene``. + + All other attribute access (``num_envs``, ``command_manager``, etc.) + is forwarded transparently to the real env. + """ + + def __init__( + self, + real_env, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): + object.__setattr__(self, "_real_env", real_env) + object.__setattr__( + self, + "_scene_proxy", + _SceneProxy(real_env.scene, task_name, property_resolution_cache, cache), + ) + + @property + def scene(self): + """Return the scene proxy instead of the real scene.""" + return object.__getattribute__(self, "_scene_proxy") + + def __getattr__(self, name): + """Forward all non-scene attribute access to the real env.""" + return getattr(object.__getattribute__(self, "_real_env"), name) + + +def _build_scene_entity_lookup(real_scene) -> dict[int, tuple[str, str]]: + """Map real scene entity object ids to their lookup path.""" + lookup: dict[int, tuple[str, str]] = {} + for attr_name, attr_value in vars(real_scene).items(): + if not isinstance(attr_value, dict): + continue + container_kind = "sensors" if attr_name == "sensors" else "scene" + for key, entity in attr_value.items(): + lookup[id(entity)] = (container_kind, key) + return lookup + + +class _ManagerTermProxy(ManagerTermBase): + """Proxy a class-based manager term while preserving its lifecycle methods. + + Observation manager terms can be stateful ``ManagerTermBase`` instances that + expose ``reset()`` and ``serialize()`` in addition to being callable. This + proxy preserves that interface while swapping the env argument passed into + ``__call__`` for the observation-side proxy env. + """ + + def __init__(self, target: ManagerTermBase, proxy_env: _EnvProxy): + super().__init__(target.cfg, target._env) + self._target = target + self._proxy_env = proxy_env + self._entity_lookup = _build_scene_entity_lookup(target._env.scene) + + @property + def __name__(self) -> str: + """Expose the wrapped term name for compatibility and debugging.""" + return getattr(self._target, "__name__", self._target.__class__.__name__) + + def reset(self, env_ids=None) -> None: + """Forward resets to the wrapped term instance.""" + self._target.reset(env_ids=env_ids) + + def serialize(self) -> dict: + """Forward serialization to the wrapped term instance.""" + return self._target.serialize() + + def __call__(self, *args, **kwargs): + """Call the wrapped term with the proxy env in place of the real env.""" + if args: + args = (self._proxy_env, *args[1:]) + else: + args = (self._proxy_env,) + swapped_attrs: list[tuple[str, Any]] = [] + for attr_name, attr_value in vars(self._target).items(): + lookup = self._entity_lookup.get(id(attr_value)) + if lookup is None: + continue + + container_kind, key = lookup + proxy_entity = ( + self._proxy_env.scene.sensors[key] if container_kind == "sensors" else self._proxy_env.scene[key] + ) + swapped_attrs.append((attr_name, attr_value)) + setattr(self._target, attr_name, proxy_entity) + + try: + return self._target(*args, **kwargs) + finally: + for attr_name, attr_value in swapped_attrs: + setattr(self._target, attr_name, attr_value) + + def __getattr__(self, name): + """Forward all other attribute access to the wrapped term instance.""" + return getattr(self._target, name) + + +# ══════════════════════════════════════════════════════════════════ +# Action-side proxy +# ══════════════════════════════════════════════════════════════════ + + +class _ArticulationWriteProxy: + """Proxy around a real articulation implementation for action terms. + + Intercepts ``_leapp_semantics``-decorated write methods **and** routes + ``.data`` reads through a shared ``_DataProxy`` so that + action-side state reads (e.g. ``self._asset.data.joint_pos`` inside + ``RelativeJointPositionAction``) participate in LEAPP annotation and + share the dedup cache with observation-side reads. + + All other attribute access is forwarded transparently to the real asset. + """ + + def __init__( + self, + real_asset: Any, + entity_name: str, + term_name: str, + output_cache: list[TensorSemantics], + method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], + captured_write_term_names: set[str], + data_proxy: _DataProxy, + ): + object.__setattr__(self, "_real_asset", real_asset) + object.__setattr__(self, "_entity_name", entity_name) + object.__setattr__(self, "_term_name", term_name) + object.__setattr__(self, "_output_cache", output_cache) + object.__setattr__(self, "_method_resolution_cache", method_resolution_cache) + object.__setattr__(self, "_captured_write_term_names", captured_write_term_names) + object.__setattr__(self, "_data_proxy", data_proxy) + + @property + def data(self): + """Return the shared annotating data proxy.""" + return object.__getattribute__(self, "_data_proxy") + + def __getattr__(self, name): + """Return an annotating wrapper for semantic write methods; forward everything else.""" + real_asset = object.__getattribute__(self, "_real_asset") + resolution = _resolve_annotated_method( + object.__getattribute__(self, "_method_resolution_cache"), + real_asset, + name, + ) + if resolution is None: + return getattr(real_asset, name) + + original_method, semantics_meta, signature = resolution + term_name = object.__getattribute__(self, "_term_name") + output_cache = object.__getattribute__(self, "_output_cache") + captured_write_term_names = object.__getattribute__(self, "_captured_write_term_names") + + def interceptor(*args, **kwargs): + result = original_method(*args, **kwargs) + bound_args = signature.bind_partial(real_asset, *args, **kwargs) + target = bound_args.arguments.get("target") + + if not isinstance(target, torch.Tensor): + return result + + target_tensor = cast(torch.Tensor, target) + joint_ids = bound_args.arguments.get("joint_ids") + output_cache.append( + TensorSemantics( + name=_unique_output_name(term_name, name, output_cache), + ref=target_tensor.clone(), + kind=semantics_meta.kind, + element_names=resolve_leapp_element_names( + semantics_meta, + _WriteJointNameContext(real_asset.joint_names, joint_ids), + ), + extra=build_write_connection( + object.__getattribute__(self, "_entity_name"), + name, + ), + ) + ) + captured_write_term_names.add(term_name) + + return result + + return interceptor diff --git a/source/isaaclab/isaaclab/utils/leapp/utils.py b/source/isaaclab/isaaclab/utils/leapp/utils.py new file mode 100644 index 000000000000..2308f662ab80 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/utils.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from types import SimpleNamespace +from typing import Any + +import torch +from leapp import annotate +from leapp.utils.tensor_description import TensorSemantics + +from isaaclab.utils.warp.proxy_array import ProxyArray + +from .leapp_semantics import LeappTensorSemantics, resolve_leapp_element_names + + +class TracedProxyArray(ProxyArray): + _traced_array: torch.Tensor + + def __init__( + self, + proxy_array: ProxyArray, + *, + input_name: str, + semantics_meta: LeappTensorSemantics, + real_data: Any, + entity_name: str, + property_name: str, + task_name: str, + ) -> None: + super().__init__(proxy_array.warp) + astorch = super().torch + sem = TensorSemantics( + name=input_name, + ref=astorch, + kind=semantics_meta.kind, + element_names=resolve_leapp_element_names(semantics_meta, real_data), + extra=build_state_connection(entity_name, property_name), + ) + annotated = annotate.input_tensors(task_name, sem) + object.__setattr__(self, "_traced_array", annotated) + + @property + def torch(self) -> torch.Tensor: + return self._traced_array + + @property + def warp(self) -> Any: + raise AttributeError("warp arrays are not supported for leapp export") + + +def ensure_env_spec_id(env, fallback_task_name: str = "policy") -> str: + """Return ``env.unwrapped.spec.id``, creating a fallback spec when needed.""" + spec = getattr(env.unwrapped, "spec", None) + if spec is None: + env.unwrapped.spec = SimpleNamespace(id=fallback_task_name) + return fallback_task_name + + task_name = getattr(spec, "id", None) + if task_name is None: + spec.id = fallback_task_name + return fallback_task_name + + return task_name + + +# ══════════════════════════════════════════════════════════════════ +# Connection Builders +# ══════════════════════════════════════════════════════════════════ + + +def build_state_connection(entity_name: str, property_name: str) -> dict[str, str]: + """Return a compact deployment connection string for a state property.""" + return {"isaaclab_connection": f"state:{entity_name}:{property_name}"} + + +def build_command_connection(command_name: str) -> dict[str, str]: + """Return a compact deployment connection string for a command term.""" + return {"isaaclab_connection": f"command:{command_name}"} + + +def build_write_connection(entity_name: str, method_name: str) -> dict[str, str]: + """Return a compact deployment connection string for an articulation write target.""" + return {"isaaclab_connection": f"write:{entity_name}:{method_name}"} diff --git a/source/isaaclab/isaaclab/utils/logger.py b/source/isaaclab/isaaclab/utils/logger.py index c9293e931a72..891695a27889 100644 --- a/source/isaaclab/isaaclab/utils/logger.py +++ b/source/isaaclab/isaaclab/utils/logger.py @@ -58,8 +58,9 @@ def configure_logging( The root logger. """ root_logger = logging.getLogger() - # the root logger must be the lowest level to ensure that all messages are logged - root_logger.setLevel(logging.DEBUG) + # set root logger to the configured level to suppress third-party debug/info noise; + # isaaclab's own loggers inherit this level unless overridden + root_logger.setLevel(logging_level) # remove existing handlers # Note: iterate over a copy [:] to avoid modifying list during iteration diff --git a/source/isaaclab/isaaclab/utils/math.py b/source/isaaclab/isaaclab/utils/math.py index 96314abfbd09..b01d7e7ab4fe 100644 --- a/source/isaaclab/isaaclab/utils/math.py +++ b/source/isaaclab/isaaclab/utils/math.py @@ -153,12 +153,12 @@ def quat_unique(q: torch.Tensor) -> torch.Tensor: rotation. This function ensures the real part of the quaternion is non-negative. Args: - q: The quaternion orientation in (w, x, y, z). Shape is (..., 4). + q: The quaternion orientation in (x, y, z, w). Shape is (..., 4). Returns: Standardized quaternions. Shape is (..., 4). """ - return torch.where(q[..., 0:1] < 0, -q, q) + return torch.where(q[..., 3:4] < 0, -q, q) @torch.jit.script @@ -166,7 +166,7 @@ def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor: """Convert rotations given as quaternions to rotation matrices. Args: - quaternions: The quaternion orientation in (w, x, y, z). Shape is (..., 4). + quaternions: The quaternion orientation in (x, y, z, w). Shape is (..., 4). Returns: Rotation matrices. The shape is (..., 3, 3). @@ -174,7 +174,8 @@ def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor: Reference: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70 """ - r, i, j, k = torch.unbind(quaternions, -1) + # xyzw format: x=i, y=j, z=k, w=r + i, j, k, r = torch.unbind(quaternions, -1) # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. two_s = 2.0 / (quaternions * quaternions).sum(-1) @@ -246,14 +247,14 @@ def quat_conjugate(q: torch.Tensor) -> torch.Tensor: """Computes the conjugate of a quaternion. Args: - q: The quaternion orientation in (w, x, y, z). Shape is (..., 4). + q: The quaternion orientation in (x, y, z, w). Shape is (..., 4). Returns: - The conjugate quaternion in (w, x, y, z). Shape is (..., 4). + The conjugate quaternion in (x, y, z, w). Shape is (..., 4). """ shape = q.shape q = q.reshape(-1, 4) - return torch.cat((q[..., 0:1], -q[..., 1:]), dim=-1).view(shape) + return torch.cat((-q[..., :3], q[..., 3:4]), dim=-1).view(shape) @torch.jit.script @@ -261,11 +262,11 @@ def quat_inv(q: torch.Tensor, eps: float = 1e-9) -> torch.Tensor: """Computes the inverse of a quaternion. Args: - q: The quaternion orientation in (w, x, y, z). Shape is (N, 4). + q: The quaternion orientation in (x, y, z, w). Shape is (N, 4). eps: A small value to avoid division by zero. Defaults to 1e-9. Returns: - The inverse quaternion in (w, x, y, z). Shape is (N, 4). + The inverse quaternion in (x, y, z, w). Shape is (N, 4). """ return quat_conjugate(q) / q.pow(2).sum(dim=-1, keepdim=True).clamp(min=eps) @@ -283,7 +284,7 @@ def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tens yaw: Rotation around z-axis (in radians). Shape is (N,). Returns: - The quaternion in (w, x, y, z). Shape is (N, 4). + The quaternion in (x, y, z, w). Shape is (N, 4). """ cy = torch.cos(yaw * 0.5) sy = torch.sin(yaw * 0.5) @@ -297,7 +298,7 @@ def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tens qy = cy * cr * sp + sy * sr * cp qz = sy * cr * cp - cy * sr * sp - return torch.stack([qw, qx, qy, qz], dim=-1) + return torch.stack([qx, qy, qz, qw], dim=-1) @torch.jit.script @@ -321,7 +322,7 @@ def quat_from_matrix(matrix: torch.Tensor) -> torch.Tensor: matrix: The rotation matrices. Shape is (..., 3, 3). Returns: - The quaternion in (w, x, y, z). Shape is (..., 4). + The quaternion in (x, y, z, w). Shape is (..., 4). Reference: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L102-L161 @@ -345,16 +346,17 @@ def quat_from_matrix(matrix: torch.Tensor) -> torch.Tensor: ) # we produce the desired quaternion multiplied by each of r, i, j, k + # output in xyzw order: [x, y, z, w] quat_by_rijk = torch.stack( [ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), + torch.stack([m21 - m12, m02 - m20, m10 - m01, q_abs[..., 0] ** 2], dim=-1), # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), + torch.stack([q_abs[..., 1] ** 2, m10 + m01, m02 + m20, m21 - m12], dim=-1), # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), + torch.stack([m10 + m01, q_abs[..., 2] ** 2, m12 + m21, m02 - m20], dim=-1), # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), + torch.stack([m20 + m02, m21 + m12, q_abs[..., 3] ** 2, m10 - m01], dim=-1), ], dim=-2, ) @@ -442,7 +444,7 @@ def euler_xyz_from_quat( The euler angles are assumed in XYZ extrinsic convention. Args: - quat: The quaternion orientation in (w, x, y, z). Shape is (N, 4). + quat: The quaternion orientation in (x, y, z, w). Shape is (N, 4). wrap_to_2pi (bool): Whether to wrap output Euler angles into [0, 2π). If False, angles are returned in the default range (−π, π]. Defaults to False. @@ -453,7 +455,7 @@ def euler_xyz_from_quat( Reference: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles """ - q_w, q_x, q_y, q_z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3] + q_x, q_y, q_z, q_w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3] # roll (x-axis rotation) sin_roll = 2.0 * (q_w * q_x + q_y * q_z) cos_roll = 1 - 2 * (q_x * q_x + q_y * q_y) @@ -478,7 +480,7 @@ def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tenso """Convert rotations given as quaternions to axis/angle. Args: - quat: The quaternion orientation in (w, x, y, z). Shape is (..., 4). + quat: The quaternion orientation in (x, y, z, w). Shape is (..., 4). eps: The tolerance for Taylor approximation. Defaults to 1.0e-6. Returns: @@ -488,21 +490,21 @@ def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tenso Reference: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L526-L554 """ - # Modified to take in quat as [q_w, q_x, q_y, q_z] - # Quaternion is [q_w, q_x, q_y, q_z] = [cos(theta/2), n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2)] + # Modified to take in quat as [q_x, q_y, q_z, q_w] + # Quaternion is [q_x, q_y, q_z, q_w] = [n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2), cos(theta/2)] # Axis-angle is [a_x, a_y, a_z] = [theta * n_x, theta * n_y, theta * n_z] # Thus, axis-angle is [q_x, q_y, q_z] / (sin(theta/2) / theta) # When theta = 0, (sin(theta/2) / theta) is undefined # However, as theta --> 0, we can use the Taylor approximation 1/2 - theta^2 / 48 - quat = quat * (1.0 - 2.0 * (quat[..., 0:1] < 0.0)) - mag = torch.linalg.norm(quat[..., 1:], dim=-1) - half_angle = torch.atan2(mag, quat[..., 0]) + quat = quat * (1.0 - 2.0 * (quat[..., 3:4] < 0.0)) + mag = torch.linalg.norm(quat[..., :3], dim=-1) + half_angle = torch.atan2(mag, quat[..., 3]) angle = 2.0 * half_angle # check whether to apply Taylor approximation sin_half_angles_over_angles = torch.where( angle.abs() > eps, torch.sin(half_angle) / angle, 0.5 - angle * angle / 48 ) - return quat[..., 1:4] / sin_half_angles_over_angles.unsqueeze(-1) + return quat[..., :3] / sin_half_angles_over_angles.unsqueeze(-1) @torch.jit.script @@ -514,12 +516,12 @@ def quat_from_angle_axis(angle: torch.Tensor, axis: torch.Tensor) -> torch.Tenso axis: The axis of rotation. Shape is (N, 3). Returns: - The quaternion in (w, x, y, z). Shape is (N, 4). + The quaternion in (x, y, z, w). Shape is (N, 4). """ theta = (angle / 2).unsqueeze(-1) xyz = normalize(axis) * theta.sin() w = theta.cos() - return normalize(torch.cat([w, xyz], dim=-1)) + return normalize(torch.cat([xyz, w], dim=-1)) @torch.jit.script @@ -527,11 +529,11 @@ def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor: """Multiply two quaternions together. Args: - q1: The first quaternion in (w, x, y, z). Shape is (..., 4). - q2: The second quaternion in (w, x, y, z). Shape is (..., 4). + q1: The first quaternion in (x, y, z, w). Shape is (..., 4). + q2: The second quaternion in (x, y, z, w). Shape is (..., 4). Returns: - The product of the two quaternions in (w, x, y, z). Shape is (..., 4). + The product of the two quaternions in (x, y, z, w). Shape is (..., 4). Raises: ValueError: Input shapes of ``q1`` and ``q2`` are not matching. @@ -544,9 +546,9 @@ def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor: shape = q1.shape q1 = q1.reshape(-1, 4) q2 = q2.reshape(-1, 4) - # extract components from quaternions - w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3] - w2, x2, y2, z2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3] + # extract components from quaternions (xyzw format) + x1, y1, z1, w1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3] + x2, y2, z2, w2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3] # perform multiplication ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) @@ -558,7 +560,7 @@ def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor: y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) - return torch.stack([w, x, y, z], dim=-1).view(shape) + return torch.stack([x, y, z, w], dim=-1).view(shape) @torch.jit.script @@ -566,21 +568,21 @@ def yaw_quat(quat: torch.Tensor) -> torch.Tensor: """Extract the yaw component of a quaternion. Args: - quat: The orientation in (w, x, y, z). Shape is (..., 4) + quat: The orientation in (x, y, z, w). Shape is (..., 4) Returns: - A quaternion with only yaw component. + A quaternion with only yaw component in (x, y, z, w). """ shape = quat.shape quat_yaw = quat.view(-1, 4) - qw = quat_yaw[:, 0] - qx = quat_yaw[:, 1] - qy = quat_yaw[:, 2] - qz = quat_yaw[:, 3] + qx = quat_yaw[:, 0] + qy = quat_yaw[:, 1] + qz = quat_yaw[:, 2] + qw = quat_yaw[:, 3] yaw = torch.atan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz)) quat_yaw = torch.zeros_like(quat_yaw) - quat_yaw[:, 3] = torch.sin(yaw / 2) - quat_yaw[:, 0] = torch.cos(yaw / 2) + quat_yaw[:, 2] = torch.sin(yaw / 2) # z component + quat_yaw[:, 3] = torch.cos(yaw / 2) # w component quat_yaw = normalize(quat_yaw) return quat_yaw.view(shape) @@ -590,8 +592,8 @@ def quat_box_minus(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor: """The box-minus operator (quaternion difference) between two quaternions. Args: - q1: The first quaternion in (w, x, y, z). Shape is (N, 4). - q2: The second quaternion in (w, x, y, z). Shape is (N, 4). + q1: The first quaternion in (x, y, z, w). Shape is (N, 4). + q2: The second quaternion in (x, y, z, w). Shape is (N, 4). Returns: The difference between the two quaternions. Shape is (N, 3). @@ -608,7 +610,7 @@ def quat_box_plus(q: torch.Tensor, delta: torch.Tensor, eps: float = 1.0e-6) -> """The box-plus operator (quaternion update) to apply an increment to a quaternion. Args: - q: The initial quaternion in (w, x, y, z). Shape is (N, 4). + q: The initial quaternion in (x, y, z, w). Shape is (N, 4). delta: The axis-angle perturbation. Shape is (N, 3). eps: A small value to avoid division by zero. Defaults to 1e-6. @@ -629,7 +631,7 @@ def quat_apply(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: """Apply a quaternion rotation to a vector. Args: - quat: The quaternion in (w, x, y, z). Shape is (..., 4). + quat: The quaternion in (x, y, z, w). Shape is (..., 4). vec: The vector in (x, y, z). Shape is (..., 3). Returns: @@ -640,10 +642,10 @@ def quat_apply(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: # reshape to (N, 3) for multiplication quat = quat.reshape(-1, 4) vec = vec.reshape(-1, 3) - # extract components from quaternions - xyz = quat[:, 1:] + # extract components from quaternions (xyzw format) + xyz = quat[:, :3] t = xyz.cross(vec, dim=-1) * 2 - return (vec + quat[:, 0:1] * t + xyz.cross(t, dim=-1)).view(shape) + return (vec + quat[:, 3:4] * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script @@ -651,7 +653,7 @@ def quat_apply_inverse(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: """Apply an inverse quaternion rotation to a vector. Args: - quat: The quaternion in (w, x, y, z). Shape is (..., 4). + quat: The quaternion in (x, y, z, w). Shape is (..., 4). vec: The vector in (x, y, z). Shape is (..., 3). Returns: @@ -662,10 +664,10 @@ def quat_apply_inverse(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: # reshape to (N, 3) for multiplication quat = quat.reshape(-1, 4) vec = vec.reshape(-1, 3) - # extract components from quaternions - xyz = quat[:, 1:] + # extract components from quaternions (xyzw format) + xyz = quat[:, :3] t = xyz.cross(vec, dim=-1) * 2 - return (vec - quat[:, 0:1] * t + xyz.cross(t, dim=-1)).view(shape) + return (vec - quat[:, 3:4] * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script @@ -673,7 +675,7 @@ def quat_apply_yaw(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: """Rotate a vector only around the yaw-direction. Args: - quat: The orientation in (w, x, y, z). Shape is (N, 4). + quat: The orientation in (x, y, z, w). Shape is (N, 4). vec: The vector in (x, y, z). Shape is (N, 3). Returns: @@ -690,7 +692,7 @@ def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: This function will be removed in a future release in favor of the faster implementation :meth:`quat_apply`. Args: - q: The quaternion in (w, x, y, z). Shape is (..., 4). + q: The quaternion in (x, y, z, w). Shape is (..., 4). v: The vector in (x, y, z). Shape is (..., 3). Returns: @@ -712,7 +714,7 @@ def quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: :meth:`quat_apply_inverse`. Args: - q: The quaternion in (w, x, y, z). Shape is (..., 4). + q: The quaternion in (x, y, z, w). Shape is (..., 4). v: The vector in (x, y, z). Shape is (..., 3). Returns: @@ -730,14 +732,14 @@ def quat_error_magnitude(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor: """Computes the rotation difference between two quaternions. Args: - q1: The first quaternion in (w, x, y, z). Shape is (..., 4). - q2: The second quaternion in (w, x, y, z). Shape is (..., 4). + q1: The first quaternion in (x, y, z, w). Shape is (..., 4). + q2: The second quaternion in (x, y, z, w). Shape is (..., 4). Returns: Angular error between input quaternions in radians. """ axis_angle_error = quat_box_minus(q1, q2) - return torch.norm(axis_angle_error, dim=-1) + return torch.linalg.norm(axis_angle_error, dim=-1) @torch.jit.script @@ -784,7 +786,7 @@ def is_identity_pose(pos: torch.tensor, rot: torch.tensor) -> bool: Args: pos: The cartesian position. Shape is (N, 3). - rot: The quaternion in (w, x, y, z). Shape is (N, 4). + rot: The quaternion in (x, y, z, w). Shape is (N, 4). Returns: True if all the input poses result in identity transform. Otherwise, False. @@ -792,7 +794,7 @@ def is_identity_pose(pos: torch.tensor, rot: torch.tensor) -> bool: # create identity transformations pos_identity = torch.zeros_like(pos) rot_identity = torch.zeros_like(rot) - rot_identity[..., 0] = 1 + rot_identity[..., 3] = 1 # w component is 1 for identity quaternion # compare input to identity return torch.allclose(pos, pos_identity) and torch.allclose(rot, rot_identity) @@ -808,10 +810,10 @@ def combine_frame_transforms( Args: t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3). - q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4). + q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (x, y, z, w). Shape is (N, 4). t12: Position of frame 2 w.r.t. frame 1. Shape is (N, 3). Defaults to None, in which case the position is assumed to be zero. - q12: Quaternion orientation of frame 2 w.r.t. frame 1 in (w, x, y, z). Shape is (N, 4). + q12: Quaternion orientation of frame 2 w.r.t. frame 1 in (x, y, z, w). Shape is (N, 4). Defaults to None, in which case the orientation is assumed to be identity. Returns: @@ -857,15 +859,15 @@ def rigid_body_twist_transform( v0: Linear velocity of 0 in frame 0. Shape is (N, 3). w0: Angular velocity of 0 in frame 0. Shape is (N, 3). t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3). - q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4). + q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (x, y, z, w). Shape is (N, 4). Returns: A tuple containing: - The transformed linear velocity in frame 1. Shape is (N, 3). - The transformed angular velocity in frame 1. Shape is (N, 3). """ - w1 = quat_rotate_inverse(q01, w0) - v1 = quat_rotate_inverse(q01, v0 + torch.cross(w0, t01, dim=-1)) + w1 = quat_apply_inverse(q01, w0) + v1 = quat_apply_inverse(q01, v0 + torch.cross(w0, t01, dim=-1)) return v1, w1 @@ -880,10 +882,10 @@ def subtract_frame_transforms( Args: t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3). - q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4). + q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (x, y, z, w). Shape is (N, 4). t02: Position of frame 2 w.r.t. frame 0. Shape is (N, 3). Defaults to None, in which case the position is assumed to be zero. - q02: Quaternion orientation of frame 2 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4). + q02: Quaternion orientation of frame 2 w.r.t. frame 0 in (x, y, z, w). Shape is (N, 4). Defaults to None, in which case the orientation is assumed to be identity. Returns: @@ -916,9 +918,9 @@ def compute_pose_error( Args: t01: Position of source frame. Shape is (N, 3). - q01: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4). + q01: Quaternion orientation of source frame in (x, y, z, w). Shape is (N, 4). t02: Position of target frame. Shape is (N, 3). - q02: Quaternion orientation of target frame in (w, x, y, z). Shape is (N, 4). + q02: Quaternion orientation of target frame in (x, y, z, w). Shape is (N, 4). rot_error_type: The rotation error type to return: "quat", "axis_angle". Defaults to "axis_angle". @@ -937,7 +939,7 @@ def compute_pose_error( # Compute quaternion error (i.e., difference quaternion) # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html # q_current_norm = q_current * q_current_conj - source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 0] + source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 3] # w component at index 3 # q_current_inv = q_current_conj / q_current_norm source_quat_inv = quat_conjugate(q01) / source_quat_norm.unsqueeze(-1) # q_error = q_target * q_current_inv @@ -969,7 +971,7 @@ def apply_delta_pose( Args: source_pos: Position of source frame. Shape is (N, 3). - source_rot: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4).. + source_rot: Quaternion orientation of source frame in (x, y, z, w). Shape is (N, 4).. delta_pose: Position and orientation displacements. Shape is (N, 6). eps: The tolerance to consider orientation displacement as zero. Defaults to 1.0e-6. @@ -987,8 +989,8 @@ def apply_delta_pose( rot_actions = delta_pose[:, 3:6] angle = torch.linalg.vector_norm(rot_actions, dim=1) axis = rot_actions / angle.unsqueeze(-1) - # change from axis-angle to quat convention - identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat(num_poses, 1) + # change from axis-angle to quat convention (xyzw format: identity is [0, 0, 0, 1]) + identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).repeat(num_poses, 1) rot_delta_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > eps, quat_from_angle_axis(angle, axis), identity_quat ) @@ -1020,7 +1022,7 @@ def transform_points( points: Points to transform. Shape is (N, P, 3) or (P, 3). pos: Position of the target frame. Shape is (N, 3) or (3,). Defaults to None, in which case the position is assumed to be zero. - quat: Quaternion orientation of the target frame in (w, x, y, z). Shape is (N, 4) or (4,). + quat: Quaternion orientation of the target frame in (x, y, z, w). Shape is (N, 4) or (4,). Defaults to None, in which case the orientation is assumed to be identity. Returns: @@ -1235,7 +1237,7 @@ def unproject_depth(depth: torch.Tensor, intrinsics: torch.Tensor, is_ortho: boo indices_u = torch.arange(im_width, device=depth.device, dtype=depth.dtype) indices_v = torch.arange(im_height, device=depth.device, dtype=depth.dtype) img_indices = torch.stack(torch.meshgrid([indices_u, indices_v], indexing="ij"), dim=0).reshape(2, -1) - pixels = torch.nn.functional.pad(img_indices, (0, 0, 0, 1), mode="constant", value=1.0) + pixels = torch.nn.functional.pad(img_indices, (0, 0, 1, 0), mode="constant", value=1.0) pixels = pixels.unsqueeze(0) # (3, H x W) -> (1, 3, H x W) # unproject points into 3D space @@ -1327,10 +1329,10 @@ def default_orientation(num: int, device: str) -> torch.Tensor: device: Device to create tensor on. Returns: - Identity quaternion in (w, x, y, z). Shape is (num, 4). + Identity quaternion in (x, y, z, w). Shape is (num, 4). """ quat = torch.zeros((num, 4), dtype=torch.float, device=device) - quat[..., 0] = 1.0 + quat[..., 3] = 1.0 # w component at index 3 for xyzw format return quat @@ -1344,7 +1346,7 @@ def random_orientation(num: int, device: str) -> torch.Tensor: device: Device to create tensor on. Returns: - Sampled quaternion in (w, x, y, z). Shape is (num, 4). + Sampled quaternion in (x, y, z, w). Shape is (num, 4). Reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html @@ -1364,7 +1366,7 @@ def random_yaw_orientation(num: int, device: str) -> torch.Tensor: device: Device to create tensor on. Returns: - Sampled quaternion in (w, x, y, z). Shape is (num, 4). + Sampled quaternion in (x, y, z, w). Shape is (num, 4). """ roll = torch.zeros(num, dtype=torch.float, device=device) pitch = torch.zeros(num, dtype=torch.float, device=device) @@ -1554,12 +1556,12 @@ def convert_camera_frame_orientation_convention( - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention Args: - orientation: Quaternion of form `(w, x, y, z)` with shape (..., 4) in source convention. + orientation: Quaternion of form `(x, y, z, w)` with shape (..., 4) in source convention. origin: Convention to convert from. Defaults to "opengl". target: Convention to convert to. Defaults to "ros". Returns: - Quaternion of form `(w, x, y, z)` with shape (..., 4) in target convention + Quaternion of form `(x, y, z, w)` with shape (..., 4) in target convention """ if target == origin: return orientation.clone() @@ -1746,12 +1748,12 @@ def quat_slerp(q1: torch.Tensor, q2: torch.Tensor, tau: float) -> torch.Tensor: This function does not support batch processing. Args: - q1: First quaternion in (w, x, y, z) format. - q2: Second quaternion in (w, x, y, z) format. + q1: First quaternion in (x, y, z, w) format. + q2: Second quaternion in (x, y, z, w) format. tau: Interpolation coefficient between 0 (q1) and 1 (q2). Returns: - Interpolated quaternion in (w, x, y, z) format. + Interpolated quaternion in (x, y, z, w) format. """ assert isinstance(q1, torch.Tensor), "Input must be a torch tensor" assert isinstance(q2, torch.Tensor), "Input must be a torch tensor" @@ -1831,8 +1833,8 @@ def interpolate_rotations(R1: torch.Tensor, R2: torch.Tensor, num_steps: int, ax def interpolate_poses( pose_1: torch.Tensor, pose_2: torch.Tensor, - num_steps: int = None, - step_size: float = None, + num_steps: int | None = None, + step_size: float | None = None, perturb: bool = False, ) -> tuple[torch.Tensor, int]: """Performs linear interpolation between two poses. @@ -1867,8 +1869,8 @@ def interpolate_poses( delta_pos = pos2 - pos1 if num_steps is None: - assert torch.norm(delta_pos) > 0 - num_steps = math.ceil(torch.norm(delta_pos) / step_size) + assert torch.linalg.norm(delta_pos) > 0 + num_steps = math.ceil(torch.linalg.norm(delta_pos) / step_size) num_steps += 1 # Include starting pose assert num_steps >= 2 diff --git a/source/isaaclab/isaaclab/utils/modifiers/__init__.py b/source/isaaclab/isaaclab/utils/modifiers/__init__.py index b79a5140a7a8..4158f29442cb 100644 --- a/source/isaaclab/isaaclab/utils/modifiers/__init__.py +++ b/source/isaaclab/isaaclab/utils/modifiers/__init__.py @@ -53,13 +53,6 @@ """ -# isort: off -from .modifier_cfg import ModifierCfg -from .modifier_base import ModifierBase -from .modifier import DigitalFilter -from .modifier_cfg import DigitalFilterCfg -from .modifier import Integrator -from .modifier_cfg import IntegratorCfg - -# isort: on -from .modifier import bias, clip, scale +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/modifiers/__init__.pyi b/source/isaaclab/isaaclab/utils/modifiers/__init__.pyi new file mode 100644 index 000000000000..e15f1284085c --- /dev/null +++ b/source/isaaclab/isaaclab/utils/modifiers/__init__.pyi @@ -0,0 +1,20 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ModifierCfg", + "DigitalFilterCfg", + "IntegratorCfg", + "ModifierBase", + "DigitalFilter", + "Integrator", + "bias", + "clip", + "scale", +] + +from .modifier_cfg import ModifierCfg, DigitalFilterCfg, IntegratorCfg +from .modifier_base import ModifierBase +from .modifier import DigitalFilter, Integrator, bias, clip, scale diff --git a/source/isaaclab/isaaclab/utils/module.py b/source/isaaclab/isaaclab/utils/module.py new file mode 100644 index 000000000000..3c3fcdc35bfc --- /dev/null +++ b/source/isaaclab/isaaclab/utils/module.py @@ -0,0 +1,238 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for Python module / namespace manipulation.""" + +from __future__ import annotations + +import ast +import importlib +import os +import sys +import tempfile +import warnings +from collections.abc import Callable + +import lazy_loader as lazy + + +def _parse_stub( + stub_file: str, +) -> tuple[str | None, list[str], list[str], dict[str, list[str]]]: + """Parse a ``.pyi`` stub in a single AST pass. + + Returns: + A 4-tuple ``(filtered_path, fallback_packages, relative_wildcards, + absolute_named)``. + + *filtered_path* is a temporary ``.pyi`` containing only explicit + relative imports (what ``lazy_loader`` can handle), or ``None`` when + no filtering was needed. The temporary file may be empty when the + stub contained only wildcard imports; this is intentional — passing + the original stub to ``lazy_loader`` would raise ``ValueError`` + because it does not support absolute or wildcard imports. + + *fallback_packages* lists fully-qualified package names extracted from + absolute wildcard imports (``from pkg import *``). + + *relative_wildcards* lists submodule names extracted from relative + wildcard imports (``from .mod import *``). + + *absolute_named* maps fully-qualified package names to the list of + explicit names imported from them (``from pkg import a, b``). + """ + with open(stub_file) as f: + source = f.read() + + tree = ast.parse(source) + + fallback_packages: list[str] = [] + relative_wildcards: list[str] = [] + absolute_named: dict[str, list[str]] = {} + filtered_body: list[ast.stmt] = [] + needs_filter = False + + for node in tree.body: + if isinstance(node, ast.ImportFrom): + is_star = any(alias.name == "*" for alias in node.names) + if node.level == 1 and not is_star: + filtered_body.append(node) + continue + if node.level == 0 and is_star and node.module: + fallback_packages.append(node.module) + elif node.level == 1 and is_star and node.module: + relative_wildcards.append(node.module) + elif node.level == 0 and not is_star and node.module: + names = [alias.name for alias in node.names] + absolute_named.setdefault(node.module, []).extend(names) + needs_filter = True + else: + filtered_body.append(node) + + if not needs_filter: + return None, fallback_packages, relative_wildcards, absolute_named + + filtered = ast.Module(body=filtered_body, type_ignores=[]) + with tempfile.NamedTemporaryFile(mode="w", suffix=".pyi", delete=False) as tmp: + tmp.write(ast.unparse(filtered)) + + return tmp.name, fallback_packages, relative_wildcards, absolute_named + + +def lazy_export( + *, + packages: list[str] | tuple[str, ...] | None = None, +) -> tuple[Callable[[str], object], Callable[[], list[str]], list[str]]: + """Lazy-load names from a ``.pyi`` stub. + + The ``.pyi`` stub is the single source of truth for what a module exports. + ``lazy_export()`` reads the stub and derives everything from it: + + * ``from .rewards import foo, bar`` — lazy-loads specific names from a + local submodule (existing ``lazy_loader`` behaviour). + * ``from .rewards import *`` — eagerly imports the submodule and + re-exports all of its public names at ``lazy_export()`` time. + * ``from isaaclab.envs.mdp import foo, bar`` — eagerly re-exports + specific names from an absolute package. + * ``from isaaclab.envs.mdp import *`` — sets up a lazy fallback so that + any name not found locally is resolved from the specified package. + + Basic usage (no wildcards):: + + from isaaclab.utils.module import lazy_export + + lazy_export() + + With a ``.pyi`` stub that contains ``from isaaclab.envs.mdp import *`` + and/or ``from .rewards import *``, no extra arguments are needed — + ``lazy_export()`` infers the behaviour from the stub. + + Args: + packages: **Deprecated.** Fallback packages are now inferred from + absolute wildcard imports in the ``.pyi`` stub. Passing this + argument still works but emits a :class:`DeprecationWarning`. + + Raises: + ImportError: If the ``.pyi`` stub declares ``from pkg import *`` but + *pkg* is not installed. + """ + caller_globals = sys._getframe(1).f_globals + package_name: str = caller_globals["__name__"] + caller_file: str = caller_globals["__file__"] + + if packages is not None: + warnings.warn( + "The 'packages' argument to lazy_export() is deprecated. " + "Add 'from import *' to your .pyi stub instead.", + DeprecationWarning, + stacklevel=2, + ) + + stub_file = f"{os.path.splitext(caller_file)[0]}.pyi" + has_stub = os.path.exists(stub_file) + + fallback_packages: list[str] = list(packages) if packages else [] + relative_wildcards: list[str] = [] + absolute_named: dict[str, list[str]] = {} + + if has_stub: + filtered_path, stub_fallbacks, relative_wildcards, absolute_named = _parse_stub(stub_file) + if stub_fallbacks: + fallback_packages = list(dict.fromkeys(fallback_packages + stub_fallbacks)) + + stub_path = filtered_path if filtered_path is not None else caller_file + stub_getattr, stub_dir, __all__ = lazy.attach_stub(package_name, stub_path) + if filtered_path is not None: + os.unlink(filtered_path) + else: + __all__: list[str] = [] + + mod = sys.modules[package_name] + + # -- Eagerly resolve absolute named imports (from pkg import a, b) ----- + for abs_pkg, names in absolute_named.items(): + pkg_mod = importlib.import_module(abs_pkg) + for name in names: + mod.__dict__[name] = getattr(pkg_mod, name) + if name not in __all__: + __all__.append(name) + + # -- Eagerly resolve relative wildcard imports (from .X import *) ------ + for rel_mod_name in relative_wildcards: + fq_name = f"{package_name}.{rel_mod_name}" + sub = importlib.import_module(fq_name) + exported = getattr(sub, "__all__", [n for n in dir(sub) if not n.startswith("_")]) + for name in exported: + mod.__dict__[name] = getattr(sub, name) + if name not in __all__: + __all__.append(name) + + # -- Build lazy fallback for absolute wildcard imports ----------------- + _sentinel = object() + + if fallback_packages: + _resolved_pkgs: list = [] + for _pkg_name in fallback_packages: + try: + _resolved_pkgs.append(importlib.import_module(_pkg_name)) + except (ImportError, ModuleNotFoundError) as e: + raise ImportError( + f"lazy_export() in {package_name!r}: .pyi stub declares " + f"'from {_pkg_name} import *' but the package is not installed." + ) from e + + def _pkg_getattr(name: str): + for pkg_mod in _resolved_pkgs: + val = getattr(pkg_mod, name, _sentinel) + if val is not _sentinel: + mod.__dict__[name] = val + return val + raise AttributeError(f"module {package_name!r} has no attribute {name!r}") + + if has_stub: + _stub_getattr = stub_getattr + _stub_dir = stub_dir + _dir_cache: list[str] | None = None + + def __getattr__(name: str): + try: + return _stub_getattr(name) + except AttributeError: + return _pkg_getattr(name) + + def __dir__(): + nonlocal _dir_cache + if _dir_cache is None: + pkg_names = {n for p in _resolved_pkgs for n in dir(p) if not n.startswith("_")} + _dir_cache = sorted(set(_stub_dir()) | pkg_names) + return _dir_cache + + else: + _dir_cache: list[str] | None = None + + def __getattr__(name: str): + return _pkg_getattr(name) + + def __dir__(): + nonlocal _dir_cache + if _dir_cache is None: + _dir_cache = sorted(n for p in _resolved_pkgs for n in dir(p) if not n.startswith("_")) + return _dir_cache + + elif has_stub: + __getattr__ = stub_getattr + __dir__ = stub_dir + else: + + def __getattr__(name: str): + raise AttributeError(f"module {package_name!r} has no attribute {name!r}") + + def __dir__(): + return [] + + setattr(mod, "__getattr__", __getattr__) + setattr(mod, "__dir__", __dir__) + setattr(mod, "__all__", __all__) + return __getattr__, __dir__, __all__ diff --git a/source/isaaclab/isaaclab/utils/noise/__init__.py b/source/isaaclab/isaaclab/utils/noise/__init__.py index 7f91067fd005..85ea960a5e42 100644 --- a/source/isaaclab/isaaclab/utils/noise/__init__.py +++ b/source/isaaclab/isaaclab/utils/noise/__init__.py @@ -13,23 +13,19 @@ .. code-block:: python import torch - from isaaclab.utils.noise import AdditiveGaussianNoiseCfg + from isaaclab.utils.noise import GaussianNoiseCfg # create a random tensor my_tensor = torch.rand(128, 128, device="cuda") # create a noise configuration - cfg = AdditiveGaussianNoiseCfg(mean=0.0, std=1.0) + cfg = GaussianNoiseCfg(mean=0.0, std=1.0) # apply the noise my_noisified_tensor = cfg.func(my_tensor, cfg) """ -from .noise_cfg import NoiseCfg # noqa: F401 -from .noise_cfg import ConstantNoiseCfg, GaussianNoiseCfg, NoiseModelCfg, NoiseModelWithAdditiveBiasCfg, UniformNoiseCfg -from .noise_model import NoiseModel, NoiseModelWithAdditiveBias, constant_noise, gaussian_noise, uniform_noise - -# Backward compatibility -ConstantBiasNoiseCfg = ConstantNoiseCfg -AdditiveUniformNoiseCfg = UniformNoiseCfg -AdditiveGaussianNoiseCfg = GaussianNoiseCfg + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/noise/__init__.pyi b/source/isaaclab/isaaclab/utils/noise/__init__.pyi new file mode 100644 index 000000000000..64ece4abf488 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/noise/__init__.pyi @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ConstantNoiseCfg", + "GaussianNoiseCfg", + "NoiseCfg", + "NoiseModel", + "NoiseModelCfg", + "NoiseModelWithAdditiveBias", + "NoiseModelWithAdditiveBiasCfg", + "UniformNoiseCfg", + "constant_noise", + "gaussian_noise", + "uniform_noise", +] + +from .noise_cfg import ( + ConstantNoiseCfg, + GaussianNoiseCfg, + NoiseCfg, + NoiseModelCfg, + NoiseModelWithAdditiveBiasCfg, + UniformNoiseCfg, +) +from .noise_model import ( + NoiseModel, + NoiseModelWithAdditiveBias, + constant_noise, + gaussian_noise, + uniform_noise, +) diff --git a/source/isaaclab/isaaclab/utils/noise/noise_cfg.py b/source/isaaclab/isaaclab/utils/noise/noise_cfg.py index b3275643fd29..4265a12cda84 100644 --- a/source/isaaclab/isaaclab/utils/noise/noise_cfg.py +++ b/source/isaaclab/isaaclab/utils/noise/noise_cfg.py @@ -7,13 +7,14 @@ from collections.abc import Callable from dataclasses import MISSING -from typing import Literal +from typing import TYPE_CHECKING, Literal import torch from isaaclab.utils import configclass -from . import noise_model +if TYPE_CHECKING: + from .noise_model import NoiseModel, NoiseModelWithAdditiveBias @configclass @@ -34,7 +35,7 @@ class NoiseCfg: class ConstantNoiseCfg(NoiseCfg): """Configuration for an additive constant noise term.""" - func = noise_model.constant_noise + func: str = "{DIR}.noise_model:constant_noise" bias: torch.Tensor | float = 0.0 """The bias to add. Defaults to 0.0.""" @@ -44,7 +45,7 @@ class ConstantNoiseCfg(NoiseCfg): class UniformNoiseCfg(NoiseCfg): """Configuration for a additive uniform noise term.""" - func = noise_model.uniform_noise + func: str = "{DIR}.noise_model:uniform_noise" n_min: torch.Tensor | float = -1.0 """The minimum value of the noise. Defaults to -1.0.""" @@ -56,7 +57,7 @@ class UniformNoiseCfg(NoiseCfg): class GaussianNoiseCfg(NoiseCfg): """Configuration for an additive gaussian noise term.""" - func = noise_model.gaussian_noise + func: str = "{DIR}.noise_model:gaussian_noise" mean: torch.Tensor | float = 0.0 """The mean of the noise. Defaults to 0.0.""" @@ -73,7 +74,7 @@ class GaussianNoiseCfg(NoiseCfg): class NoiseModelCfg: """Configuration for a noise model.""" - class_type: type = noise_model.NoiseModel + class_type: type[NoiseModel] | str = "{DIR}.noise_model:NoiseModel" """The class type of the noise model.""" noise_cfg: NoiseCfg = MISSING @@ -97,7 +98,7 @@ class NoiseModelCfg: class NoiseModelWithAdditiveBiasCfg(NoiseModelCfg): """Configuration for an additive gaussian noise with bias model.""" - class_type: type = noise_model.NoiseModelWithAdditiveBias + class_type: type[NoiseModelWithAdditiveBias] | str = "{DIR}.noise_model:NoiseModelWithAdditiveBias" bias_noise_cfg: NoiseCfg = MISSING """The noise configuration for the bias. diff --git a/source/isaaclab/isaaclab/utils/string.py b/source/isaaclab/isaaclab/utils/string.py index dc1cdaf53477..4e7790006ade 100644 --- a/source/isaaclab/isaaclab/utils/string.py +++ b/source/isaaclab/isaaclab/utils/string.py @@ -6,6 +6,7 @@ """Sub-module containing utilities for transforming strings and regular expressions.""" import ast +import functools import importlib import inspect import re @@ -105,11 +106,12 @@ def is_lambda_expression(name: str) -> bool: return False -def callable_to_string(value: Callable) -> str: +def callable_to_string(value: Callable, separator: str = ":") -> str: """Converts a callable object to a string. Args: value: A callable object. + separator: The separator between the module path and the function name. Defaults to ":". Raises: ValueError: When the input argument is not a callable object. @@ -132,15 +134,16 @@ def callable_to_string(value: Callable) -> str: module_name = value.__module__ function_name = value.__name__ # return the string - return f"{module_name}:{function_name}" + return f"{module_name}{separator}{function_name}" -def string_to_callable(name: str) -> Callable: +def string_to_callable(name: str, separator: str = ":") -> Callable: """Resolves the module and function names to return the function. Args: name: The function name. The format should be 'module:attribute_name' or a lambda expression of format: 'lambda x: x'. + separator: The separator between the module path and the function name. Defaults to ":". Raises: ValueError: When the resolved attribute is not a function. @@ -153,7 +156,7 @@ def string_to_callable(name: str) -> Callable: if is_lambda_expression(name): callable_object = eval(name) else: - mod_name, attr_name = name.split(":") + mod_name, attr_name = name.rsplit(separator, 1) mod = importlib.import_module(mod_name) callable_object = getattr(mod, attr_name) # check if attribute is callable @@ -170,49 +173,94 @@ def string_to_callable(name: str) -> Callable: raise ValueError(msg) -""" -Regex operations. -""" - - -def resolve_matching_names( - keys: str | Sequence[str], list_of_strings: Sequence[str], preserve_order: bool = False -) -> tuple[list[int], list[str]]: - """Match a list of query regular expressions against a list of strings and return the matched indices and names. - - When a list of query regular expressions is provided, the function checks each target string against each - query regular expression and returns the indices of the matched strings and the matched strings. +class ResolvableString(str): + """String subtype that lazily resolves ``module.path:Callable`` values. - If the :attr:`preserve_order` is True, the ordering of the matched indices and names is the same as the order - of the provided list of strings. This means that the ordering is dictated by the order of the target strings - and not the order of the query regular expressions. + The object stays string-compatible for serialization and display, while also allowing callable + use and attribute access on the resolved callable/class. + """ - If the :attr:`preserve_order` is False, the ordering of the matched indices and names is the same as the order - of the provided list of query regular expressions. + __slots__ = ("_resolved_callable", "_resolve_error") + + def __new__(cls, value: str): + obj = super().__new__(cls, value) + obj._resolved_callable = None + obj._resolve_error = None + return obj + + def _resolve(self) -> Callable: + if self._resolved_callable is not None: + return self._resolved_callable + if self._resolve_error is not None: + raise self._resolve_error + try: + resolved = string_to_callable(str(self)) + except (ImportError, AttributeError, ValueError) as error: + self._resolve_error = error + raise + self._resolved_callable = resolved + return resolved + + def __call__(self, *args, **kwargs): + return self._resolve()(*args, **kwargs) + + def _split_ref(self) -> tuple[str | None, str]: + """Parse ``module:attribute`` reference without importing.""" + value = str(self) + if ":" not in value: + return None, value + module_name, attr_path = value.split(":", 1) + return module_name, attr_path + + def __getattribute__(self, item: str): + # Provide callable metadata without forcing import/resolution. + if item == "__name__": + _, attr_path = object.__getattribute__(self, "_split_ref")() + return attr_path.rsplit(".", 1)[-1] + if item == "__qualname__": + _, attr_path = object.__getattribute__(self, "_split_ref")() + return attr_path + if item == "__module__": + module_name, _ = object.__getattribute__(self, "_split_ref")() + if module_name is None: + return str.__module__ + return module_name + return super().__getattribute__(item) + + def __getattr__(self, item: str): + # Keep generic dunder probing (e.g. hasattr(..., "__dataclass_fields__")) + # lazy and side-effect free. Metadata dunders are handled in __getattribute__. + if item.startswith("__") and item.endswith("__"): + raise AttributeError(item) + return getattr(self._resolve(), item) + + def __copy__(self): + """Return self because strings are immutable.""" + return self + + def __deepcopy__(self, memo): + """Return self so deepcopy doesn't trigger lazy resolution.""" + return self - For example, consider the list of strings is ['a', 'b', 'c', 'd', 'e'] and the regular expressions are ['a|c', 'b']. - If :attr:`preserve_order` is False, then the function will return the indices of the matched strings and the - strings as: ([0, 1, 2], ['a', 'b', 'c']). When :attr:`preserve_order` is True, it will return them as: - ([0, 2, 1], ['a', 'c', 'b']). - Note: - The function does not sort the indices. It returns the indices in the order they are found. +""" +Regex operations. +""" - Args: - keys: A regular expression or a list of regular expressions to match the strings in the list. - list_of_strings: A list of strings to match. - preserve_order: Whether to preserve the order of the query keys in the returned values. Defaults to False. - Returns: - A tuple of lists containing the matched indices and names. +@functools.cache +def _resolve_matching_names_impl( + keys: tuple[str, ...], + list_of_strings: tuple[str, ...], + preserve_order: bool, + raise_when_no_match: bool, +) -> tuple[tuple[int, ...], tuple[str, ...]]: + """Cached implementation of :func:`resolve_matching_names`. - Raises: - ValueError: When multiple matches are found for a string in the list. - ValueError: When not all regular expressions are matched. + All arguments are hashable so that ``functools.cache`` can store results. + Returns tuples (immutable) to protect the cached data from mutation; + the public wrapper converts these back to fresh lists for each caller. """ - # resolve name keys - if isinstance(keys, str): - keys = [keys] # find matching patterns index_list = [] names_list = [] @@ -258,6 +306,8 @@ def resolve_matching_names( names_list = names_list_reorder # check that all regular expressions are matched if not all(keys_match_found): + if not raise_when_no_match: + return (), () # make this print nicely aligned for debugging msg = "\n" for key, value in zip(keys, keys_match_found): @@ -267,8 +317,66 @@ def resolve_matching_names( raise ValueError( f"Not all regular expressions are matched! Please check that the regular expressions are correct: {msg}" ) - # return - return index_list, names_list + # return immutable tuples for safe caching + return tuple(index_list), tuple(names_list) + + +def resolve_matching_names( + keys: str | Sequence[str], + list_of_strings: Sequence[str], + preserve_order: bool = False, + *, + raise_when_no_match: bool = True, +) -> tuple[list[int], list[str]]: + """Match a list of query regular expressions against a list of strings and return the matched indices and names. + + When a list of query regular expressions is provided, the function checks each target string against each + query regular expression and returns the indices of the matched strings and the matched strings. + + If the :attr:`preserve_order` is True, the ordering of the matched indices and names is the same as the order + of the provided list of strings. This means that the ordering is dictated by the order of the target strings + and not the order of the query regular expressions. + + If the :attr:`preserve_order` is False, the ordering of the matched indices and names is the same as the order + of the provided list of query regular expressions. + + For example, consider the list of strings is ['a', 'b', 'c', 'd', 'e'] and the regular expressions are ['a|c', 'b']. + If :attr:`preserve_order` is False, then the function will return the indices of the matched strings and the + strings as: ([0, 1, 2], ['a', 'b', 'c']). When :attr:`preserve_order` is True, it will return them as: + ([0, 2, 1], ['a', 'c', 'b']). + + Results are cached internally — repeated calls with the same arguments avoid redundant regex matching. + + Note: + The function does not sort the indices. It returns the indices in the order they are found. + + Args: + keys: A regular expression or a list of regular expressions to match the strings in the list. + list_of_strings: A list of strings to match. + preserve_order: Whether to preserve the order of the query keys in the returned values. Defaults to False. + raise_when_no_match: Whether to raise a ``ValueError`` when not all regular expressions are matched. + Defaults to True. When False, returns empty lists instead of raising. + + Returns: + A tuple of lists containing the matched indices and names. + + Raises: + ValueError: When multiple matches are found for a string in the list. + ValueError: When not all regular expressions are matched and :attr:`raise_when_no_match` is True. + """ + _keys = (keys,) if isinstance(keys, str) else tuple(keys) + idx, names = _resolve_matching_names_impl(_keys, tuple(list_of_strings), preserve_order, raise_when_no_match) + return list(idx), list(names) + + +def clear_resolve_matching_names_cache() -> None: + """Discard all cached results from :func:`resolve_matching_names`. + + Call this when the simulation scene is torn down so that cached + name-resolution entries from destroyed assets do not accumulate + across scene rebuilds in long-lived processes. + """ + _resolve_matching_names_impl.cache_clear() def resolve_matching_names_values( @@ -280,6 +388,11 @@ def resolve_matching_names_values( """Match a list of regular expressions in a dictionary against a list of strings and return the matched indices, names, and values. + Note: + Unlike :func:`resolve_matching_names`, this function is not cached. Current callers + use it during initialization only (e.g. action/actuator config resolution), so caching + would add complexity without a measurable benefit. + If the :attr:`preserve_order` is True, the ordering of the matched indices and names is the same as the order of the provided list of strings. This means that the ordering is dictated by the order of the target strings and not the order of the query regular expressions. @@ -414,3 +527,22 @@ def find_root_prim_path_from_regex(prim_path_regex: str) -> tuple[str, int]: root_prim_path = "/".join(prim_paths_list[:root_idx]) tree_level = root_idx return root_prim_path, tree_level + + +def list_intersection(list1: list[Any], list2: list[Any] | None) -> list[Any]: + """Return the intersection of two lists. + + The returned list has elements that are in both input lists. + + Args: + list1: The first list. + list2: The second list. + + Returns: + A new list containing elements that are in both input lists. + + """ + if list2 is None: + return list1 + set2 = set(list2) + return [x for x in list1 if x in set2] diff --git a/source/isaaclab/isaaclab/utils/timer.py b/source/isaaclab/isaaclab/utils/timer.py index 4d9951db60c8..2a5014845e9b 100644 --- a/source/isaaclab/isaaclab/utils/timer.py +++ b/source/isaaclab/isaaclab/utils/timer.py @@ -3,13 +3,23 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Sub-module for a timer class that can be used for performance measurements.""" +"""Sub-module for a timer class that can be used for performance measurements. + +Note: + This module has a hard dependency on `warp` because the :class:`Timer` calls + ``wp.synchronize()`` on stop to flush pending GPU work before sampling the clock. + Since IsaacLab workloads are predominantly GPU-bound, an unsynchronized timer would + under-report wall time by returning before device kernels have finished executing. +""" from __future__ import annotations +import math import time from contextlib import ContextDecorator -from typing import Any, ClassVar +from typing import Any, ClassVar, Literal + +import warp as wp class TimerError(Exception): @@ -43,7 +53,7 @@ class Timer(ContextDecorator): time.sleep(1) timer.stop() - print(2 <= stopwatch.total_run_time) # Output: True + print(2 <= timer.total_run_time) # Output: True As a context manager: @@ -60,7 +70,7 @@ class Timer(ContextDecorator): Reference: https://gist.github.com/sumeet/1123871 """ - timing_info: ClassVar[dict[str, float]] = dict() + timing_info: ClassVar[dict[str, dict[str, float]]] = dict() """Dictionary for storing the elapsed time per timer instances globally. This dictionary logs the timer information. The keys are the names given to the timer class @@ -68,7 +78,25 @@ class Timer(ContextDecorator): is recorded in the dictionary. """ - def __init__(self, msg: str | None = None, name: str | None = None): + _welford_state: ClassVar[dict[str, float]] = dict() + """Internal accumulator (m2) for Welford's online algorithm, keyed by timer name.""" + + enable: ClassVar[bool] = True + """Whether to enable the timer.""" + + enable_display_output: ClassVar[bool] = True + """Whether to enable the display output.""" + + _UNIT_MULTIPLIERS: ClassVar[dict[str, float]] = {"s": 1.0, "ms": 1e3, "us": 1e6, "ns": 1e9} + """Mapping from time unit string to multiplier (seconds -> unit).""" + + def __init__( + self, + msg: str | None = None, + name: str | None = None, + enable: bool = True, + time_unit: Literal["s", "ms", "us", "ns"] = "s", + ): """Initializes the timer. Args: @@ -76,12 +104,21 @@ def __init__(self, msg: str | None = None, name: str | None = None): class in a context manager. Defaults to None. name: The name to use for logging times in a global dictionary. Defaults to None. + enable: Whether to enable the timer. Defaults to True. + time_unit: The unit to use for the elapsed time. Defaults to "s". """ self._msg = msg self._name = name self._start_time = None self._stop_time = None self._elapsed_time = None + self._enable = enable if Timer.enable else False + + if time_unit not in Timer._UNIT_MULTIPLIERS: + raise ValueError(f"Invalid time_unit, {time_unit} is not in {list(Timer._UNIT_MULTIPLIERS)}") + + self._format = time_unit + self._multiplier = Timer._UNIT_MULTIPLIERS[time_unit] def __str__(self) -> str: """A string representation of the class object. @@ -89,7 +126,7 @@ def __str__(self) -> str: Returns: A string containing the elapsed time. """ - return f"{self.time_elapsed:0.6f} seconds" + return f"{(self.total_run_time * self._multiplier):0.6f} {self._format}" """ Properties @@ -100,13 +137,22 @@ def time_elapsed(self) -> float: """The number of seconds that have elapsed since this timer started timing. Note: - This is used for checking how much time has elapsed while the timer is still running. + This always returns seconds regardless of the configured ``time_unit``. + It is used for checking how much time has elapsed while the timer is still running. """ + if self._start_time is None: + return 0.0 return time.perf_counter() - self._start_time @property def total_run_time(self) -> float: - """The number of seconds that elapsed from when the timer started to when it ended.""" + """The number of seconds that elapsed from when the timer started to when it ended. + + Note: + This always returns seconds regardless of the configured ``time_unit``. + """ + if self._elapsed_time is None: + return 0.0 return self._elapsed_time """ @@ -115,6 +161,9 @@ def total_run_time(self) -> float: def start(self): """Start timing.""" + if not self._enable: + return + if self._start_time is not None: raise TimerError("Timer is running. Use .stop() to stop it") @@ -122,15 +171,39 @@ def start(self): def stop(self): """Stop timing.""" + if not self._enable: + return + if self._start_time is None: raise TimerError("Timer is not running. Use .start() to start it") + # Synchronize the device to make sure we time the whole operation + wp.synchronize() + + # Get the elapsed time self._stop_time = time.perf_counter() self._elapsed_time = self._stop_time - self._start_time self._start_time = None - if self._name: - Timer.timing_info[self._name] = self._elapsed_time + if self._name is not None: + self._update_welford(self._elapsed_time) + + def _update_welford(self, value: float): + """Update the running statistics using Welford's online algorithm.""" + info = Timer.timing_info.get(self._name) + if info is None: + Timer.timing_info[self._name] = {"mean": value, "std": 0.0, "n": 1, "last": value} + Timer._welford_state[self._name] = 0.0 + else: + m2 = Timer._welford_state[self._name] + n = info["n"] + 1 + delta = value - info["mean"] + mean = info["mean"] + delta / n + delta2 = value - mean + m2 = m2 + delta * delta2 + std = math.sqrt(m2 / (n - 1)) if n > 1 else 0.0 + Timer.timing_info[self._name] = {"mean": mean, "std": std, "n": n, "last": value} + Timer._welford_state[self._name] = m2 """ Context managers @@ -145,13 +218,34 @@ def __exit__(self, *exc_info: Any): """Stop timing.""" self.stop() # print message - if self._msg is not None: - print(self._msg, f": {self._elapsed_time:0.6f} seconds") + if self._enable: + if (self._msg is not None) and (Timer.enable_display_output): + parts = [f"Last: {(self._elapsed_time * self._multiplier):0.6f} {self._format}"] + if self._name is not None: + info = Timer.timing_info[self._name] + parts.append(f"Mean: {(info['mean'] * self._multiplier):0.6f} {self._format}") + parts.append(f"Std: {(info['std'] * self._multiplier):0.6f} {self._format}") + parts.append(f"N: {info['n']}") + print(self._msg, ", ".join(parts)) """ Static Methods """ + @staticmethod + def reset(name: str | None = None): + """Reset statistics for a named timer, or all timers if name is None. + + Args: + name: Name of the timer to reset. If None, resets all timers. + """ + if name is None: + Timer.timing_info.clear() + Timer._welford_state.clear() + else: + Timer.timing_info.pop(name, None) + Timer._welford_state.pop(name, None) + @staticmethod def get_timer_info(name: str) -> float: """Retrieves the time logged in the global dictionary @@ -168,4 +262,26 @@ def get_timer_info(name: str) -> float: """ if name not in Timer.timing_info: raise TimerError(f"Timer {name} does not exist") - return Timer.timing_info.get(name) + return Timer.timing_info.get(name)["last"] + + @staticmethod + def get_timer_statistics(name: str) -> dict[str, float]: + """Retrieves the timer statistics logged in the global dictionary + based on name. + + Args: + name: Name of the entry to be retrieved. + + Raises: + TimerError: If name doesn't exist in the log. + + Returns: + A dictionary containing the mean, std, and n for the named timer. + """ + + if name not in Timer.timing_info: + raise TimerError(f"Timer {name} does not exist") + + keys = ["mean", "std", "n", "last"] + + return {k: Timer.timing_info[name][k] for k in keys} diff --git a/source/isaaclab/isaaclab/utils/version.py b/source/isaaclab/isaaclab/utils/version.py index 358a5550aa1c..7627c89c7947 100644 --- a/source/isaaclab/isaaclab/utils/version.py +++ b/source/isaaclab/isaaclab/utils/version.py @@ -12,6 +12,32 @@ from packaging.version import Version +def has_kit() -> bool: + """Check if Kit (Omniverse Kit) is available in the current environment. + + Returns True when running inside an Omniverse Kit application (e.g. Isaac Sim). + Returns False in kitless mode (e.g. Newton physics backend without Kit). + + Not cached with ``lru_cache`` because this may be called before ``AppLauncher`` + finishes starting Kit, which would permanently lock in a ``False`` result. + The underlying ``get_app()`` call is cheap once the module is loaded. + + This function deliberately avoids triggering a fresh ``import omni.kit.app`` + when called before Kit has started. If ``omni.kit.app`` is not already present + in ``sys.modules``, Kit is not running and we return ``False`` immediately without + performing any import (which would be a forbidden side-effect during cfg-only loading). + """ + import sys + + mod = sys.modules.get("omni.kit.app") + if mod is None: + return False + try: + return mod.get_app() is not None + except Exception: + return False + + @functools.lru_cache(maxsize=1) def get_isaac_sim_version() -> Version: """Get the Isaac Sim version as a Version object, cached for performance. diff --git a/source/isaaclab/isaaclab/utils/warp/__init__.py b/source/isaaclab/isaaclab/utils/warp/__init__.py index 12c9a20f8bb3..569b80c9b089 100644 --- a/source/isaaclab/isaaclab/utils/warp/__init__.py +++ b/source/isaaclab/isaaclab/utils/warp/__init__.py @@ -5,5 +5,57 @@ """Sub-module containing operations based on warp.""" -from . import fabric # noqa: F401 -from .ops import convert_to_warp_mesh, raycast_dynamic_meshes, raycast_mesh, raycast_single_mesh +import warnings + +import warp as wp + +wp.config.quiet = True +wp.init() + +from isaaclab.utils.module import lazy_export + +lazy_export() + +# Avoid a circular import at module load: `.proxy_array` imports warp, which is +# already loaded above. Importing it here ensures the class is available inside +# the shim. +from .proxy_array import ProxyArray # noqa: E402 + +_WP_TO_TORCH_ORIGINAL = wp.to_torch +_WP_TO_TORCH_WARNED = False + + +def _wp_to_torch_with_proxyarray(a, requires_grad=None): + """Shim for :func:`warp.to_torch` that gracefully handles :class:`ProxyArray`. + + Without this shim, ``wp.to_torch(proxy)`` would fail with + ``AttributeError: 'ProxyArray' object has no attribute 'requires_grad'`` + because :class:`ProxyArray` intentionally doesn't replicate the full + ``wp.array`` attribute surface. Users and third-party code that still use + ``wp.to_torch(asset.data.)`` from before the ProxyArray migration + would break hard. + + The shim routes :class:`ProxyArray` arguments to their cached ``.torch`` + view (a zero-copy :class:`torch.Tensor` of the same underlying memory) and + emits a one-shot :class:`DeprecationWarning`. For any other input type, + the original :func:`warp.to_torch` handles the call as before. + """ + global _WP_TO_TORCH_WARNED + if isinstance(a, ProxyArray): + if not _WP_TO_TORCH_WARNED: + _WP_TO_TORCH_WARNED = True + warnings.warn( + "wp.to_torch() is deprecated; use the `.torch` accessor on" + " the ProxyArray directly (e.g. `asset.data.joint_pos.torch`).", + DeprecationWarning, + stacklevel=2, + ) + return a.torch + return _WP_TO_TORCH_ORIGINAL(a, requires_grad=requires_grad) + + +# Patch at both the top-level ``warp`` namespace and the underlying module so +# callers using ``import warp as wp`` and rare ``from warp._src.torch import +# to_torch`` patterns both pick up the shim. +wp.to_torch = _wp_to_torch_with_proxyarray +wp._src.torch.to_torch = _wp_to_torch_with_proxyarray # noqa: SLF001 diff --git a/source/isaaclab/isaaclab/utils/warp/__init__.pyi b/source/isaaclab/isaaclab/utils/warp/__init__.pyi new file mode 100644 index 000000000000..ab1de52f39f7 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/warp/__init__.pyi @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ProxyArray", + "convert_to_warp_mesh", + "raycast_dynamic_meshes", + "raycast_mesh", + "raycast_single_mesh", +] + +from .ops import convert_to_warp_mesh, raycast_dynamic_meshes, raycast_mesh, raycast_single_mesh +from .proxy_array import ProxyArray diff --git a/source/isaaclab/isaaclab/utils/warp/fabric.py b/source/isaaclab/isaaclab/utils/warp/fabric.py index 3fc42ff94236..a48f773f4991 100644 --- a/source/isaaclab/isaaclab/utils/warp/fabric.py +++ b/source/isaaclab/isaaclab/utils/warp/fabric.py @@ -57,12 +57,11 @@ def decompose_fabric_transformation_matrix_to_warp_arrays( This kernel extracts transform components from Fabric's omni:fabric:worldMatrix attribute and stores them in separate arrays. It handles the quaternion convention conversion - (Warp uses xyzw, Isaac Lab uses wxyz). Args: fabric_matrices: Fabric array containing 4x4 transformation matrices array_positions: Output array for positions (N, 3) - array_orientations: Output array for quaternions in wxyz format (N, 4) + array_orientations: Output array for quaternions in xyzw format (N, 4) array_scales: Output array for scales (N, 3) indices: View indices to process mapping: Mapping from view indices to fabric indices @@ -81,12 +80,12 @@ def decompose_fabric_transformation_matrix_to_warp_arrays( array_positions[output_index, 0] = position[0] array_positions[output_index, 1] = position[1] array_positions[output_index, 2] = position[2] - # extract orientation (Warp quaternion is xyzw, convert to wxyz) + # extract orientation if array_orientations.shape[0] > 0: - array_orientations[output_index, 0] = rotation[3] # w - array_orientations[output_index, 1] = rotation[0] # x - array_orientations[output_index, 2] = rotation[1] # y - array_orientations[output_index, 3] = rotation[2] # z + array_orientations[output_index, 0] = rotation[0] # x + array_orientations[output_index, 1] = rotation[1] # y + array_orientations[output_index, 2] = rotation[2] # z + array_orientations[output_index, 3] = rotation[3] # w # extract scale if array_scales.shape[0] > 0: array_scales[output_index, 0] = scale[0] @@ -109,7 +108,6 @@ def compose_fabric_transformation_matrix_from_warp_arrays( """Compose Fabric transformation matrices from position, orientation, and scale arrays. This kernel updates Fabric's omni:fabric:worldMatrix attribute from separate component arrays. - It handles the quaternion convention conversion (Isaac Lab uses wxyz, Warp uses xyzw). After calling this kernel, IFabricHierarchy.updateWorldXforms() should be called to propagate changes through the hierarchy. @@ -117,7 +115,7 @@ def compose_fabric_transformation_matrix_from_warp_arrays( Args: fabric_matrices: Fabric array containing 4x4 transformation matrices to update array_positions: Input array for positions (N, 3) or None - array_orientations: Input array for quaternions in wxyz format (N, 4) or None + array_orientations: Input array for quaternions in xyzw format (N, 4) or None array_scales: Input array for scales (N, 3) or None broadcast_positions: If True, use first position for all prims broadcast_orientations: If True, use first orientation for all prims @@ -145,10 +143,10 @@ def compose_fabric_transformation_matrix_from_warp_arrays( index = 0 else: index = i - rotation[0] = array_orientations[index, 1] # x - rotation[1] = array_orientations[index, 2] # y - rotation[2] = array_orientations[index, 3] # z - rotation[3] = array_orientations[index, 0] # w + rotation[0] = array_orientations[index, 0] # x + rotation[1] = array_orientations[index, 1] # y + rotation[2] = array_orientations[index, 2] # z + rotation[3] = array_orientations[index, 3] # w # update scale if array_scales.shape[0] > 0: if broadcast_scales: diff --git a/source/isaaclab/isaaclab/utils/warp/kernels.py b/source/isaaclab/isaaclab/utils/warp/kernels.py index cf56e34ed45a..efcdbfe63f1e 100644 --- a/source/isaaclab/isaaclab/utils/warp/kernels.py +++ b/source/isaaclab/isaaclab/utils/warp/kernels.py @@ -53,7 +53,7 @@ def raycast_mesh_kernel( this array is not used. max_dist: The maximum ray-cast distance. Defaults to 1e6. return_distance: Whether to return the ray hit distances. Defaults to False. - return_normal: Whether to return the ray hit normals. Defaults to False`. + return_normal: Whether to return the ray hit normals. Defaults to False. return_face_id: Whether to return the ray hit face ids. Defaults to False. """ # get the thread id @@ -79,6 +79,63 @@ def raycast_mesh_kernel( ray_face_id[tid] = f +@wp.kernel(enable_backward=False) +def raycast_mesh_masked_kernel( + # input + mesh: wp.uint64, + env_mask: wp.array(dtype=wp.bool), + ray_starts: wp.array2d(dtype=wp.vec3f), + ray_directions: wp.array2d(dtype=wp.vec3f), + max_dist: wp.float32, + return_distance: int, + return_normal: int, + # output + ray_hits: wp.array2d(dtype=wp.vec3f), + ray_distance: wp.array2d(dtype=wp.float32), + ray_normal: wp.array2d(dtype=wp.vec3f), +): + """Ray-cast against a single static mesh for masked environments. + + Extends :func:`raycast_mesh_kernel` with environment masking and optional distance/normal output, + for use in multi-environment sensor pipelines. + + Launch with ``dim=(num_envs, num_rays)``. + + Args: + mesh: Warp mesh id to ray-cast against. + env_mask: Boolean mask for which environments to update. Shape is (num_envs,). + ray_starts: World-frame ray start positions [m]. Shape is (num_envs, num_rays). + ray_directions: World-frame unit ray directions. Shape is (num_envs, num_rays). + max_dist: Maximum ray-cast distance [m]. + return_distance: Whether to write hit distances to ``ray_distance`` (1) or skip (0). + return_normal: Whether to write surface normals to ``ray_normal`` (1) or skip (0). + ray_hits: Output ray hit positions [m]. Shape is (num_envs, num_rays). + Pre-filled with inf for missed hits; unchanged on miss. + ray_distance: Output hit distances [m]. Shape is (num_envs, num_rays). + Written only when ``return_distance`` is 1; pre-filled with inf for missed hits. + ray_normal: Output surface normals at hit positions. Shape is (num_envs, num_rays). + Written only when ``return_normal`` is 1; pre-filled with inf for missed hits. + """ + env, ray = wp.tid() + if not env_mask[env]: + return + + t = float(0.0) + u = float(0.0) + v = float(0.0) + sign = float(0.0) + n = wp.vec3f() + f = int(0) + + hit = wp.mesh_query_ray(mesh, ray_starts[env, ray], ray_directions[env, ray], max_dist, t, u, v, sign, n, f) + if hit: + ray_hits[env, ray] = ray_starts[env, ray] + t * ray_directions[env, ray] + if return_distance == 1: + ray_distance[env, ray] = t + if return_normal == 1: + ray_normal[env, ray] = n + + @wp.kernel(enable_backward=False) def raycast_static_meshes_kernel( mesh: wp.array2d(dtype=wp.uint64), @@ -110,14 +167,24 @@ def raycast_static_meshes_kernel( account the mesh's position and rotation. This kernel is useful for ray-casting against static meshes that are not expected to move. + .. warning:: + **Known race condition:** When two meshes are equidistant to the same ray, the + ``atomic_min`` + equality-check pattern used for closest-hit resolution is not fully + thread-safe. Two threads may both pass the equality check and write different output + fields (e.g., ``ray_hits`` from mesh A, ``ray_normal`` from mesh B). In practice this + is rare (requires exact floating-point tie) and the position output is still correct, + but normals, face IDs, and mesh IDs may be inconsistent for the affected ray. + See `warp#1058 `_ for progress on a + thread-safe fix. + Args: mesh: The input mesh. The ray-casting is performed against this mesh on the device specified by the `mesh`'s `device` attribute. ray_starts: The input ray start positions. Shape is (B, N, 3). ray_directions: The input ray directions. Shape is (B, N, 3). ray_hits: The output ray hit positions. Shape is (B, N, 3). - ray_distance: The output ray hit distances. Shape is (B, N,), if ``return_distance`` is True. Otherwise, - this array is not used. + ray_distance: The closest hit distance buffer. Shape is (B, N). Updated via ``atomic_min`` for every + thread that records a hit; used to resolve closest-hit among multiple meshes. ray_normal: The output ray hit normals. Shape is (B, N, 3), if ``return_normal`` is True. Otherwise, this array is not used. ray_face_id: The output ray hit face ids. Shape is (B, N,), if ``return_face_id`` is True. Otherwise, @@ -125,7 +192,7 @@ def raycast_static_meshes_kernel( ray_mesh_id: The output ray hit mesh ids. Shape is (B, N,), if ``return_mesh_id`` is True. Otherwise, this array is not used. max_dist: The maximum ray-cast distance. Defaults to 1e6. - return_normal: Whether to return the ray hit normals. Defaults to False`. + return_normal: Whether to return the ray hit normals. Defaults to False. return_face_id: Whether to return the ray hit face ids. Defaults to False. return_mesh_id: Whether to return the mesh id. Defaults to False. """ @@ -141,10 +208,11 @@ def raycast_static_meshes_kernel( # if the ray hit, store the hit data if mesh_query_ray_t.result: wp.atomic_min(ray_distance, tid_env, tid_ray, mesh_query_ray_t.t) - # check if hit distance is less than the current hit distance, only then update the memory - # TODO, in theory we could use the output of atomic_min to avoid the non-thread safe next comparison - # however, warp atomic_min is returning the wrong values on gpu currently. - # FIXME https://github.com/NVIDIA/warp/issues/1058 + # TODO(warp#1058): Use the return value of atomic_min to avoid the non-thread-safe + # equality check below. Currently warp atomic_min returns wrong values on GPU, so we + # fall back to a racy read-back. When two meshes tie on distance, normals/face-ids/ + # mesh-ids may be written by different threads. The hit *position* is still correct + # because all tying threads compute the same world-space point. if mesh_query_ray_t.t == ray_distance[tid_env, tid_ray]: # convert back to world space and update the hit data ray_hits[tid_env, tid_ray] = start_pos + mesh_query_ray_t.t * direction @@ -160,6 +228,7 @@ def raycast_static_meshes_kernel( @wp.kernel(enable_backward=False) def raycast_dynamic_meshes_kernel( + env_mask: wp.array(dtype=wp.bool), mesh: wp.array2d(dtype=wp.uint64), ray_starts: wp.array2d(dtype=wp.vec3), ray_directions: wp.array2d(dtype=wp.vec3), @@ -175,7 +244,7 @@ def raycast_dynamic_meshes_kernel( return_face_id: int = False, return_mesh_id: int = False, ): - """Performs ray-casting against multiple meshes. + """Performs ray-casting against multiple dynamic meshes. This function performs ray-casting against the given meshes using the provided ray start positions and directions. The resulting ray hit positions are stored in the :obj:`ray_hits` array. @@ -183,7 +252,6 @@ def raycast_dynamic_meshes_kernel( The function utilizes the ``mesh_query_ray`` method from the ``wp`` module to perform the actual ray-casting operation. The maximum ray-cast distance is set to ``1e6`` units. - Note: That the ``ray_starts``, ``ray_directions``, and ``ray_hits`` arrays should have compatible shapes and data types to ensure proper execution. Additionally, they all must be in the same frame. @@ -191,29 +259,42 @@ def raycast_dynamic_meshes_kernel( All arguments are expected to be batched with the first dimension (B, batch) being the number of envs and the second dimension (N, num_rays) being the number of rays. For Meshes, W is the number of meshes. + .. warning:: + **Known race condition:** When two meshes are equidistant to the same ray, the + ``atomic_min`` + equality-check pattern used for closest-hit resolution is not fully + thread-safe. Two threads may both pass the equality check and write different output + fields (e.g., ``ray_hits`` from mesh A, ``ray_normal`` from mesh B). In practice this + is rare (requires exact floating-point tie) and the position output is still correct, + but normals, face IDs, and mesh IDs may be inconsistent for the affected ray. + See `warp#1058 `_ for progress on a + thread-safe fix. + Args: + env_mask: Boolean mask selecting which environments to process. Shape is (B,). mesh: The input mesh. The ray-casting is performed against this mesh on the device specified by the `mesh`'s `device` attribute. ray_starts: The input ray start positions. Shape is (B, N, 3). ray_directions: The input ray directions. Shape is (B, N, 3). ray_hits: The output ray hit positions. Shape is (B, N, 3). - ray_distance: The output ray hit distances. Shape is (B, N,), if ``return_distance`` is True. Otherwise, - this array is not used. + ray_distance: The closest hit distance buffer. Shape is (B, N). Updated via ``atomic_min`` for every + thread that records a hit; used to resolve closest-hit among multiple meshes. ray_normal: The output ray hit normals. Shape is (B, N, 3), if ``return_normal`` is True. Otherwise, this array is not used. ray_face_id: The output ray hit face ids. Shape is (B, N,), if ``return_face_id`` is True. Otherwise, this array is not used. ray_mesh_id: The output ray hit mesh ids. Shape is (B, N,), if ``return_mesh_id`` is True. Otherwise, this array is not used. - mesh_positions: The input mesh positions in world frame. Shape is (W, 3). - mesh_rotations: The input mesh rotations in world frame. Shape is (W, 4). + mesh_positions: The input mesh positions in world frame. Shape is (B, W, 3). + mesh_rotations: The input mesh rotations in world frame. Shape is (B, W, 4). max_dist: The maximum ray-cast distance. Defaults to 1e6. - return_normal: Whether to return the ray hit normals. Defaults to False`. + return_normal: Whether to return the ray hit normals. Defaults to False. return_face_id: Whether to return the ray hit face ids. Defaults to False. return_mesh_id: Whether to return the mesh id. Defaults to False. """ # get the thread id tid_mesh_id, tid_env, tid_ray = wp.tid() + if not env_mask[tid_env]: + return mesh_pose = wp.transform(mesh_positions[tid_env, tid_mesh_id], mesh_rotations[tid_env, tid_mesh_id]) mesh_pose_inv = wp.transform_inverse(mesh_pose) @@ -225,10 +306,11 @@ def raycast_dynamic_meshes_kernel( # if the ray hit, store the hit data if mesh_query_ray_t.result: wp.atomic_min(ray_distance, tid_env, tid_ray, mesh_query_ray_t.t) - # check if hit distance is less than the current hit distance, only then update the memory - # TODO, in theory we could use the output of atomic_min to avoid the non-thread safe next comparison - # however, warp atomic_min is returning the wrong values on gpu currently. - # FIXME https://github.com/NVIDIA/warp/issues/1058 + # TODO(warp#1058): Use the return value of atomic_min to avoid the non-thread-safe + # equality check below. Currently warp atomic_min returns wrong values on GPU, so we + # fall back to a racy read-back. When two meshes tie on distance, normals/face-ids/ + # mesh-ids may be written by different threads. The hit *position* is still correct + # because all tying threads compute the same world-space point. if mesh_query_ray_t.t == ray_distance[tid_env, tid_ray]: # convert back to world space and update the hit data hit_pos = start_pos + mesh_query_ray_t.t * direction @@ -302,168 +384,324 @@ def reshape_tiled_image( ) ## -# Wrench Composer +# Wrench Composer — Dual-Buffer Architecture ## -@wp.func -def cast_to_link_frame(position: wp.vec3f, link_position: wp.vec3f, is_global: bool) -> wp.vec3f: - """Casts a position to the link frame of the body. - - Args: - position: The position to cast. - link_position: The link frame position. - is_global: Whether the position is in the global frame. - - Returns: - The position in the link frame of the body. - """ - if is_global: - return position - link_position - else: - return position +@wp.kernel +def set_forces_to_dual_buffers_index( + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + forces: wp.array2d(dtype=wp.vec3f), + torques: wp.array2d(dtype=wp.vec3f), + positions: wp.array2d(dtype=wp.vec3f), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), + is_global: bool, +): + """Set forces/torques into dual buffers using index selection (overwrites). + Dispatched with ``dim=(len(env_ids), len(body_ids))``. -@wp.func -def cast_force_to_link_frame(force: wp.vec3f, link_quat: wp.quatf, is_global: bool) -> wp.vec3f: - """Casts a force to the link frame of the body. + When ``is_global`` is True, forces/torques are written to the world-frame buffers. + Forces with ``positions`` go to ``global_force_w`` with torque ``cross(P, F)`` accumulated + into ``global_torque_w``; forces without positions go to ``global_force_at_com_w``. + When ``is_global`` is False, values go to ``local_force_b`` / ``local_torque_b``. - Args: - force: The force to cast. - link_quat: The link frame quaternion. - is_global: Whether the force is applied in the global frame. - Returns: - The force in the link frame of the body. + Any of ``forces``, ``torques``, or ``positions`` may be ``None`` (null array). """ + tid_env, tid_body = wp.tid() + ei = env_ids[tid_env] + bi = body_ids[tid_body] + if is_global: - return wp.quat_rotate_inv(link_quat, force) + if torques: + global_torque_w[ei, bi] = torques[tid_env, tid_body] + if forces: + if positions: + global_force_w[ei, bi] = forces[tid_env, tid_body] + if torques: + global_torque_w[ei, bi] = global_torque_w[ei, bi] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + global_torque_w[ei, bi] = wp.cross(positions[tid_env, tid_body], forces[tid_env, tid_body]) + else: + global_force_at_com_w[ei, bi] = forces[tid_env, tid_body] else: - return force + if torques: + local_torque_b[ei, bi] = torques[tid_env, tid_body] + if forces: + local_force_b[ei, bi] = forces[tid_env, tid_body] + if positions: + if torques: + local_torque_b[ei, bi] = local_torque_b[ei, bi] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + local_torque_b[ei, bi] = wp.cross(positions[tid_env, tid_body], forces[tid_env, tid_body]) -@wp.func -def cast_torque_to_link_frame(torque: wp.vec3f, link_quat: wp.quatf, is_global: bool) -> wp.vec3f: - """Casts a torque to the link frame of the body. - - Args: - torque: The torque to cast. - link_quat: The link frame quaternion. - is_global: Whether the torque is applied in the global frame. +@wp.kernel +def add_forces_to_dual_buffers_index( + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + forces: wp.array2d(dtype=wp.vec3f), + torques: wp.array2d(dtype=wp.vec3f), + positions: wp.array2d(dtype=wp.vec3f), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), + is_global: bool, +): + """Add forces/torques into dual buffers using index selection (accumulates). - Returns: - The torque in the link frame of the body. + Same routing logic as :func:`set_forces_to_dual_buffers_index` but uses ``+=`` instead of ``=``. + Dispatched with ``dim=(len(env_ids), len(body_ids))``. """ + tid_env, tid_body = wp.tid() + ei = env_ids[tid_env] + bi = body_ids[tid_body] + if is_global: - return wp.quat_rotate_inv(link_quat, torque) + if forces: + if positions: + global_force_w[ei, bi] = global_force_w[ei, bi] + forces[tid_env, tid_body] + global_torque_w[ei, bi] = global_torque_w[ei, bi] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + global_force_at_com_w[ei, bi] = global_force_at_com_w[ei, bi] + forces[tid_env, tid_body] + if torques: + global_torque_w[ei, bi] = global_torque_w[ei, bi] + torques[tid_env, tid_body] else: - return torque + if forces: + local_force_b[ei, bi] = local_force_b[ei, bi] + forces[tid_env, tid_body] + if positions: + local_torque_b[ei, bi] = local_torque_b[ei, bi] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + if torques: + local_torque_b[ei, bi] = local_torque_b[ei, bi] + torques[tid_env, tid_body] @wp.kernel -def add_forces_and_torques_at_position( - env_ids: wp.array(dtype=wp.int32), - body_ids: wp.array(dtype=wp.int32), +def set_forces_to_dual_buffers_mask( + env_mask: wp.array(dtype=wp.bool), + body_mask: wp.array(dtype=wp.bool), forces: wp.array2d(dtype=wp.vec3f), torques: wp.array2d(dtype=wp.vec3f), positions: wp.array2d(dtype=wp.vec3f), - link_positions: wp.array2d(dtype=wp.vec3f), - link_quaternions: wp.array2d(dtype=wp.quatf), - composed_forces_b: wp.array2d(dtype=wp.vec3f), - composed_torques_b: wp.array2d(dtype=wp.vec3f), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), is_global: bool, ): - """Adds forces and torques to the composed force and torque at the user-provided positions. - When is_global is False, the user-provided positions are offsetting the application of the force relatively to the - link frame of the body. When is_global is True, the user-provided positions are the global positions of the force - application. + """Set forces/torques into dual buffers using mask selection (overwrites). - Args: - env_ids: The environment ids. - body_ids: The body ids. - forces: The forces. - torques: The torques. - positions: The positions. - link_positions: The link frame positions. - link_quaternions: The link frame quaternions. - composed_forces_b: The composed forces. - composed_torques_b: The composed torques. - is_global: Whether the forces and torques are applied in the global frame. + Same routing logic as :func:`set_forces_to_dual_buffers_index` but threads are gated by + ``env_mask[tid_env] and body_mask[tid_body]``, and indices are direct (no indirection array). + Dispatched with ``dim=(num_envs, num_bodies)``. """ - # get the thread id tid_env, tid_body = wp.tid() - # add the forces to the composed force, if the positions are provided, also adds a torque to the composed torque. - if forces: - # add the forces to the composed force - composed_forces_b[env_ids[tid_env], body_ids[tid_body]] += cast_force_to_link_frame( - forces[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - # if there is a position offset, add a torque to the composed torque. - if positions: - composed_torques_b[env_ids[tid_env], body_ids[tid_body]] += wp.skew( - cast_to_link_frame( - positions[tid_env, tid_body], link_positions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - ) @ cast_force_to_link_frame( - forces[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - if torques: - composed_torques_b[env_ids[tid_env], body_ids[tid_body]] += cast_torque_to_link_frame( - torques[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) + if env_mask[tid_env] and body_mask[tid_body]: + if is_global: + if torques: + global_torque_w[tid_env, tid_body] = torques[tid_env, tid_body] + if forces: + if positions: + global_force_w[tid_env, tid_body] = forces[tid_env, tid_body] + if torques: + global_torque_w[tid_env, tid_body] = global_torque_w[tid_env, tid_body] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + global_torque_w[tid_env, tid_body] = wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + global_force_at_com_w[tid_env, tid_body] = forces[tid_env, tid_body] + else: + if torques: + local_torque_b[tid_env, tid_body] = torques[tid_env, tid_body] + if forces: + local_force_b[tid_env, tid_body] = forces[tid_env, tid_body] + if positions: + if torques: + local_torque_b[tid_env, tid_body] = local_torque_b[tid_env, tid_body] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + local_torque_b[tid_env, tid_body] = wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) @wp.kernel -def set_forces_and_torques_at_position( - env_ids: wp.array(dtype=wp.int32), - body_ids: wp.array(dtype=wp.int32), +def add_forces_to_dual_buffers_mask( + env_mask: wp.array(dtype=wp.bool), + body_mask: wp.array(dtype=wp.bool), forces: wp.array2d(dtype=wp.vec3f), torques: wp.array2d(dtype=wp.vec3f), positions: wp.array2d(dtype=wp.vec3f), - link_positions: wp.array2d(dtype=wp.vec3f), - link_quaternions: wp.array2d(dtype=wp.quatf), - composed_forces_b: wp.array2d(dtype=wp.vec3f), - composed_torques_b: wp.array2d(dtype=wp.vec3f), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), is_global: bool, ): - """Sets forces and torques to the composed force and torque at the user-provided positions. - When is_global is False, the user-provided positions are offsetting the application of the force relatively - to the link frame of the body. When is_global is True, the user-provided positions are the global positions - of the force application. + """Add forces/torques into dual buffers using mask selection (accumulates). - Args: - env_ids: The environment ids. - body_ids: The body ids. - forces: The forces. - torques: The torques. - positions: The positions. - link_positions: The link frame positions. - link_quaternions: The link frame quaternions. - composed_forces_b: The composed forces. - composed_torques_b: The composed torques. - is_global: Whether the forces and torques are applied in the global frame. + Same routing logic as :func:`add_forces_to_dual_buffers_index` but threads are gated by + ``env_mask[tid_env] and body_mask[tid_body]``. + Dispatched with ``dim=(num_envs, num_bodies)``. """ - # get the thread id tid_env, tid_body = wp.tid() - # set the torques to the composed torque - if torques: - composed_torques_b[env_ids[tid_env], body_ids[tid_body]] = cast_torque_to_link_frame( - torques[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - # set the forces to the composed force, if the positions are provided, adds a torque to the composed torque - # from the force at that position. - if forces: - # set the forces to the composed force - composed_forces_b[env_ids[tid_env], body_ids[tid_body]] = cast_force_to_link_frame( - forces[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - # if there is a position offset, set the torque from the force at that position. - if positions: - composed_torques_b[env_ids[tid_env], body_ids[tid_body]] = wp.skew( - cast_to_link_frame( - positions[tid_env, tid_body], link_positions[env_ids[tid_env], body_ids[tid_body]], is_global - ) - ) @ cast_force_to_link_frame( - forces[tid_env, tid_body], link_quaternions[env_ids[tid_env], body_ids[tid_body]], is_global - ) + if env_mask[tid_env] and body_mask[tid_body]: + if is_global: + if forces: + if positions: + global_force_w[tid_env, tid_body] = global_force_w[tid_env, tid_body] + forces[tid_env, tid_body] + global_torque_w[tid_env, tid_body] = global_torque_w[tid_env, tid_body] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + else: + global_force_at_com_w[tid_env, tid_body] = ( + global_force_at_com_w[tid_env, tid_body] + forces[tid_env, tid_body] + ) + if torques: + global_torque_w[tid_env, tid_body] = global_torque_w[tid_env, tid_body] + torques[tid_env, tid_body] + else: + if forces: + local_force_b[tid_env, tid_body] = local_force_b[tid_env, tid_body] + forces[tid_env, tid_body] + if positions: + local_torque_b[tid_env, tid_body] = local_torque_b[tid_env, tid_body] + wp.cross( + positions[tid_env, tid_body], forces[tid_env, tid_body] + ) + if torques: + local_torque_b[tid_env, tid_body] = local_torque_b[tid_env, tid_body] + torques[tid_env, tid_body] + + +@wp.kernel +def add_raw_wrench_buffers( + src_gf: wp.array2d(dtype=wp.vec3f), + src_gt: wp.array2d(dtype=wp.vec3f), + src_gfc: wp.array2d(dtype=wp.vec3f), + src_lf: wp.array2d(dtype=wp.vec3f), + src_lt: wp.array2d(dtype=wp.vec3f), + dst_gf: wp.array2d(dtype=wp.vec3f), + dst_gt: wp.array2d(dtype=wp.vec3f), + dst_gfc: wp.array2d(dtype=wp.vec3f), + dst_lf: wp.array2d(dtype=wp.vec3f), + dst_lt: wp.array2d(dtype=wp.vec3f), +): + """Element-wise add all five source wrench buffers into destination buffers. + + Dispatched with ``dim=(num_envs, num_bodies)``. Each ``src_*`` / ``dst_*`` pair corresponds + to one of the five input buffers (global_force_w, global_torque_w, global_force_at_com_w, + local_force_b, local_torque_b). + """ + tid_env, tid_body = wp.tid() + dst_gf[tid_env, tid_body] = dst_gf[tid_env, tid_body] + src_gf[tid_env, tid_body] + dst_gt[tid_env, tid_body] = dst_gt[tid_env, tid_body] + src_gt[tid_env, tid_body] + dst_gfc[tid_env, tid_body] = dst_gfc[tid_env, tid_body] + src_gfc[tid_env, tid_body] + dst_lf[tid_env, tid_body] = dst_lf[tid_env, tid_body] + src_lf[tid_env, tid_body] + dst_lt[tid_env, tid_body] = dst_lt[tid_env, tid_body] + src_lt[tid_env, tid_body] + + +@wp.kernel +def compose_wrench_to_body_frame( + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), + com_pos_w: wp.array2d(dtype=wp.vec3f), + link_quat_w: wp.array2d(dtype=wp.quatf), + out_force_b: wp.array2d(dtype=wp.vec3f), + out_torque_b: wp.array2d(dtype=wp.vec3f), +): + """Compose global and local wrench buffers into a single body-frame output. + + Global torques store the moment of positional forces about the world origin: ``cross(P, F)``. + This kernel corrects to be about the body's CoM via ``cross(P, F) - cross(com_pos_w, F) = + cross(P - com_pos_w, F)``, then rotates both force and torque into the body frame using + ``quat_rotate_inv(link_quat_w, ...)``, and adds local-frame values. + + Dispatched with ``dim=(num_envs, num_bodies)``. + """ + tid_env, tid_body = wp.tid() + total_force_w = global_force_w[tid_env, tid_body] + global_force_at_com_w[tid_env, tid_body] + corrected_torque_w = global_torque_w[tid_env, tid_body] - wp.cross( + com_pos_w[tid_env, tid_body], global_force_w[tid_env, tid_body] + ) + out_force_b[tid_env, tid_body] = ( + wp.quat_rotate_inv(link_quat_w[tid_env, tid_body], total_force_w) + local_force_b[tid_env, tid_body] + ) + out_torque_b[tid_env, tid_body] = ( + wp.quat_rotate_inv(link_quat_w[tid_env, tid_body], corrected_torque_w) + local_torque_b[tid_env, tid_body] + ) + + +@wp.kernel +def reset_wrench_composer_index( + env_ids: wp.array(dtype=wp.int32), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), + out_force_b: wp.array2d(dtype=wp.vec3f), + out_torque_b: wp.array2d(dtype=wp.vec3f), +): + """Zero all 7 wrench composer buffers at the specified environment indices. + + Dispatched with ``dim=(len(env_ids), num_bodies)``. + """ + tid_env, tid_body = wp.tid() + ei = env_ids[tid_env] + z = wp.vec3f(0.0) + global_force_w[ei, tid_body] = z + global_torque_w[ei, tid_body] = z + global_force_at_com_w[ei, tid_body] = z + local_force_b[ei, tid_body] = z + local_torque_b[ei, tid_body] = z + out_force_b[ei, tid_body] = z + out_torque_b[ei, tid_body] = z + + +@wp.kernel +def reset_wrench_composer_mask( + env_mask: wp.array(dtype=wp.bool), + global_force_w: wp.array2d(dtype=wp.vec3f), + global_torque_w: wp.array2d(dtype=wp.vec3f), + global_force_at_com_w: wp.array2d(dtype=wp.vec3f), + local_force_b: wp.array2d(dtype=wp.vec3f), + local_torque_b: wp.array2d(dtype=wp.vec3f), + out_force_b: wp.array2d(dtype=wp.vec3f), + out_torque_b: wp.array2d(dtype=wp.vec3f), +): + """Zero all 7 wrench composer buffers for environments matching the mask. + + Dispatched with ``dim=(num_envs, num_bodies)``. + """ + tid_env, tid_body = wp.tid() + if env_mask[tid_env]: + z = wp.vec3f(0.0) + global_force_w[tid_env, tid_body] = z + global_torque_w[tid_env, tid_body] = z + global_force_at_com_w[tid_env, tid_body] = z + local_force_b[tid_env, tid_body] = z + local_torque_b[tid_env, tid_body] = z + out_force_b[tid_env, tid_body] = z + out_torque_b[tid_env, tid_body] = z diff --git a/source/isaaclab/isaaclab/utils/warp/math_ops.py b/source/isaaclab/isaaclab/utils/warp/math_ops.py new file mode 100644 index 000000000000..36a0f960a8b7 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/warp/math_ops.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +def transform_to_vec_quat( + t: wp.array, +) -> tuple[wp.array, wp.array]: + """Split a wp.transformf array into position (vec3f) and quaternion (quatf) arrays. + + Zero-copy: returns views into the same underlying memory. + + Args: + t: Array of transforms (dtype=wp.transformf). Shape ``(N,)``, ``(N, M)``, or ``(N, M, K)``. + + Returns: + Tuple of (positions, quaternions) as warp array views with matching dimensionality. + + Raises: + TypeError: If *t* does not have dtype ``wp.transformf``. + """ + if t.dtype != wp.transformf: + raise TypeError(f"Expected wp.transformf array, got dtype={t.dtype}") + floats = t.view(wp.float32) + if t.ndim == 1: + return floats[:, :3].view(wp.vec3f), floats[:, 3:].view(wp.quatf) + if t.ndim == 2: + return floats[:, :, :3].view(wp.vec3f), floats[:, :, 3:].view(wp.quatf) + if t.ndim == 3: + return floats[:, :, :, :3].view(wp.vec3f), floats[:, :, :, 3:].view(wp.quatf) + raise ValueError(f"Expected 1D, 2D, or 3D transform array, got ndim={t.ndim}") diff --git a/source/isaaclab/isaaclab/utils/warp/ops.py b/source/isaaclab/isaaclab/utils/warp/ops.py index f7cc8ac01def..a3ee273b627c 100644 --- a/source/isaaclab/isaaclab/utils/warp/ops.py +++ b/source/isaaclab/isaaclab/utils/warp/ops.py @@ -17,10 +17,12 @@ # initialize the warp module wp.init() -from isaaclab.utils.math import convert_quat - from . import kernels +# Cache of all-True env masks keyed by (n_envs, device) to avoid per-call allocations in +# raycast_dynamic_meshes. Populated lazily on first call with a given (n_envs, device) pair. +_all_env_mask_cache: dict[tuple[int, str], wp.array] = {} + def raycast_mesh( ray_starts: torch.Tensor, @@ -206,7 +208,7 @@ def raycast_dynamic_meshes( ray_directions: The ray directions for each ray. Shape (B, N, 3). mesh_ids_wp: The warp mesh ids to ray-cast against. Length (B, M). mesh_positions_w: The world positions of the meshes. Shape (B, M, 3). - mesh_orientations_w: The world orientation as quaternion (wxyz) format. Shape (B, M, 4). + mesh_orientations_w: The world orientation as quaternion (x, y, z, w) format. Shape (B, M, 4). max_dist: The maximum distance to ray-cast. Defaults to 1e6. return_distance: Whether to return the distance of the ray until it hits the mesh. Defaults to False. return_normal: Whether to return the normal of the mesh face the ray hits. Defaults to False. @@ -333,16 +335,23 @@ def raycast_dynamic_meshes( ) mesh_quat_wp_w = wp.from_torch(quat_identity, dtype=wp.quat) else: - mesh_orientations_w = convert_quat( - mesh_orientations_w.to(dtype=torch.float32, device=torch_device), "xyzw" - ).contiguous() + # mesh orientations are already in xyzw format + mesh_orientations_w = mesh_orientations_w.to(dtype=torch.float32, device=torch_device).contiguous() mesh_quat_wp_w = wp.from_torch(mesh_orientations_w, dtype=wp.quat) + # All environments active when called through this public API. + # Cache the mask by (n_envs, device) to avoid a per-call allocation. + cache_key = (n_envs, str(torch_device)) + if cache_key not in _all_env_mask_cache: + _all_env_mask_cache[cache_key] = wp.from_torch(torch.ones(n_envs, dtype=torch.bool, device=torch_device)) + all_env_mask = _all_env_mask_cache[cache_key] + # launch the warp kernel wp.launch( kernel=kernels.raycast_dynamic_meshes_kernel, dim=[n_meshes, n_envs, n_rays_per_env], inputs=[ + all_env_mask, mesh_ids_wp, ray_starts_wp, ray_directions_wp, diff --git a/source/isaaclab/isaaclab/utils/warp/proxy_array.py b/source/isaaclab/isaaclab/utils/warp/proxy_array.py new file mode 100644 index 000000000000..912e44798877 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/warp/proxy_array.py @@ -0,0 +1,338 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first dual-access array wrapper with explicit ``.torch`` and ``.warp`` accessors. + +Inspired by ProxyArray from mujocolab/mjlab (BSD-3-Clause). +""" + +from __future__ import annotations + +import os +import warnings +from typing import ClassVar + +import torch +import warp as wp + +_QUATF_ACCESS_WARN_ENV = "WARN_ON_TORCH_QUATF_ACCESS" +"""Environment variable that, when set to ``"1"``, makes :attr:`ProxyArray.torch` +emit a :class:`UserWarning` on every read of a ``wp.quatf``-typed array. Used as a +runtime aid for tracking down call sites that may still assume Isaac Lab 2.x's +``(w, x, y, z)`` quaternion convention after the migration to Isaac Lab 3.x's +``(x, y, z, w)`` convention. See the Isaac Lab 3.0 migration guide for details.""" + + +class ProxyArray: + """Warp-first array wrapper providing cached zero-copy ``.torch`` and ``.warp`` accessors. + + This class wraps a :class:`warp.array` and provides: + + * A ``.warp`` property that returns the original warp array (for kernel interop). + * A ``.torch`` property that returns a cached, zero-copy :class:`torch.Tensor` view + (via :func:`warp.to_torch`). + * Convenience properties (``shape``, ``dtype``, ``device``) delegated to the warp array. + * A deprecation bridge for common torch functions, indexing, and arithmetic/comparison + operators while emitting a one-time :class:`DeprecationWarning`. Tensor instance methods + such as ``clone()`` are not forwarded; use explicit ``.torch`` access for those. + + Example: + + .. code-block:: python + + import warp as wp + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(100, dtype=wp.vec3f, device="cuda:0") + ta = ProxyArray(arr) + + # Explicit access (preferred) + ta.warp # -> wp.array, shape (100,), dtype vec3f + ta.torch # -> torch.Tensor, shape (100, 3) + + # Deprecation bridge (warns once, then silent) + result = ta + 1.0 # works, emits DeprecationWarning + """ + + _deprecation_warned: ClassVar[bool] = False + """Class-level flag ensuring the deprecation warning is emitted at most once.""" + + def __init__(self, wp_array: wp.array) -> None: + """Initialize the ProxyArray wrapper. + + The instance is immutable after construction: the wrapped ``wp.array`` cannot + be reassigned. If the underlying simulation memory is re-allocated, construct + a new :class:`ProxyArray` instead of mutating an existing one. + + Args: + wp_array: The warp array to wrap. + + Raises: + TypeError: If ``wp_array`` is not a :class:`warp.array`. + """ + if not isinstance(wp_array, wp.array): + raise TypeError( + f"ProxyArray expects a warp.array, got {type(wp_array).__name__}." + " If you have a ProxyArray, use it directly instead of wrapping it again." + ) + # Bypass __setattr__ for the two internal fields — everything else raises. + object.__setattr__(self, "_warp", wp_array) + object.__setattr__(self, "_torch_cache", None) + # Cached once at construction so the .torch read path stays a constant-time + # check; only used when the WARN_ON_TORCH_QUATF_ACCESS env var is set. + object.__setattr__(self, "_is_quatf", wp_array.dtype is wp.quatf) + + def __setattr__(self, name: str, value) -> None: + """Forbid mutation of ProxyArray instances except for the internal torch cache. + + The torch view is populated lazily on first ``.torch`` access; that is the + only allowed post-init state change. Every other write raises + :class:`AttributeError` so callers don't accidentally re-point the wrapper. + """ + if name == "_torch_cache": + object.__setattr__(self, name, value) + return + raise AttributeError( + f"ProxyArray is immutable; cannot set attribute {name!r}." + " Construct a new ProxyArray instead of mutating an existing one." + ) + + @staticmethod + def _quatf_access_warning_enabled() -> bool: + """Return ``True`` when the ``WARN_ON_TORCH_QUATF_ACCESS`` env var is set to ``"1"``. + + Read on every :attr:`torch` access to keep the flag dynamic — a single + ``os.environ`` lookup is cheap relative to the warp/torch interop work + that follows. + """ + return os.environ.get(_QUATF_ACCESS_WARN_ENV, "0") == "1" + + # ------------------------------------------------------------------ + # Core accessors + # ------------------------------------------------------------------ + + @property + def warp(self) -> wp.array: + """The underlying warp array.""" + return self._warp + + @property + def torch(self) -> torch.Tensor: + """A cached, zero-copy :class:`torch.Tensor` view of the warp array. + + The tensor is created on first access via :func:`warp.to_torch` and cached + for subsequent calls. Since this is a zero-copy view, modifications to the + tensor are visible through the warp array and vice versa. + + When the underlying warp array has dtype ``wp.quatf`` and the + ``WARN_ON_TORCH_QUATF_ACCESS`` environment variable is set to ``"1"``, + each read emits a :class:`UserWarning` pointing at the call site. This + is a runtime aid for migrating Isaac Lab 2.x code (which used the + ``(w, x, y, z)`` quaternion convention) to Isaac Lab 3.x's + ``(x, y, z, w)`` convention. + """ + if self._is_quatf and self._quatf_access_warning_enabled(): + warnings.warn( + "Reading .torch on a wp.quatf-typed ProxyArray. The Isaac Lab" + " quaternion convention changed from (w, x, y, z) in 2.x to" + " (x, y, z, w) in 3.x. If your code assumes the old order," + " this is likely the source of incorrect rotations." + f" Unset {_QUATF_ACCESS_WARN_ENV} to silence this warning.", + UserWarning, + stacklevel=2, + ) + if self._torch_cache is None: + self._torch_cache = wp.to_torch(self._warp) + return self._torch_cache + + # ------------------------------------------------------------------ + # Convenience properties + # ------------------------------------------------------------------ + + @property + def shape(self) -> tuple[int, ...]: + """Shape of the underlying warp array.""" + return self._warp.shape + + @property + def dtype(self): + """Warp dtype of the underlying array.""" + return self._warp.dtype + + @property + def device(self) -> str: + """Device string of the underlying warp array.""" + return self._warp.device + + def __len__(self) -> int: + """Return the size of the first dimension.""" + return self._warp.shape[0] + + def __repr__(self) -> str: + """Return a string representation of the ProxyArray.""" + return f"ProxyArray(shape={self.shape}, dtype={self.dtype}, device={self.device})" + + # ------------------------------------------------------------------ + # Warp kernel interop + # ------------------------------------------------------------------ + + @property + def __cuda_array_interface__(self): + """Delegate the CUDA array interface to the underlying warp array. + + This allows a ``ProxyArray`` to be passed directly as an argument to + :func:`warp.launch` without explicitly accessing ``.warp``. + + Raises: + AttributeError: If the underlying warp array is not on a CUDA device. + """ + return self._warp.__cuda_array_interface__ + + @property + def __array_interface__(self): + """Delegate the NumPy array interface to the underlying warp array. + + This allows a ``ProxyArray`` to be passed directly as an argument to + :func:`warp.launch` on CPU without explicitly accessing ``.warp``. + + Raises: + AttributeError: If the underlying warp array is not on a CPU device. + """ + return self._warp.__array_interface__ + + # ------------------------------------------------------------------ + # Indexing (deprecation bridge — delegates to .torch) + # ------------------------------------------------------------------ + + def __getitem__(self, key): + """Index into the torch view of this array. + + Supports all torch indexing: ``int``, ``slice``, ``tuple``, + boolean masks, and fancy indexing (multi-dimensional). + """ + self._warn_implicit() + return self.torch[key] + + def __setitem__(self, key, value): + """Write through the torch view into the shared warp memory. + + Supports all torch indexing: ``int``, ``slice``, ``tuple``, + boolean masks, and fancy indexing (multi-dimensional). + """ + self._warn_implicit() + self.torch[key] = value + + # ------------------------------------------------------------------ + # Deprecation bridge + # ------------------------------------------------------------------ + + @classmethod + def _warn_implicit(cls) -> None: + """Emit a one-time deprecation warning for implicit torch usage.""" + if not cls._deprecation_warned: + cls._deprecation_warned = True + warnings.warn( + "Implicit use of ProxyArray as a torch.Tensor is deprecated. " + "Use the explicit .torch property instead (e.g., array.torch).", + DeprecationWarning, + stacklevel=3, + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + """Enable torch operations on ProxyArray by unwrapping to ``.torch``. + + This method is called by PyTorch when a torch function receives a + ``ProxyArray`` as an argument. It unwraps all ``ProxyArray`` instances + to their ``.torch`` tensors and delegates to the original function. + """ + if kwargs is None: + kwargs = {} + cls._warn_implicit() + + def unwrap(x): + if isinstance(x, ProxyArray): + return x.torch + if isinstance(x, (list, tuple)): + return type(x)(unwrap(i) for i in x) + return x + + args = unwrap(args) + kwargs = {k: unwrap(v) for k, v in kwargs.items()} + return func(*args, **kwargs) + + # ------------------------------------------------------------------ + # Arithmetic operators + # ------------------------------------------------------------------ + + def _binop(self, other, op: str) -> torch.Tensor: + """Helper for binary and reflected binary operations.""" + self._warn_implicit() + other_val = other.torch if isinstance(other, ProxyArray) else other + return getattr(self.torch, op)(other_val) + + def __add__(self, other) -> torch.Tensor: + return self._binop(other, "__add__") + + def __radd__(self, other) -> torch.Tensor: + return self._binop(other, "__radd__") + + def __sub__(self, other) -> torch.Tensor: + return self._binop(other, "__sub__") + + def __rsub__(self, other) -> torch.Tensor: + return self._binop(other, "__rsub__") + + def __mul__(self, other) -> torch.Tensor: + return self._binop(other, "__mul__") + + def __rmul__(self, other) -> torch.Tensor: + return self._binop(other, "__rmul__") + + def __truediv__(self, other) -> torch.Tensor: + return self._binop(other, "__truediv__") + + def __rtruediv__(self, other) -> torch.Tensor: + return self._binop(other, "__rtruediv__") + + def __pow__(self, other) -> torch.Tensor: + return self._binop(other, "__pow__") + + def __rpow__(self, other) -> torch.Tensor: + return self._binop(other, "__rpow__") + + def __neg__(self) -> torch.Tensor: + self._warn_implicit() + return -self.torch + + def __pos__(self) -> torch.Tensor: + self._warn_implicit() + return +self.torch + + def __abs__(self) -> torch.Tensor: + self._warn_implicit() + return abs(self.torch) + + # ------------------------------------------------------------------ + # Comparison operators + # ------------------------------------------------------------------ + + def __eq__(self, other) -> torch.Tensor: + return self._binop(other, "__eq__") + + def __ne__(self, other) -> torch.Tensor: + return self._binop(other, "__ne__") + + def __lt__(self, other) -> torch.Tensor: + return self._binop(other, "__lt__") + + def __le__(self, other) -> torch.Tensor: + return self._binop(other, "__le__") + + def __gt__(self, other) -> torch.Tensor: + return self._binop(other, "__gt__") + + def __ge__(self, other) -> torch.Tensor: + return self._binop(other, "__ge__") diff --git a/source/isaaclab/isaaclab/utils/warp/utils.py b/source/isaaclab/isaaclab/utils/warp/utils.py new file mode 100644 index 000000000000..2df288dfba2c --- /dev/null +++ b/source/isaaclab/isaaclab/utils/warp/utils.py @@ -0,0 +1,151 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import functools +from collections.abc import Sequence + +import torch +import warp as wp + +## +# Mask resolution - ids/mask to warp boolean mask. +## + + +@wp.kernel +def _populate_mask_from_ids( + mask: wp.array(dtype=wp.bool), + ids: wp.array(dtype=wp.int32), +): + i = wp.tid() + mask[ids[i]] = True + + +def resolve_1d_mask( + *, + ids: Sequence[int] | slice | torch.Tensor | wp.array | None = None, + mask: wp.array | torch.Tensor | None = None, + all_mask: wp.array, + scratch_mask: wp.array, + device: str, +) -> wp.array: + """Resolve ids/mask into a warp boolean mask. + + Callers provide pre-allocated ``all_mask`` (all-True) and ``scratch_mask`` (reusable + work buffer) so this function never allocates. + + Args: + ids: Index ids. Accepts ``Sequence[int]``, ``slice``, ``torch.Tensor``, + ``wp.array(dtype=wp.int32)``, or ``None`` (all elements). + mask: Direct boolean mask. ``wp.array`` is returned as-is; + ``torch.Tensor`` is converted. + all_mask: Pre-allocated all-True mask returned when both *ids* and *mask* + are ``None``. + scratch_mask: Pre-allocated scratch buffer populated in-place when *ids* + are provided. Not re-entrant (shared buffer). + device: Warp device string (e.g. ``"cuda:0"``). + + Returns: + A ``wp.array(dtype=wp.bool)`` mask. + """ + # Normalize slice(None) to None so the capture guard treats it identically to ids=None. + if isinstance(ids, slice) and ids == slice(None): + ids = None + + if wp.get_device().is_capturing: + if ids is not None or (mask is not None and not isinstance(mask, wp.array)): + raise RuntimeError( + "resolve_1d_mask is only capturable when mask is a wp.array or both ids and mask are None." + ) + + # --- Direct mask input --- + if mask is not None: + if isinstance(mask, wp.array): + return mask + if isinstance(mask, torch.Tensor): + if mask.dtype != torch.bool: + mask = mask.to(dtype=torch.bool) + if str(mask.device) != device: + mask = mask.to(device) + return wp.from_torch(mask, dtype=wp.bool) + raise TypeError(f"Unsupported mask type: {type(mask)}") + + # --- Fast path: all elements --- + if ids is None: + return all_mask + + # --- Normalize slice to list --- + if isinstance(ids, slice): + start, stop, step = ids.indices(scratch_mask.shape[0]) + ids = list(range(start, stop, step)) + + # --- Normalize to concrete type --- + if not isinstance(ids, (torch.Tensor, wp.array)): + ids = list(ids) + + # --- Populate scratch mask --- + scratch_mask.fill_(False) + + if isinstance(ids, torch.Tensor): + if ids.numel() == 0: + return scratch_mask + if str(ids.device) != device: + ids = ids.to(device) + if ids.dtype != torch.int32: + ids = ids.to(dtype=torch.int32) + if not ids.is_contiguous(): + ids = ids.contiguous() + ids_wp = wp.from_torch(ids, dtype=wp.int32) + elif isinstance(ids, wp.array): + if ids.shape[0] == 0: + return scratch_mask + if ids.dtype != wp.int32: + raise TypeError(f"Unsupported wp.array dtype for ids: {ids.dtype}. Expected wp.int32 index array.") + ids_wp = ids + else: + if len(ids) == 0: + return scratch_mask + ids_wp = wp.array(ids, dtype=wp.int32, device=device) + + wp.launch(_populate_mask_from_ids, dim=ids_wp.shape[0], inputs=[scratch_mask, ids_wp], device=device) + return scratch_mask + + +## +# Capture safety — property guard. +## + + +def capture_unsafe(reason: str | None = None): + """Mark a callable as not CUDA-graph-capture-safe. + + Raises ``RuntimeError`` if the decorated callable is invoked while + ``wp.get_device().is_capturing`` is ``True``. + + Args: + reason: Optional explanation appended to the error message. + + Usage:: + + @property + @capture_unsafe("Relies on a Python timestamp guard.") + def projected_gravity_b(self) -> wp.array: ... + """ + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if wp.get_device().is_capturing: + msg = f"'{func.__qualname__}' cannot be called during CUDA graph capture." + if reason: + msg = f"{msg} {reason}" + raise RuntimeError(msg) + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/source/isaaclab/isaaclab/utils/wrench_composer.py b/source/isaaclab/isaaclab/utils/wrench_composer.py index 8bd42f81e9e6..6c9390e97d7b 100644 --- a/source/isaaclab/isaaclab/utils/wrench_composer.py +++ b/source/isaaclab/isaaclab/utils/wrench_composer.py @@ -5,203 +5,269 @@ from __future__ import annotations +import warnings +from collections.abc import Sequence from typing import TYPE_CHECKING +import numpy as np import torch import warp as wp -from isaaclab.utils.math import convert_quat -from isaaclab.utils.warp.kernels import add_forces_and_torques_at_position, set_forces_and_torques_at_position +from isaaclab.utils.warp import ProxyArray +from isaaclab.utils.warp.kernels import ( + add_forces_to_dual_buffers_index, + add_forces_to_dual_buffers_mask, + add_raw_wrench_buffers, + compose_wrench_to_body_frame, + reset_wrench_composer_index, + reset_wrench_composer_mask, + set_forces_to_dual_buffers_index, + set_forces_to_dual_buffers_mask, +) if TYPE_CHECKING: - from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection + from isaaclab.assets import BaseArticulation, BaseRigidObject, BaseRigidObjectCollection class WrenchComposer: - def __init__(self, asset: Articulation | RigidObject | RigidObjectCollection) -> None: - """Wrench composer. + def __init__(self, asset: BaseArticulation | BaseRigidObject | BaseRigidObjectCollection) -> None: + """Wrench composer with dual-buffer architecture. - This class is used to compose forces and torques at the body's link frame. - It can compose global wrenches and local wrenches. The result is always in the link frame of the body. + This class composes forces and torques applied to rigid bodies. Forces and torques can be + specified in either the global (world) frame or the local (body) frame. Internally, they are + stored in separate global and local input buffers. When the final composed wrench is needed, + the global contributions are rotated into the body frame and combined with the local + contributions to produce the output force and torque expressed in the body frame. + + The dual-buffer architecture uses five input buffers: + + - ``global_force_w``: Global forces [N] (world frame). + - ``global_torque_w``: Global torques [N·m] (world frame), including moment contributions + from positional forces (``cross(P, F)``). + - ``global_force_at_com_w``: Global forces [N] applied at the body's CoM (world frame, no positional torque). + - ``local_force_b``: Local forces [N] (body frame). + - ``local_torque_b``: Local torques [N·m] (body frame). + + And two output buffers: + + - ``out_force_b``: Composed force [N] in body frame. + - ``out_torque_b``: Composed torque [N·m] in body frame. Args: - asset: Asset to use. Defaults to None. + asset: Asset to use. """ self.num_envs = asset.num_instances - # Avoid isinstance to prevent circular import issues, use attribute presence instead. + # Avoid isinstance to prevent circular import issues; check by attribute presence instead. if hasattr(asset, "num_bodies"): self.num_bodies = asset.num_bodies else: - self.num_bodies = asset.num_objects + raise ValueError(f"Unsupported asset type: {asset.__class__.__name__}") self.device = asset.device self._asset = asset self._active = False - - # Avoid isinstance here due to potential circular import issues; check by attribute presence instead. - if hasattr(self._asset.data, "body_link_pos_w") and hasattr(self._asset.data, "body_link_quat_w"): - self._get_link_position_fn = lambda a=self._asset: a.data.body_link_pos_w[..., :3] - self._get_link_quaternion_fn = lambda a=self._asset: a.data.body_link_quat_w[..., :4] - elif hasattr(self._asset.data, "object_link_pos_w") and hasattr(self._asset.data, "object_link_quat_w"): - self._get_link_position_fn = lambda a=self._asset: a.data.object_link_pos_w[..., :3] - self._get_link_quaternion_fn = lambda a=self._asset: a.data.object_link_quat_w[..., :4] + self._dirty = False + if hasattr(self._asset.data, "body_com_pos_w"): + self._get_com_pos_fn = lambda a=self._asset: a.data.body_com_pos_w.warp + else: + raise ValueError(f"Unsupported asset type: {self._asset.__class__.__name__}") + if hasattr(self._asset.data, "body_link_quat_w"): + self._get_link_quat_fn = lambda a=self._asset: a.data.body_link_quat_w.warp else: raise ValueError(f"Unsupported asset type: {self._asset.__class__.__name__}") - # Create buffers - self._composed_force_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) - self._composed_torque_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) - self._ALL_ENV_INDICES_WP = wp.from_torch( - torch.arange(self.num_envs, dtype=torch.int32, device=self.device), dtype=wp.int32 - ) - self._ALL_BODY_INDICES_WP = wp.from_torch( - torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 + # -- Input buffers (5 total) -- + self._global_force_w = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._global_torque_w = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._global_force_at_com_w = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._local_force_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._local_torque_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + + # -- Output buffers (2 total) -- + self._out_force_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._out_torque_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + + # ProxyArray caches for the output buffers, exposed via the public properties. + self._out_force_b_ta = ProxyArray(self._out_force_b) + self._out_torque_b_ta = ProxyArray(self._out_torque_b) + + # -- Index / mask helper arrays -- + self._ALL_ENV_INDICES = wp.array(np.arange(self.num_envs, dtype=np.int32), dtype=wp.int32, device=self.device) + self._ALL_BODY_INDICES = wp.array( + np.arange(self.num_bodies, dtype=np.int32), dtype=wp.int32, device=self.device ) + self._ALL_ENV_MASK = wp.ones((self.num_envs), dtype=wp.bool, device=self.device) + self._ALL_BODY_MASK = wp.ones((self.num_bodies), dtype=wp.bool, device=self.device) - # Pinning the composed force and torque to the torch tensor to avoid copying the data to the torch tensor - self._composed_force_b_torch = wp.to_torch(self._composed_force_b) - self._composed_torque_b_torch = wp.to_torch(self._composed_torque_b) - # Pinning the environment and body indices to the torch tensor to allow for slicing. - self._ALL_ENV_INDICES_TORCH = wp.to_torch(self._ALL_ENV_INDICES_WP) - self._ALL_BODY_INDICES_TORCH = wp.to_torch(self._ALL_BODY_INDICES_WP) - - # Flag to check if the link poses have been updated. - self._link_poses_updated = False + # ------------------------------------------------------------------ + # Properties + # ------------------------------------------------------------------ @property def active(self) -> bool: - """Whether the wrench composer is active.""" + """Whether the wrench composer is active (has pending forces/torques). + + Set to ``True`` when any ``add_*`` or ``set_*`` method writes data. Cleared only by a + full :meth:`reset` call (no arguments). Partial resets (with ``env_ids`` or ``env_mask``) + do **not** clear this flag because checking whether all environments are zero would + require scanning the buffers, defeating the purpose of a cheap guard. + + This means the flag may remain ``True`` even if all buffers are zero after partial resets. + This is by design: the cost of an unnecessary compose + apply on zero data is negligible + compared to scanning the buffers every frame. + """ return self._active @property - def composed_force(self) -> wp.array: - """Composed force at the body's link frame. + def global_force_w(self) -> wp.array: + """Global force buffer [N] (world frame), dtype ``wp.vec3f``. Shape: ``(num_envs, num_bodies)``. - .. note:: If some of the forces are applied in the global frame, the composed force will be in the link frame - of the body. + .. note:: + This returns the underlying buffer reference for read-only inspection. Writing to it + directly bypasses the dirty flag and may produce stale output buffers. Use the + ``add_*`` or ``set_*`` methods to modify forces. + """ + return self._global_force_w - Returns: - wp.array: Composed force at the body's link frame. (num_envs, num_bodies, 3) + @property + def global_torque_w(self) -> wp.array: + """Global torque buffer [N·m] (world frame), dtype ``wp.vec3f``. Shape: ``(num_envs, num_bodies)``. + + Stores user-supplied torques plus moment contributions from positional forces (``cross(P, F)``). + + .. note:: + Read-only reference. See :attr:`global_force_w` for caveats on direct writes. """ - return self._composed_force_b + return self._global_torque_w @property - def composed_torque(self) -> wp.array: - """Composed torque at the body's link frame. + def global_force_at_com_w(self) -> wp.array: + """Global force at body's CoM buffer [N] (world frame, no positional torque). - .. note:: If some of the torques are applied in the global frame, the composed torque will be in the link frame - of the body. + dtype ``wp.vec3f``. Shape: ``(num_envs, num_bodies)``. - Returns: - wp.array: Composed torque at the body's link frame. (num_envs, num_bodies, 3) + .. note:: + Read-only reference. See :attr:`global_force_w` for caveats on direct writes. """ - return self._composed_torque_b + return self._global_force_at_com_w @property - def composed_force_as_torch(self) -> torch.Tensor: - """Composed force at the body's link frame as torch tensor. + def local_force_b(self) -> wp.array: + """Local force buffer [N] (body frame), dtype ``wp.vec3f``. Shape: ``(num_envs, num_bodies)``. - .. note:: If some of the forces are applied in the global frame, the composed force will be in the link frame - of the body. + .. note:: + Read-only reference. See :attr:`global_force_w` for caveats on direct writes. + """ + return self._local_force_b - Returns: - torch.Tensor: Composed force at the body's link frame. (num_envs, num_bodies, 3) + @property + def local_torque_b(self) -> wp.array: + """Local torque buffer [N·m] (body frame), dtype ``wp.vec3f``. Shape: ``(num_envs, num_bodies)``. + + .. note:: + Read-only reference. See :attr:`global_force_w` for caveats on direct writes. """ - return self._composed_force_b_torch + return self._local_torque_b @property - def composed_torque_as_torch(self) -> torch.Tensor: - """Composed torque at the body's link frame as torch tensor. + def out_force_b(self) -> ProxyArray: + """Composed output force [N] in the body frame. - .. note:: If some of the torques are applied in the global frame, the composed torque will be in the link frame - of the body. + Shape is ``(num_envs, num_bodies)``, dtype = ``wp.vec3f``. In torch this resolves to + ``(num_envs, num_bodies, 3)``. Use ``.warp`` for the underlying :class:`wp.array` or + ``.torch`` for a cached zero-copy :class:`torch.Tensor` view. - Returns: - torch.Tensor: Composed torque at the body's link frame. (num_envs, num_bodies, 3) + Triggers composition from input buffers if dirty. """ - return self._composed_torque_b_torch + self._ensure_composed() + return self._out_force_b_ta - def add_forces_and_torques( + @property + def out_torque_b(self) -> ProxyArray: + """Composed output torque [N·m] in the body frame. + + Shape is ``(num_envs, num_bodies)``, dtype = ``wp.vec3f``. In torch this resolves to + ``(num_envs, num_bodies, 3)``. Use ``.warp`` for the underlying :class:`wp.array` or + ``.torch`` for a cached zero-copy :class:`torch.Tensor` view. + + Triggers composition from input buffers if dirty. + """ + self._ensure_composed() + return self._out_torque_b_ta + + @property + def composed_force(self) -> ProxyArray: + """Composed force in the body frame. + + .. deprecated:: 4.5.33 + Use :attr:`out_force_b` instead. + """ + warnings.warn( + "The property 'composed_force' is deprecated. Use 'out_force_b' instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.out_force_b + + @property + def composed_torque(self) -> ProxyArray: + """Composed torque in the body frame. + + .. deprecated:: 4.5.33 + Use :attr:`out_torque_b` instead. + """ + warnings.warn( + "The property 'composed_torque' is deprecated. Use 'out_torque_b' instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.out_torque_b + + # ------------------------------------------------------------------ + # Public methods + # ------------------------------------------------------------------ + + def add_forces_and_torques_index( self, forces: wp.array | torch.Tensor | None = None, torques: wp.array | torch.Tensor | None = None, positions: wp.array | torch.Tensor | None = None, - body_ids: wp.array | torch.Tensor | None = None, - env_ids: wp.array | torch.Tensor | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, is_global: bool = False, ): - """Add forces and torques to the composed force and torque. + """Add forces and torques into the input buffers using index-based selection. - Composed force and torque are the sum of all the forces and torques applied to the body. - It can compose global wrenches and local wrenches. The result is always in the link frame of the body. - - The user can provide any combination of forces, torques, and positions. - - .. note:: Users may want to call `reset` function after every simulation step to ensure no force is carried - over to the next step. However, this may not necessary if the user calls `set_forces_and_torques` function - instead of `add_forces_and_torques`. + Accumulates onto whatever is already in the buffers. The result is always composed into the + body frame when the output properties are accessed. Args: - forces: Forces. (num_envs, num_bodies, 3). Defaults to None. - torques: Torques. (num_envs, num_bodies, 3). Defaults to None. - positions: Positions. (num_envs, num_bodies, 3). Defaults to None. - body_ids: Body ids. (num_envs, num_bodies). Defaults to None (all bodies). - env_ids: Environment ids. (num_envs). Defaults to None (all environments). - is_global: Whether the forces and torques are applied in the global frame. Defaults to False. - - Raises: - ValueError: If the type of the input is not supported. - ValueError: If the input is a slice and it is not None. + forces: Forces [N]. Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + torques: Torques [N·m]. Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + positions: The positions [m] at which forces act. If `is_global` is True, these are global + positions expressed in the world frame. If `is_global` is False, these are offsets from the + body's CoM expressed in the body frame. If None, forces are assumed to act at the body's + CoM, independent of the `is_global` flag. + Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + body_ids: Body indices. Defaults to None (all bodies). + env_ids: Environment indices. Defaults to None (all environments). + is_global: Whether the forces and torques are expressed in the global world frame or the local body frame. + Defaults to False. """ - # Resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES_WP - elif isinstance(env_ids, torch.Tensor): - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - elif isinstance(env_ids, list): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - elif isinstance(env_ids, slice): - if env_ids == slice(None): - env_ids = self._ALL_ENV_INDICES_WP - else: - raise ValueError(f"Doesn't support slice input for env_ids: {env_ids}") - # -- body_ids - if body_ids is None: - body_ids = self._ALL_BODY_INDICES_WP - elif isinstance(body_ids, torch.Tensor): - body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) - elif isinstance(body_ids, list): - body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) - elif isinstance(body_ids, slice): - if body_ids == slice(None): - body_ids = self._ALL_BODY_INDICES_WP - else: - raise ValueError(f"Doesn't support slice input for body_ids: {body_ids}") - - # Resolve remaining inputs - # -- don't launch if no forces or torques are provided + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) if forces is None and torques is None: - return - if isinstance(forces, torch.Tensor): - forces = wp.from_torch(forces, dtype=wp.vec3f) - if isinstance(torques, torch.Tensor): - torques = wp.from_torch(torques, dtype=wp.vec3f) - if isinstance(positions, torch.Tensor): - positions = wp.from_torch(positions, dtype=wp.vec3f) - - # Get the link positions and quaternions - if not self._link_poses_updated: - self._link_positions = wp.from_torch(self._get_link_position_fn().clone(), dtype=wp.vec3f) - self._link_quaternions = wp.from_torch( - convert_quat(self._get_link_quaternion_fn().clone(), to="xyzw"), dtype=wp.quatf + warnings.warn( + "No forces or torques provided. No force will be added.", + UserWarning, + stacklevel=2, ) - self._link_poses_updated = True + return - # Set the active flag to true self._active = True + self._dirty = True wp.launch( - add_forces_and_torques_at_position, + add_forces_to_dual_buffers_index, dim=(env_ids.shape[0], body_ids.shape[0]), inputs=[ env_ids, @@ -209,16 +275,17 @@ def add_forces_and_torques( forces, torques, positions, - self._link_positions, - self._link_quaternions, - self._composed_force_b, - self._composed_torque_b, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, is_global, ], device=self.device, ) - def set_forces_and_torques( + def set_forces_and_torques_index( self, forces: wp.array | torch.Tensor | None = None, torques: wp.array | torch.Tensor | None = None, @@ -227,80 +294,43 @@ def set_forces_and_torques( env_ids: wp.array | torch.Tensor | None = None, is_global: bool = False, ): - """Set forces and torques to the composed force and torque. - - Composed force and torque are the sum of all the forces and torques applied to the body. - It can compose global wrenches and local wrenches. The result is always in the link frame of the body. + """Set forces and torques into the input buffers using index-based selection. - The user can provide any combination of forces, torques, and positions. + Resets the specified environments first, then writes the new values. This replaces any + previously accumulated forces/torques for the targeted environments while leaving other + environments untouched. Args: - forces: Forces. (num_envs, num_bodies, 3). Defaults to None. - torques: Torques. (num_envs, num_bodies, 3). Defaults to None. - positions: Positions. (num_envs, num_bodies, 3). Defaults to None. - body_ids: Body ids. (num_envs, num_bodies). Defaults to None (all bodies). - env_ids: Environment ids. (num_envs). Defaults to None (all environments). - is_global: Whether the forces and torques are applied in the global frame. Defaults to False. - - Raises: - ValueError: If the type of the input is not supported. - ValueError: If the input is a slice and it is not None. + forces: Forces [N]. Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + torques: Torques [N·m]. Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + positions: The positions [m] at which forces act. If `is_global` is True, these are global + positions expressed in the world frame. If `is_global` is False, these are offsets from the + body's CoM expressed in the body frame. If None, forces are assumed to act at the body's + CoM, independent of the `is_global` flag. + Shape: (len(env_ids), len(body_ids), 3). Defaults to None. + body_ids: Body indices. Defaults to None (all bodies). + env_ids: Environment indices. Defaults to None (all environments). + is_global: Whether the forces and torques are expressed in the global world frame or the local body frame. + Defaults to False. """ - # Resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES_WP - elif isinstance(env_ids, torch.Tensor): - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - elif isinstance(env_ids, list): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - elif isinstance(env_ids, slice): - if env_ids == slice(None): - env_ids = self._ALL_ENV_INDICES_WP - else: - raise ValueError(f"Doesn't support slice input for env_ids: {env_ids}") - # -- body_ids - if body_ids is None: - body_ids = self._ALL_BODY_INDICES_WP - elif isinstance(body_ids, torch.Tensor): - body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) - elif isinstance(body_ids, list): - body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) - elif isinstance(body_ids, slice): - if body_ids == slice(None): - body_ids = self._ALL_BODY_INDICES_WP - else: - raise ValueError(f"Doesn't support slice input for body_ids: {body_ids}") - # Resolve remaining inputs - # -- don't launch if no forces or torques are provided + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) if forces is None and torques is None: - return - if forces is None: - forces = wp.empty((0, 0), dtype=wp.vec3f, device=self.device) - elif isinstance(forces, torch.Tensor): - forces = wp.from_torch(forces, dtype=wp.vec3f) - if torques is None: - torques = wp.empty((0, 0), dtype=wp.vec3f, device=self.device) - elif isinstance(torques, torch.Tensor): - torques = wp.from_torch(torques, dtype=wp.vec3f) - if positions is None: - positions = wp.empty((0, 0), dtype=wp.vec3f, device=self.device) - elif isinstance(positions, torch.Tensor): - positions = wp.from_torch(positions, dtype=wp.vec3f) - - # Get the link positions and quaternions - if not self._link_poses_updated: - self._link_positions = wp.from_torch(self._get_link_position_fn().clone(), dtype=wp.vec3f) - self._link_quaternions = wp.from_torch( - convert_quat(self._get_link_quaternion_fn().clone(), to="xyzw"), dtype=wp.quatf + warnings.warn( + "No forces or torques provided. No force will be set.", + UserWarning, + stacklevel=2, ) - self._link_poses_updated = True + return + + # Clear input buffers for the targeted environments before writing + self.reset(env_ids=env_ids) - # Set the active flag to true self._active = True + self._dirty = True wp.launch( - set_forces_and_torques_at_position, + set_forces_to_dual_buffers_index, dim=(env_ids.shape[0], body_ids.shape[0]), inputs=[ env_ids, @@ -308,42 +338,389 @@ def set_forces_and_torques( forces, torques, positions, - self._link_positions, - self._link_quaternions, - self._composed_force_b, - self._composed_torque_b, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, is_global, ], device=self.device, ) - def reset(self, env_ids: wp.array | torch.Tensor | None = None): - """Reset the composed force and torque. + def add_forces_and_torques_mask( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_mask: wp.array | torch.Tensor | None = None, + env_mask: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ): + """Add forces and torques into the input buffers using mask-based selection. - This function will reset the composed force and torque to zero. - It will also make sure the link positions and quaternions are updated in the next call of the - `add_forces_and_torques` or `set_forces_and_torques` functions. + Accumulates onto whatever is already in the buffers. - .. note:: This function should be called after every simulation step / reset to ensure no force is carried - over to the next step. + Args: + forces: Forces [N]. Shape: (num_envs, num_bodies, 3). Defaults to None. + torques: Torques [N·m]. Shape: (num_envs, num_bodies, 3). Defaults to None. + positions: The positions [m] at which forces act. If `is_global` is True, these are global + positions expressed in the world frame. If `is_global` is False, these are offsets from the + body's CoM expressed in the body frame. If None, forces are assumed to act at the body's + CoM, independent of the `is_global` flag. + Shape: (num_envs, num_bodies, 3). Defaults to None. + body_mask: Body mask. Shape: (num_bodies,). Defaults to None (all bodies). + env_mask: Environment mask. Shape: (num_envs,). Defaults to None (all environments). + is_global: Whether the forces and torques are expressed in the global world frame or the local body frame. + Defaults to False. """ - if env_ids is None: - self._composed_force_b.zero_() - self._composed_torque_b.zero_() + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + if forces is None and torques is None: + warnings.warn( + "No forces or torques provided. No force will be added.", + UserWarning, + stacklevel=2, + ) + return + + self._active = True + self._dirty = True + + wp.launch( + add_forces_to_dual_buffers_mask, + dim=(self.num_envs, self.num_bodies), + inputs=[ + env_mask, + body_mask, + forces, + torques, + positions, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + is_global, + ], + device=self.device, + ) + + def set_forces_and_torques_mask( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_mask: wp.array | torch.Tensor | None = None, + env_mask: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ): + """Set forces and torques into the input buffers using mask-based selection. + + Resets the masked environments first, then writes the new values. This replaces any + previously accumulated forces/torques for the masked environments while leaving other + environments untouched. + + Args: + forces: Forces [N]. Shape: (num_envs, num_bodies, 3). Defaults to None. + torques: Torques [N·m]. Shape: (num_envs, num_bodies, 3). Defaults to None. + positions: The positions [m] at which forces act. If `is_global` is True, these are global + positions expressed in the world frame. If `is_global` is False, these are offsets from the + body's CoM expressed in the body frame. If None, forces are assumed to act at the body's + CoM, independent of the `is_global` flag. + Shape: (num_envs, num_bodies, 3). Defaults to None. + body_mask: Body mask. Shape: (num_bodies,). Defaults to None (all bodies). + env_mask: Environment mask. Shape: (num_envs,). Defaults to None (all environments). + is_global: Whether the forces and torques are expressed in the global world frame or the local body frame. + Defaults to False. + """ + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + if forces is None and torques is None: + warnings.warn( + "No forces or torques provided. No force will be set.", + UserWarning, + stacklevel=2, + ) + return + + # Clear input buffers for the masked environments before writing + self.reset(env_mask=env_mask) + + self._active = True + self._dirty = True + + wp.launch( + set_forces_to_dual_buffers_mask, + dim=(self.num_envs, self.num_bodies), + inputs=[ + env_mask, + body_mask, + forces, + torques, + positions, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + is_global, + ], + device=self.device, + ) + + def add_raw_buffers_from(self, other: WrenchComposer): + """Add another composer's raw input buffers into this composer's input buffers. + + This performs element-wise addition of all five input buffers from ``other`` into ``self``. + Useful for combining wrenches from multiple sources before composition. + + Args: + other: Another WrenchComposer whose input buffers will be added into this one. + """ + if not other._active: + return + if __debug__: + if other.num_envs != self.num_envs or other.num_bodies != self.num_bodies: + raise ValueError( + f"Cannot add buffers from composer with shape ({other.num_envs}, {other.num_bodies}) " + f"into composer with shape ({self.num_envs}, {self.num_bodies})." + ) + + self._active = True + self._dirty = True + + wp.launch( + add_raw_wrench_buffers, + dim=(self.num_envs, self.num_bodies), + inputs=[ + other._global_force_w, + other._global_torque_w, + other._global_force_at_com_w, + other._local_force_b, + other._local_torque_b, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + ], + device=self.device, + ) + + def compose_to_body_frame(self): + """Compose the five input buffers into the two output buffers in body frame. + + This corrects world-frame torques for the body's CoM position, rotates global forces and torques into the + body frame, then adds local-frame contributions. After this call, ``out_force_b`` and ``out_torque_b`` + contain the final composed wrench. + + The dirty flag is cleared after composition. + """ + com_pos_w = self._get_com_pos_fn() + link_quat_w = self._get_link_quat_fn() + + wp.launch( + compose_wrench_to_body_frame, + dim=(self.num_envs, self.num_bodies), + inputs=[ + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + com_pos_w, + link_quat_w, + self._out_force_b, + self._out_torque_b, + ], + device=self.device, + ) + self._dirty = False + + def reset( + self, + env_ids: wp.array | torch.Tensor | Sequence[int] | slice | None = None, + env_mask: wp.array | None = None, + ): + """Reset the wrench composer buffers. + + With no arguments, zeros all seven buffers (5 input + 2 output) and clears all flags. + With ``env_ids`` or ``env_mask``, performs a partial reset on the specified environments + using the reset kernels. + + .. caution:: If both ``env_ids`` and ``env_mask`` are provided, ``env_mask`` takes precedence. + + Args: + env_ids: Environment indices. Defaults to None (all environments). + env_mask: Environment mask. Defaults to None (all environments). + """ + if env_ids is None and env_mask is None: + # Full reset: zero all 7 buffers + self._global_force_w.zero_() + self._global_torque_w.zero_() + self._global_force_at_com_w.zero_() + self._local_force_b.zero_() + self._local_torque_b.zero_() + self._out_force_b.zero_() + self._out_torque_b.zero_() self._active = False + self._dirty = False + elif env_mask is not None: + wp.launch( + reset_wrench_composer_mask, + dim=(self.num_envs, self.num_bodies), + inputs=[ + env_mask, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + self._out_force_b, + self._out_torque_b, + ], + device=self.device, + ) + self._dirty = True else: - indices = env_ids - if isinstance(env_ids, torch.Tensor): - indices = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + # Partial reset via index + if env_ids is None or env_ids == slice(None): + env_ids = self._ALL_ENV_INDICES elif isinstance(env_ids, list): - indices = wp.array(env_ids, dtype=wp.int32, device=self.device) - elif isinstance(env_ids, slice): - if env_ids == slice(None): - indices = self._ALL_ENV_INDICES_WP - else: - indices = env_ids + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + elif isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + + wp.launch( + reset_wrench_composer_index, + dim=(env_ids.shape[0], self.num_bodies), + inputs=[ + env_ids, + self._global_force_w, + self._global_torque_w, + self._global_force_at_com_w, + self._local_force_b, + self._local_torque_b, + self._out_force_b, + self._out_torque_b, + ], + device=self.device, + ) + self._dirty = True + + # ------------------------------------------------------------------ + # Deprecated methods + # ------------------------------------------------------------------ + + def add_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, + is_global: bool = False, + ): + """Deprecated, same as :meth:`add_forces_and_torques_index`. + + .. deprecated:: 4.5.33 + Use :meth:`add_forces_and_torques_index` instead. + """ + warnings.warn( + "The function 'add_forces_and_torques' is deprecated. Please use 'add_forces_and_torques_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.add_forces_and_torques_index(forces, torques, positions, body_ids, env_ids, is_global) + + def set_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ): + """Deprecated, same as :meth:`set_forces_and_torques_index`. - self._composed_force_b[indices].zero_() - self._composed_torque_b[indices].zero_() + .. deprecated:: 4.5.33 + Use :meth:`set_forces_and_torques_index` instead. + """ + warnings.warn( + "The function 'set_forces_and_torques' is deprecated. Please use 'set_forces_and_torques_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.set_forces_and_torques_index(forces, torques, positions, body_ids, env_ids, is_global) + + # ------------------------------------------------------------------ + # Internal helpers + # ------------------------------------------------------------------ + + def _resolve_env_ids(self, env_ids: wp.array | torch.Tensor | list | slice | None) -> wp.array: + """Resolve environment IDs to a warp int32 array. + + Args: + env_ids: Environment indices as any supported type, or None for all environments. + + Returns: + Warp array of int32 environment indices. + + Raises: + TypeError: If ``env_ids`` is an unsupported type. + """ + if env_ids is None: + return self._ALL_ENV_INDICES + # Check tensor types before slice comparison (tensor == slice crashes) + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids.contiguous(), dtype=wp.int32) + if isinstance(env_ids, wp.array): + return env_ids + if env_ids == slice(None): + return self._ALL_ENV_INDICES + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + raise TypeError( + f"env_ids must be None, slice(None), list, torch.Tensor, or wp.array, got {type(env_ids).__name__}" + ) + + def _resolve_body_ids(self, body_ids: wp.array | torch.Tensor | list | slice | None) -> wp.array: + """Resolve body IDs to a warp int32 array. + + Args: + body_ids: Body indices as any supported type, or None for all bodies. + + Returns: + Warp array of int32 body indices. + + Raises: + TypeError: If ``body_ids`` is an unsupported type. + """ + if body_ids is None: + return self._ALL_BODY_INDICES + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids.contiguous(), dtype=wp.int32) + if isinstance(body_ids, wp.array): + return body_ids + if body_ids == slice(None): + return self._ALL_BODY_INDICES + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + raise TypeError( + f"body_ids must be None, slice(None), list, torch.Tensor, or wp.array, got {type(body_ids).__name__}" + ) - self._link_poses_updated = False + def _ensure_composed(self): + """Compose input buffers into output buffers if dirty.""" + if self._dirty: + self.compose_to_body_frame() diff --git a/source/isaaclab/isaaclab/visualizers/__init__.py b/source/isaaclab/isaaclab/visualizers/__init__.py new file mode 100644 index 000000000000..c5bcae09f363 --- /dev/null +++ b/source/isaaclab/isaaclab/visualizers/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Visualizer base and factory entrypoints.""" + +from __future__ import annotations + +from .base_visualizer import BaseVisualizer +from .visualizer import Visualizer +from .visualizer_cfg import VisualizerCfg + +__all__ = ["BaseVisualizer", "Visualizer", "VisualizerCfg"] diff --git a/source/isaaclab/isaaclab/visualizers/__init__.pyi b/source/isaaclab/isaaclab/visualizers/__init__.pyi new file mode 100644 index 000000000000..0fcbfd637dc9 --- /dev/null +++ b/source/isaaclab/isaaclab/visualizers/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "BaseVisualizer", + "Visualizer", + "VisualizerCfg", +] + +from .base_visualizer import BaseVisualizer +from .visualizer import Visualizer +from .visualizer_cfg import VisualizerCfg diff --git a/source/isaaclab/isaaclab/visualizers/base_visualizer.py b/source/isaaclab/isaaclab/visualizers/base_visualizer.py new file mode 100644 index 000000000000..b0fc5a81088f --- /dev/null +++ b/source/isaaclab/isaaclab/visualizers/base_visualizer.py @@ -0,0 +1,323 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for visualizers.""" + +from __future__ import annotations + +import logging +import random +import re +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from isaaclab.physics import SceneDataProvider + + from .visualizer_cfg import VisualizerCfg + + +logger = logging.getLogger(__name__) + + +class BaseVisualizer(ABC): + """Base class for all visualizer backends. + + Lifecycle: __init__() -> initialize() -> step() (repeated) -> close() + """ + + def __init__(self, cfg: VisualizerCfg): + """Initialize visualizer with config. + + Args: + cfg: Visualizer configuration. + """ + self.cfg = cfg + self._scene_data_provider = None + self._is_initialized = False + self._is_closed = False + + @abstractmethod + def initialize(self, scene_data_provider: SceneDataProvider) -> None: + """Initialize visualizer resources. + + Args: + scene_data_provider: Scene data provider used by the visualizer. + """ + raise NotImplementedError + + @abstractmethod + def step(self, dt: float) -> None: + """Update visualization for one step. + + Args: + dt: Time step in seconds. + """ + raise NotImplementedError + + @abstractmethod + def close(self) -> None: + """Clean up resources.""" + raise NotImplementedError + + @abstractmethod + def is_running(self) -> bool: + """Check if visualizer is still running (e.g., window not closed). + + Returns: + ``True`` if the visualizer is running, otherwise ``False``. + """ + raise NotImplementedError + + def is_training_paused(self) -> bool: + """Check if training is paused by visualizer controls. + + Returns: + ``True`` if training is paused, otherwise ``False``. + """ + return False + + def is_rendering_paused(self) -> bool: + """Check if rendering is paused by visualizer controls. + + Returns: + ``True`` if rendering is paused, otherwise ``False``. + """ + return False + + @property + def is_initialized(self) -> bool: + """Check if initialize() has been called.""" + return self._is_initialized + + @property + def is_closed(self) -> bool: + """Check if close() has been called.""" + return self._is_closed + + def supports_markers(self) -> bool: + """Check if visualizer supports VisualizationMarkers. + + Returns: + ``True`` if marker rendering is supported, otherwise ``False``. + """ + return False + + def supports_live_plots(self) -> bool: + """Check if visualizer supports LivePlots. + + Returns: + ``True`` if live plots are supported, otherwise ``False``. + """ + return False + + def requires_forward_before_step(self) -> bool: + """Whether simulation should run forward() before step(). + + Returns: + ``True`` when forward kinematics should run before stepping. + """ + return False + + def pumps_app_update(self) -> bool: + """Whether this visualizer calls omni.kit.app.get_app().update() in step(). + + Returns True for visualizers (e.g. KitVisualizer) that already pump the Kit + app loop, so SimulationContext.render() can skip its own app.update() call + and avoid double-rendering. + """ + return False + + def get_visualized_env_ids(self) -> list[int] | None: + """Return env IDs this visualizer is displaying, if any. + + Returns: + Visualized environment ids, or ``None`` for all environments. + """ + return self._env_ids + + def _compute_visualized_env_ids(self) -> list[int] | None: + """Compute which environment indices to visualize from config. + + Returns: + Selected environment ids, or ``None`` to visualize all environments. + """ + if self._scene_data_provider is None: + return None + cfg = self.cfg + num_envs = self._scene_data_provider.get_metadata().get("num_envs", 0) + if num_envs <= 0: + logger.debug("[Visualizer] num_envs is 0 or missing from provider metadata; env selection disabled.") + return None + # Explicit list wins; never combine with random cap-only mode. + if cfg.visible_env_indices is not None: + return [i for i in cfg.visible_env_indices if 0 <= i < num_envs] + + max_visible = getattr(cfg, "max_visible_envs", None) + # Random subset only for cap-only mode: needs a cap and no explicit indices (see VisualizerCfg). + if max_visible is not None and getattr(cfg, "randomly_sample_visible_envs", True) and int(max_visible) >= 0: + k = min(int(max_visible), num_envs) + # k == 0: sample(range(n), 0) is []; contiguous resolver used the same convention. + return sorted(random.sample(range(num_envs), k)) + return None + + def get_rendering_dt(self) -> float | None: + """Get rendering time step. + + Returns: + Rendering time step override, or ``None`` to use interface default. + """ + return None + + def set_camera_view(self, eye: tuple, target: tuple) -> None: + """Set camera view position. + + Args: + eye: Camera eye position. + target: Camera target position. + """ + pass + + def _resolve_cfg_camera_pose( + self, _visualizer_name: str + ) -> tuple[tuple[float, float, float], tuple[float, float, float]]: + """Resolve camera pose from cfg eye/lookat fields.""" + eye = tuple(float(v) for v in self.cfg.eye) + lookat = tuple(float(v) for v in self.cfg.lookat) + return eye, lookat + + def _resolve_camera_pose_from_usd_path( + self, usd_path: str + ) -> tuple[tuple[float, float, float], tuple[float, float, float]] | None: + """Resolve camera pose/target from provider camera transforms. + + Args: + usd_path: Concrete USD camera path. + + Returns: + Eye/target tuple when available, otherwise ``None``. + """ + if self._scene_data_provider is None: + return None + transforms = self._scene_data_provider.get_camera_transforms() + if not transforms: + return None + + env_id, template_path = self._resolve_template_camera_path(usd_path) + camera_transform = self._lookup_camera_transform(transforms, template_path, env_id) + if camera_transform is None: + return None + pos, ori = camera_transform + + pos_t = (float(pos[0]), float(pos[1]), float(pos[2])) + ori_t = (float(ori[0]), float(ori[1]), float(ori[2]), float(ori[3])) + forward = self._quat_rotate_vec(ori_t, (0.0, 0.0, -1.0)) + target = (pos_t[0] + forward[0], pos_t[1] + forward[1], pos_t[2] + forward[2]) + return pos_t, target + + def _resolve_template_camera_path(self, usd_path: str) -> tuple[int, str]: + """Normalize concrete env camera path to templated camera path. + + Args: + usd_path: Concrete USD camera path. + + Returns: + Tuple of environment id and templated camera path. + """ + env_pattern = re.compile(r"(?P/World/envs/env_)(?P\d+)(?P/.*)") + if match := env_pattern.match(usd_path): + return int(match.group("id")), match.group("root") + "%d" + match.group("path") + return 0, usd_path + + def _lookup_camera_transform( + self, transforms: dict[str, Any], template_path: str, env_id: int + ) -> tuple[list[float], list[float]] | None: + """Fetch camera position/orientation for a templated path and environment. + + Args: + transforms: Camera transform dictionary from provider. + template_path: Templated camera path. + env_id: Environment id to query. + + Returns: + Position/orientation tuple when available, otherwise ``None``. + """ + order = transforms.get("order", []) + positions = transforms.get("positions", []) + orientations = transforms.get("orientations", []) + + if template_path not in order: + return None + idx = order.index(template_path) + if idx >= len(positions) or idx >= len(orientations): + return None + if env_id < 0 or env_id >= len(positions[idx]): + return None + pos = positions[idx][env_id] + ori = orientations[idx][env_id] + if pos is None or ori is None: + return None + return pos, ori + + @staticmethod + def _quat_rotate_vec( + quat_xyzw: tuple[float, float, float, float], vec: tuple[float, float, float] + ) -> tuple[float, float, float]: + """Rotate a vector by a quaternion. + + Args: + quat_xyzw: Quaternion in xyzw order. + vec: Input vector. + + Returns: + Rotated vector. + """ + import torch + + from isaaclab.utils.math import quat_apply + + quat = torch.tensor(quat_xyzw, dtype=torch.float32).unsqueeze(0) + vector = torch.tensor(vec, dtype=torch.float32).unsqueeze(0) + rotated = quat_apply(quat, vector)[0] + return (float(rotated[0]), float(rotated[1]), float(rotated[2])) + + def reset(self, soft: bool = False) -> None: + """Reset visualizer state. + + Args: + soft: Whether to perform a soft reset. + """ + pass + + def _log_initialization_table(self, logger: logging.Logger, title: str, rows: list[tuple[str, Any]]) -> None: + """Log a compact initialization table for a visualizer. + + Args: + logger: Logger used to emit the table. + title: Table title. + rows: Table row key/value pairs. + """ + from prettytable import PrettyTable + + table = PrettyTable() + table.title = title + table.field_names = ["Field", "Value"] + table.align["Field"] = "l" + table.align["Value"] = "l" + for key, value in rows: + table.add_row([key, value]) + logger.info("Visualizer initialization:\n%s", table.get_string()) + + def play(self) -> None: + """Handle simulation play/start. No-op by default.""" + pass + + def pause(self) -> None: + """Handle simulation pause. No-op by default.""" + pass + + def stop(self) -> None: + """Handle simulation stop. No-op by default.""" + pass diff --git a/source/isaaclab/isaaclab/visualizers/visualizer.py b/source/isaaclab/isaaclab/visualizers/visualizer.py new file mode 100644 index 000000000000..b84617ed870e --- /dev/null +++ b/source/isaaclab/isaaclab/visualizers/visualizer.py @@ -0,0 +1,81 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory for creating visualizer instances.""" + +from __future__ import annotations + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_visualizer import BaseVisualizer + +# Visualizer types; each loads from isaaclab_visualizers. for minimal deps. +_VISUALIZER_TYPES = ("kit", "newton", "rerun", "viser") + + +class Visualizer(FactoryBase, BaseVisualizer): + """Factory for creating visualizer instances.""" + + _backend_class_names = { + "kit": "KitVisualizer", + "newton": "NewtonVisualizer", + "rerun": "RerunVisualizer", + "viser": "ViserVisualizer", + } + + @classmethod + def _get_backend(cls, cfg, *args, **kwargs) -> str: + """Resolve backend key from visualizer config. + + Args: + cfg: Visualizer configuration instance. + *args: Unused positional arguments. + **kwargs: Unused keyword arguments. + + Returns: + Backend key used by the factory. + + Raises: + ValueError: If visualizer type is not registered. + """ + visualizer_type = getattr(cfg, "visualizer_type", None) + if visualizer_type not in _VISUALIZER_TYPES: + raise ValueError( + f"Visualizer type '{visualizer_type}' is not registered. Valid types: " + f"{', '.join(repr(k) for k in _VISUALIZER_TYPES)}." + ) + return visualizer_type + + @classmethod + def _get_module_name(cls, backend: str) -> str: + """Return module path for a visualizer backend. + + Args: + backend: Backend key. + + Returns: + Module import path for the backend. + """ + return f"isaaclab_visualizers.{backend}" + + def __new__(cls, cfg, *args, **kwargs) -> BaseVisualizer: + """Create a new visualizer instance based on the visualizer type. + + Args: + cfg: Visualizer configuration instance. + *args: Additional constructor positional arguments. + **kwargs: Additional constructor keyword arguments. + + Returns: + Instantiated backend visualizer. + + Raises: + TypeError: If backend class does not inherit from ``BaseVisualizer``. + """ + result = super().__new__(cls, cfg, *args, **kwargs) + if not isinstance(result, BaseVisualizer): + name = type(result).__name__ + raise TypeError(f"Backend visualizer {name!r} must inherit from BaseVisualizer.") + return result diff --git a/source/isaaclab/isaaclab/visualizers/visualizer_cfg.py b/source/isaaclab/isaaclab/visualizers/visualizer_cfg.py new file mode 100644 index 000000000000..1ee4cde038b5 --- /dev/null +++ b/source/isaaclab/isaaclab/visualizers/visualizer_cfg.py @@ -0,0 +1,100 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base configuration for visualizers.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from .base_visualizer import BaseVisualizer + + +@configclass +class VisualizerCfg: + """Base configuration for all visualizer backends. + + Note: + This is an abstract base class and should not be instantiated directly. + Use specific configs from isaaclab_visualizers: KitVisualizerCfg, NewtonVisualizerCfg, + RerunVisualizerCfg, or ViserVisualizerCfg (from isaaclab_visualizers.kit/.newton/.rerun/.viser). + """ + + visualizer_type: str | None = None + """Type identifier (e.g., 'newton', 'rerun', 'viser', 'kit'). Must be overridden by subclasses.""" + + enable_markers: bool = True + """Enable visualization markers (debug drawing).""" + + enable_live_plots: bool = True + """Enable live plotting of data.""" + + eye: tuple[float, float, float] = (7.5, 7.5, 7.5) + """Initial camera eye position (x, y, z) in world coordinates.""" + + lookat: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Initial camera look-at point (x, y, z) in world coordinates.""" + + cam_source: Literal["cfg", "prim_path"] = "cfg" + """Camera source mode: 'cfg' uses eye/lookat, 'prim_path' follows a camera prim.""" + + cam_prim_path: str = "/World/envs/env_0/Camera" + """Absolute USD path to a camera prim when cam_source='prim_path'.""" + + max_visible_envs: int | None = None + """Upper bound on how many envs are shown. + + * If visible_env_indices is not None, then this field will apply also + to the explicit env indices set to the visible_env_indices. + """ + + visible_env_indices: list[int] | None = None + """env indices to visualize in order (out-of-range indices are dropped).""" + + randomly_sample_visible_envs: bool = True + """If ``max_visible_envs`` is provided, when enabled, selected visible envs are randomly sampled. + If disabled, the first ``max_visible_envs`` envs are selected. + + * Note: ``visible_env_indices`` overrides this field. + """ + + def get_visualizer_type(self) -> str | None: + """Get the visualizer type identifier. + + Returns: + The visualizer type string, or None if not set (base class). + """ + return self.visualizer_type + + def create_visualizer(self) -> BaseVisualizer: + """Create visualizer instance from this config using factory pattern. + + Loads the matching backend from isaaclab_visualizers (e.g. isaaclab_visualizers.rerun). + + Raises: + ValueError: If visualizer_type is None (base class used directly) or not registered. + ImportError: If isaaclab_visualizers or the requested backend extra is not installed. + """ + from .visualizer import Visualizer + + if self.visualizer_type is None: + raise ValueError( + "Cannot create visualizer from base VisualizerCfg class. " + "Use a specific config from isaaclab_visualizers " + "(e.g. KitVisualizerCfg, NewtonVisualizerCfg, RerunVisualizerCfg, ViserVisualizerCfg)." + ) + + try: + return Visualizer(self) + except (ValueError, ImportError, ModuleNotFoundError) as exc: + if self.visualizer_type in ("newton", "rerun", "viser", "kit"): + raise ImportError( + f"Visualizer '{self.visualizer_type}' requires the isaaclab_visualizers package. " + f"Install with: pip install isaaclab_visualizers[{self.visualizer_type}]" + ) from exc + raise diff --git a/source/isaaclab/setup.py b/source/isaaclab/setup.py index 369bf5bdf8a9..67c18c4c62d1 100644 --- a/source/isaaclab/setup.py +++ b/source/isaaclab/setup.py @@ -18,8 +18,8 @@ # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ # generic - "numpy<2", - "torch>=2.7", + "numpy>=2", + "torch>=2.10", "onnx>=1.18.0", # 1.16.2 throws access violation on Windows "prettytable==3.3.0", "toml", @@ -29,22 +29,34 @@ "gymnasium==1.2.1", # procedural-generation "trimesh", - "pyglet<2", + "pyglet>=2.1.6,<3", + "mujoco==3.6.0", + "mujoco-warp==3.6.0", # image processing "transformers==4.57.6", "einops", # needed for transformers, doesn't always auto-install - "warp-lang", + "warp-lang==1.12.0", + "matplotlib>=3.10.3", # minimum version for Python 3.12 support # make sure this is consistent with isaac sim version - "pillow==11.3.0", + "pillow==12.1.1", + # required by omni.replicator.core S3 backend + "botocore", # livestream "starlette==0.49.1", + "omniverseclient==2.71.1.7015", # testing "pytest", "pytest-mock", "junitparser", - "flatdict==4.0.0", + "coverage==7.6.1", + "debugpy>=1.8.20", + "flatdict>=4.1.0", "flaky", "packaging", + "psutil", + # Required by pydantic-core/imgui_bundle on Python 3.12 (Sentinel symbol). + "typing_extensions>=4.14.0", + "lazy_loader>=0.4", ] # Append Linux x86_64 and ARM64 deps via PEP 508 markers @@ -52,14 +64,59 @@ SUPPORTED_ARCHS = "platform_machine in 'x86_64,AMD64'" INSTALL_REQUIRES += [ # required by isaaclab.isaaclab.controllers.pink_ik + f"pin ; platform_system == 'Linux' and ({SUPPORTED_ARCHS_ARM})", f"pin-pink==3.1.0 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS_ARM})", f"daqp==0.7.2 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS_ARM})", - # required by isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1_t2_dex_retargeting_utils - f"dex-retargeting==0.4.6 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS})", +] +# Adds OpenUSD dependencies based on architecture for Kit less mode. +INSTALL_REQUIRES += [ + f"usd-core==25.8.0 ; ({SUPPORTED_ARCHS})", + f"usd-exchange>=2.2 ; ({SUPPORTED_ARCHS_ARM})", +] + +# Pin hf-xet to avoid broken tarball (hf_xet-1.1.8.dev2) cached on NVIDIA Artifactory. +# (https://urm.nvidia.com/artifactory/api/pypi/ct-omniverse-pypi) that gets installed with --pre +# and --extra-index-url flags. The broken hf-xet-1.1.8.dev2 package is present as of Mar 12 2026. +# TODO: Can be removed once the broken hf-xet-1.1.8.dev2 package is removed from NVIDIA Artifactory. +# Issue: https://nvbugs/5974917 includes verification steps. +INSTALL_REQUIRES += [ + # 1.4.1 is latest as of Mar 12 2026 + f"hf-xet>=1.4.1,<2.0.0 ; ({SUPPORTED_ARCHS_ARM})", ] PYTORCH_INDEX_URL = ["https://download.pytorch.org/whl/cu128"] +# Isaac Lab subpackages + Isaac Sim +EXTRAS_REQUIRE = { + "isaacsim": ["isaacsim[all,extscache]==5.1.0"], + # Individual Isaac Lab sub-packages + "assets": ["isaaclab_assets"], + "physx": ["isaaclab_physx"], + "contrib": ["isaaclab_contrib"], + "mimic": ["isaaclab_mimic"], + "newton": ["isaaclab_newton"], + "rl": ["isaaclab_rl"], + "tasks": ["isaaclab_tasks"], + "teleop": ["isaaclab_teleop"], + "visualizers": ["isaaclab_visualizers[all]"], + "visualizers-kit": ["isaaclab_visualizers[kit]"], + "visualizers-newton": ["isaaclab_visualizers[newton]"], + "visualizers-rerun": ["isaaclab_visualizers[rerun]"], + "visualizers-viser": ["isaaclab_visualizers[viser]"], + # Convenience: all sub-packages (does not include isaacsim) + "all": [ + "isaaclab_assets", + "isaaclab_physx", + "isaaclab_contrib", + "isaaclab_mimic", + "isaaclab_newton", + "isaaclab_rl", + "isaaclab_tasks", + "isaaclab_teleop", + "isaaclab_visualizers[all]", + ], +} + # Installation operation setup( name="isaaclab", @@ -71,17 +128,15 @@ keywords=EXTENSION_TOML_DATA["package"]["keywords"], license="BSD-3-Clause", include_package_data=True, - python_requires=">=3.10", + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", install_requires=INSTALL_REQUIRES, + extras_require=EXTRAS_REQUIRE, dependency_links=PYTORCH_INDEX_URL, packages=["isaaclab"], classifiers=[ - "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab/test/app/test_argparser_launch.py b/source/isaaclab/test/app/test_argparser_launch.py index 683409dd190c..25ab91196ba3 100644 --- a/source/isaaclab/test/app/test_argparser_launch.py +++ b/source/isaaclab/test/app/test_argparser_launch.py @@ -14,7 +14,7 @@ def test_livestream_launch_with_argparser(mocker): """Test launching with argparser arguments.""" # Mock the parse_args method - mocker.patch("argparse.ArgumentParser.parse_args", return_value=argparse.Namespace(livestream=1, headless=True)) + mocker.patch("argparse.ArgumentParser.parse_args", return_value=argparse.Namespace(livestream=1)) # create argparser parser = argparse.ArgumentParser() # add app launcher arguments @@ -25,18 +25,38 @@ def test_livestream_launch_with_argparser(mocker): # parse args mock_args = parser.parse_args() # everything defaults to None - app = AppLauncher(mock_args).app - - # import settings - import carb - - # acquire settings interface - carb_settings_iface = carb.settings.get_settings() - # check settings - # -- no-gui mode - assert carb_settings_iface.get("/app/window/enabled") is False - # -- livestream - assert carb_settings_iface.get("/app/livestream/enabled") is True + app_launcher = AppLauncher(mock_args) + app = app_launcher.app + assert app_launcher._livestream == 1 + assert app_launcher._headless is True # close the app on exit app.close() + + +def test_visualizer_alias_parsing(): + """Test that --viz alias maps to visualizer values.""" + parser = argparse.ArgumentParser() + AppLauncher.add_app_launcher_args(parser) + + args = parser.parse_args(["--viz", "kit,newton"]) + assert args.visualizer == ["kit", "newton"] + assert args.visualizer_explicit is True + + +def test_headless_deprecated_arg_parsing(): + """Test that deprecated --headless is still accepted by the parser.""" + parser = argparse.ArgumentParser() + AppLauncher.add_app_launcher_args(parser) + + args = parser.parse_args(["--headless"]) + assert args.headless is True + assert args.headless_explicit is True + + +def test_visualizer_none_parsing(): + parser = argparse.ArgumentParser() + AppLauncher.add_app_launcher_args(parser) + args = parser.parse_args(["--viz", "none"]) + assert args.visualizer == ["none"] + assert args.visualizer_explicit is True diff --git a/source/isaaclab/test/app/test_env_var_launch.py b/source/isaaclab/test/app/test_env_var_launch.py index 9ec07f932749..6a58b220692d 100644 --- a/source/isaaclab/test/app/test_env_var_launch.py +++ b/source/isaaclab/test/app/test_env_var_launch.py @@ -16,18 +16,20 @@ def test_livestream_launch_with_env_vars(mocker): # Mock the environment variables mocker.patch.dict(os.environ, {"LIVESTREAM": "1", "HEADLESS": "1"}) # everything defaults to None - app = AppLauncher().app - - # import settings - import carb - - # acquire settings interface - carb_settings_iface = carb.settings.get_settings() - # check settings - # -- no-gui mode - assert carb_settings_iface.get("/app/window/enabled") is False - # -- livestream - assert carb_settings_iface.get("/app/livestream/enabled") is True + app_launcher = AppLauncher() + app = app_launcher.app + + from isaaclab.app.settings_manager import get_settings_manager + + settings = get_settings_manager() + assert settings.get("/app/window/enabled") is False + # Do not assert /app/livestream/enabled here: + # this key is owned by Kit extensions and is not guaranteed to be populated + # in all app startup paths/environments. AppLauncher behavior is driven by + # its resolved launch state and livestream args. + assert app_launcher._livestream == 1 + assert app_launcher._headless is True + assert "omni.kit.livestream.app" in app_launcher._livestream_args # close the app on exit app.close() diff --git a/source/isaaclab/test/app/test_kwarg_launch.py b/source/isaaclab/test/app/test_kwarg_launch.py index b2781637b722..3cc22b157ef7 100644 --- a/source/isaaclab/test/app/test_kwarg_launch.py +++ b/source/isaaclab/test/app/test_kwarg_launch.py @@ -3,8 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause +import argparse + import pytest +import isaaclab.app.app_launcher as app_launcher_module from isaaclab.app import AppLauncher @@ -12,18 +15,201 @@ def test_livestream_launch_with_kwargs(mocker): """Test launching with keyword arguments.""" # everything defaults to None - app = AppLauncher(headless=True, livestream=1).app - - # import settings - import carb - - # acquire settings interface - carb_settings_iface = carb.settings.get_settings() - # check settings - # -- no-gui mode - assert carb_settings_iface.get("/app/window/enabled") is False - # -- livestream - assert carb_settings_iface.get("/app/livestream/enabled") is True + app_launcher = AppLauncher(headless=True, livestream=1) + app = app_launcher.app + assert app_launcher._livestream == 1 + assert app_launcher._headless is True # close the app on exit app.close() + + +class _DummySettings: + def __init__(self): + self.values = {} + + def set_string(self, path: str, value: str) -> None: + self.values[path] = value + + def set_int(self, path: str, value: int) -> None: + self.values[path] = value + + def set_bool(self, path: str, value: bool) -> None: + self.values[path] = value + + +def test_set_visualizer_settings_stores_values(monkeypatch: pytest.MonkeyPatch): + settings = _DummySettings() + monkeypatch.setattr(app_launcher_module, "get_settings_manager", lambda: settings) + + launcher = AppLauncher.__new__(AppLauncher) + launcher._set_visualizer_settings({"visualizer": ["viser", "rerun"], "max_visible_envs": 0}) + + assert settings.values == { + "/isaaclab/visualizer/types": "viser rerun", + "/isaaclab/visualizer/explicit": False, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": 0, + } + + +def test_set_visualizer_settings_rejects_negative_max_visible_envs( + monkeypatch: pytest.MonkeyPatch, +): + def _unexpected_settings_manager(): + raise AssertionError("settings manager should not be queried for invalid values") + + monkeypatch.setattr(app_launcher_module, "get_settings_manager", _unexpected_settings_manager) + + launcher = AppLauncher.__new__(AppLauncher) + with pytest.raises(ValueError, match="Invalid value for --max_visible_envs: -5"): + launcher._set_visualizer_settings({"visualizer": ["viser"], "max_visible_envs": -5}) + + +def test_set_visualizer_settings_suppresses_settings_manager_errors(monkeypatch: pytest.MonkeyPatch): + def _raise_settings_error(): + raise RuntimeError("settings unavailable") + + monkeypatch.setattr(app_launcher_module, "get_settings_manager", _raise_settings_error) + + launcher = AppLauncher.__new__(AppLauncher) + launcher._set_visualizer_settings({"visualizer": ["viser"], "max_visible_envs": 3}) + + +def test_parse_visualizer_csv_accepts_comma_delimited_values(): + parsed = app_launcher_module.AppLauncher._parse_visualizer_csv("kit,newton,rerun,viser") + assert parsed == ["kit", "newton", "rerun", "viser"] + + +def test_parse_visualizer_csv_rejects_spaces_between_entries(): + with pytest.raises(argparse.ArgumentTypeError, match="spaces are not allowed"): + app_launcher_module.AppLauncher._parse_visualizer_csv("kit, newton") + + +def test_resolve_visualizer_settings_rejects_none_with_others(): + launcher = AppLauncher.__new__(AppLauncher) + with pytest.raises(ValueError, match="'none' cannot be combined"): + launcher._resolve_visualizer_settings( + {"visualizer": ["none", "kit"], "visualizer_explicit": True}, + ) + + +def test_visualizer_csv_does_not_swallow_hydra_overrides(): + parser = argparse.ArgumentParser(add_help=False) + app_launcher_module.AppLauncher.add_app_launcher_args(parser) + + args, hydra_args = parser.parse_known_args( + ["--visualizer", "kit,newton,rerun", "presets=newton_mjwarp", "env.episode_length=10"] + ) + + assert args.visualizer == ["kit", "newton", "rerun"] + assert hydra_args == ["presets=newton_mjwarp", "env.episode_length=10"] + + +def _resolve_headless_for_case(monkeypatch: pytest.MonkeyPatch, launcher_args: dict) -> tuple[bool, AppLauncher]: + monkeypatch.setenv("HEADLESS", "0") + launcher = AppLauncher.__new__(AppLauncher) + launcher._livestream = 0 + launcher._resolve_visualizer_settings(launcher_args) + launcher._resolve_headless_settings(launcher_args, livestream_arg=-1, livestream_env=0) + return launcher._headless, launcher + + +def test_matrix_cli_kit_newton_with_custom_kit_cfg_intent_non_headless(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "visualizer": ["kit", "newton"], + "visualizer_explicit": True, + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is False + assert launcher._cli_visualizer_types == ["kit", "newton"] + + +def test_matrix_cli_rerun_with_custom_kit_cfg_intent_headless(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "visualizer": ["rerun"], + "visualizer_explicit": True, + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is True + assert launcher._cli_visualizer_types == ["rerun"] + + +def test_matrix_no_cli_with_cfg_kit_newton_non_headless(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is False + assert launcher._cli_visualizer_explicit is False + + +def test_matrix_viz_none_disables_all_and_headless(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "visualizer": ["none"], + "visualizer_explicit": True, + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is True + assert launcher._cli_visualizer_disable_all is True + assert launcher._cli_visualizer_types == [] + + +def test_matrix_headless_flag_deprecated_takes_precedence(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "headless": True, + "headless_explicit": True, + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is True + assert launcher._cli_visualizer_types == [] + + +def test_matrix_headless_with_viz_names_takes_precedence(monkeypatch: pytest.MonkeyPatch): + headless, launcher = _resolve_headless_for_case( + monkeypatch, + { + "headless": True, + "headless_explicit": True, + "visualizer": ["kit", "newton"], + "visualizer_explicit": True, + "visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": True}, + }, + ) + assert headless is True + assert launcher._cli_visualizer_disable_all is True + assert launcher._cli_visualizer_types == [] + + +def test_no_cli_and_no_cfg_visualizers_defaults_headless(monkeypatch: pytest.MonkeyPatch): + headless, _ = _resolve_headless_for_case(monkeypatch, {}) + assert headless is True + + +def test_no_cli_and_non_kit_cfg_visualizers_defaults_headless(monkeypatch: pytest.MonkeyPatch): + headless, _ = _resolve_headless_for_case( + monkeypatch, + {"visualizer_intent": {"has_any_visualizers": True, "has_kit_visualizer": False}}, + ) + assert headless is True + + +def test_invalid_visualizer_intent_rejected(monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("HEADLESS", "0") + launcher = AppLauncher.__new__(AppLauncher) + with pytest.raises(ValueError, match="visualizer_intent"): + launcher._resolve_visualizer_settings({"visualizer_intent": {"has_any_visualizers": "yes"}}) diff --git a/source/isaaclab/test/app/test_non_headless_launch.py b/source/isaaclab/test/app/test_non_headless_launch.py index eb8544b995cc..8fc8a051ae38 100644 --- a/source/isaaclab/test/app/test_non_headless_launch.py +++ b/source/isaaclab/test/app/test_non_headless_launch.py @@ -20,6 +20,7 @@ """Rest everything follows.""" + import isaaclab.sim as sim_utils from isaaclab.assets import AssetBaseCfg from isaaclab.scene import InteractiveScene, InteractiveSceneCfg diff --git a/source/isaaclab/test/assets/check_external_force.py b/source/isaaclab/test/assets/check_external_force.py index f038b907a1f0..f4b0da603812 100644 --- a/source/isaaclab/test/assets/check_external_force.py +++ b/source/isaaclab/test/assets/check_external_force.py @@ -96,13 +96,13 @@ def main(): sim_time = 0.0 count = 0 # reset root state - root_state = robot.data.default_root_state.clone() + root_state = robot.data.default_root_state.torch.clone() root_state[0, :2] = torch.tensor([0.0, -0.5], device=sim.device) root_state[1, :2] = torch.tensor([0.0, 0.5], device=sim.device) robot.write_root_pose_to_sim(root_state[:, :7]) robot.write_root_velocity_to_sim(root_state[:, 7:]) # reset dof state - joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel + joint_pos, joint_vel = robot.data.default_joint_pos.torch, robot.data.default_joint_vel.torch robot.write_joint_state_to_sim(joint_pos, joint_vel) robot.reset() # apply force @@ -112,7 +112,7 @@ def main(): # reset command print(">>>>>>>> Reset!") # apply action to the robot - robot.set_joint_position_target(robot.data.default_joint_pos.clone()) + robot.set_joint_position_target(robot.data.default_joint_pos.torch.clone()) robot.write_data_to_sim() # perform step sim.step() diff --git a/source/isaaclab/test/assets/check_fixed_base_assets.py b/source/isaaclab/test/assets/check_fixed_base_assets.py index c62c5a3334da..11f9010a7d88 100644 --- a/source/isaaclab/test/assets/check_fixed_base_assets.py +++ b/source/isaaclab/test/assets/check_fixed_base_assets.py @@ -108,13 +108,16 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articula # reset robots for index, robot in enumerate(entities.values()): # root state - root_state = robot.data.default_root_state.clone() + root_state = robot.data.default_root_state.torch.clone() root_state[:, :3] += origins[index] root_state[:, :2] += torch.randn_like(root_state[:, :2]) * 0.25 robot.write_root_pose_to_sim(root_state[:, :7]) robot.write_root_velocity_to_sim(root_state[:, 7:]) # joint state - joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone() + joint_pos, joint_vel = ( + robot.data.default_joint_pos.torch.clone(), + robot.data.default_joint_vel.torch.clone(), + ) robot.write_joint_state_to_sim(joint_pos, joint_vel) # reset the internal state robot.reset() @@ -124,7 +127,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articula if count % 200 == 0: print("Name: ", name, "is_fixed_base: ", robot.is_fixed_base) # generate random joint positions - joint_pos_target = robot.data.default_joint_pos + torch.randn_like(robot.data.joint_pos) * 0.1 + joint_pos_target = robot.data.default_joint_pos.torch + torch.randn_like(robot.data.joint_pos.torch) * 0.1 # apply action to the robot robot.set_joint_position_target(joint_pos_target) # write data to sim diff --git a/source/isaaclab/test/assets/check_ridgeback_franka.py b/source/isaaclab/test/assets/check_ridgeback_franka.py index 724cd8b2a080..69ca6a7d8761 100644 --- a/source/isaaclab/test/assets/check_ridgeback_franka.py +++ b/source/isaaclab/test/assets/check_ridgeback_franka.py @@ -72,7 +72,7 @@ def add_robots() -> Articulation: def run_simulator(sim: sim_utils.SimulationContext, robot: Articulation): """Runs the simulator by applying actions to the robot at every time-step""" # dummy action - actions = robot.data.default_joint_pos.clone() + actions = robot.data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() @@ -87,12 +87,15 @@ def run_simulator(sim: sim_utils.SimulationContext, robot: Articulation): sim_time = 0.0 ep_step_count = 0 # reset dof state - joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone() + joint_pos, joint_vel = ( + robot.data.default_joint_pos.torch.clone(), + robot.data.default_joint_vel.torch.clone(), + ) robot.write_joint_state_to_sim(joint_pos, joint_vel) # reset internals robot.reset() # reset command - actions = torch.rand_like(robot.data.default_joint_pos) + robot.data.default_joint_pos + actions = torch.rand_like(robot.data.default_joint_pos.torch) + robot.data.default_joint_pos.torch # -- base actions[:, 0:3] = 0.0 # -- gripper diff --git a/source/isaaclab/test/assets/test_articulation_iface.py b/source/isaaclab/test/assets/test_articulation_iface.py new file mode 100644 index 000000000000..3e471026a9dc --- /dev/null +++ b/source/isaaclab/test/assets/test_articulation_iface.py @@ -0,0 +1,2864 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +""" +Checks that the articulation interfaces are consistent across backends, and are providing the exact same data as what +the base articulation class advertises. All articulation interfaces need to comply with the same interface contract. + +The setup is a bit convoluted so that we can run these tests without requiring Isaac Sim or GPU simulation. +""" + +"""Launch Isaac Sim Simulator first (when available).""" + +import os +import sys +from unittest.mock import MagicMock + +# When running kitless (e.g., ovphysx backend via run_ovphysx.sh), AppLauncher +# will try to boot Kit and hang. Skip it entirely when LD_PRELOAD is cleared +# (the signature of run_ovphysx.sh) or when EXP_PATH is not set. +_kitless = os.environ.get("LD_PRELOAD", "") == "" and "EXP_PATH" not in os.environ + +if not _kitless: + from isaaclab.app import AppLauncher + + simulation_app = AppLauncher(headless=True).app +else: + simulation_app = None + for _mod in ("isaacsim.core", "isaacsim.core.simulation_manager"): + sys.modules.setdefault(_mod, MagicMock()) + +import numpy as np +import pytest +import torch +import warp as wp + +from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg +from isaaclab.test.mock_interfaces.utils import MockWrenchComposer + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity. +# This is needed because the PhysX Data classes call +# SimulationManager.get_physics_sim_view().get_gravity() but there's no actual +# physics scene when running unit tests. +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +""" +Check which backends are available. +""" + +BACKENDS = ["mock"] # Mock backend is always available. + +try: + from isaaclab_physx.assets.articulation.articulation import Articulation as PhysXArticulation + from isaaclab_physx.assets.articulation.articulation_data import ArticulationData as PhysXArticulationData + from isaaclab_physx.test.mock_interfaces.views import MockArticulationViewWarp as PhysXMockArticulationViewWarp + + BACKENDS.append("physx") +except ImportError: + pass + +try: + from isaaclab_newton.assets.articulation.articulation import Articulation as NewtonArticulation + from isaaclab_newton.assets.articulation.articulation_data import ArticulationData as NewtonArticulationData + from isaaclab_newton.test.mock_interfaces.views import MockNewtonArticulationView as NewtonMockArticulationView + + BACKENDS.append("newton") +except ImportError: + pass + +try: + from isaaclab_ovphysx.assets.articulation.articulation import Articulation as OvPhysxArticulation + from isaaclab_ovphysx.assets.articulation.articulation_data import ArticulationData as OvPhysxArticulationData + from isaaclab_ovphysx.test.mock_interfaces.views import MockOvPhysxBindingSet + + BACKENDS.append("ovphysx") +except ImportError: + pass + + +def create_physx_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + device: str = "cuda:0", +): + """Create a test Articulation instance with mocked dependencies.""" + joint_names = [f"joint_{i}" for i in range(num_joints)] + body_names = [f"body_{i}" for i in range(num_bodies)] + fixed_tendon_names = [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + spatial_tendon_names = [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + articulation = object.__new__(PhysXArticulation) + + articulation.cfg = ArticulationCfg( + prim_path="/World/Robot", + soft_joint_pos_limit_factor=1.0, + actuators={}, + ) + + # Create PhysX mock view + mock_view = PhysXMockArticulationViewWarp( + count=num_instances, + num_links=num_bodies, + num_dofs=num_joints, + device=device, + max_fixed_tendons=num_fixed_tendons, + max_spatial_tendons=num_spatial_tendons, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Set up the mock view's metatype for accessing names/counts + mock_metatype = MagicMock() + mock_metatype.fixed_base = False + mock_metatype.dof_count = num_joints + mock_metatype.link_count = num_bodies + mock_metatype.dof_names = joint_names + mock_metatype.link_names = body_names + object.__setattr__(mock_view, "_shared_metatype", mock_metatype) + + object.__setattr__(articulation, "_root_view", mock_view) + object.__setattr__(articulation, "_device", device) + + # We can't call the initialize method here, because we don't have a good mock for the actuators yet. + # We need to set the _data attribute manually. + + # Create ArticulationData instance (SimulationManager already mocked at module level) + data = PhysXArticulationData(mock_view, device) + object.__setattr__(articulation, "_data", data) + + # Set tendon names on articulation and data + object.__setattr__(articulation, "_fixed_tendon_names", fixed_tendon_names) + object.__setattr__(articulation, "_spatial_tendon_names", spatial_tendon_names) + data.fixed_tendon_names = fixed_tendon_names + data.spatial_tendon_names = spatial_tendon_names + + # Create mock wrench composers (pass articulation which has num_instances, num_bodies, device properties) + mock_inst_wrench = MockWrenchComposer(articulation) + mock_perm_wrench = MockWrenchComposer(articulation) + object.__setattr__(articulation, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(articulation, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(articulation, "_initialize_handle", None) + object.__setattr__(articulation, "_invalidate_initialize_handle", None) + object.__setattr__(articulation, "_prim_deletion_handle", None) + object.__setattr__(articulation, "_debug_vis_handle", None) + + # Set up other required attributes + object.__setattr__(articulation, "actuators", {}) + object.__setattr__(articulation, "_has_implicit_actuators", False) + object.__setattr__(articulation, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__( + articulation, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + ) + object.__setattr__( + articulation, "_ALL_JOINT_INDICES", wp.array(np.arange(num_joints, dtype=np.int32), device=device) + ) + + # Tendon index arrays + object.__setattr__( + articulation, + "_ALL_FIXED_TENDON_INDICES", + wp.array(np.arange(num_fixed_tendons, dtype=np.int32), device=device), + ) + object.__setattr__( + articulation, + "_ALL_SPATIAL_TENDON_INDICES", + wp.array(np.arange(num_spatial_tendons, dtype=np.int32), device=device), + ) + + # Warp arrays for set_external_force_and_torque + object.__setattr__( + articulation, "_ALL_INDICES_WP", wp.array(np.arange(num_instances, dtype=np.int32), device=device) + ) + object.__setattr__( + articulation, "_ALL_BODY_INDICES_WP", wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + ) + + # Initialize joint targets + object.__setattr__(articulation, "_joint_pos_target_sim", torch.zeros(num_instances, num_joints, device=device)) + object.__setattr__(articulation, "_joint_vel_target_sim", torch.zeros(num_instances, num_joints, device=device)) + object.__setattr__(articulation, "_joint_effort_target_sim", torch.zeros(num_instances, num_joints, device=device)) + + # Cached .view(wp.float32) wrappers + object.__setattr__(articulation, "_root_link_pose_w_f32", None) + object.__setattr__(articulation, "_root_com_vel_w_f32", None) + object.__setattr__(articulation, "_root_link_vel_w_f32", None) + object.__setattr__(articulation, "_inst_wrench_force_f32", None) + object.__setattr__(articulation, "_inst_wrench_torque_f32", None) + object.__setattr__(articulation, "_perm_wrench_force_f32", None) + object.__setattr__(articulation, "_perm_wrench_torque_f32", None) + + # Pre-allocated pinned CPU buffers for PhysX TensorAPI writes + N, J, B = num_instances, num_joints, num_bodies + cpu_env_ids = wp.array(np.arange(N, dtype=np.int32), device="cpu") + object.__setattr__(articulation, "_cpu_env_ids_all", cpu_env_ids) + object.__setattr__(articulation, "_cpu_joint_stiffness", wp.zeros((N, J), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_damping", wp.zeros((N, J), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_pos_limits", wp.zeros((N, J, 2), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_vel_limits", wp.zeros((N, J), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_effort_limits", wp.zeros((N, J), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_armature", wp.zeros((N, J), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_joint_friction_props", wp.zeros((N, J, 3), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_body_mass", wp.zeros((N, B), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_body_coms", wp.zeros((N, B, 7), dtype=wp.float32, device="cpu")) + object.__setattr__(articulation, "_cpu_body_inertia", wp.zeros((N, B, 9), dtype=wp.float32, device="cpu")) + + return articulation, mock_view + + +def create_ovphysx_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + device: str = "cuda:0", +): + """Create a test OvPhysX Articulation instance with mocked tensor bindings.""" + joint_names = [f"joint_{i}" for i in range(num_joints)] + body_names = [f"body_{i}" for i in range(num_bodies)] + + articulation = object.__new__(OvPhysxArticulation) + + articulation.cfg = ArticulationCfg( + prim_path="/World/Robot", + soft_joint_pos_limit_factor=1.0, + actuators={}, + ) + + # Create mock binding set + mock_bindings = MockOvPhysxBindingSet( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + is_fixed_base=False, + joint_names=joint_names, + body_names=body_names, + num_fixed_tendons=num_fixed_tendons, + num_spatial_tendons=num_spatial_tendons, + ) + mock_bindings.set_random_data() + + fixed_tendon_names = [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + spatial_tendon_names = [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + object.__setattr__(articulation, "_device", device) + object.__setattr__(articulation, "_ovphysx", MagicMock()) + object.__setattr__(articulation, "_bindings", mock_bindings.bindings) + object.__setattr__(articulation, "_num_instances", num_instances) + object.__setattr__(articulation, "_num_joints", num_joints) + object.__setattr__(articulation, "_num_bodies", num_bodies) + object.__setattr__(articulation, "_is_fixed_base", False) + object.__setattr__(articulation, "_joint_names", joint_names) + object.__setattr__(articulation, "_body_names", body_names) + object.__setattr__(articulation, "_fixed_tendon_names", fixed_tendon_names) + object.__setattr__(articulation, "_spatial_tendon_names", spatial_tendon_names) + object.__setattr__(articulation, "_num_fixed_tendons", num_fixed_tendons) + object.__setattr__(articulation, "_num_spatial_tendons", num_spatial_tendons) + + # Create ArticulationData + data = OvPhysxArticulationData(mock_bindings.bindings, device) + data._num_instances = num_instances + data._num_joints = num_joints + data._num_bodies = num_bodies + data._num_fixed_tendons = num_fixed_tendons + data._num_spatial_tendons = num_spatial_tendons + data._is_fixed_base = False + data.body_names = body_names + data.joint_names = joint_names + data.fixed_tendon_names = fixed_tendon_names + data.spatial_tendon_names = spatial_tendon_names + data._create_buffers() + object.__setattr__(articulation, "_data", data) + + # Wrench composers + mock_inst_wrench = MockWrenchComposer(articulation) + mock_perm_wrench = MockWrenchComposer(articulation) + object.__setattr__(articulation, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(articulation, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising + object.__setattr__(articulation, "_initialize_handle", None) + object.__setattr__(articulation, "_invalidate_initialize_handle", None) + object.__setattr__(articulation, "_prim_deletion_handle", None) + object.__setattr__(articulation, "_debug_vis_handle", None) + object.__setattr__(articulation, "actuators", {}) + + return articulation, mock_bindings + + +def create_newton_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + device: str = "cuda:0", +): + """Create a test Newton Articulation instance with mocked dependencies.""" + import isaaclab_newton.assets.articulation.articulation_data as newton_data_module + + joint_names = [f"joint_{i}" for i in range(num_joints)] + body_names = [f"body_{i}" for i in range(num_bodies)] + + # Create Newton mock view + mock_view = NewtonMockArticulationView( + num_instances=num_instances, + num_bodies=num_bodies, + num_joints=num_joints, + device=device, + is_fixed_base=False, + joint_names=joint_names, + body_names=body_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Mock NewtonManager (aliased as SimulationManager in Newton modules) + mock_model = MagicMock() + mock_model.gravity = wp.array(np.array([[0.0, 0.0, -9.81]], dtype=np.float32), dtype=wp.vec3f, device=device) + mock_state = MagicMock() + mock_control = MagicMock() + + mock_manager = MagicMock() + mock_manager.get_model.return_value = mock_model + mock_manager.get_state_0.return_value = mock_state + mock_manager.get_state_1.return_value = mock_state + mock_manager.get_control.return_value = mock_control + + # Patch SimulationManager in the Newton data module + original_sim_manager = newton_data_module.SimulationManager + newton_data_module.SimulationManager = mock_manager + + try: + data = NewtonArticulationData(mock_view, device) + finally: + newton_data_module.SimulationManager = original_sim_manager + + # Create Articulation shell (bypass __init__) + articulation = object.__new__(NewtonArticulation) + + articulation.cfg = ArticulationCfg( + prim_path="/World/Robot", + soft_joint_pos_limit_factor=1.0, + actuators={}, + ) + + object.__setattr__(articulation, "_root_view", mock_view) + object.__setattr__(articulation, "_device", device) + object.__setattr__(articulation, "_data", data) + + # Tendon names (Newton doesn't support tendons) + object.__setattr__(articulation, "_fixed_tendon_names", []) + object.__setattr__(articulation, "_spatial_tendon_names", []) + data.fixed_tendon_names = [] + data.spatial_tendon_names = [] + + # Mock wrench composers + mock_inst_wrench = MockWrenchComposer(articulation) + mock_perm_wrench = MockWrenchComposer(articulation) + object.__setattr__(articulation, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(articulation, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(articulation, "_initialize_handle", None) + object.__setattr__(articulation, "_invalidate_initialize_handle", None) + object.__setattr__(articulation, "_prim_deletion_handle", None) + object.__setattr__(articulation, "_debug_vis_handle", None) + + # Other required attributes + object.__setattr__(articulation, "actuators", {}) + object.__setattr__(articulation, "_has_implicit_actuators", False) + + # Newton uses wp.array for indices (not torch) + object.__setattr__(articulation, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__( + articulation, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + ) + object.__setattr__( + articulation, "_ALL_JOINT_INDICES", wp.array(np.arange(num_joints, dtype=np.int32), device=device) + ) + + # Newton uses wp.bool masks + object.__setattr__(articulation, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_BODY_MASK", wp.ones((num_bodies,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_JOINT_MASK", wp.ones((num_joints,), dtype=wp.bool, device=device)) + + # Tendon arrays (empty) + object.__setattr__(articulation, "_ALL_FIXED_TENDON_INDICES", wp.array(np.array([], dtype=np.int32), device=device)) + object.__setattr__(articulation, "_ALL_FIXED_TENDON_MASK", wp.ones((0,), dtype=wp.bool, device=device)) + object.__setattr__( + articulation, "_ALL_SPATIAL_TENDON_INDICES", wp.array(np.array([], dtype=np.int32), device=device) + ) + object.__setattr__(articulation, "_ALL_SPATIAL_TENDON_MASK", wp.ones((0,), dtype=wp.bool, device=device)) + + # Joint targets (Newton uses warp, not torch) + object.__setattr__( + articulation, + "_joint_pos_target_sim", + wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device), + ) + object.__setattr__( + articulation, + "_joint_vel_target_sim", + wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device), + ) + object.__setattr__( + articulation, + "_joint_effort_target_sim", + wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device), + ) + + return articulation, mock_view + + +def create_mock_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + device: str = "cuda:0", +): + from isaaclab.test.mock_interfaces.assets.mock_articulation import MockArticulation + + art = MockArticulation( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + num_fixed_tendons=num_fixed_tendons, + num_spatial_tendons=num_spatial_tendons, + device=device, + ) + return art, None # No view for mock backend + + +def get_articulation( + backend: str, + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + device: str = "cuda:0", +): + if backend == "physx": + return create_physx_articulation( + num_instances, num_joints, num_bodies, num_fixed_tendons, num_spatial_tendons, device + ) + elif backend == "ovphysx": + return create_ovphysx_articulation( + num_instances, num_joints, num_bodies, num_fixed_tendons, num_spatial_tendons, device + ) + elif backend == "newton": + return create_newton_articulation(num_instances, num_joints, num_bodies, device) + elif backend.lower() == "mock": + return create_mock_articulation( + num_instances, num_joints, num_bodies, num_fixed_tendons, num_spatial_tendons, device + ) + else: + raise ValueError(f"Invalid backend: {backend}") + + +@pytest.fixture +def articulation_iface(request): + backend = request.getfixturevalue("backend") + num_instances = request.getfixturevalue("num_instances") + num_joints = request.getfixturevalue("num_joints") + num_bodies = request.getfixturevalue("num_bodies") + device = request.getfixturevalue("device") + try: + num_fixed_tendons = request.getfixturevalue("num_fixed_tendons") + except pytest.FixtureLookupError: + num_fixed_tendons = 0 + try: + num_spatial_tendons = request.getfixturevalue("num_spatial_tendons") + except pytest.FixtureLookupError: + num_spatial_tendons = 0 + return get_articulation( + backend, num_instances, num_joints, num_bodies, num_fixed_tendons, num_spatial_tendons, device + ) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _check_proxy_array(arr, *, expected_shape: tuple, expected_dtype: type, name: str): + """Assert that `arr` is a ProxyArray with the expected shape and dtype.""" + from isaaclab.utils.warp import ProxyArray + + assert isinstance(arr, ProxyArray), f"{name}: expected ProxyArray, got {type(arr)}" + assert arr.shape == expected_shape, f"{name}: expected shape {expected_shape}, got {arr.shape}" + assert arr.dtype == expected_dtype, f"{name}: expected dtype {expected_dtype}, got {arr.dtype}" + + +# Common parametrize decorator for all interface tests +_backends = pytest.mark.parametrize("backend", BACKENDS, indirect=False) + +# We also need to provide the fixture params that articulation_iface reads: +_default_dims = pytest.mark.parametrize( + "num_instances, num_joints, num_bodies", + [(1, 1, 1), (1, 2, 2), (2, 6, 7), (100, 8, 13)], +) + +_default_devices = pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +_index_resolution_backends = pytest.mark.parametrize( + "backend", [backend for backend in ("physx", "newton") if backend in BACKENDS], indirect=False +) + + +# --------------------------------------------------------------------------- +# Tests: Index resolution helpers +# --------------------------------------------------------------------------- + + +class TestArticulationIndexResolution: + """Test backend-specific index resolution helpers.""" + + @_index_resolution_backends + def test_resolve_env_ids_handles_tensor_view_shape(self, backend): + art, _ = get_articulation(backend, num_instances=4, device="cpu") + + env_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = art._resolve_env_ids(env_ids) + resolved_view = art._resolve_env_ids(env_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + @_index_resolution_backends + def test_resolve_joint_ids_handles_tensor_view_shape(self, backend): + art, _ = get_articulation(backend, num_joints=4, device="cpu") + + joint_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = art._resolve_joint_ids(joint_ids) + resolved_view = art._resolve_joint_ids(joint_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + @_index_resolution_backends + def test_resolve_body_ids_handles_tensor_view_shape(self, backend): + art, _ = get_articulation(backend, num_bodies=4, device="cpu") + + body_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = art._resolve_body_ids(body_ids) + resolved_view = art._resolve_body_ids(body_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + +# --------------------------------------------------------------------------- +# Tests: Articulation properties +# --------------------------------------------------------------------------- + + +class TestArticulationProperties: + """Test that articulation properties return the correct types/values.""" + + @_backends + @_default_dims + @_default_devices + def test_num_instances(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + assert art.num_instances == num_instances + + @_backends + @_default_dims + @_default_devices + def test_num_joints(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + assert art.num_joints == num_joints + + @_backends + @_default_dims + @_default_devices + def test_num_bodies(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + assert art.num_bodies == num_bodies + + @_backends + @_default_dims + @_default_devices + def test_is_fixed_base(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + assert isinstance(art.is_fixed_base, bool) + + @_backends + @_default_dims + @_default_devices + def test_joint_names(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + names = art.joint_names + assert isinstance(names, list) + assert len(names) == num_joints + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_devices + def test_body_names(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + names = art.body_names + assert isinstance(names, list) + assert len(names) == num_bodies + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_devices + def test_data_returns_articulation_data( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface + ): + from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData + + art, _ = articulation_iface + assert isinstance(art.data, BaseArticulationData) + + +# --------------------------------------------------------------------------- +# Tests: Articulation finder methods +# --------------------------------------------------------------------------- + + +class TestArticulationFinders: + """Test that finder methods return (list[int], list[str]) tuples.""" + + @_backends + @_default_dims + @_default_devices + def test_find_bodies_all(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + indices, names = art.find_bodies(".*") + assert isinstance(indices, list) and isinstance(names, list) + assert len(indices) == num_bodies + assert len(names) == num_bodies + assert all(isinstance(i, int) for i in indices) + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_devices + def test_find_joints_all(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + indices, names = art.find_joints(".*") + assert isinstance(indices, list) and isinstance(names, list) + assert len(indices) == num_joints + assert len(names) == num_joints + + @_backends + @_default_dims + @_default_devices + def test_find_bodies_single(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + first_body = art.body_names[0] + indices, names = art.find_bodies(first_body) + assert indices == [0] + assert names == [first_body] + + @_backends + @_default_dims + @_default_devices + def test_find_joints_single(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + first_joint = art.joint_names[0] + indices, names = art.find_joints(first_joint) + assert indices == [0] + assert names == [first_joint] + + +# --------------------------------------------------------------------------- +# Tests: resolve_matching_names caching behavior +# --------------------------------------------------------------------------- + + +_non_mock_backends = pytest.mark.parametrize("backend", [b for b in BACKENDS if b != "mock"], indirect=False) + + +class TestResolveMatchingNamesCache: + """Test that resolve_matching_names caching returns correct, isolated results.""" + + @_non_mock_backends + @pytest.mark.parametrize("num_instances, num_joints, num_bodies", [(2, 6, 7)]) + @_default_devices + def test_unmatched_regex_raises(self, backend, num_instances, num_joints, num_bodies, device): + """ValueError from resolve_matching_names propagates correctly.""" + art, _ = get_articulation(backend, num_instances, num_joints, num_bodies, device=device) + with pytest.raises(ValueError): + art.find_bodies("nonexistent_body_xyz") + with pytest.raises(ValueError): + art.find_joints("nonexistent_joint_xyz") + + @_backends + @pytest.mark.parametrize("num_instances, num_joints, num_bodies", [(2, 6, 7)]) + @_default_devices + def test_mutating_result_does_not_corrupt_cache( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface + ): + """Mutating returned lists must not affect future cached results.""" + art, _ = articulation_iface + + for finder, expected_len in [("find_bodies", num_bodies), ("find_joints", num_joints)]: + idx1, names1 = getattr(art, finder)(".*") + assert len(idx1) == expected_len + + idx1.clear() + names1.append("corrupted") + + idx2, names2 = getattr(art, finder)(".*") + assert len(idx2) == expected_len + assert "corrupted" not in names2 + + @_non_mock_backends + @pytest.mark.parametrize("num_instances, num_joints, num_bodies", [(2, 6, 7)]) + @_default_devices + def test_find_with_multiple_patterns(self, backend, num_instances, num_joints, num_bodies, device): + """Passing a list of regex patterns works correctly.""" + art, _ = get_articulation(backend, num_instances, num_joints, num_bodies, device=device) + idx, names = art.find_joints(["joint_0", "joint_1"]) + assert "joint_0" in names + assert "joint_1" in names + assert len(names) == 2 + + @_non_mock_backends + @pytest.mark.parametrize("num_instances, num_joints, num_bodies", [(2, 6, 7)]) + @_default_devices + def test_find_with_preserve_order(self, backend, num_instances, num_joints, num_bodies, device): + """preserve_order=True returns names in the order of the input patterns.""" + art, _ = get_articulation(backend, num_instances, num_joints, num_bodies, device=device) + idx_fwd, names_fwd = art.find_joints(["joint_1", "joint_0"], preserve_order=True) + assert names_fwd == ["joint_1", "joint_0"] + + idx_rev, names_rev = art.find_joints(["joint_0", "joint_1"], preserve_order=True) + assert names_rev == ["joint_0", "joint_1"] + + +# --------------------------------------------------------------------------- +# Tests: ArticulationData root state properties +# --------------------------------------------------------------------------- + + +class TestArticulationDataRootState: + """Test data properties for root rigid body state.""" + + @_backends + @_default_dims + @_default_devices + def test_root_link_pose_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_pose_w, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="root_link_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="root_link_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_pose_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_pose_w, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="root_com_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="root_com_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_pos_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_pos_w, expected_shape=(num_instances,), expected_dtype=wp.vec3f, name="root_link_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_quat_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_quat_w, expected_shape=(num_instances,), expected_dtype=wp.quatf, name="root_link_quat_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_lin_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_lin_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_ang_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_ang_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_pos_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_pos_w, expected_shape=(num_instances,), expected_dtype=wp.vec3f, name="root_com_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_quat_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_quat_w, expected_shape=(num_instances,), expected_dtype=wp.quatf, name="root_com_quat_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_lin_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_lin_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_ang_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_ang_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_ang_vel_w", + ) + + +# --------------------------------------------------------------------------- +# Tests: ArticulationData derived properties +# --------------------------------------------------------------------------- + + +class TestArticulationDataDerivedProperties: + """Test derived/computed data properties.""" + + @_backends + @_default_dims + @_default_devices + def test_projected_gravity_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.projected_gravity_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="projected_gravity_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_heading_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.heading_w, expected_shape=(num_instances,), expected_dtype=wp.float32, name="heading_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_lin_vel_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_lin_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_ang_vel_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_link_ang_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_ang_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_lin_vel_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_lin_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_ang_vel_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.root_com_ang_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_ang_vel_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: ArticulationData body state properties +# --------------------------------------------------------------------------- + + +class TestArticulationDataBodyState: + """Test data properties for all body states.""" + + @_backends + @_default_dims + @_default_devices + def test_body_link_pose_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_pose_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_link_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_link_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pose_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_pose_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_com_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_com_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_acc_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_acc_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_com_acc_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pose_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + if backend == "newton": + pytest.xfail("Newton only stores CoM position, not orientation") + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_pose_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_com_pose_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_mass(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_mass, expected_shape=(num_instances, num_bodies), expected_dtype=wp.float32, name="body_mass" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_inertia(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_inertia, + expected_shape=(num_instances, num_bodies, 9), + expected_dtype=wp.float32, + name="body_inertia", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_pos_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_pos_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_pos_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_quat_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_quat_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_link_quat_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_lin_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_lin_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_ang_vel_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_link_ang_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pos_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_pos_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_pos_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_quat_w(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_quat_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_com_quat_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pos_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_pos_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_pos_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_quat_b(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + if backend == "newton": + pytest.xfail("Newton only stores CoM position, not orientation") + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.body_com_quat_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_com_quat_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: ArticulationData joint state and properties +# --------------------------------------------------------------------------- + + +class TestArticulationDataJointState: + """Test data properties for joint state and joint properties.""" + + @_backends + @_default_dims + @_default_devices + def test_joint_pos(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_pos, expected_shape=(num_instances, num_joints), expected_dtype=wp.float32, name="joint_pos" + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_vel(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_vel, expected_shape=(num_instances, num_joints), expected_dtype=wp.float32, name="joint_vel" + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_acc(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_acc, expected_shape=(num_instances, num_joints), expected_dtype=wp.float32, name="joint_acc" + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_stiffness(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_stiffness, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_stiffness", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_damping(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_damping, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_damping", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_armature(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_armature, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_armature", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_friction_coeff(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_friction_coeff, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_friction_coeff", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_pos_limits(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_pos_limits, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.vec2f, + name="joint_pos_limits", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_vel_limits(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_vel_limits, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_vel_limits", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_effort_limits(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_effort_limits, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_effort_limits", + ) + + @_backends + @_default_dims + @_default_devices + def test_soft_joint_pos_limits(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.soft_joint_pos_limits, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.vec2f, + name="soft_joint_pos_limits", + ) + + +# --------------------------------------------------------------------------- +# Tests: ArticulationData defaults and command targets +# --------------------------------------------------------------------------- + + +class TestArticulationDataDefaults: + """Test default state and command target properties.""" + + @_backends + @_default_dims + @_default_devices + def test_default_root_pose(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.default_root_pose, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="default_root_pose", + ) + + @_backends + @_default_dims + @_default_devices + def test_default_root_vel(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.default_root_vel, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="default_root_vel", + ) + + @_backends + @_default_dims + @_default_devices + def test_default_joint_pos(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.default_joint_pos, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="default_joint_pos", + ) + + @_backends + @_default_dims + @_default_devices + def test_default_joint_vel(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.default_joint_vel, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="default_joint_vel", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_pos_target(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_pos_target, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_pos_target", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_vel_target(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_vel_target, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_vel_target", + ) + + @_backends + @_default_dims + @_default_devices + def test_joint_effort_target(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.joint_effort_target, + expected_shape=(num_instances, num_joints), + expected_dtype=wp.float32, + name="joint_effort_target", + ) + + +# --------------------------------------------------------------------------- +# Writer/setter test helpers +# --------------------------------------------------------------------------- + +# Map warp structured dtypes to their torch trailing dimension size. +_WP_DTYPE_TO_TRAILING = { + wp.transformf: 7, + wp.spatial_vectorf: 6, + wp.vec2f: 2, + wp.float32: 0, # no trailing dimension +} + + +def _make_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create valid torch test data for a given warp dtype. + + For transformf shapes, appends a trailing dim of 7 and sets quat w=1. + For spatial_vectorf, appends trailing 6. + For vec2f, appends trailing 2 with [-1, 1]. + For float32, no trailing dim. + """ + trailing = _WP_DTYPE_TO_TRAILING[wp_dtype] + if trailing: + full_shape = (*shape, trailing) + else: + full_shape = shape + data = torch.zeros(full_shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., 6] = 1.0 # identity quat w + elif wp_dtype == wp.vec2f: + data[..., 0] = -1.0 + data[..., 1] = 1.0 + elif wp_dtype == wp.float32: + data.fill_(1.0) + return data + + +def _make_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create valid warp test data for a given warp dtype. + + Warp structured types collapse the trailing dim into the dtype, + so a (N,) transformf array is equivalent to (N, 7) float32 in torch. + """ + t = _make_data_torch(shape, device, wp_dtype) + if wp_dtype == wp.float32: + return wp.from_torch(t, dtype=wp.float32) + # For structured types, the torch tensor has the trailing dim; convert to warp + return wp.from_torch(t.contiguous(), dtype=wp_dtype) + + +def _make_bad_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create torch data with wrong leading shape for negative testing. + + Adds +1 to the first dimension so the shape doesn't match. + """ + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_torch(bad_shape, device, wp_dtype) + + +def _make_bad_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create warp data with wrong leading shape for negative testing.""" + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_warp(bad_shape, device, wp_dtype) + + +def _make_env_mask(num_instances: int, device: str, partial: bool) -> wp.array | None: + """Create an env_mask: None for all envs, or a partial bool mask.""" + if not partial: + return None + mask_np = np.zeros(num_instances, dtype=bool) + mask_np[0] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +def _make_env_ids(device: str, subset: bool) -> torch.Tensor | None: + """Create env_ids: None for all envs, or [0] for a subset.""" + if not subset: + return None + return torch.tensor([0], dtype=torch.int32, device=device) + + +def _make_item_mask(total: int, selected: list[int], device: str) -> wp.array: + """Create a bool warp mask with True at `selected` indices, False elsewhere.""" + mask_np = np.zeros(total, dtype=bool) + for i in selected: + mask_np[i] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +# --------------------------------------------------------------------------- +# Tests: Root writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +_ROOT_POSE_METHODS = ["root_pose", "root_link_pose", "root_com_pose"] +_ROOT_VEL_METHODS = ["root_velocity", "root_link_velocity", "root_com_velocity"] + + +class TestArticulationWritersRoot: + """Test root pose/velocity writers with all input combinations.""" + + # -- index variants -- + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_POSE_METHODS) + def test_write_root_pose_to_sim_index( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface, method_suffix + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + method(root_pose=_make_data_torch((num_instances,), device, wp.transformf)) + # torch, subset + method(root_pose=_make_data_torch((1,), device, wp.transformf), env_ids=_make_env_ids(device, True)) + # warp, all envs + method(root_pose=_make_data_warp((num_instances,), device, wp.transformf)) + # warp, subset + method(root_pose=_make_data_warp((1,), device, wp.transformf), env_ids=_make_env_ids(device, True)) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_torch((num_instances,), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_warp((num_instances,), device, wp.transformf)) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_VEL_METHODS) + def test_write_root_velocity_to_sim_index( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface, method_suffix + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + method(root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf)) + # torch, subset + method(root_velocity=_make_data_torch((1,), device, wp.spatial_vectorf), env_ids=_make_env_ids(device, True)) + # warp, all envs + method(root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf)) + # warp, subset + method(root_velocity=_make_data_warp((1,), device, wp.spatial_vectorf), env_ids=_make_env_ids(device, True)) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_torch((num_instances,), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_warp((num_instances,), device, wp.spatial_vectorf)) + + # -- mask variants -- + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_POSE_METHODS) + def test_write_root_pose_to_sim_mask( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface, method_suffix + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"write_{method_suffix}_to_sim_mask") + + # torch, no mask (all) + method(root_pose=_make_data_torch((num_instances,), device, wp.transformf)) + # torch, partial mask + method( + root_pose=_make_data_torch((num_instances,), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # warp, no mask + method(root_pose=_make_data_warp((num_instances,), device, wp.transformf)) + # warp, partial mask + method( + root_pose=_make_data_warp((num_instances,), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_torch((num_instances,), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_warp((num_instances,), device, wp.transformf)) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_VEL_METHODS) + def test_write_root_velocity_to_sim_mask( + self, backend, num_instances, num_joints, num_bodies, device, articulation_iface, method_suffix + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"write_{method_suffix}_to_sim_mask") + + # torch, no mask + method(root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf)) + # torch, partial mask + method( + root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # warp, no mask + method(root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf)) + # warp, partial mask + method( + root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_torch((num_instances,), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_warp((num_instances,), device, wp.spatial_vectorf)) + + +# --------------------------------------------------------------------------- +# Tests: Joint writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +# (method_name, kwarg_name, wp_dtype, accepts_float) +_JOINT_METHODS = [ + ("write_joint_position_to_sim", "position", wp.float32, False), + ("write_joint_velocity_to_sim", "velocity", wp.float32, False), + ("write_joint_stiffness_to_sim", "stiffness", wp.float32, True), + ("write_joint_damping_to_sim", "damping", wp.float32, True), + ("write_joint_position_limit_to_sim", "limits", wp.vec2f, True), + ("write_joint_velocity_limit_to_sim", "limits", wp.float32, True), + ("write_joint_effort_limit_to_sim", "limits", wp.float32, True), + ("write_joint_armature_to_sim", "armature", wp.float32, True), + ("write_joint_friction_coefficient_to_sim", "joint_friction_coeff", wp.float32, False), + ("set_joint_position_target", "target", wp.float32, False), + ("set_joint_velocity_target", "target", wp.float32, False), + ("set_joint_effort_target", "target", wp.float32, False), +] + + +class TestArticulationWritersJoint: + """Test joint writers/setters with all input combinations.""" + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _JOINT_METHODS, + ids=[m[0] for m in _JOINT_METHODS], + ) + def test_joint_writer_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_index") + sub_j = min(2, num_joints) + sub_joint_ids = list(range(sub_j)) + + # torch, all envs + all joints + method(**{kwarg: _make_data_torch((num_instances, num_joints), device, wp_dtype)}) + # torch, subset envs + subset joints + method( + **{ + kwarg: _make_data_torch((1, sub_j), device, wp_dtype), + "joint_ids": sub_joint_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all joints + method(**{kwarg: _make_data_warp((num_instances, num_joints), device, wp_dtype)}) + # warp, subset + method( + **{ + kwarg: _make_data_warp((1, sub_j), device, wp_dtype), + "joint_ids": sub_joint_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # float scalar (only for accepts_float methods, and NOT for vec2f position_limit) + if accepts_float and wp_dtype != wp.vec2f: + method(**{kwarg: 1.0}) + # float scalar for vec2f position_limit should raise ValueError + if accepts_float and wp_dtype == wp.vec2f: + with pytest.raises((ValueError, TypeError)): + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_joints), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_joints), device, wp_dtype)}) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _JOINT_METHODS, + ids=[m[0] for m in _JOINT_METHODS], + ) + def test_joint_writer_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_mask") + sub_joint_sel = list(range(min(2, num_joints))) + + # torch, no mask + method(**{kwarg: _make_data_torch((num_instances, num_joints), device, wp_dtype)}) + # torch, partial env_mask + joint_mask + method( + **{ + kwarg: _make_data_torch((num_instances, num_joints), device, wp_dtype), + "joint_mask": _make_item_mask(num_joints, sub_joint_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_data_warp((num_instances, num_joints), device, wp_dtype)}) + # warp, partial env_mask + joint_mask + method( + **{ + kwarg: _make_data_warp((num_instances, num_joints), device, wp_dtype), + "joint_mask": _make_item_mask(num_joints, sub_joint_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # float scalar (only for accepts_float methods, and NOT for vec2f) + if accepts_float and wp_dtype != wp.vec2f: + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_joints), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_joints), device, wp_dtype)}) + + +# --------------------------------------------------------------------------- +# Tests: Body writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +# (method_name, kwarg_name, wp_dtype, trailing_dim) +_BODY_METHODS = [ + ("set_masses", "masses", wp.float32, 0), + ("set_coms", "coms", wp.transformf, 7), + ("set_inertias", "inertias", wp.float32, 9), +] + + +class TestArticulationWritersBody: + """Test body property writers/setters with all input combinations.""" + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + trailing, + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton only stores CoM position, not orientation") + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_index") + + # For inertias, the shape is (N, B, 9) always (no structured warp type) + # For coms, torch shape is (N, B, 7), warp shape is (N, B) transformf + # For masses, shape is (N, B) + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _warp_shape(n_envs, n_bods): + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_b = min(2, num_bodies) + sub_body_ids = list(range(sub_b)) + + # torch, all envs + all bodies + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, subset + method( + **{ + kwarg: _make_torch(1, sub_b), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all bodies + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, subset + method( + **{ + kwarg: _make_warp(1, sub_b), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # negative: bad torch shape (extra env) + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + trailing, + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton only stores CoM position, not orientation") + art, _ = articulation_iface + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_mask") + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_body_sel = list(range(min(2, num_bodies))) + + # torch, no mask + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, partial env_mask + body_mask + method( + **{ + kwarg: _make_torch(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, partial env_mask + body_mask + method( + **{ + kwarg: _make_warp(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + +# --------------------------------------------------------------------------- +# Tests: Alias/shorthand properties +# --------------------------------------------------------------------------- + + +class TestArticulationDataAliases: + """Test that alias properties return the same shape/dtype as their canonical counterparts.""" + + @_backends + @_default_dims + @_default_devices + def test_root_aliases(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + """root_pose_w == root_link_pose_w, root_vel_w == root_com_vel_w, etc.""" + art, _ = articulation_iface + art.data.update(dt=0.01) + d = art.data + + assert d.root_pose_w.shape == d.root_link_pose_w.shape + assert d.root_pose_w.dtype == d.root_link_pose_w.dtype + assert d.root_pos_w.shape == d.root_link_pos_w.shape + assert d.root_quat_w.shape == d.root_link_quat_w.shape + + assert d.root_vel_w.shape == d.root_com_vel_w.shape + assert d.root_vel_w.dtype == d.root_com_vel_w.dtype + assert d.root_lin_vel_w.shape == d.root_com_lin_vel_w.shape + assert d.root_ang_vel_w.shape == d.root_com_ang_vel_w.shape + + @_backends + @_default_dims + @_default_devices + def test_body_aliases(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + d = art.data + + assert d.body_pose_w.shape == d.body_link_pose_w.shape + assert d.body_pos_w.shape == d.body_link_pos_w.shape + assert d.body_quat_w.shape == d.body_link_quat_w.shape + assert d.body_vel_w.shape == d.body_com_vel_w.shape + assert d.body_lin_vel_w.shape == d.body_com_lin_vel_w.shape + assert d.body_ang_vel_w.shape == d.body_com_ang_vel_w.shape + + @_backends + @_default_dims + @_default_devices + def test_joint_aliases(self, backend, num_instances, num_joints, num_bodies, device, articulation_iface): + art, _ = articulation_iface + art.data.update(dt=0.01) + d = art.data + + assert d.joint_limits.shape == d.joint_pos_limits.shape + assert d.joint_friction.shape == d.joint_friction_coeff.shape + + +# --------------------------------------------------------------------------- +# Tendon tests — parametrize, properties, finders, data, writers +# --------------------------------------------------------------------------- + +# Newton does not support tendons (always 0), so exclude it from tendon tests. +_tendon_backends = pytest.mark.parametrize("backend", [b for b in BACKENDS if b != "newton"], indirect=False) + +_tendon_dims = pytest.mark.parametrize( + "num_instances, num_joints, num_bodies, num_fixed_tendons, num_spatial_tendons", + [ + (1, 2, 2, 1, 0), # fixed only + (2, 6, 7, 3, 2), # both types + (100, 8, 13, 4, 3), # large, both types + ], +) + + +class TestArticulationTendonProperties: + """Test that tendon-related articulation properties return the correct types/values.""" + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_num_fixed_tendons( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + assert art.num_fixed_tendons == num_fixed_tendons + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_num_spatial_tendons( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + assert art.num_spatial_tendons == num_spatial_tendons + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_names( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + names = art.fixed_tendon_names + assert isinstance(names, list) + assert len(names) == num_fixed_tendons + assert all(isinstance(n, str) for n in names) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_spatial_tendon_names( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + names = art.spatial_tendon_names + assert isinstance(names, list) + assert len(names) == num_spatial_tendons + assert all(isinstance(n, str) for n in names) + + +class TestArticulationTendonFinders: + """Test that tendon finder methods return (list[int], list[str]) tuples.""" + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_find_fixed_tendons_all( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + indices, names = art.find_fixed_tendons(".*") + assert isinstance(indices, list) and isinstance(names, list) + assert len(indices) == num_fixed_tendons + assert len(names) == num_fixed_tendons + assert all(isinstance(i, int) for i in indices) + assert all(isinstance(n, str) for n in names) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_find_fixed_tendons_single( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + first = art.fixed_tendon_names[0] + indices, names = art.find_fixed_tendons(first) + assert indices == [0] + assert names == [first] + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_find_spatial_tendons_all( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + indices, names = art.find_spatial_tendons(".*") + assert isinstance(indices, list) and isinstance(names, list) + assert len(indices) == num_spatial_tendons + assert len(names) == num_spatial_tendons + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_find_spatial_tendons_single( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + first = art.spatial_tendon_names[0] + indices, names = art.find_spatial_tendons(first) + assert indices == [0] + assert names == [first] + + +class TestArticulationDataTendonState: + """Test data properties for tendon state (fixed and spatial).""" + + # -- Fixed tendon data properties -- + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_stiffness( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.fixed_tendon_stiffness, + expected_shape=(num_instances, num_fixed_tendons), + expected_dtype=wp.float32, + name="fixed_tendon_stiffness", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_damping( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.fixed_tendon_damping, + expected_shape=(num_instances, num_fixed_tendons), + expected_dtype=wp.float32, + name="fixed_tendon_damping", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_limit_stiffness( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.fixed_tendon_limit_stiffness, + expected_shape=(num_instances, num_fixed_tendons), + expected_dtype=wp.float32, + name="fixed_tendon_limit_stiffness", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_rest_length( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.fixed_tendon_rest_length, + expected_shape=(num_instances, num_fixed_tendons), + expected_dtype=wp.float32, + name="fixed_tendon_rest_length", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_offset( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + _check_proxy_array( + art.data.fixed_tendon_offset, + expected_shape=(num_instances, num_fixed_tendons), + expected_dtype=wp.float32, + name="fixed_tendon_offset", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_fixed_tendon_pos_limits( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + art.data.update(dt=0.01) + from isaaclab.utils.warp import ProxyArray + + arr = art.data.fixed_tendon_pos_limits + assert isinstance(arr, ProxyArray), f"fixed_tendon_pos_limits: expected ProxyArray, got {type(arr)}" + if num_fixed_tendons == 0: + # When no tendons, shape is (N, 0, 2) float32 + assert arr.shape == (num_instances, 0, 2) + assert arr.dtype == wp.float32 + else: + # PhysX returns (N, T, 2) float32; Mock returns (N, T) vec2f + assert arr.shape in ((num_instances, num_fixed_tendons), (num_instances, num_fixed_tendons, 2)) + assert arr.dtype in (wp.vec2f, wp.float32) + + # -- Spatial tendon data properties -- + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_spatial_tendon_stiffness( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + _check_proxy_array( + art.data.spatial_tendon_stiffness, + expected_shape=(num_instances, num_spatial_tendons), + expected_dtype=wp.float32, + name="spatial_tendon_stiffness", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_spatial_tendon_damping( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + _check_proxy_array( + art.data.spatial_tendon_damping, + expected_shape=(num_instances, num_spatial_tendons), + expected_dtype=wp.float32, + name="spatial_tendon_damping", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_spatial_tendon_limit_stiffness( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + _check_proxy_array( + art.data.spatial_tendon_limit_stiffness, + expected_shape=(num_instances, num_spatial_tendons), + expected_dtype=wp.float32, + name="spatial_tendon_limit_stiffness", + ) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_spatial_tendon_offset( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + _check_proxy_array( + art.data.spatial_tendon_offset, + expected_shape=(num_instances, num_spatial_tendons), + expected_dtype=wp.float32, + name="spatial_tendon_offset", + ) + + +# --------------------------------------------------------------------------- +# Tests: Fixed tendon writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +# (method_name, kwarg_name, wp_dtype, accepts_float) +_FIXED_TENDON_METHODS = [ + ("set_fixed_tendon_stiffness", "stiffness", wp.float32, True), + ("set_fixed_tendon_damping", "damping", wp.float32, True), + ("set_fixed_tendon_limit_stiffness", "limit_stiffness", wp.float32, True), + ("set_fixed_tendon_rest_length", "rest_length", wp.float32, True), + ("set_fixed_tendon_offset", "offset", wp.float32, True), +] +# Note: set_fixed_tendon_position_limit is excluded because the PhysX backend stores +# pos_limits as (N, T, 2) float32 while the setter validates (N, T) float32. This data +# layout mismatch prevents consistent testing across mock and PhysX backends. + + +class TestArticulationWritersFixedTendon: + """Test fixed tendon writers/setters with all input combinations.""" + + @_tendon_backends + @_tendon_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _FIXED_TENDON_METHODS, + ids=[m[0] for m in _FIXED_TENDON_METHODS], + ) + def test_fixed_tendon_writer_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_index") + sub_t = min(2, num_fixed_tendons) + sub_tendon_ids = list(range(sub_t)) + + # torch, all envs + all tendons + method(**{kwarg: _make_data_torch((num_instances, num_fixed_tendons), device, wp_dtype)}) + # torch, subset envs + subset tendons + method( + **{ + kwarg: _make_data_torch((1, sub_t), device, wp_dtype), + "fixed_tendon_ids": sub_tendon_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all tendons + method(**{kwarg: _make_data_warp((num_instances, num_fixed_tendons), device, wp_dtype)}) + # warp, subset + method( + **{ + kwarg: _make_data_warp((1, sub_t), device, wp_dtype), + "fixed_tendon_ids": sub_tendon_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # float scalar (only for accepts_float methods, and NOT for vec2f) + if accepts_float and wp_dtype != wp.vec2f: + method(**{kwarg: 1.0}) + # float scalar for vec2f should raise ValueError + if accepts_float and wp_dtype == wp.vec2f: + with pytest.raises((ValueError, TypeError)): + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_fixed_tendons), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_fixed_tendons), device, wp_dtype)}) + + @_tendon_backends + @_tendon_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _FIXED_TENDON_METHODS, + ids=[m[0] for m in _FIXED_TENDON_METHODS], + ) + def test_fixed_tendon_writer_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_mask") + sub_tendon_sel = list(range(min(2, num_fixed_tendons))) + + # torch, no mask + method(**{kwarg: _make_data_torch((num_instances, num_fixed_tendons), device, wp_dtype)}) + # torch, partial env_mask + tendon_mask + method( + **{ + kwarg: _make_data_torch((num_instances, num_fixed_tendons), device, wp_dtype), + "fixed_tendon_mask": _make_item_mask(num_fixed_tendons, sub_tendon_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_data_warp((num_instances, num_fixed_tendons), device, wp_dtype)}) + # warp, partial env_mask + tendon_mask + method( + **{ + kwarg: _make_data_warp((num_instances, num_fixed_tendons), device, wp_dtype), + "fixed_tendon_mask": _make_item_mask(num_fixed_tendons, sub_tendon_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # float scalar (only for accepts_float methods, and NOT for vec2f) + if accepts_float and wp_dtype != wp.vec2f: + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_fixed_tendons), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_fixed_tendons), device, wp_dtype)}) + + +# --------------------------------------------------------------------------- +# Tests: Spatial tendon writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +_SPATIAL_TENDON_METHODS = [ + ("set_spatial_tendon_stiffness", "stiffness", wp.float32, True), + ("set_spatial_tendon_damping", "damping", wp.float32, True), + ("set_spatial_tendon_limit_stiffness", "limit_stiffness", wp.float32, True), + ("set_spatial_tendon_offset", "offset", wp.float32, True), +] + + +class TestArticulationWritersSpatialTendon: + """Test spatial tendon writers/setters with all input combinations.""" + + @_tendon_backends + @_tendon_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _SPATIAL_TENDON_METHODS, + ids=[m[0] for m in _SPATIAL_TENDON_METHODS], + ) + def test_spatial_tendon_writer_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_index") + sub_t = min(2, num_spatial_tendons) + sub_tendon_ids = list(range(sub_t)) + + # torch, all envs + all tendons + method(**{kwarg: _make_data_torch((num_instances, num_spatial_tendons), device, wp_dtype)}) + # torch, subset envs + subset tendons + method( + **{ + kwarg: _make_data_torch((1, sub_t), device, wp_dtype), + "spatial_tendon_ids": sub_tendon_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all tendons + method(**{kwarg: _make_data_warp((num_instances, num_spatial_tendons), device, wp_dtype)}) + # warp, subset + method( + **{ + kwarg: _make_data_warp((1, sub_t), device, wp_dtype), + "spatial_tendon_ids": sub_tendon_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # float scalar + if accepts_float: + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_spatial_tendons), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_spatial_tendons), device, wp_dtype)}) + + @_tendon_backends + @_tendon_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, accepts_float", + _SPATIAL_TENDON_METHODS, + ids=[m[0] for m in _SPATIAL_TENDON_METHODS], + ) + def test_spatial_tendon_writer_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + method_base, + kwarg, + wp_dtype, + accepts_float, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + method = getattr(art, f"{method_base}_mask") + sub_tendon_sel = list(range(min(2, num_spatial_tendons))) + + # torch, no mask + method(**{kwarg: _make_data_torch((num_instances, num_spatial_tendons), device, wp_dtype)}) + # torch, partial env_mask + tendon_mask + method( + **{ + kwarg: _make_data_torch((num_instances, num_spatial_tendons), device, wp_dtype), + "spatial_tendon_mask": _make_item_mask(num_spatial_tendons, sub_tendon_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_data_warp((num_instances, num_spatial_tendons), device, wp_dtype)}) + # warp, partial env_mask + tendon_mask + method( + **{ + kwarg: _make_data_warp((num_instances, num_spatial_tendons), device, wp_dtype), + "spatial_tendon_mask": _make_item_mask(num_spatial_tendons, sub_tendon_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # float scalar + if accepts_float: + method(**{kwarg: 1.0}) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_torch((num_instances, num_spatial_tendons), device, wp_dtype)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_bad_data_warp((num_instances, num_spatial_tendons), device, wp_dtype)}) + + +# --------------------------------------------------------------------------- +# Tests: Tendon write-to-sim smoke tests +# --------------------------------------------------------------------------- + + +class TestArticulationWritersTendonToSim: + """Smoke test write_*_tendon_properties_to_sim_index/mask methods.""" + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_write_fixed_tendon_properties_to_sim_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + art.data.update(dt=0.01) + # all envs + art.write_fixed_tendon_properties_to_sim_index() + # subset envs + art.write_fixed_tendon_properties_to_sim_index(env_ids=_make_env_ids(device, True)) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_write_fixed_tendon_properties_to_sim_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_fixed_tendons == 0: + pytest.skip("No fixed tendons configured") + art.data.update(dt=0.01) + # no mask + art.write_fixed_tendon_properties_to_sim_mask() + # partial env mask + art.write_fixed_tendon_properties_to_sim_mask(env_mask=_make_env_mask(num_instances, device, True)) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_write_spatial_tendon_properties_to_sim_index( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + # all envs + art.write_spatial_tendon_properties_to_sim_index() + # subset envs + art.write_spatial_tendon_properties_to_sim_index(env_ids=_make_env_ids(device, True)) + + @_tendon_backends + @_tendon_dims + @_default_devices + def test_write_spatial_tendon_properties_to_sim_mask( + self, + backend, + num_instances, + num_joints, + num_bodies, + num_fixed_tendons, + num_spatial_tendons, + device, + articulation_iface, + ): + art, _ = articulation_iface + if num_spatial_tendons == 0: + pytest.skip("No spatial tendons configured") + art.data.update(dt=0.01) + # no mask + art.write_spatial_tendon_properties_to_sim_mask() + # partial env mask + art.write_spatial_tendon_properties_to_sim_mask(env_mask=_make_env_mask(num_instances, device, True)) diff --git a/source/isaaclab/test/assets/test_rigid_object_collection_iface.py b/source/isaaclab/test/assets/test_rigid_object_collection_iface.py new file mode 100644 index 000000000000..42c2e8d731b5 --- /dev/null +++ b/source/isaaclab/test/assets/test_rigid_object_collection_iface.py @@ -0,0 +1,1289 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +""" +Checks that the rigid object collection interfaces are consistent across backends, and are providing +the exact same data as what the base rigid object collection class advertises. All rigid object +collection interfaces need to comply with the same interface contract. + +The setup is a bit convoluted so that we can run these tests without requiring Isaac Sim or GPU simulation. +""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +HEADLESS = True + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +from unittest.mock import MagicMock + +import numpy as np +import pytest +import torch +import warp as wp + +from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg +from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg +from isaaclab.test.mock_interfaces.utils import MockWrenchComposer + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +""" +Check which backends are available. +""" + +BACKENDS = ["Mock"] # Mock backend is always available. + +try: + from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection import ( + RigidObjectCollection as PhysXRigidObjectCollection, + ) + from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import ( + RigidObjectCollectionData as PhysXRigidObjectCollectionData, + ) + from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp as PhysXMockRigidBodyViewWarp + + BACKENDS.append("physx") +except ImportError: + pass + +try: + from isaaclab_newton.assets.rigid_object_collection.rigid_object_collection import ( + RigidObjectCollection as NewtonRigidObjectCollection, + ) + from isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data import ( + RigidObjectCollectionData as NewtonRigidObjectCollectionData, + ) + from isaaclab_newton.test.mock_interfaces.mock_newton import MockWrenchComposer as NewtonMockWrenchComposer + from isaaclab_newton.test.mock_interfaces.views import MockNewtonCollectionView as NewtonMockCollectionView + + BACKENDS.append("newton") +except ImportError: + pass + + +def create_physx_rigid_object_collection( + num_instances: int = 2, + num_bodies: int = 3, + device: str = "cuda:0", +): + """Create a test RigidObjectCollection instance with mocked dependencies.""" + collection = object.__new__(PhysXRigidObjectCollection) + + rigid_objects = {f"object_{i}": RigidObjectCfg(prim_path=f"/World/Object_{i}") for i in range(num_bodies)} + collection.cfg = RigidObjectCollectionCfg(rigid_objects=rigid_objects) + + # View count = num_instances * num_bodies (body-major view order) + mock_view = PhysXMockRigidBodyViewWarp( + count=num_instances * num_bodies, + device=device, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + object.__setattr__(collection, "_root_view", mock_view) + object.__setattr__(collection, "_device", device) + object.__setattr__(collection, "_num_bodies", num_bodies) + object.__setattr__(collection, "_num_instances", num_instances) + object.__setattr__(collection, "_body_names_list", [f"object_{i}" for i in range(num_bodies)]) + + # Create RigidObjectCollectionData instance + data = PhysXRigidObjectCollectionData(mock_view, num_bodies, device) + object.__setattr__(collection, "_data", data) + data.body_names = [f"object_{i}" for i in range(num_bodies)] + + # Create mock wrench composers + mock_inst_wrench = MockWrenchComposer(collection) + mock_perm_wrench = MockWrenchComposer(collection) + object.__setattr__(collection, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(collection, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(collection, "_initialize_handle", None) + object.__setattr__(collection, "_invalidate_initialize_handle", None) + object.__setattr__(collection, "_prim_deletion_handle", None) + object.__setattr__(collection, "_debug_vis_handle", None) + + # Set up index arrays + object.__setattr__( + collection, "_ALL_ENV_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device) + ) + object.__setattr__(collection, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device)) + + return collection, mock_view + + +def create_newton_rigid_object_collection( + num_instances: int = 2, + num_bodies: int = 3, + device: str = "cuda:0", +): + """Create a test Newton RigidObjectCollection instance with mocked dependencies.""" + import isaaclab_newton.assets.rigid_object_collection.rigid_object_collection as newton_coll_module + import isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data as newton_data_module + + body_names = [f"object_{i}" for i in range(num_bodies)] + + # Create collection-specific mock view with (N, B) root shapes + mock_view = NewtonMockCollectionView( + num_envs=num_instances, + num_bodies=num_bodies, + device=device, + body_names=body_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Mock NewtonManager (aliased as SimulationManager in Newton modules) + mock_model = MagicMock() + mock_model.gravity = wp.array(np.array([[0.0, 0.0, -9.81]], dtype=np.float32), dtype=wp.vec3f, device=device) + mock_state = MagicMock() + mock_control = MagicMock() + + mock_manager = MagicMock() + mock_manager.get_model.return_value = mock_model + mock_manager.get_state_0.return_value = mock_state + mock_manager.get_state_1.return_value = mock_state + mock_manager.get_control.return_value = mock_control + + # Patch SimulationManager in both data and collection modules + original_data_manager = newton_data_module.SimulationManager + original_coll_manager = newton_coll_module.SimulationManager + newton_data_module.SimulationManager = mock_manager + newton_coll_module.SimulationManager = mock_manager + + try: + data = NewtonRigidObjectCollectionData(mock_view, num_bodies, device) + finally: + newton_data_module.SimulationManager = original_data_manager + newton_coll_module.SimulationManager = original_coll_manager + + # Create collection shell (bypass __init__) + collection = object.__new__(NewtonRigidObjectCollection) + + rigid_objects = {f"object_{i}": RigidObjectCfg(prim_path=f"/World/Object_{i}") for i in range(num_bodies)} + collection.cfg = RigidObjectCollectionCfg(rigid_objects=rigid_objects) + + object.__setattr__(collection, "_root_view", mock_view) + object.__setattr__(collection, "_device", device) + object.__setattr__(collection, "_num_bodies", num_bodies) + object.__setattr__(collection, "_num_instances", num_instances) + object.__setattr__(collection, "_body_names_list", body_names) + object.__setattr__(collection, "_data", data) + data.body_names = body_names + + # Mock wrench composers (Newton-specific) + mock_inst_wrench = NewtonMockWrenchComposer(collection) + mock_perm_wrench = NewtonMockWrenchComposer(collection) + object.__setattr__(collection, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(collection, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(collection, "_initialize_handle", None) + object.__setattr__(collection, "_invalidate_initialize_handle", None) + object.__setattr__(collection, "_prim_deletion_handle", None) + object.__setattr__(collection, "_debug_vis_handle", None) + + # Index arrays (warp) + object.__setattr__( + collection, "_ALL_ENV_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device) + ) + object.__setattr__(collection, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device)) + object.__setattr__(collection, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(collection, "_ALL_BODY_MASK", wp.ones((num_bodies,), dtype=wp.bool, device=device)) + + return collection, mock_view + + +def create_mock_rigid_object_collection( + num_instances: int = 2, + num_bodies: int = 3, + device: str = "cuda:0", +): + from isaaclab.test.mock_interfaces.assets.mock_rigid_object_collection import MockRigidObjectCollection + + obj = MockRigidObjectCollection( + num_instances=num_instances, + num_bodies=num_bodies, + device=device, + ) + return obj, None + + +def get_rigid_object_collection( + backend: str, + num_instances: int = 2, + num_bodies: int = 3, + device: str = "cuda:0", +): + if backend == "physx": + return create_physx_rigid_object_collection(num_instances, num_bodies, device) + elif backend == "newton": + return create_newton_rigid_object_collection(num_instances, num_bodies, device) + elif backend.lower() == "mock": + return create_mock_rigid_object_collection(num_instances, num_bodies, device) + else: + raise ValueError(f"Invalid backend: {backend}") + + +@pytest.fixture +def collection_iface(request): + backend = request.getfixturevalue("backend") + num_instances = request.getfixturevalue("num_instances") + num_bodies = request.getfixturevalue("num_bodies") + device = request.getfixturevalue("device") + return get_rigid_object_collection(backend, num_instances, num_bodies, device) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _check_proxy_array(arr, *, expected_shape: tuple, expected_dtype: type, name: str): + """Assert that `arr` is a ProxyArray with the expected shape and dtype.""" + from isaaclab.utils.warp import ProxyArray + + assert isinstance(arr, ProxyArray), f"{name}: expected ProxyArray, got {type(arr)}" + assert arr.shape == expected_shape, f"{name}: expected shape {expected_shape}, got {arr.shape}" + assert arr.dtype == expected_dtype, f"{name}: expected dtype {expected_dtype}, got {arr.dtype}" + + +# Common parametrize decorators +_backends = pytest.mark.parametrize("backend", BACKENDS, indirect=False) + +_default_dims = pytest.mark.parametrize("num_instances", [1, 2, 100]) + +_default_bodies = pytest.mark.parametrize("num_bodies", [1, 3]) + +_default_devices = pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +_index_resolution_backends = pytest.mark.parametrize( + "backend", [backend for backend in ("physx", "newton") if backend in BACKENDS], indirect=False +) + + +# --------------------------------------------------------------------------- +# Writer/setter test helpers +# --------------------------------------------------------------------------- + +_WP_DTYPE_TO_TRAILING = { + wp.transformf: 7, + wp.spatial_vectorf: 6, + wp.vec2f: 2, + wp.float32: 0, +} + + +def _make_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create valid torch test data for a given warp dtype.""" + trailing = _WP_DTYPE_TO_TRAILING[wp_dtype] + if trailing: + full_shape = (*shape, trailing) + else: + full_shape = shape + data = torch.zeros(full_shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., 6] = 1.0 + elif wp_dtype == wp.float32: + data.fill_(1.0) + return data + + +def _make_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create valid warp test data for a given warp dtype.""" + t = _make_data_torch(shape, device, wp_dtype) + if wp_dtype == wp.float32: + return wp.from_torch(t, dtype=wp.float32) + return wp.from_torch(t.contiguous(), dtype=wp_dtype) + + +def _make_bad_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create torch data with wrong leading shape for negative testing.""" + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_torch(bad_shape, device, wp_dtype) + + +def _make_bad_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create warp data with wrong leading shape for negative testing.""" + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_warp(bad_shape, device, wp_dtype) + + +def _make_env_mask(num_instances: int, device: str, partial: bool) -> wp.array | None: + """Create an env_mask: None for all envs, or a partial bool mask.""" + if not partial: + return None + mask_np = np.zeros(num_instances, dtype=bool) + mask_np[0] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +def _make_env_ids(device: str, subset: bool) -> torch.Tensor | None: + """Create env_ids: None for all envs, or [0] for a subset.""" + if not subset: + return None + return torch.tensor([0], dtype=torch.int32, device=device) + + +def _make_body_ids(device: str, subset_ids: list[int] | None) -> torch.Tensor | None: + """Create body_ids: None for all bodies, or a list for a subset.""" + if subset_ids is None: + return None + return torch.tensor(subset_ids, dtype=torch.int32, device=device) + + +def _make_item_mask(total: int, selected: list[int], device: str) -> wp.array: + """Create a bool warp mask with True at `selected` indices, False elsewhere.""" + mask_np = np.zeros(total, dtype=bool) + for i in selected: + mask_np[i] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +# --------------------------------------------------------------------------- +# Tests: Index resolution helpers +# --------------------------------------------------------------------------- + + +class TestCollectionIndexResolution: + """Test backend-specific index resolution helpers.""" + + @_index_resolution_backends + def test_resolve_env_ids_handles_tensor_view_shape(self, backend): + obj, _ = get_rigid_object_collection(backend, num_instances=4, device="cpu") + + env_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = obj._resolve_env_ids(env_ids) + resolved_view = obj._resolve_env_ids(env_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + @_index_resolution_backends + def test_resolve_body_ids_handles_tensor_view_shape(self, backend): + obj, _ = get_rigid_object_collection(backend, num_bodies=4, device="cpu") + + body_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = obj._resolve_body_ids(body_ids) + resolved_view = obj._resolve_body_ids(body_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + +# --------------------------------------------------------------------------- +# Tests: Collection properties +# --------------------------------------------------------------------------- + + +class TestCollectionProperties: + """Test that collection properties return the correct types/values.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_num_instances(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + assert obj.num_instances == num_instances + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_num_bodies(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + assert obj.num_bodies == num_bodies + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_names(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + names = obj.body_names + assert isinstance(names, list) + assert len(names) == num_bodies + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_data_returns_collection_data(self, backend, num_instances, num_bodies, device, collection_iface): + from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import ( + BaseRigidObjectCollectionData, + ) + + obj, _ = collection_iface + assert isinstance(obj.data, BaseRigidObjectCollectionData) + + +# --------------------------------------------------------------------------- +# Tests: Collection finder methods +# --------------------------------------------------------------------------- + + +class TestCollectionFinders: + """Test that finder methods return correct results.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_find_bodies_all(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + mask, names = obj.find_bodies(".*") + assert isinstance(mask, torch.Tensor) + assert isinstance(names, list) + assert len(names) == num_bodies + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_find_bodies_single(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + first_body = obj.body_names[0] + mask, names = obj.find_bodies(first_body) + assert len(names) == 1 + assert names == [first_body] + + +# --------------------------------------------------------------------------- +# Tests: Body state properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataBodyState: + """Test data properties for body state.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_pose_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_pose_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_link_pose_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_link_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_pose_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pose_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_com_pose_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_com_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_acc_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_acc_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="body_com_acc_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_pose_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pose_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="body_com_pose_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: Sliced properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataSliced: + """Test sliced data properties.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_pos_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_pos_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_pos_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_quat_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_quat_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_link_quat_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_lin_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_lin_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_ang_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_ang_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_pos_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pos_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_pos_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_quat_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_quat_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_com_quat_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_lin_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_lin_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_ang_vel_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_ang_vel_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_lin_acc_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_lin_acc_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_lin_acc_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_ang_acc_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_ang_acc_w, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_ang_acc_w", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_pos_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pos_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_pos_b", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_quat_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_quat_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.quatf, + name="body_com_quat_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: Derived properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataDerived: + """Test derived/computed data properties.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_projected_gravity_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.projected_gravity_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="projected_gravity_b", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_heading_w(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.heading_w, expected_shape=(num_instances, num_bodies), expected_dtype=wp.float32, name="heading_w" + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_lin_vel_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_lin_vel_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_link_ang_vel_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_ang_vel_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_link_ang_vel_b", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_lin_vel_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_lin_vel_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_com_ang_vel_b(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_ang_vel_b, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.vec3f, + name="body_com_ang_vel_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: Mass properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataMass: + """Test body mass/inertia properties.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_mass(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_mass, expected_shape=(num_instances, num_bodies), expected_dtype=wp.float32, name="body_mass" + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_inertia(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_inertia, + expected_shape=(num_instances, num_bodies, 9), + expected_dtype=wp.float32, + name="body_inertia", + ) + + +# --------------------------------------------------------------------------- +# Tests: Default state properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataDefaults: + """Test default state properties.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_default_body_pose(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.default_body_pose, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.transformf, + name="default_body_pose", + ) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_default_body_vel(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.default_body_vel, + expected_shape=(num_instances, num_bodies), + expected_dtype=wp.spatial_vectorf, + name="default_body_vel", + ) + + +# --------------------------------------------------------------------------- +# Tests: Body pose/velocity writers +# --------------------------------------------------------------------------- + +_BODY_POSE_METHODS = ["body_pose", "body_link_pose", "body_com_pose"] +_BODY_VEL_METHODS = ["body_velocity", "body_com_velocity", "body_link_velocity"] + + +class TestCollectionWritersPose: + """Test body pose/velocity writers with all input combinations.""" + + # -- index variants for pose -- + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize("method_suffix", _BODY_POSE_METHODS) + def test_write_body_pose_to_sim_index( + self, backend, num_instances, num_bodies, device, collection_iface, method_suffix + ): + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + all bodies + method(body_poses=_make_data_torch((num_instances, num_bodies), device, wp.transformf)) + # torch, subset envs + method(body_poses=_make_data_torch((1, num_bodies), device, wp.transformf), env_ids=_make_env_ids(device, True)) + # torch, subset bodies + method( + body_poses=_make_data_torch((num_instances, 1), device, wp.transformf), body_ids=_make_body_ids(device, [0]) + ) + # torch, subset both + method( + body_poses=_make_data_torch((1, 1), device, wp.transformf), + env_ids=_make_env_ids(device, True), + body_ids=_make_body_ids(device, [0]), + ) + # warp, all envs + all bodies + method(body_poses=_make_data_warp((num_instances, num_bodies), device, wp.transformf)) + # warp, subset + method( + body_poses=_make_data_warp((1, 1), device, wp.transformf), + env_ids=_make_env_ids(device, True), + body_ids=_make_body_ids(device, [0]), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_poses=_make_bad_data_torch((num_instances, num_bodies), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_poses=_make_bad_data_warp((num_instances, num_bodies), device, wp.transformf)) + + # -- index variants for velocity -- + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize("method_suffix", _BODY_VEL_METHODS) + def test_write_body_velocity_to_sim_index( + self, backend, num_instances, num_bodies, device, collection_iface, method_suffix + ): + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + all bodies + method(body_velocities=_make_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf)) + # torch, subset envs + method( + body_velocities=_make_data_torch((1, num_bodies), device, wp.spatial_vectorf), + env_ids=_make_env_ids(device, True), + ) + # torch, subset bodies + method( + body_velocities=_make_data_torch((num_instances, 1), device, wp.spatial_vectorf), + body_ids=_make_body_ids(device, [0]), + ) + # torch, subset both + method( + body_velocities=_make_data_torch((1, 1), device, wp.spatial_vectorf), + env_ids=_make_env_ids(device, True), + body_ids=_make_body_ids(device, [0]), + ) + # warp, all envs + all bodies + method(body_velocities=_make_data_warp((num_instances, num_bodies), device, wp.spatial_vectorf)) + # warp, subset + method( + body_velocities=_make_data_warp((1, 1), device, wp.spatial_vectorf), + env_ids=_make_env_ids(device, True), + body_ids=_make_body_ids(device, [0]), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_velocities=_make_bad_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_velocities=_make_bad_data_warp((num_instances, num_bodies), device, wp.spatial_vectorf)) + + # -- mask variants for pose -- + # Note: write_body_pose_to_sim_mask accepts body_mask, but write_body_link_pose_to_sim_mask + # and write_body_com_pose_to_sim_mask use body_ids instead. We only test body_mask on body_pose. + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize("method_suffix", _BODY_POSE_METHODS) + def test_write_body_pose_to_sim_mask( + self, backend, num_instances, num_bodies, device, collection_iface, method_suffix + ): + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_mask") + + has_body_mask = method_suffix == "body_pose" + + # torch, no mask (all) + method(body_poses=_make_data_torch((num_instances, num_bodies), device, wp.transformf)) + # torch, partial env_mask + method( + body_poses=_make_data_torch((num_instances, num_bodies), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + if has_body_mask: + # torch, partial body_mask + method( + body_poses=_make_data_torch((num_instances, num_bodies), device, wp.transformf), + body_mask=_make_item_mask(num_bodies, [0], device), + ) + # torch, both masks + method( + body_poses=_make_data_torch((num_instances, num_bodies), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + body_mask=_make_item_mask(num_bodies, [0], device), + ) + # warp, no mask + method(body_poses=_make_data_warp((num_instances, num_bodies), device, wp.transformf)) + # warp, partial env_mask + method( + body_poses=_make_data_warp((num_instances, num_bodies), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_poses=_make_bad_data_torch((num_instances, num_bodies), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_poses=_make_bad_data_warp((num_instances, num_bodies), device, wp.transformf)) + + # -- mask variants for velocity -- + # Note: write_body_velocity_to_sim_mask accepts body_mask, but the _link_/_com_ variants use body_ids. + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize("method_suffix", _BODY_VEL_METHODS) + def test_write_body_velocity_to_sim_mask( + self, backend, num_instances, num_bodies, device, collection_iface, method_suffix + ): + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_mask") + + has_body_mask = method_suffix == "body_velocity" + + # torch, no mask + method(body_velocities=_make_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf)) + # torch, partial env_mask + method( + body_velocities=_make_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + if has_body_mask: + # torch, partial body_mask + method( + body_velocities=_make_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf), + body_mask=_make_item_mask(num_bodies, [0], device), + ) + # warp, no mask + method(body_velocities=_make_data_warp((num_instances, num_bodies), device, wp.spatial_vectorf)) + # warp, partial env_mask + method( + body_velocities=_make_data_warp((num_instances, num_bodies), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_velocities=_make_bad_data_torch((num_instances, num_bodies), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(body_velocities=_make_bad_data_warp((num_instances, num_bodies), device, wp.spatial_vectorf)) + + +# --------------------------------------------------------------------------- +# Tests: Body property setters +# --------------------------------------------------------------------------- + +_BODY_METHODS = [ + ("set_masses", "masses", wp.float32, 0), + ("set_coms", "coms", wp.transformf, 7), + ("set_inertias", "inertias", wp.float32, 9), +] + + +class TestCollectionWritersBody: + """Test body property writers/setters with all input combinations.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_index( + self, backend, num_instances, num_bodies, device, collection_iface, method_base, kwarg, wp_dtype, trailing + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton set_coms expects vec3f (position only), not transformf (pose)") + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"{method_base}_index") + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_body_ids = [0] + + # torch, all envs + all bodies + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, subset + method( + **{ + kwarg: _make_torch(1, 1), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all bodies + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, subset + method( + **{ + kwarg: _make_warp(1, 1), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # negative: bad torch shape (extra env) + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + @_backends + @_default_dims + @_default_bodies + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_mask( + self, backend, num_instances, num_bodies, device, collection_iface, method_base, kwarg, wp_dtype, trailing + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton set_coms expects vec3f (position only), not transformf (pose)") + obj, _ = collection_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"{method_base}_mask") + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_body_sel = [0] + + # torch, no mask + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, partial env_mask + body_mask + method( + **{ + kwarg: _make_torch(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, partial env_mask + body_mask + method( + **{ + kwarg: _make_warp(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + +# --------------------------------------------------------------------------- +# Tests: Alias/shorthand properties +# --------------------------------------------------------------------------- + + +class TestCollectionDataAliases: + """Test that alias properties return the same shape/dtype as their canonical counterparts.""" + + @_backends + @_default_dims + @_default_bodies + @_default_devices + def test_body_aliases(self, backend, num_instances, num_bodies, device, collection_iface): + obj, _ = collection_iface + obj.data.update(dt=0.01) + d = obj.data + + # Pose aliases + assert d.body_pose_w.shape == d.body_link_pose_w.shape + assert d.body_pose_w.dtype == d.body_link_pose_w.dtype + assert d.body_pos_w.shape == d.body_link_pos_w.shape + assert d.body_pos_w.dtype == d.body_link_pos_w.dtype + assert d.body_quat_w.shape == d.body_link_quat_w.shape + assert d.body_quat_w.dtype == d.body_link_quat_w.dtype + + # Velocity aliases + assert d.body_vel_w.shape == d.body_com_vel_w.shape + assert d.body_vel_w.dtype == d.body_com_vel_w.dtype + assert d.body_lin_vel_w.shape == d.body_com_lin_vel_w.shape + assert d.body_lin_vel_w.dtype == d.body_com_lin_vel_w.dtype + assert d.body_ang_vel_w.shape == d.body_com_ang_vel_w.shape + assert d.body_ang_vel_w.dtype == d.body_com_ang_vel_w.dtype + + # Acceleration aliases + assert d.body_acc_w.shape == d.body_com_acc_w.shape + assert d.body_acc_w.dtype == d.body_com_acc_w.dtype + assert d.body_lin_acc_w.shape == d.body_com_lin_acc_w.shape + assert d.body_lin_acc_w.dtype == d.body_com_lin_acc_w.dtype + assert d.body_ang_acc_w.shape == d.body_com_ang_acc_w.shape + assert d.body_ang_acc_w.dtype == d.body_com_ang_acc_w.dtype + + # CoM body frame aliases + assert d.com_pos_b.shape == d.body_com_pos_b.shape + assert d.com_pos_b.dtype == d.body_com_pos_b.dtype + assert d.com_quat_b.shape == d.body_com_quat_b.shape + assert d.com_quat_b.dtype == d.body_com_quat_b.dtype diff --git a/source/isaaclab/test/assets/test_rigid_object_iface.py b/source/isaaclab/test/assets/test_rigid_object_iface.py new file mode 100644 index 000000000000..178feeddb603 --- /dev/null +++ b/source/isaaclab/test/assets/test_rigid_object_iface.py @@ -0,0 +1,1221 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +""" +Checks that the rigid object interfaces are consistent across backends, and are providing the exact same data as what +the base rigid object class advertises. All rigid object interfaces need to comply with the same interface contract. + +The setup is a bit convoluted so that we can run these tests without requiring Isaac Sim or GPU simulation. +""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +HEADLESS = True + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +from unittest.mock import MagicMock + +import numpy as np +import pytest +import torch +import warp as wp + +from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg +from isaaclab.test.mock_interfaces.utils import MockWrenchComposer + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running unit tests +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +""" +Check which backends are available. +""" + +BACKENDS = ["Mock"] # Mock backend is always available. + +try: + from isaaclab_physx.assets.rigid_object.rigid_object import RigidObject as PhysXRigidObject + from isaaclab_physx.assets.rigid_object.rigid_object_data import RigidObjectData as PhysXRigidObjectData + from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp as PhysXMockRigidBodyViewWarp + + BACKENDS.append("physx") +except ImportError: + pass + +try: + from isaaclab_newton.assets.rigid_object.rigid_object import RigidObject as NewtonRigidObject + from isaaclab_newton.assets.rigid_object.rigid_object_data import RigidObjectData as NewtonRigidObjectData + from isaaclab_newton.test.mock_interfaces.views import MockNewtonArticulationView as NewtonMockArticulationView + + BACKENDS.append("newton") +except ImportError: + pass + + +def create_physx_rigid_object( + num_instances: int = 2, + device: str = "cuda:0", +): + """Create a test RigidObject instance with mocked dependencies.""" + body_names = ["body_0"] + + rigid_object = object.__new__(PhysXRigidObject) + + rigid_object.cfg = RigidObjectCfg(prim_path="/World/Object") + + # Create PhysX mock view + mock_view = PhysXMockRigidBodyViewWarp( + count=num_instances, + device=device, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + object.__setattr__(rigid_object, "_root_view", mock_view) + object.__setattr__(rigid_object, "_device", device) + + # Create RigidObjectData instance (SimulationManager already mocked at module level) + data = PhysXRigidObjectData(mock_view, device) + object.__setattr__(rigid_object, "_data", data) + + # Set body names on data + data.body_names = body_names + + # Create mock wrench composers + mock_inst_wrench = MockWrenchComposer(rigid_object) + mock_perm_wrench = MockWrenchComposer(rigid_object) + object.__setattr__(rigid_object, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(rigid_object, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(rigid_object, "_initialize_handle", None) + object.__setattr__(rigid_object, "_invalidate_initialize_handle", None) + object.__setattr__(rigid_object, "_prim_deletion_handle", None) + object.__setattr__(rigid_object, "_debug_vis_handle", None) + + # Set up index arrays (warp arrays for rigid object) + object.__setattr__(rigid_object, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__(rigid_object, "_ALL_BODY_INDICES", wp.array(np.array([0], dtype=np.int32), device=device)) + + # Cached .view(wp.float32) wrappers + object.__setattr__(rigid_object, "_root_link_pose_w_f32", None) + object.__setattr__(rigid_object, "_root_com_vel_w_f32", None) + object.__setattr__(rigid_object, "_inst_wrench_force_f32", None) + object.__setattr__(rigid_object, "_inst_wrench_torque_f32", None) + object.__setattr__(rigid_object, "_perm_wrench_force_f32", None) + object.__setattr__(rigid_object, "_perm_wrench_torque_f32", None) + + # Pre-allocated pinned CPU buffers for PhysX TensorAPI writes + N, B = num_instances, 1 # rigid object has 1 body + cpu_env_ids = wp.array(np.arange(N, dtype=np.int32), device="cpu") + object.__setattr__(rigid_object, "_cpu_env_ids_all", cpu_env_ids) + object.__setattr__(rigid_object, "_cpu_body_mass", wp.zeros((N, B), dtype=wp.float32, device="cpu")) + object.__setattr__(rigid_object, "_cpu_body_coms", wp.zeros((N, B, 7), dtype=wp.float32, device="cpu")) + object.__setattr__(rigid_object, "_cpu_body_inertia", wp.zeros((N, B, 9), dtype=wp.float32, device="cpu")) + + return rigid_object, mock_view + + +def create_newton_rigid_object( + num_instances: int = 2, + device: str = "cuda:0", +): + """Create a test Newton RigidObject instance with mocked dependencies.""" + import isaaclab_newton.assets.rigid_object.rigid_object_data as newton_data_module + + body_names = ["body_0"] + + # Create Newton mock view (uses ArticulationView with num_bodies=1 for rigid objects) + mock_view = NewtonMockArticulationView( + num_instances=num_instances, + num_bodies=1, + num_joints=0, + device=device, + is_fixed_base=False, + joint_names=[], + body_names=body_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Mock NewtonManager (aliased as SimulationManager in Newton modules) + mock_model = MagicMock() + mock_model.gravity = wp.array(np.array([[0.0, 0.0, -9.81]], dtype=np.float32), dtype=wp.vec3f, device=device) + mock_state = MagicMock() + mock_control = MagicMock() + + mock_manager = MagicMock() + mock_manager.get_model.return_value = mock_model + mock_manager.get_state_0.return_value = mock_state + mock_manager.get_state_1.return_value = mock_state + mock_manager.get_control.return_value = mock_control + + # Patch SimulationManager in the Newton data module + original_sim_manager = newton_data_module.SimulationManager + newton_data_module.SimulationManager = mock_manager + + try: + data = NewtonRigidObjectData(mock_view, device) + finally: + newton_data_module.SimulationManager = original_sim_manager + + # Create RigidObject shell (bypass __init__) + rigid_object = object.__new__(NewtonRigidObject) + + rigid_object.cfg = RigidObjectCfg(prim_path="/World/Object") + + object.__setattr__(rigid_object, "_root_view", mock_view) + object.__setattr__(rigid_object, "_device", device) + object.__setattr__(rigid_object, "_data", data) + + # Mock wrench composers + mock_inst_wrench = MockWrenchComposer(rigid_object) + mock_perm_wrench = MockWrenchComposer(rigid_object) + object.__setattr__(rigid_object, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(rigid_object, "_permanent_wrench_composer", mock_perm_wrench) + + # Prevent __del__ / _clear_callbacks from raising AttributeError + object.__setattr__(rigid_object, "_initialize_handle", None) + object.__setattr__(rigid_object, "_invalidate_initialize_handle", None) + object.__setattr__(rigid_object, "_prim_deletion_handle", None) + object.__setattr__(rigid_object, "_debug_vis_handle", None) + + # Newton uses wp.array for indices + object.__setattr__(rigid_object, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__(rigid_object, "_ALL_BODY_INDICES", wp.array(np.array([0], dtype=np.int32), device=device)) + + # Newton uses wp.bool masks + object.__setattr__(rigid_object, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(rigid_object, "_ALL_BODY_MASK", wp.ones((1,), dtype=wp.bool, device=device)) + + return rigid_object, mock_view + + +def create_mock_rigid_object( + num_instances: int = 2, + device: str = "cuda:0", +): + from isaaclab.test.mock_interfaces.assets.mock_rigid_object import MockRigidObject + + obj = MockRigidObject( + num_instances=num_instances, + device=device, + ) + return obj, None # No view for mock backend + + +def get_rigid_object( + backend: str, + num_instances: int = 2, + device: str = "cuda:0", +): + if backend == "physx": + return create_physx_rigid_object(num_instances, device) + elif backend == "newton": + return create_newton_rigid_object(num_instances, device) + elif backend.lower() == "mock": + return create_mock_rigid_object(num_instances, device) + else: + raise ValueError(f"Invalid backend: {backend}") + + +@pytest.fixture +def rigid_object_iface(request): + backend = request.getfixturevalue("backend") + num_instances = request.getfixturevalue("num_instances") + device = request.getfixturevalue("device") + return get_rigid_object(backend, num_instances, device) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _check_proxy_array(arr, *, expected_shape: tuple, expected_dtype: type, name: str): + """Assert that `arr` is a ProxyArray with the expected shape and dtype.""" + from isaaclab.utils.warp import ProxyArray + + assert isinstance(arr, ProxyArray), f"{name}: expected ProxyArray, got {type(arr)}" + assert arr.shape == expected_shape, f"{name}: expected shape {expected_shape}, got {arr.shape}" + assert arr.dtype == expected_dtype, f"{name}: expected dtype {expected_dtype}, got {arr.dtype}" + + +# Common parametrize decorators +_backends = pytest.mark.parametrize("backend", BACKENDS, indirect=False) + +_default_dims = pytest.mark.parametrize("num_instances", [1, 2, 100]) + +_default_devices = pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +_index_resolution_backends = pytest.mark.parametrize( + "backend", [backend for backend in ("physx", "newton") if backend in BACKENDS], indirect=False +) + + +# --------------------------------------------------------------------------- +# Tests: Index resolution helpers +# --------------------------------------------------------------------------- + + +class TestRigidObjectIndexResolution: + """Test backend-specific index resolution helpers.""" + + @_index_resolution_backends + def test_resolve_env_ids_handles_tensor_view_shape(self, backend): + obj, _ = get_rigid_object(backend, num_instances=4, device="cpu") + + env_ids = torch.arange(4, dtype=torch.int32, device="cpu") + resolved_full = obj._resolve_env_ids(env_ids) + resolved_view = obj._resolve_env_ids(env_ids[:2]) + + assert resolved_full.shape[0] == 4 + assert resolved_view.shape[0] == 2 + + +# --------------------------------------------------------------------------- +# Tests: RigidObject properties +# --------------------------------------------------------------------------- + + +class TestRigidObjectProperties: + """Test that rigid object properties return the correct types/values.""" + + @_backends + @_default_dims + @_default_devices + def test_num_instances(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + assert obj.num_instances == num_instances + + @_backends + @_default_dims + @_default_devices + def test_num_bodies(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + assert obj.num_bodies == 1 + + @_backends + @_default_dims + @_default_devices + def test_body_names(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + names = obj.body_names + assert isinstance(names, list) + assert len(names) == 1 + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_devices + def test_data_returns_rigid_object_data(self, backend, num_instances, device, rigid_object_iface): + from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData + + obj, _ = rigid_object_iface + assert isinstance(obj.data, BaseRigidObjectData) + + +# --------------------------------------------------------------------------- +# Tests: RigidObject finder methods +# --------------------------------------------------------------------------- + + +class TestRigidObjectFinders: + """Test that finder methods return (list[int], list[str]) tuples.""" + + @_backends + @_default_dims + @_default_devices + def test_find_bodies_all(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + indices, names = obj.find_bodies(".*") + assert isinstance(indices, list) and isinstance(names, list) + assert len(indices) == 1 + assert len(names) == 1 + assert all(isinstance(i, int) for i in indices) + assert all(isinstance(n, str) for n in names) + + @_backends + @_default_dims + @_default_devices + def test_find_bodies_single(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + first_body = obj.body_names[0] + indices, names = obj.find_bodies(first_body) + assert indices == [0] + assert names == [first_body] + + +# --------------------------------------------------------------------------- +# Tests: RigidObjectData root state properties +# --------------------------------------------------------------------------- + + +class TestRigidObjectDataRootState: + """Test data properties for root rigid body state.""" + + @_backends + @_default_dims + @_default_devices + def test_root_link_pose_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_pose_w, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="root_link_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="root_link_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_pose_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_pose_w, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="root_com_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="root_com_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_pos_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_pos_w, expected_shape=(num_instances,), expected_dtype=wp.vec3f, name="root_link_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_quat_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_quat_w, expected_shape=(num_instances,), expected_dtype=wp.quatf, name="root_link_quat_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_lin_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_lin_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_ang_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_ang_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_pos_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_pos_w, expected_shape=(num_instances,), expected_dtype=wp.vec3f, name="root_com_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_quat_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_quat_w, expected_shape=(num_instances,), expected_dtype=wp.quatf, name="root_com_quat_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_lin_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_lin_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_ang_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_ang_vel_w, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_ang_vel_w", + ) + + +# --------------------------------------------------------------------------- +# Tests: RigidObjectData derived properties +# --------------------------------------------------------------------------- + + +class TestRigidObjectDataDerivedProperties: + """Test derived/computed data properties.""" + + @_backends + @_default_dims + @_default_devices + def test_projected_gravity_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.projected_gravity_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="projected_gravity_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_heading_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.heading_w, expected_shape=(num_instances,), expected_dtype=wp.float32, name="heading_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_lin_vel_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_lin_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_link_ang_vel_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_link_ang_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_link_ang_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_lin_vel_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_lin_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_lin_vel_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_root_com_ang_vel_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.root_com_ang_vel_b, + expected_shape=(num_instances,), + expected_dtype=wp.vec3f, + name="root_com_ang_vel_b", + ) + + +# --------------------------------------------------------------------------- +# Tests: RigidObjectData body state properties +# --------------------------------------------------------------------------- + + +class TestRigidObjectDataBodyState: + """Test data properties for all body states.""" + + @_backends + @_default_dims + @_default_devices + def test_body_link_pose_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_pose_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.transformf, + name="body_link_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_vel_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.spatial_vectorf, + name="body_link_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pose_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pose_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.transformf, + name="body_com_pose_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_vel_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.spatial_vectorf, + name="body_com_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_acc_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_acc_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.spatial_vectorf, + name="body_com_acc_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pose_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pose_b, + expected_shape=(num_instances, 1), + expected_dtype=wp.transformf, + name="body_com_pose_b", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_mass(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_mass, expected_shape=(num_instances, 1), expected_dtype=wp.float32, name="body_mass" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_inertia(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_inertia, expected_shape=(num_instances, 1, 9), expected_dtype=wp.float32, name="body_inertia" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_pos_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_pos_w, expected_shape=(num_instances, 1), expected_dtype=wp.vec3f, name="body_link_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_quat_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_quat_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.quatf, + name="body_link_quat_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_lin_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_lin_vel_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.vec3f, + name="body_link_lin_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_link_ang_vel_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_link_ang_vel_w, + expected_shape=(num_instances, 1), + expected_dtype=wp.vec3f, + name="body_link_ang_vel_w", + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pos_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pos_w, expected_shape=(num_instances, 1), expected_dtype=wp.vec3f, name="body_com_pos_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_quat_w(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_quat_w, expected_shape=(num_instances, 1), expected_dtype=wp.quatf, name="body_com_quat_w" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_pos_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_pos_b, expected_shape=(num_instances, 1), expected_dtype=wp.vec3f, name="body_com_pos_b" + ) + + @_backends + @_default_dims + @_default_devices + def test_body_com_quat_b(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.body_com_quat_b, expected_shape=(num_instances, 1), expected_dtype=wp.quatf, name="body_com_quat_b" + ) + + +# --------------------------------------------------------------------------- +# Tests: RigidObjectData defaults +# --------------------------------------------------------------------------- + + +class TestRigidObjectDataDefaults: + """Test default state properties.""" + + @_backends + @_default_dims + @_default_devices + def test_default_root_pose(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.default_root_pose, + expected_shape=(num_instances,), + expected_dtype=wp.transformf, + name="default_root_pose", + ) + + @_backends + @_default_dims + @_default_devices + def test_default_root_vel(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + _check_proxy_array( + obj.data.default_root_vel, + expected_shape=(num_instances,), + expected_dtype=wp.spatial_vectorf, + name="default_root_vel", + ) + + +# --------------------------------------------------------------------------- +# Writer/setter test helpers +# --------------------------------------------------------------------------- + +# Map warp structured dtypes to their torch trailing dimension size. +_WP_DTYPE_TO_TRAILING = { + wp.transformf: 7, + wp.spatial_vectorf: 6, + wp.vec2f: 2, + wp.float32: 0, # no trailing dimension +} + + +def _make_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create valid torch test data for a given warp dtype.""" + trailing = _WP_DTYPE_TO_TRAILING[wp_dtype] + if trailing: + full_shape = (*shape, trailing) + else: + full_shape = shape + data = torch.zeros(full_shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., 6] = 1.0 # identity quat w + elif wp_dtype == wp.vec2f: + data[..., 0] = -1.0 + data[..., 1] = 1.0 + elif wp_dtype == wp.float32: + data.fill_(1.0) + return data + + +def _make_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create valid warp test data for a given warp dtype.""" + t = _make_data_torch(shape, device, wp_dtype) + if wp_dtype == wp.float32: + return wp.from_torch(t, dtype=wp.float32) + return wp.from_torch(t.contiguous(), dtype=wp_dtype) + + +def _make_bad_data_torch(shape: tuple, device: str, wp_dtype=wp.float32) -> torch.Tensor: + """Create torch data with wrong leading shape for negative testing.""" + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_torch(bad_shape, device, wp_dtype) + + +def _make_bad_data_warp(shape: tuple, device: str, wp_dtype=wp.float32) -> wp.array: + """Create warp data with wrong leading shape for negative testing.""" + bad_shape = (shape[0] + 1,) + shape[1:] + return _make_data_warp(bad_shape, device, wp_dtype) + + +def _make_env_mask(num_instances: int, device: str, partial: bool) -> wp.array | None: + """Create an env_mask: None for all envs, or a partial bool mask.""" + if not partial: + return None + mask_np = np.zeros(num_instances, dtype=bool) + mask_np[0] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +def _make_env_ids(device: str, subset: bool) -> torch.Tensor | None: + """Create env_ids: None for all envs, or [0] for a subset.""" + if not subset: + return None + return torch.tensor([0], dtype=torch.int32, device=device) + + +def _make_item_mask(total: int, selected: list[int], device: str) -> wp.array: + """Create a bool warp mask with True at `selected` indices, False elsewhere.""" + mask_np = np.zeros(total, dtype=bool) + for i in selected: + mask_np[i] = True + return wp.array(mask_np, dtype=wp.bool, device=device) + + +# --------------------------------------------------------------------------- +# Tests: Root writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +_ROOT_POSE_METHODS = ["root_pose", "root_link_pose", "root_com_pose"] +_ROOT_VEL_METHODS = ["root_velocity", "root_link_velocity", "root_com_velocity"] + + +class TestRigidObjectWritersRoot: + """Test root pose/velocity writers with all input combinations.""" + + # -- index variants -- + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_POSE_METHODS) + def test_write_root_pose_to_sim_index(self, backend, num_instances, device, rigid_object_iface, method_suffix): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + method(root_pose=_make_data_torch((num_instances,), device, wp.transformf)) + # torch, subset + method(root_pose=_make_data_torch((1,), device, wp.transformf), env_ids=_make_env_ids(device, True)) + # warp, all envs + method(root_pose=_make_data_warp((num_instances,), device, wp.transformf)) + # warp, subset + method(root_pose=_make_data_warp((1,), device, wp.transformf), env_ids=_make_env_ids(device, True)) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_torch((num_instances,), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_warp((num_instances,), device, wp.transformf)) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_VEL_METHODS) + def test_write_root_velocity_to_sim_index(self, backend, num_instances, device, rigid_object_iface, method_suffix): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_index") + + # torch, all envs + method(root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf)) + # torch, subset + method(root_velocity=_make_data_torch((1,), device, wp.spatial_vectorf), env_ids=_make_env_ids(device, True)) + # warp, all envs + method(root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf)) + # warp, subset + method(root_velocity=_make_data_warp((1,), device, wp.spatial_vectorf), env_ids=_make_env_ids(device, True)) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_torch((num_instances,), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_warp((num_instances,), device, wp.spatial_vectorf)) + + # -- mask variants -- + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_POSE_METHODS) + def test_write_root_pose_to_sim_mask(self, backend, num_instances, device, rigid_object_iface, method_suffix): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_mask") + + # torch, no mask (all) + method(root_pose=_make_data_torch((num_instances,), device, wp.transformf)) + # torch, partial mask + method( + root_pose=_make_data_torch((num_instances,), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # warp, no mask + method(root_pose=_make_data_warp((num_instances,), device, wp.transformf)) + # warp, partial mask + method( + root_pose=_make_data_warp((num_instances,), device, wp.transformf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_torch((num_instances,), device, wp.transformf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_pose=_make_bad_data_warp((num_instances,), device, wp.transformf)) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize("method_suffix", _ROOT_VEL_METHODS) + def test_write_root_velocity_to_sim_mask(self, backend, num_instances, device, rigid_object_iface, method_suffix): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + method = getattr(obj, f"write_{method_suffix}_to_sim_mask") + + # torch, no mask + method(root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf)) + # torch, partial mask + method( + root_velocity=_make_data_torch((num_instances,), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # warp, no mask + method(root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf)) + # warp, partial mask + method( + root_velocity=_make_data_warp((num_instances,), device, wp.spatial_vectorf), + env_mask=_make_env_mask(num_instances, device, True), + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_torch((num_instances,), device, wp.spatial_vectorf)) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(root_velocity=_make_bad_data_warp((num_instances,), device, wp.spatial_vectorf)) + + +# --------------------------------------------------------------------------- +# Tests: Body writers — torch/warp × index/mask × all/subset × negative +# --------------------------------------------------------------------------- + +# (method_name, kwarg_name, wp_dtype, trailing_dim) +_BODY_METHODS = [ + ("set_masses", "masses", wp.float32, 0), + ("set_coms", "coms", wp.transformf, 7), + ("set_inertias", "inertias", wp.float32, 9), +] + + +class TestRigidObjectWritersBody: + """Test body property writers/setters with all input combinations.""" + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_index( + self, backend, num_instances, device, rigid_object_iface, method_base, kwarg, wp_dtype, trailing + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton set_coms expects vec3f (position only), not transformf (pose)") + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + num_bodies = 1 + method = getattr(obj, f"{method_base}_index") + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_b = 1 # rigid object always has 1 body + sub_body_ids = [0] + + # torch, all envs + all bodies + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, subset + method( + **{ + kwarg: _make_torch(1, sub_b), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # warp, all envs + all bodies + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, subset + method( + **{ + kwarg: _make_warp(1, sub_b), + "body_ids": sub_body_ids, + "env_ids": _make_env_ids(device, True), + } + ) + # negative: bad torch shape (extra env) + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + @_backends + @_default_dims + @_default_devices + @pytest.mark.parametrize( + "method_base, kwarg, wp_dtype, trailing", + _BODY_METHODS, + ids=[m[0] for m in _BODY_METHODS], + ) + def test_body_writer_mask( + self, backend, num_instances, device, rigid_object_iface, method_base, kwarg, wp_dtype, trailing + ): + if backend == "newton" and method_base == "set_coms": + pytest.xfail("Newton set_coms expects vec3f (position only), not transformf (pose)") + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + num_bodies = 1 + method = getattr(obj, f"{method_base}_mask") + + def _torch_shape(n_envs, n_bods): + if trailing: + return (n_envs, n_bods, trailing) + return (n_envs, n_bods) + + def _make_torch(n_envs, n_bods): + shape = _torch_shape(n_envs, n_bods) + data = torch.ones(shape, device=device, dtype=torch.float32) + if wp_dtype == wp.transformf: + data[..., :3] = 0.0 + data[..., 3:6] = 0.0 + data[..., 6] = 1.0 + return data + + def _make_warp(n_envs, n_bods): + t = _make_torch(n_envs, n_bods) + if wp_dtype == wp.transformf: + return wp.from_torch(t.contiguous(), dtype=wp.transformf) + return wp.from_torch(t.contiguous(), dtype=wp.float32) + + sub_body_sel = [0] + + # torch, no mask + method(**{kwarg: _make_torch(num_instances, num_bodies)}) + # torch, partial env_mask + body_mask + method( + **{ + kwarg: _make_torch(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # warp, no mask + method(**{kwarg: _make_warp(num_instances, num_bodies)}) + # warp, partial env_mask + body_mask + method( + **{ + kwarg: _make_warp(num_instances, num_bodies), + "body_mask": _make_item_mask(num_bodies, sub_body_sel, device), + "env_mask": _make_env_mask(num_instances, device, True), + } + ) + # negative: bad torch shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_torch(num_instances + 1, num_bodies)}) + # negative: bad warp shape + with pytest.raises((AssertionError, RuntimeError)): + method(**{kwarg: _make_warp(num_instances + 1, num_bodies)}) + + +# --------------------------------------------------------------------------- +# Tests: Alias/shorthand properties +# --------------------------------------------------------------------------- + + +class TestRigidObjectDataAliases: + """Test that alias properties return the same shape/dtype as their canonical counterparts.""" + + @_backends + @_default_dims + @_default_devices + def test_root_aliases(self, backend, num_instances, device, rigid_object_iface): + """root_pose_w == root_link_pose_w, root_vel_w == root_com_vel_w, etc.""" + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + d = obj.data + + assert d.root_pose_w.shape == d.root_link_pose_w.shape + assert d.root_pose_w.dtype == d.root_link_pose_w.dtype + assert d.root_pos_w.shape == d.root_link_pos_w.shape + assert d.root_quat_w.shape == d.root_link_quat_w.shape + + assert d.root_vel_w.shape == d.root_com_vel_w.shape + assert d.root_vel_w.dtype == d.root_com_vel_w.dtype + assert d.root_lin_vel_w.shape == d.root_com_lin_vel_w.shape + assert d.root_ang_vel_w.shape == d.root_com_ang_vel_w.shape + + @_backends + @_default_dims + @_default_devices + def test_body_aliases(self, backend, num_instances, device, rigid_object_iface): + obj, _ = rigid_object_iface + obj.data.update(dt=0.01) + d = obj.data + + assert d.body_pose_w.shape == d.body_link_pose_w.shape + assert d.body_pos_w.shape == d.body_link_pos_w.shape + assert d.body_quat_w.shape == d.body_link_quat_w.shape + assert d.body_vel_w.shape == d.body_com_vel_w.shape + assert d.body_lin_vel_w.shape == d.body_com_lin_vel_w.shape + assert d.body_ang_vel_w.shape == d.body_com_ang_vel_w.shape diff --git a/source/isaaclab/test/benchmark/test_benchmark_core.py b/source/isaaclab/test/benchmark/test_benchmark_core.py new file mode 100644 index 000000000000..d679880d82ce --- /dev/null +++ b/source/isaaclab/test/benchmark/test_benchmark_core.py @@ -0,0 +1,323 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for BaseIsaacLabBenchmark class.""" + +import json +import os +import tempfile + +import pytest + +from isaaclab.test.benchmark import backends +from isaaclab.test.benchmark.benchmark_core import BaseIsaacLabBenchmark +from isaaclab.test.benchmark.measurements import SingleMeasurement, StringMetadata + +# ============================================================================== +# BaseIsaacLabBenchmark Tests +# ============================================================================== + + +class TestBaseIsaacLabBenchmark: + """Tests for BaseIsaacLabBenchmark.""" + + @pytest.fixture + def temp_output_dir(self): + """Create a temporary output directory.""" + with tempfile.TemporaryDirectory() as tmpdir: + yield tmpdir + + @pytest.fixture(autouse=True) + def reset_backends(self): + """Reset backend instances before each test.""" + backends.MetricsBackend.reset_instances() + yield + backends.MetricsBackend.reset_instances() + + def test_initialization(self, temp_output_dir): + """Test benchmark initializes correctly.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + output_prefix="test", + ) + assert benchmark.benchmark_name == "test_benchmark" + assert benchmark.output_path == temp_output_dir + assert "test_" in benchmark.output_prefix + + def test_initialization_creates_output_dir(self): + """Test that initialization creates output directory if it doesn't exist.""" + with tempfile.TemporaryDirectory() as tmpdir: + output_path = os.path.join(tmpdir, "nested", "output") + _benchmark = BaseIsaacLabBenchmark( # noqa: F841 + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=output_path, + use_recorders=False, + ) + assert os.path.exists(output_path) + + def test_initialization_with_recorders(self, temp_output_dir): + """Test benchmark initializes with recorders enabled.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=True, + ) + assert benchmark._use_recorders is True + assert "CPUInfo" in benchmark._manual_recorders + assert "GPUInfo" in benchmark._manual_recorders + assert "MemoryInfo" in benchmark._manual_recorders + assert "VersionInfo" in benchmark._manual_recorders + + def test_initialization_without_recorders(self, temp_output_dir): + """Test benchmark initializes with recorders disabled.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + ) + assert benchmark._use_recorders is False + assert not hasattr(benchmark, "_manual_recorders") or benchmark._manual_recorders is None + + def test_add_measurement(self, temp_output_dir): + """Test adding measurements to phases.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + ) + measurement = SingleMeasurement(name="test_metric", value=42.0, unit="ms") + benchmark.add_measurement("test_phase", measurement=measurement) + assert "test_phase" in benchmark._phases + assert len(benchmark._phases["test_phase"].measurements) == 1 + assert benchmark._phases["test_phase"].measurements[0].name == "test_metric" + + def test_add_multiple_measurements(self, temp_output_dir): + """Test adding multiple measurements to a phase.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + ) + measurements = [ + SingleMeasurement(name="metric1", value=10.0, unit="ms"), + SingleMeasurement(name="metric2", value=20.0, unit="ms"), + ] + benchmark.add_measurement("test_phase", measurement=measurements) + assert len(benchmark._phases["test_phase"].measurements) == 2 + + def test_add_metadata(self, temp_output_dir): + """Test adding metadata to phases.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + ) + metadata = StringMetadata(name="test_key", data="test_value") + benchmark.add_measurement("test_phase", metadata=metadata) + assert "test_phase" in benchmark._phases + # Phase metadata includes automatic "phase" and "workflow_name" entries plus our custom one + assert len(benchmark._phases["test_phase"].metadata) == 3 + metadata_names = [m.name for m in benchmark._phases["test_phase"].metadata] + assert "test_key" in metadata_names + assert "phase" in metadata_names + assert "workflow_name" in metadata_names + + def test_update_manual_recorders(self, temp_output_dir): + """Test updating manual recorders.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=True, + ) + # Should not raise + benchmark.update_manual_recorders() + # Check recorders were updated - CPUInfoRecorder has _n attribute + assert benchmark._manual_recorders["CPUInfo"]._n >= 1 + assert benchmark._manual_recorders["MemoryInfo"]._rss_n >= 1 + + def test_update_manual_recorders_disabled(self, temp_output_dir): + """Test that update_manual_recorders is a no-op when recorders are disabled.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + ) + # Should not raise + benchmark.update_manual_recorders() + + def test_finalize_generates_output(self, temp_output_dir): + """Test that finalize creates output file.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=True, + output_prefix="test", + ) + benchmark.add_measurement( + "runtime", measurement=SingleMeasurement(name="execution_time", value=100.5, unit="ms") + ) + benchmark.update_manual_recorders() + benchmark._finalize_impl() + + # Check output file exists + assert os.path.exists(benchmark.output_file_path) + + def test_finalize_output_contains_measurements(self, temp_output_dir): + """Test that finalized output contains added measurements.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + output_prefix="test", + ) + benchmark.add_measurement( + "runtime", measurement=SingleMeasurement(name="execution_time", value=100.5, unit="ms") + ) + benchmark._finalize_impl() + + # Read and verify output + with open(benchmark.output_file_path) as f: + data = json.load(f) + + # Check that runtime phase is present with our measurement + assert "runtime" in data + assert "execution_time" in data["runtime"] + assert data["runtime"]["execution_time"] == 100.5 + + def test_finalize_cleans_up_recorders(self, temp_output_dir): + """Test that finalize cleans up recorders.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="test_benchmark", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=True, + output_prefix="test", + ) + benchmark.add_measurement( + "runtime", measurement=SingleMeasurement(name="execution_time", value=100.5, unit="ms") + ) + benchmark.update_manual_recorders() + benchmark._finalize_impl() + + # Recorders should be set to None + assert benchmark._manual_recorders is None + assert benchmark._frametime_recorders is None + + def test_workflow_metadata_in_output(self, temp_output_dir): + """Test that workflow name and timestamp metadata are in output.""" + benchmark = BaseIsaacLabBenchmark( + benchmark_name="my_workflow", + backend_type="omniperf", + output_path=temp_output_dir, + use_recorders=False, + output_prefix="test", + ) + benchmark._finalize_impl() + + with open(benchmark.output_file_path) as f: + data = json.load(f) + + # Check benchmark_info phase has workflow metadata + assert "benchmark_info" in data + assert "workflow_name" in data["benchmark_info"] + assert data["benchmark_info"]["workflow_name"] == "my_workflow" + assert "timestamp" in data["benchmark_info"] + + +# ============================================================================== +# MetricsBackend Factory Tests +# ============================================================================== + + +class TestMetricsBackendFactory: + """Tests for MetricsBackend factory class.""" + + @pytest.fixture + def temp_output_dir(self): + """Create a temporary output directory.""" + with tempfile.TemporaryDirectory() as tmpdir: + yield tmpdir + + @pytest.fixture(autouse=True) + def reset_backends(self): + """Reset backend instances before each test.""" + backends.MetricsBackend.reset_instances() + yield + backends.MetricsBackend.reset_instances() + + def test_get_json_backend(self): + """Test getting JSON backend instance.""" + backend = backends.MetricsBackend.get_instance("json") + assert isinstance(backend, backends.JSONFileMetrics) + + def test_get_osmo_backend(self): + """Test getting Osmo backend instance.""" + backend = backends.MetricsBackend.get_instance("osmo") + assert isinstance(backend, backends.OsmoKPIFile) + + def test_get_omniperf_backend(self): + """Test getting OmniPerf backend instance.""" + backend = backends.MetricsBackend.get_instance("omniperf") + assert isinstance(backend, backends.OmniPerfKPIFile) + + def test_get_summary_backend(self): + """Test getting Summary backend instance.""" + backend = backends.MetricsBackend.get_instance("summary") + assert isinstance(backend, backends.SummaryMetrics) + + def test_summary_backend_finalize_writes_json(self, temp_output_dir): + """Test that SummaryMetrics finalize writes JSON output (and does not raise).""" + backend = backends.MetricsBackend.get_instance("summary") + from isaaclab.test.benchmark.measurements import StringMetadata, TestPhase + + phase = TestPhase(phase_name="runtime") + phase.measurements.append(SingleMeasurement(name="Test FPS", value=60.0, unit="FPS")) + phase.metadata.append(StringMetadata(name="runtime workflow_name", data="summary_test")) + phase.metadata.append(StringMetadata(name="runtime phase", data="runtime")) + backend.add_metrics(phase) + output_path = temp_output_dir + output_filename = "summary_test" + backend.finalize(output_path, output_filename) + expected_path = os.path.join(output_path, f"{output_filename}.json") + assert os.path.exists(expected_path) + with open(expected_path) as f: + data = json.load(f) + assert isinstance(data, list) and len(data) >= 1 + assert any(p.get("phase_name") == "runtime" for p in data) + + def test_invalid_backend_type_raises_error(self): + """Test that invalid backend type raises ValueError.""" + with pytest.raises(ValueError, match="Unknown backend type"): + backends.MetricsBackend.get_instance("invalid_type") + + def test_backend_instance_is_cached(self): + """Test that backend instances are cached and reused.""" + backend1 = backends.MetricsBackend.get_instance("omniperf") + backend2 = backends.MetricsBackend.get_instance("omniperf") + assert backend1 is backend2 + + def test_reset_instances(self): + """Test that reset_instances clears the cache.""" + backend1 = backends.MetricsBackend.get_instance("omniperf") + backends.MetricsBackend.reset_instances() + backend2 = backends.MetricsBackend.get_instance("omniperf") + assert backend1 is not backend2 + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab/test/benchmark/test_recorders.py b/source/isaaclab/test/benchmark/test_recorders.py new file mode 100644 index 000000000000..fc519bb051d8 --- /dev/null +++ b/source/isaaclab/test/benchmark/test_recorders.py @@ -0,0 +1,559 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for benchmark recorder classes.""" + +import pytest + +from isaaclab.test.benchmark.interfaces import MeasurementData +from isaaclab.test.benchmark.recorders.record_cpu_info import CPUInfoRecorder +from isaaclab.test.benchmark.recorders.record_gpu_info import GPUInfoRecorder +from isaaclab.test.benchmark.recorders.record_memory_info import MemoryInfoRecorder +from isaaclab.test.benchmark.recorders.record_version_info import VersionInfoRecorder + +# ============================================================================== +# CPUInfoRecorder Tests +# ============================================================================== + + +class TestCPUInfoRecorder: + """Tests for CPUInfoRecorder.""" + + @pytest.fixture + def recorder(self): + """Create a CPUInfoRecorder fixture.""" + return CPUInfoRecorder() + + def test_initialization(self, recorder): + """Test that CPUInfoRecorder initializes correctly.""" + assert recorder._cpu_hardware_info is not None + assert recorder._cpu_runtime_info is not None + assert recorder._mean == 0 + assert recorder._std == 0 + assert recorder._n == 0 + assert recorder._m2 == 0 + + def test_get_initial_data_structure(self, recorder): + """Test that get_initial_data returns correct structure.""" + data = recorder.get_initial_data() + assert "cpu_metadata" in data + assert "physical_cores" in data["cpu_metadata"] + assert "name" in data["cpu_metadata"] + + def test_get_initial_data_values(self, recorder): + """Test that get_initial_data returns valid values.""" + data = recorder.get_initial_data() + assert isinstance(data["cpu_metadata"]["physical_cores"], int) + assert data["cpu_metadata"]["physical_cores"] > 0 + assert isinstance(data["cpu_metadata"]["name"], str) + + def test_update_increments_count(self, recorder): + """Test that update increments the sample count.""" + assert recorder._n == 0 + recorder.update() + assert recorder._n == 1 + recorder.update() + assert recorder._n == 2 + + def test_get_runtime_data_after_updates(self, recorder): + """Test that get_runtime_data returns stats after updates.""" + for _ in range(5): + recorder.update() + + data = recorder.get_runtime_data() + assert "cpu_utilization" in data + assert "mean" in data["cpu_utilization"] + assert "std" in data["cpu_utilization"] + assert "n" in data["cpu_utilization"] + assert data["cpu_utilization"]["n"] == 5 + + def test_runtime_data_types(self, recorder): + """Test that runtime data has correct types.""" + for _ in range(3): + recorder.update() + + data = recorder.get_runtime_data() + assert isinstance(data["cpu_utilization"]["mean"], float) + assert isinstance(data["cpu_utilization"]["std"], float) + assert isinstance(data["cpu_utilization"]["n"], int) + + def test_get_data_returns_measurement_data(self, recorder): + """Test that get_data returns a MeasurementData object.""" + for _ in range(3): + recorder.update() + + data = recorder.get_data() + assert isinstance(data, MeasurementData) + assert len(data.measurements) == 3 + assert len(data.metadata) == 2 + + def test_get_data_measurement_names(self, recorder): + """Test that get_data returns measurements with correct names.""" + for _ in range(3): + recorder.update() + + data = recorder.get_data() + names = [m.name for m in data.measurements] + assert "CPU Utilization" in names + assert "CPU Utilization std" in names + assert "CPU Utilization n" in names + + def test_get_data_metadata_names(self, recorder): + """Test that get_data returns metadata with correct names.""" + recorder.update() + data = recorder.get_data() + names = [m.name for m in data.metadata] + assert "cpu_name" in names + assert "physical_cores" in names + + +# ============================================================================== +# GPUInfoRecorder Tests +# ============================================================================== + + +class TestGPUInfoRecorder: + """Tests for GPUInfoRecorder.""" + + @pytest.fixture + def recorder(self): + """Create a GPUInfoRecorder fixture.""" + return GPUInfoRecorder() + + def test_initialization(self, recorder): + """Test that GPUInfoRecorder initializes correctly.""" + assert recorder._gpu_hardware_info is not None + assert recorder._gpu_runtime_info is not None + # These are now lists (one entry per GPU) + assert isinstance(recorder._mem_mean, list) + assert isinstance(recorder._mem_n, list) + assert isinstance(recorder._util_mean, list) + assert isinstance(recorder._util_n, list) + + def test_get_initial_data_structure(self, recorder): + """Test that get_initial_data returns correct structure.""" + data = recorder.get_initial_data() + assert "gpu_metadata" in data + assert "available" in data["gpu_metadata"] + + def test_get_initial_data_with_gpu(self, recorder): + """Test hardware info when GPU is available.""" + data = recorder.get_initial_data() + if data["gpu_metadata"]["available"]: + assert "devices" in data["gpu_metadata"] + assert "device_count" in data["gpu_metadata"] + assert "current_device" in data["gpu_metadata"] + # Check first device has expected fields + assert len(data["gpu_metadata"]["devices"]) > 0 + device = data["gpu_metadata"]["devices"][0] + assert "name" in device + assert "total_memory_gb" in device + assert "compute_capability" in device + assert "multi_processor_count" in device + + def test_multiple_gpu_info(self, recorder): + """Test that all GPUs are recorded.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + device_count = data["gpu_metadata"]["device_count"] + assert len(data["gpu_metadata"]["devices"]) == device_count + # Each device should have an index + for i, device in enumerate(data["gpu_metadata"]["devices"]): + assert device["index"] == i + + def test_update_increments_count(self, recorder): + """Test that update increments the sample count.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + assert all(n == 0 for n in recorder._mem_n) + recorder.update() + assert all(n == 1 for n in recorder._mem_n) + recorder.update() + assert all(n == 2 for n in recorder._mem_n) + + def test_get_runtime_data_after_updates(self, recorder): + """Test that get_runtime_data returns stats after updates.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + for _ in range(5): + recorder.update() + + runtime_data = recorder.get_runtime_data() + assert "gpu_utilization" in runtime_data + assert "devices" in runtime_data["gpu_utilization"] + # Check first device + device_runtime = runtime_data["gpu_utilization"]["devices"][0] + assert "memory_used_mean_bytes" in device_runtime + assert "memory_used_std_bytes" in device_runtime + assert "memory_n" in device_runtime + assert device_runtime["memory_n"] == 5 + + def test_runtime_data_types(self, recorder): + """Test that runtime data has correct types.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + for _ in range(3): + recorder.update() + + runtime_data = recorder.get_runtime_data() + device_runtime = runtime_data["gpu_utilization"]["devices"][0] + assert isinstance(device_runtime["memory_used_mean_bytes"], float) + assert isinstance(device_runtime["memory_used_std_bytes"], float) + assert isinstance(device_runtime["memory_n"], int) + + def test_memory_values_non_negative(self, recorder): + """Test that memory values are non-negative.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + for _ in range(5): + recorder.update() + + runtime_data = recorder.get_runtime_data() + for device_runtime in runtime_data["gpu_utilization"]["devices"]: + assert device_runtime["memory_used_mean_bytes"] >= 0 + assert device_runtime["memory_used_std_bytes"] >= 0 + + def test_get_data_returns_measurement_data(self, recorder): + """Test that get_data returns a MeasurementData object.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + for _ in range(3): + recorder.update() + + measurement_data = recorder.get_data() + assert isinstance(measurement_data, MeasurementData) + # GPU data includes measurements (memory and utilization stats) + # 6 measurements per GPU: memory (mean, std, n) + utilization (mean, std, n) + num_gpus = data["gpu_metadata"]["device_count"] + assert len(measurement_data.measurements) == 6 * num_gpus + # 4 metadata entries: device_count, current_device, cuda_version, gpu_devices dict + assert len(measurement_data.metadata) == 4 + + def test_get_data_metadata_names(self, recorder): + """Test that get_data returns metadata with correct names.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + recorder.update() + measurement_data = recorder.get_data() + names = [m.name for m in measurement_data.metadata] + # Global metadata + assert "gpu_device_count" in names + assert "gpu_current_device" in names + assert "cuda_version" in names + # Per-device data in dict + assert "gpu_devices" in names + + def test_get_data_devices_dict_structure(self, recorder): + """Test that gpu_devices dict contains per-device data.""" + data = recorder.get_initial_data() + if not data["gpu_metadata"]["available"]: + pytest.skip("GPU not available") + + for _ in range(3): + recorder.update() + + measurement_data = recorder.get_data() + # Find the gpu_devices metadata + gpu_devices = None + for m in measurement_data.metadata: + if m.name == "gpu_devices": + gpu_devices = m.data + break + + assert gpu_devices is not None + device_count = data["gpu_metadata"]["device_count"] + assert len(gpu_devices) == device_count + + # Check first device has expected hardware fields + device_0 = gpu_devices["0"] + assert "name" in device_0 + assert "total_memory_gb" in device_0 + assert "compute_capability" in device_0 + assert "multi_processor_count" in device_0 + + +# ============================================================================== +# MemoryInfoRecorder Tests +# ============================================================================== + + +class TestMemoryInfoRecorder: + """Tests for MemoryInfoRecorder.""" + + @pytest.fixture + def recorder(self): + """Create a MemoryInfoRecorder fixture.""" + return MemoryInfoRecorder() + + def test_initialization(self, recorder): + """Test that MemoryInfoRecorder initializes correctly.""" + assert recorder._memory_hardware_info is not None + assert recorder._memory_runtime_info is not None + assert recorder._rss_mean == 0 + assert recorder._rss_n == 0 + assert recorder._vms_mean == 0 + assert recorder._vms_n == 0 + assert recorder._uss_mean == 0 + assert recorder._uss_n == 0 + + def test_get_initial_data_structure(self, recorder): + """Test that get_initial_data returns correct structure.""" + data = recorder.get_initial_data() + assert "memory_metadata" in data + assert "total_ram_gb" in data["memory_metadata"] + + def test_get_initial_data_values(self, recorder): + """Test that get_initial_data returns valid values.""" + data = recorder.get_initial_data() + assert isinstance(data["memory_metadata"]["total_ram_gb"], float) + assert data["memory_metadata"]["total_ram_gb"] > 0 + + def test_update_increments_count(self, recorder): + """Test that update increments the sample count.""" + assert recorder._rss_n == 0 + assert recorder._vms_n == 0 + recorder.update() + assert recorder._rss_n == 1 + assert recorder._vms_n == 1 + recorder.update() + assert recorder._rss_n == 2 + assert recorder._vms_n == 2 + + def test_get_runtime_data_after_updates(self, recorder): + """Test that get_runtime_data returns stats after updates.""" + for _ in range(5): + recorder.update() + + data = recorder.get_runtime_data() + assert "memory_utilization" in data + # RSS stats + assert "rss_mean" in data["memory_utilization"] + assert "rss_std" in data["memory_utilization"] + assert "rss_n" in data["memory_utilization"] + # VMS stats + assert "vms_mean" in data["memory_utilization"] + assert "vms_std" in data["memory_utilization"] + assert "vms_n" in data["memory_utilization"] + # Check counts + assert data["memory_utilization"]["rss_n"] == 5 + assert data["memory_utilization"]["vms_n"] == 5 + + def test_runtime_data_types(self, recorder): + """Test that runtime data has correct types.""" + for _ in range(3): + recorder.update() + + data = recorder.get_runtime_data() + assert isinstance(data["memory_utilization"]["rss_mean"], float) + assert isinstance(data["memory_utilization"]["rss_std"], float) + assert isinstance(data["memory_utilization"]["rss_n"], int) + assert isinstance(data["memory_utilization"]["vms_mean"], float) + assert isinstance(data["memory_utilization"]["vms_std"], float) + assert isinstance(data["memory_utilization"]["vms_n"], int) + + def test_memory_values_positive(self, recorder): + """Test that memory values are positive.""" + for _ in range(5): + recorder.update() + + data = recorder.get_runtime_data() + assert data["memory_utilization"]["rss_mean"] > 0 + assert data["memory_utilization"]["vms_mean"] > 0 + + def test_std_non_negative(self, recorder): + """Test that standard deviation values are non-negative.""" + for _ in range(5): + recorder.update() + + data = recorder.get_runtime_data() + assert data["memory_utilization"]["rss_std"] >= 0 + assert data["memory_utilization"]["vms_std"] >= 0 + + def test_get_data_returns_measurement_data(self, recorder): + """Test that get_data returns a MeasurementData object.""" + for _ in range(3): + recorder.update() + + data = recorder.get_data() + assert isinstance(data, MeasurementData) + # 6 measurements for RSS and VMS (mean, std, n for each) + # Plus potentially 3 more for USS if available (mean, std, n) + assert len(data.measurements) >= 6 + assert len(data.measurements) <= 9 + assert len(data.metadata) == 1 + + def test_get_data_measurement_names(self, recorder): + """Test that get_data returns measurements with correct names.""" + for _ in range(3): + recorder.update() + + data = recorder.get_data() + names = [m.name for m in data.measurements] + # RSS measurements should always be present + assert "System Memory RSS" in names + assert "System Memory RSS std" in names + assert "System Memory RSS n" in names + # VMS measurements should always be present + assert "System Memory VMS" in names + assert "System Memory VMS std" in names + assert "System Memory VMS n" in names + # USS measurements may be present depending on platform + # We don't assert their presence since they're platform-dependent + + def test_get_data_metadata_names(self, recorder): + """Test that get_data returns metadata with correct names.""" + recorder.update() + data = recorder.get_data() + names = [m.name for m in data.metadata] + assert "total_ram_gb" in names + + +# ============================================================================== +# VersionInfoRecorder Tests +# ============================================================================== + + +class TestVersionInfoRecorder: + """Tests for VersionInfoRecorder.""" + + @pytest.fixture + def recorder(self): + """Create a VersionInfoRecorder fixture.""" + return VersionInfoRecorder() + + def test_initialization(self, recorder): + """Test that VersionInfoRecorder initializes correctly.""" + assert recorder._version_info is not None + assert recorder._dev_info is not None + + def test_get_initial_data_structure(self, recorder): + """Test that get_initial_data returns correct structure.""" + data = recorder.get_initial_data() + assert "version_metadata" in data + assert "dev" in data + + def test_captures_core_versions(self, recorder): + """Test that core package versions are captured.""" + data = recorder.get_initial_data() + versions = data["version_metadata"] + # These should always be available in the test environment + assert "torch" in versions + assert "numpy" in versions + assert "isaaclab" in versions + + def test_version_values_are_strings(self, recorder): + """Test that version values are strings.""" + data = recorder.get_initial_data() + for version in data["version_metadata"].values(): + assert isinstance(version, str) + assert len(version) > 0 + + def test_git_info_structure(self, recorder): + """Test that git info has expected fields when available.""" + data = recorder.get_initial_data() + dev = data["dev"] + # If git info is available, check structure + if dev: + # At least one of these should be present if git is available + possible_keys = ["commit_hash", "commit_hash_short", "branch", "commit_date", "dirty"] + assert any(key in dev for key in possible_keys) + + def test_commit_hash_format(self, recorder): + """Test that commit hash has correct format when available.""" + data = recorder.get_initial_data() + dev = data["dev"] + if "commit_hash" in dev: + # Full hash should be 40 hex characters + assert len(dev["commit_hash"]) == 40 + assert all(c in "0123456789abcdef" for c in dev["commit_hash"]) + if "commit_hash_short" in dev: + # Short hash should be 8 characters + assert len(dev["commit_hash_short"]) == 8 + + def test_update_is_noop(self, recorder): + """Test that update doesn't change anything.""" + data_before = recorder.get_initial_data() + recorder.update() + data_after = recorder.get_initial_data() + assert data_before == data_after + + def test_get_runtime_data_is_empty(self, recorder): + """Test that runtime data is empty (versions don't change).""" + data = recorder.get_runtime_data() + assert data == {} + + def test_get_data_returns_measurement_data(self, recorder): + """Test that get_data returns a MeasurementData object.""" + data = recorder.get_data() + assert isinstance(data, MeasurementData) + # No measurements, only metadata + assert len(data.measurements) == 0 + # Should have metadata for versions + dev info + assert len(data.metadata) > 0 + + def test_get_data_metadata_names(self, recorder): + """Test that get_data returns metadata with version names.""" + data = recorder.get_data() + names = [m.name for m in data.metadata] + # Check that version suffixes are present + assert "torch_version" in names + assert "numpy_version" in names + assert "isaaclab_version" in names + # Dev info is now in a DictMetadata entry named "dev" if git info is available + # We check if it's present (it may not be in all environments) + if any(name == "dev" for name in names): + # If dev metadata is present, verify it's a dict + dev_meta = next(m for m in data.metadata if m.name == "dev") + assert hasattr(dev_meta, "data") + assert isinstance(dev_meta.data, dict) + + +# ============================================================================== +# Welford's Algorithm Verification Tests +# ============================================================================== + + +class TestWelfordAlgorithm: + """Tests to verify Welford's algorithm implementation in recorders.""" + + def test_memory_recorder_welford_convergence(self): + """Test that MemoryInfoRecorder's Welford implementation produces stable results.""" + recorder = MemoryInfoRecorder() + + # Run many updates + for _ in range(100): + recorder.update() + + data = recorder.get_runtime_data() + + # Mean should be positive (process is using memory) + assert data["memory_utilization"]["rss_mean"] > 0 + + # Std should be defined after multiple samples + assert data["memory_utilization"]["rss_n"] == 100 + + def test_single_update_std_is_zero(self): + """Test that std is zero after a single update (no variance with one sample).""" + recorder = MemoryInfoRecorder() + recorder.update() + + data = recorder.get_runtime_data() + # With n=1, std should be 0 (or undefined, but we initialize to 0) + assert data["memory_utilization"]["rss_std"] == 0 + assert data["memory_utilization"]["vms_std"] == 0 diff --git a/source/isaaclab/test/cli/test_install.py b/source/isaaclab/test/cli/test_install.py new file mode 100644 index 000000000000..13494cec696d --- /dev/null +++ b/source/isaaclab/test/cli/test_install.py @@ -0,0 +1,224 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for CLI utility functions used by the uv installation path.""" + +import os +import subprocess +import sys +from pathlib import Path +from unittest import mock + +import pytest + +from isaaclab.cli.utils import ( + determine_python_version, + extract_isaacsim_path, + extract_python_exe, + get_pip_command, +) + + +def _python_in_venv(venv: Path) -> Path: + if sys.platform == "win32": + return venv / "Scripts" / "python.exe" + return venv / "bin" / "python" + + +def _python_for_conda(base: Path) -> Path: + if sys.platform == "win32": + return base / "python.exe" + return base / "bin" / "python" + + +# --------------------------------------------------------------------------- +# get_pip_command +# --------------------------------------------------------------------------- + + +class TestGetPipCommand: + """Tests for :func:`get_pip_command`.""" + + def test_returns_uv_pip_in_venv_without_pip_module(self, tmp_path): + """When VIRTUAL_ENV is set, uv is on PATH, and pip module is missing, return uv pip.""" + fake_python = str(tmp_path / "python") + + with ( + mock.patch.dict(os.environ, {"VIRTUAL_ENV": str(tmp_path)}), + mock.patch("isaaclab.cli.utils.shutil.which", return_value="/usr/bin/uv"), + mock.patch( + "isaaclab.cli.utils.subprocess.run", + return_value=subprocess.CompletedProcess(args=[], returncode=1), + ), + ): + result = get_pip_command(python_exe=fake_python) + assert result == ["uv", "pip"] + + def test_returns_uv_pip_in_venv_with_uv(self, tmp_path): + """When VIRTUAL_ENV is set and uv is on PATH, always return uv pip.""" + fake_python = str(tmp_path / "python") + + with ( + mock.patch.dict(os.environ, {"VIRTUAL_ENV": str(tmp_path)}), + mock.patch("isaaclab.cli.utils.shutil.which", return_value="/usr/bin/uv"), + ): + result = get_pip_command(python_exe=fake_python) + assert result == ["uv", "pip"] + + def test_returns_python_pip_without_uv(self, tmp_path): + """When uv is not installed, always return python -m pip.""" + fake_python = str(tmp_path / "python") + + with ( + mock.patch.dict(os.environ, {"VIRTUAL_ENV": str(tmp_path)}), + mock.patch("isaaclab.cli.utils.shutil.which", return_value=None), + ): + result = get_pip_command(python_exe=fake_python) + assert result == [fake_python, "-m", "pip"] + + def test_returns_python_pip_in_conda_without_uv(self, tmp_path): + """When in a conda env and uv is not available, return python -m pip.""" + fake_python = str(tmp_path / "python") + + env = os.environ.copy() + env.pop("VIRTUAL_ENV", None) + env["CONDA_PREFIX"] = str(tmp_path) + with ( + mock.patch.dict(os.environ, env, clear=True), + mock.patch("isaaclab.cli.utils.shutil.which", return_value=None), + ): + result = get_pip_command(python_exe=fake_python) + assert result == [fake_python, "-m", "pip"] + + +# --------------------------------------------------------------------------- +# extract_python_exe +# --------------------------------------------------------------------------- + + +class TestExtractPythonExe: + """Tests for :func:`extract_python_exe`.""" + + def test_uses_virtual_env_when_set(self, tmp_path): + """Should return the venv Python when VIRTUAL_ENV is set.""" + venv_python = _python_in_venv(tmp_path) + venv_python.parent.mkdir(parents=True, exist_ok=True) + venv_python.touch() + + with mock.patch.dict(os.environ, {"VIRTUAL_ENV": str(tmp_path)}, clear=False): + result = extract_python_exe() + assert Path(result) == venv_python + + def test_uses_conda_prefix_when_no_venv(self, tmp_path): + """Should return conda Python when CONDA_PREFIX is set and no VIRTUAL_ENV.""" + conda_python = _python_for_conda(tmp_path) + conda_python.parent.mkdir(parents=True, exist_ok=True) + conda_python.touch() + + env = os.environ.copy() + env.pop("VIRTUAL_ENV", None) + env["CONDA_PREFIX"] = str(tmp_path) + with mock.patch.dict(os.environ, env, clear=True): + result = extract_python_exe() + assert Path(result) == conda_python + + +# --------------------------------------------------------------------------- +# extract_isaacsim_path +# --------------------------------------------------------------------------- + + +class TestExtractIsaacsimPath: + """Tests for :func:`extract_isaacsim_path`.""" + + def test_returns_none_when_not_required(self): + """When required=False and Isaac Sim is not found, return None.""" + with ( + mock.patch("isaaclab.cli.utils.DEFAULT_ISAAC_SIM_PATH", Path("/nonexistent/path")), + mock.patch( + "isaaclab.cli.utils.subprocess.run", + return_value=subprocess.CompletedProcess(args=[], returncode=1), + ), + ): + result = extract_isaacsim_path(required=False) + assert result is None + + def test_exits_when_required(self): + """When required=True and Isaac Sim is not found, sys.exit.""" + with ( + mock.patch("isaaclab.cli.utils.DEFAULT_ISAAC_SIM_PATH", Path("/nonexistent/path")), + mock.patch( + "isaaclab.cli.utils.subprocess.run", + return_value=subprocess.CompletedProcess(args=[], returncode=1), + ), + pytest.raises(SystemExit), + ): + extract_isaacsim_path(required=True) + + def test_returns_path_when_symlink_exists(self, tmp_path): + """When the default path exists, return it.""" + fake_sim = tmp_path / "_isaac_sim" + fake_sim.mkdir() + + with mock.patch("isaaclab.cli.utils.DEFAULT_ISAAC_SIM_PATH", fake_sim): + result = extract_isaacsim_path(required=True) + assert result == fake_sim + + +# --------------------------------------------------------------------------- +# determine_python_version +# --------------------------------------------------------------------------- + + +class TestDeterminePythonVersion: + """Tests for :func:`determine_python_version`.""" + + def test_defaults_to_3_12_when_no_sim(self): + """Without Isaac Sim, should default to python 3.12 (Isaac Sim 6.x requirement).""" + with ( + mock.patch("isaaclab.cli.utils.extract_isaacsim_path", return_value=None), + mock.patch("importlib.metadata.version", side_effect=Exception("not found")), + ): + result = determine_python_version() + assert result == "3.12" + + def test_returns_3_11_for_sim_5(self, tmp_path): + """Isaac Sim 5.x should map to Python 3.11.""" + version_file = tmp_path / "VERSION" + version_file.write_text("5.0.0") + + with mock.patch("isaaclab.cli.utils.extract_isaacsim_path", return_value=tmp_path): + result = determine_python_version() + assert result == "3.11" + + def test_returns_3_12_for_sim_6(self, tmp_path): + """Isaac Sim 6.x should map to Python 3.12.""" + version_file = tmp_path / "VERSION" + version_file.write_text("6.0.0") + + with mock.patch("isaaclab.cli.utils.extract_isaacsim_path", return_value=tmp_path): + result = determine_python_version() + assert result == "3.12" + + def test_raises_for_unknown_version(self, tmp_path): + """Unknown Isaac Sim version should raise RuntimeError.""" + version_file = tmp_path / "VERSION" + version_file.write_text("99.0.0") + + with ( + mock.patch("isaaclab.cli.utils.extract_isaacsim_path", return_value=tmp_path), + pytest.raises(RuntimeError, match="Unsupported Isaac Sim version"), + ): + determine_python_version() + + def test_uses_package_metadata_when_no_version_file(self, tmp_path): + """Should fall back to importlib.metadata when VERSION file doesn't exist.""" + # tmp_path exists but has no VERSION file + with ( + mock.patch("isaaclab.cli.utils.extract_isaacsim_path", return_value=tmp_path), + mock.patch("importlib.metadata.version", return_value="5.1.0"), + ): + result = determine_python_version() + assert result == "3.11" diff --git a/source/isaaclab/test/cli/test_install_commands.py b/source/isaaclab/test/cli/test_install_commands.py new file mode 100644 index 000000000000..a7c89ccd9d53 --- /dev/null +++ b/source/isaaclab/test/cli/test_install_commands.py @@ -0,0 +1,786 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for install command functions. + +Covers all combinations of: +- Python environment types: uv venv, pip venv, conda, Isaac Sim kit Python, system Python +- Isaac Sim installation methods: local _isaac_sim symlink, pip-installed isaacsim, none +""" + +import subprocess +from contextlib import contextmanager +from pathlib import Path +from unittest import mock + +import pytest + +from isaaclab.cli.commands.install import ( + _PREBUNDLE_REPOINT_PACKAGES, + _ensure_cuda_torch, + _maybe_uninstall_prebundled_torch, + _repoint_prebundle_packages, + _torch_first_on_sys_path_is_prebundle, +) + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _cp(returncode: int = 0, stdout: str = "") -> mock.MagicMock: + """Return a mock CompletedProcess with the given returncode and stdout.""" + r = mock.MagicMock(spec=subprocess.CompletedProcess) + r.returncode = returncode + r.stdout = stdout + return r + + +def _make_prebundle(base: Path, packages: list[str]) -> Path: + """Create a fake pip_prebundle directory populated with the given package dirs.""" + prebundle = base / "pip_prebundle" + prebundle.mkdir(parents=True) + for pkg in packages: + (prebundle / pkg).mkdir() + return prebundle + + +def _make_site_packages( + base: Path, + packages: list[str], + subdirs: dict[str, list[str]] | None = None, +) -> Path: + """Create a fake site-packages directory. + + Args: + packages: Top-level package directory names to create. + subdirs: Optional mapping of package name → list of subdirectory names to create inside it. + """ + site_pkgs = base / "site-packages" + site_pkgs.mkdir(parents=True, exist_ok=True) + for pkg in packages: + (site_pkgs / pkg).mkdir(exist_ok=True) + for pkg, subs in (subdirs or {}).items(): + for sub in subs: + (site_pkgs / pkg / sub).mkdir(parents=True, exist_ok=True) + return site_pkgs + + +# --------------------------------------------------------------------------- +# _torch_first_on_sys_path_is_prebundle +# --------------------------------------------------------------------------- + + +class TestTorchProbe: + """Tests for :func:`_torch_first_on_sys_path_is_prebundle`. + + The function shells out to ``python_exe -c `` and interprets the + subprocess exit code: 1 → prebundle is first; 0 → it is not. + """ + + def test_returns_true_when_prebundle_first(self, tmp_path): + """Probe exits 1 → the first torch on sys.path is under a pip_prebundle directory.""" + with mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(returncode=1)): + result = _torch_first_on_sys_path_is_prebundle( + str(tmp_path / "python"), + env={"PYTHONPATH": "/fake/extsDeprecated/pip_prebundle"}, + ) + assert result is True + + def test_returns_false_when_site_packages_first(self, tmp_path): + """Probe exits 0 → the first torch on sys.path is in regular site-packages.""" + with mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(returncode=0)): + result = _torch_first_on_sys_path_is_prebundle( + str(tmp_path / "python"), + env={"PYTHONPATH": "/conda/lib/python3.12/site-packages"}, + ) + assert result is False + + def test_returns_false_when_torch_not_found_anywhere(self, tmp_path): + """Probe exits 0 (no torch on sys.path at all) → returns False.""" + with mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(returncode=0)): + result = _torch_first_on_sys_path_is_prebundle( + str(tmp_path / "python"), + env={}, + ) + assert result is False + + def test_passes_env_to_subprocess(self, tmp_path): + """The custom env dict is forwarded to run_command.""" + env_sent = {"PYTHONPATH": "/some/path"} + with mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0)) as mock_run: + _torch_first_on_sys_path_is_prebundle(str(tmp_path / "python"), env=env_sent) + call_kwargs = mock_run.call_args + assert call_kwargs.kwargs.get("env") == env_sent or ( + len(call_kwargs.args) > 1 and call_kwargs.args[1] == env_sent + ) + + +# --------------------------------------------------------------------------- +# _maybe_uninstall_prebundled_torch +# --------------------------------------------------------------------------- + + +class TestMaybeUninstallTorch: + """Tests for :func:`_maybe_uninstall_prebundled_torch`.""" + + def test_does_not_uninstall_when_probe_false(self, tmp_path): + """When the probe returns False, no pip uninstall command is issued.""" + py = str(tmp_path / "python") + with ( + mock.patch( + "isaaclab.cli.commands.install._torch_first_on_sys_path_is_prebundle", + return_value=False, + ), + mock.patch("isaaclab.cli.commands.install.run_command") as mock_run, + ): + _maybe_uninstall_prebundled_torch(py, [py, "-m", "pip"], using_uv=False, probe_env={}) + mock_run.assert_not_called() + + def test_uninstalls_torch_stack_with_minus_y_for_pip(self, tmp_path): + """When probe returns True and pip is in use, uninstall includes -y flag.""" + py = str(tmp_path / "python") + with ( + mock.patch( + "isaaclab.cli.commands.install._torch_first_on_sys_path_is_prebundle", + return_value=True, + ), + mock.patch("isaaclab.cli.commands.install.run_command") as mock_run, + ): + _maybe_uninstall_prebundled_torch(py, [py, "-m", "pip"], using_uv=False, probe_env={}) + mock_run.assert_called_once() + issued = mock_run.call_args[0][0] + assert "uninstall" in issued + assert "-y" in issued + assert "torch" in issued + assert "torchvision" in issued + assert "torchaudio" in issued + + def test_uninstalls_torch_stack_without_minus_y_for_uv(self, tmp_path): + """When probe returns True and uv pip is in use, uninstall omits -y (uv doesn't accept it).""" + with ( + mock.patch( + "isaaclab.cli.commands.install._torch_first_on_sys_path_is_prebundle", + return_value=True, + ), + mock.patch("isaaclab.cli.commands.install.run_command") as mock_run, + ): + _maybe_uninstall_prebundled_torch("/fake/python", ["uv", "pip"], using_uv=True, probe_env={}) + issued = mock_run.call_args[0][0] + assert "uninstall" in issued + assert "-y" not in issued + + def test_probe_receives_original_pythonpath(self, tmp_path): + """The probe_env dict is forwarded unchanged to the torch-probe function.""" + py = str(tmp_path / "python") + probe_env = {"PYTHONPATH": "/a/extsDeprecated/pip_prebundle:/b/site-packages"} + with mock.patch( + "isaaclab.cli.commands.install._torch_first_on_sys_path_is_prebundle", + return_value=False, + ) as mock_probe: + _maybe_uninstall_prebundled_torch(py, [py, "-m", "pip"], using_uv=False, probe_env=probe_env) + mock_probe.assert_called_once_with(py, env=probe_env) + + +# --------------------------------------------------------------------------- +# _ensure_cuda_torch — architecture × environment combinations +# --------------------------------------------------------------------------- + + +class TestEnsureCudaTorch: + """Tests for :func:`_ensure_cuda_torch` across architectures and environment types. + + Combinations tested: + - Architecture: x86 (cu128) vs ARM (cu130) + - Pip command: ``python -m pip`` (venv/conda/kit) vs ``uv pip`` (uv venv) + - Torch state: already installed at correct version; wrong CUDA tag; not installed + """ + + # ---- x86 scenarios ------------------------------------------------------- + + def test_x86_skips_install_when_correct_version_present(self, tmp_path): + """x86: torch 2.10.0+cu128 already installed → pip install is not called.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + pip_show_out = "Name: torch\nVersion: 2.10.0+cu128\n" + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, pip_show_out)) as mock_run, + ): + _ensure_cuda_torch() + + # Only the initial ``pip show torch`` call; no install. + assert mock_run.call_count == 1 + assert "show" in mock_run.call_args[0][0] + + def test_x86_installs_cu128_when_torch_missing(self, tmp_path): + """x86: no torch installed → installs torch+cu128 from pytorch.org/whl/cu128.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + return _cp(0, "") # pip show returns nothing → torch absent + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + install_cmds = [c for c in calls if "install" in c] + combined = " ".join(str(t) for c in install_cmds for t in c) + assert "cu128" in combined + assert "torch" in combined + + def test_x86_reinstalls_when_wrong_cuda_tag(self, tmp_path): + """x86: torch+cu130 installed (ARM build) → uninstalls and reinstalls as cu128.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + stdout = "Name: torch\nVersion: 2.10.0+cu130\n" if "show" in cmd else "" + return _cp(0, stdout) + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + assert any("uninstall" in c for c in calls), "Expected an uninstall call" + install_cmds = [c for c in calls if "install" in c] + combined = " ".join(str(t) for c in install_cmds for t in c) + assert "cu128" in combined + + # ---- ARM scenarios ------------------------------------------------------- + + def test_arm_installs_cu130_when_torch_missing(self, tmp_path): + """ARM: no torch installed → installs torch+cu130 from pytorch.org/whl/cu130.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + return _cp(0, "") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=True), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + install_cmds = [c for c in calls if "install" in c] + combined = " ".join(str(t) for c in install_cmds for t in c) + assert "cu130" in combined + + def test_arm_skips_install_when_correct_version_present(self, tmp_path): + """ARM: torch 2.10.0+cu130 already installed → pip install is not called.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + pip_show_out = "Name: torch\nVersion: 2.10.0+cu130\n" + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=True), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, pip_show_out)) as mock_run, + ): + _ensure_cuda_torch() + + assert mock_run.call_count == 1 + + def test_arm_reinstalls_when_wrong_cuda_tag(self, tmp_path): + """ARM: torch+cu128 installed (x86 build) → uninstalls and reinstalls as cu130.""" + py = str(tmp_path / "python") + pip_cmd = [py, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + stdout = "Name: torch\nVersion: 2.10.0+cu128\n" if "show" in cmd else "" + return _cp(0, stdout) + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=True), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + assert any("uninstall" in c for c in calls) + install_cmds = [c for c in calls if "install" in c] + combined = " ".join(str(t) for c in install_cmds for t in c) + assert "cu130" in combined + + # ---- uv venv environment ------------------------------------------------ + + def test_uv_venv_uses_uv_pip_command(self, tmp_path): + """In a uv venv get_pip_command returns ['uv', 'pip'] and uninstall omits -y.""" + py = str(tmp_path / "python") + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + return _cp(0, "") # no current torch → triggers install + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=["uv", "pip"]), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + assert calls[0][0] == "uv", "Expected uv as the pip command prefix" + uninstall_calls = [c for c in calls if "uninstall" in c] + assert uninstall_calls, "Expected an uninstall call before reinstall" + assert "-y" not in uninstall_calls[0], "uv pip uninstall must not include -y" + + # ---- conda / pip venv / kit Python environments ------------------------- + + def test_conda_uses_python_m_pip_with_minus_y(self, tmp_path): + """In a conda env (no uv), get_pip_command returns python -m pip; uninstall uses -y.""" + py = str(tmp_path / "conda" / "bin" / "python") + pip_cmd = [py, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + return _cp(0, "") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + uninstall_calls = [c for c in calls if "uninstall" in c] + assert uninstall_calls + assert "-y" in uninstall_calls[0], "pip uninstall must include -y" + assert py in uninstall_calls[0], "Expected python exe in pip command" + + def test_kit_python_uses_python_sh_as_pip_prefix(self, tmp_path): + """With Isaac Sim's kit Python, python.sh is the executable prefix in the pip command.""" + python_sh = str(tmp_path / "_isaac_sim" / "python.sh") + pip_cmd = [python_sh, "-m", "pip"] + calls: list[list[str]] = [] + + def _run(cmd, **kwargs): + calls.append(list(cmd)) + return _cp(0, "") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=python_sh), + mock.patch("isaaclab.cli.commands.install.get_pip_command", return_value=pip_cmd), + mock.patch("isaaclab.cli.commands.install.is_arm", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", side_effect=_run), + ): + _ensure_cuda_torch() + + assert calls[0][0] == python_sh + + +# --------------------------------------------------------------------------- +# _repoint_prebundle_packages — Isaac Sim install method × venv type +# --------------------------------------------------------------------------- + + +class TestRePointPrebundlePackages: + """Tests for :func:`_repoint_prebundle_packages`. + + Covers all combinations of: + - Isaac Sim installation method: local _isaac_sim symlink, pip-installed isaacsim, none + - Python environment / site-packages source: uv venv, pip venv, conda, kit Python + - nvidia namespace package special handling: cudnn present vs absent + """ + + # ---- shared fixtures / helpers ------------------------------------------ + + def _sim_with_prebundle(self, base: Path, packages: list[str]) -> tuple[Path, Path]: + """Create a minimal fake Isaac Sim tree containing a pip_prebundle dir. + + Returns ``(isaacsim_path, prebundle_dir)``. + """ + isaacsim_path = base / "isaac_sim" + isaacsim_path.mkdir(parents=True) + prebundle = isaacsim_path / "exts" / "some.ext" / "pip_prebundle" + prebundle.mkdir(parents=True) + for pkg in packages: + (prebundle / pkg).mkdir() + return isaacsim_path, prebundle + + @contextmanager + def _patch(self, isaacsim_path: Path | None, site_packages: Path, python_exe: str): + """Context manager that mocks all external calls in _repoint_prebundle_packages.""" + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=python_exe), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch( + "isaaclab.cli.commands.install.run_command", + return_value=_cp(0, str(site_packages)), + ), + ): + yield + + # ---- no Isaac Sim -------------------------------------------------------- + + def test_no_op_when_isaac_sim_absent(self, tmp_path): + """When Isaac Sim is not found, _repoint_prebundle_packages returns immediately without touching anything.""" + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=None), + mock.patch("isaaclab.cli.commands.install.run_command") as mock_run, + ): + _repoint_prebundle_packages() + mock_run.assert_not_called() + + # ---- no pip_prebundle directories ---------------------------------------- + + def test_no_op_when_no_pip_prebundle_dirs(self, tmp_path): + """When Isaac Sim has no pip_prebundle directories, nothing is repointed.""" + isaacsim_path = tmp_path / "isaac_sim" + isaacsim_path.mkdir() + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert not (site_pkgs.parent / "pip_prebundle" / "torch").exists() + + # ---- local _isaac_sim symlink (local build) ------------------------------ + + def test_local_build_symlinks_torch_to_venv_site_packages(self, tmp_path): + """Local _isaac_sim symlink + uv/pip venv: prebundle torch → venv site-packages/torch.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + symlink = prebundle / "torch" + assert symlink.is_symlink(), "torch should be a symlink after repoint" + assert symlink.resolve() == (site_pkgs / "torch").resolve() + assert (prebundle / "torch.bak").is_dir(), "Original torch should be backed up" + + def test_local_build_skips_nvidia_when_cudnn_absent_kit_python(self, tmp_path): + """Local build + kit Python: site-packages/nvidia has only 'srl' (no cudnn) → nvidia NOT repointed. + + This is the real-world failure mode that caused the libcudnn.so.9 import error: + kit Python's site-packages/nvidia has only the 'srl' namespace sub-package, so + replacing the prebundle's nvidia/ (which contains the CUDA shared libraries) with + a symlink to that stripped-down directory would make libcudnn.so.9 unreachable. + """ + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["nvidia"]) + # Simulate kit Python's site-packages: nvidia/ exists but contains only 'srl' + site_pkgs = _make_site_packages(tmp_path / "kit" / "python" / "site-packages", ["nvidia"]) + (site_pkgs / "nvidia" / "srl").mkdir() + py = str(tmp_path / "isaac_sim" / "python.sh") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert not (prebundle / "nvidia").is_symlink(), "nvidia must NOT be repointed when cudnn is missing" + assert (prebundle / "nvidia").is_dir(), "Original nvidia directory must be preserved" + + def test_local_build_repoints_nvidia_when_cudnn_present_venv(self, tmp_path): + """Local build + CUDA-capable venv: site-packages/nvidia has cudnn → nvidia IS repointed. + + This covers the conda or pip venv case where the user installed torch+cu128/cu130 + with its nvidia-cudnn-cu12 dependency, giving site-packages/nvidia/cudnn/. + """ + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["nvidia"]) + # Full CUDA venv: nvidia/ has cudnn and cublas + site_pkgs = _make_site_packages( + tmp_path / "env", + ["nvidia"], + subdirs={"nvidia": ["cudnn", "cublas"]}, + ) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + symlink = prebundle / "nvidia" + assert symlink.is_symlink(), "nvidia should be repointed when cudnn is present" + assert symlink.resolve() == (site_pkgs / "nvidia").resolve() + + def test_idempotent_when_symlink_already_correct(self, tmp_path): + """Calling _repoint_prebundle_packages twice does not break the symlinks.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", []) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "env" / "bin" / "python") + + # Pre-create the correct symlink (as if a previous install already ran). + (prebundle / "torch").symlink_to(site_pkgs / "torch") + original_target = (prebundle / "torch").resolve() + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert (prebundle / "torch").resolve() == original_target, "Correct symlink must not be changed" + + def test_updates_stale_symlink_pointing_to_old_env(self, tmp_path): + """A symlink from a previous venv that no longer matches current site-packages is updated.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", []) + site_pkgs = _make_site_packages(tmp_path / "env_new", ["torch"]) + old_env = _make_site_packages(tmp_path / "env_old", ["torch"]) + py = str(tmp_path / "env_new" / "bin" / "python") + + # Pre-create a stale symlink pointing at the old env. + (prebundle / "torch").symlink_to(old_env / "torch") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert (prebundle / "torch").resolve() == (site_pkgs / "torch").resolve(), "Stale symlink must be updated" + + def test_removes_old_backup_before_renaming(self, tmp_path): + """A pre-existing .bak directory is removed before the current package is backed up.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "env" / "bin" / "python") + + # Simulate leftover backup from a previous partial install. + old_backup = prebundle / "torch.bak" + old_backup.mkdir() + (old_backup / "stale_file.py").touch() + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert (prebundle / "torch").is_symlink(), "torch must be repointed" + # The old backup was replaced by the fresh backup. + assert (prebundle / "torch.bak").is_dir() + + # ---- pip-installed isaacsim (path found via import probe) ---------------- + + def test_pip_isaacsim_symlinks_torch(self, tmp_path): + """pip-installed isaacsim: extract_isaacsim_path() returns its directory and torch is repointed.""" + # With pip-installed isaacsim the path may be inside site-packages rather than a symlink + # at the repo root, but _repoint_prebundle_packages treats it identically. + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "pip_isaacsim", ["torch"]) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert (prebundle / "torch").is_symlink() + assert (prebundle / "torch").resolve() == (site_pkgs / "torch").resolve() + + def test_pip_isaacsim_skips_nvidia_without_cudnn(self, tmp_path): + """pip-installed isaacsim + no cudnn in site-packages → nvidia prebundle preserved.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "pip_isaacsim", ["nvidia"]) + # site-packages has nvidia/ but without a cudnn sub-package + site_pkgs = _make_site_packages(tmp_path / "env", ["nvidia"]) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert not (prebundle / "nvidia").is_symlink(), "nvidia must not be repointed without cudnn" + + # ---- different venv types ------------------------------------------------ + + def test_uv_venv_repoints_torch_using_venv_site_packages(self, tmp_path): + """uv venv: site-packages inside VIRTUAL_ENV is used as the symlink target.""" + venv_site = tmp_path / "env_uv" / "lib" / "python3.12" / "site-packages" + venv_site.mkdir(parents=True) + (venv_site / "torch").mkdir() + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + py = str(tmp_path / "env_uv" / "bin" / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(venv_site))), + ): + _repoint_prebundle_packages() + + assert (prebundle / "torch").is_symlink() + assert (prebundle / "torch").resolve() == (venv_site / "torch").resolve() + + def test_conda_repoints_torch_using_conda_site_packages(self, tmp_path): + """conda env: site-packages inside CONDA_PREFIX is used as the symlink target.""" + conda_site = tmp_path / "conda" / "lib" / "python3.12" / "site-packages" + conda_site.mkdir(parents=True) + (conda_site / "torch").mkdir() + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + py = str(tmp_path / "conda" / "bin" / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(conda_site))), + ): + _repoint_prebundle_packages() + + assert (prebundle / "torch").is_symlink() + assert (prebundle / "torch").resolve() == (conda_site / "torch").resolve() + + def test_conda_repoints_nvidia_when_full_cuda_torch_installed(self, tmp_path): + """conda env with nvidia-cudnn-cu12 installed: nvidia/ is repointed because cudnn subdir exists.""" + conda_site = tmp_path / "conda" / "lib" / "python3.12" / "site-packages" + conda_site.mkdir(parents=True) + (conda_site / "nvidia").mkdir() + (conda_site / "nvidia" / "cudnn").mkdir() + (conda_site / "nvidia" / "cublas").mkdir() + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["nvidia"]) + py = str(tmp_path / "conda" / "bin" / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(conda_site))), + ): + _repoint_prebundle_packages() + + assert (prebundle / "nvidia").is_symlink() + + def test_conda_skips_nvidia_when_no_cudnn(self, tmp_path): + """conda env without CUDA torch: site-packages/nvidia lacks cudnn → nvidia not repointed.""" + conda_site = tmp_path / "conda" / "lib" / "python3.12" / "site-packages" + conda_site.mkdir(parents=True) + (conda_site / "nvidia").mkdir() # exists but no cudnn inside + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["nvidia"]) + py = str(tmp_path / "conda" / "bin" / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(conda_site))), + ): + _repoint_prebundle_packages() + + assert not (prebundle / "nvidia").is_symlink() + + # ---- multiple prebundle directories ------------------------------------- + + def test_repoints_across_multiple_prebundle_dirs(self, tmp_path): + """When Isaac Sim has multiple pip_prebundle directories, each is processed.""" + isaacsim_path = tmp_path / "isaac_sim" + isaacsim_path.mkdir() + + # Two separate extension pip_prebundle dirs, each with torch. + pb1 = isaacsim_path / "exts" / "ext_a" / "pip_prebundle" + pb2 = isaacsim_path / "exts" / "ext_b" / "pip_prebundle" + for pb in (pb1, pb2): + pb.mkdir(parents=True) + (pb / "torch").mkdir() + + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + for pb in (pb1, pb2): + assert (pb / "torch").is_symlink(), f"torch in {pb} should be repointed" + + # ---- Windows: copy instead of symlink ----------------------------------- + + def test_copies_package_on_windows_instead_of_symlinking(self, tmp_path): + """On Windows, packages are copied rather than symlinked (Windows doesn't support posix symlinks).""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch"]) + (site_pkgs / "torch" / "version.py").write_text("__version__ = '2.10.0'") + py = str(tmp_path / "env" / "bin" / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=True), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(site_pkgs))), + ): + _repoint_prebundle_packages() + + torch_in_prebundle = prebundle / "torch" + assert torch_in_prebundle.is_dir(), "torch should be a directory (copy) on Windows" + assert not torch_in_prebundle.is_symlink(), "torch must not be a symlink on Windows" + assert (torch_in_prebundle / "version.py").exists(), "Copied file should be present" + + # ---- error handling ----------------------------------------------------- + + def test_oserror_on_one_package_does_not_abort_others(self, tmp_path): + """An OSError while repointing one package is logged and processing continues for others.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch", "torchvision"]) + site_pkgs = _make_site_packages(tmp_path / "env", ["torch", "torchvision"]) + py = str(tmp_path / "env" / "bin" / "python") + + original_symlink_to = Path.symlink_to + call_count: list[int] = [0] + + def _selective_symlink(self_path: Path, target: Path, **kwargs) -> None: + call_count[0] += 1 + # Fail on the first symlink_to call (torch) but succeed for others. + if call_count[0] == 1: + raise OSError("Permission denied") + return original_symlink_to(self_path, target, **kwargs) + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(0, str(site_pkgs))), + mock.patch.object(Path, "symlink_to", _selective_symlink), + ): + _repoint_prebundle_packages() # must not raise + + # torchvision (second package) must still be repointed despite torch failure. + assert (prebundle / "torchvision").is_symlink(), "torchvision must succeed after torch OSError" + + def test_skips_gracefully_when_site_packages_probe_fails(self, tmp_path): + """When the site-packages probe subprocess fails, _repoint_prebundle_packages is a no-op.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", ["torch"]) + py = str(tmp_path / "python") + + with ( + mock.patch("isaaclab.cli.commands.install.extract_isaacsim_path", return_value=isaacsim_path), + mock.patch("isaaclab.cli.commands.install.extract_python_exe", return_value=py), + mock.patch("isaaclab.cli.commands.install.is_windows", return_value=False), + # Probe subprocess exits non-zero + mock.patch("isaaclab.cli.commands.install.run_command", return_value=_cp(returncode=1, stdout="")), + ): + _repoint_prebundle_packages() + + assert not (prebundle / "torch").is_symlink(), "No symlink should be created when probe fails" + + # ---- all packages in the repoint list are covered ----------------------- + + @pytest.mark.parametrize("pkg_name", [p for p in _PREBUNDLE_REPOINT_PACKAGES if p != "nvidia"]) + def test_all_non_nvidia_packages_are_repointed(self, tmp_path, pkg_name): + """Every non-nvidia entry in _PREBUNDLE_REPOINT_PACKAGES is repointed when it exists.""" + isaacsim_path, prebundle = self._sim_with_prebundle(tmp_path / "sim", [pkg_name]) + site_pkgs = _make_site_packages(tmp_path / "env", [pkg_name]) + py = str(tmp_path / "env" / "bin" / "python") + + with self._patch(isaacsim_path, site_pkgs, py): + _repoint_prebundle_packages() + + assert (prebundle / pkg_name).is_symlink(), f"{pkg_name} should be repointed" + assert (prebundle / pkg_name).resolve() == (site_pkgs / pkg_name).resolve() diff --git a/source/isaaclab/test/cli/test_install_prebundle.py b/source/isaaclab/test/cli/test_install_prebundle.py new file mode 100644 index 000000000000..ef08d0a1a726 --- /dev/null +++ b/source/isaaclab/test/cli/test_install_prebundle.py @@ -0,0 +1,81 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for prebundle probe and _split_install_items. + +Supplements test_install_commands.py with tests that verify the probe +script text and the comma-separated install item parser. +""" + +from unittest import mock + +from isaaclab.cli.commands.install import ( + _split_install_items, + _torch_first_on_sys_path_is_prebundle, +) + +# --------------------------------------------------------------------------- +# _split_install_items +# --------------------------------------------------------------------------- + + +class TestSplitInstallItems: + """Tests for :func:`_split_install_items`.""" + + def test_single_item(self): + assert _split_install_items("assets") == ["assets"] + + def test_comma_separated(self): + assert _split_install_items("assets,tasks,rl") == ["assets", "tasks", "rl"] + + def test_with_spaces(self): + assert _split_install_items(" assets , tasks , rl ") == ["assets", "tasks", "rl"] + + def test_brackets_preserved(self): + """Commas inside brackets should not split.""" + assert _split_install_items("visualizers[rerun,newton],tasks") == [ + "visualizers[rerun,newton]", + "tasks", + ] + + def test_nested_brackets(self): + assert _split_install_items("a[b[c,d],e],f") == ["a[b[c,d],e]", "f"] + + def test_empty_string(self): + assert _split_install_items("") == [] + + def test_trailing_comma(self): + assert _split_install_items("assets,tasks,") == ["assets", "tasks"] + + def test_single_with_extra(self): + assert _split_install_items("visualizers[all]") == ["visualizers[all]"] + + +# --------------------------------------------------------------------------- +# _torch_first_on_sys_path_is_prebundle — probe script verification +# --------------------------------------------------------------------------- + + +class TestTorchProbeScriptContent: + """Verify that the probe script checks for 'pip_prebundle' not 'extsDeprecated'.""" + + def test_probe_script_checks_pip_prebundle(self): + """The inline Python probe must use 'pip_prebundle' as its path indicator.""" + import subprocess + + captured_cmd = None + + def fake_run(cmd, *, env=None, check=False, capture_output=False, text=False): + nonlocal captured_cmd + captured_cmd = cmd + return subprocess.CompletedProcess(args=cmd, returncode=0) + + with mock.patch("isaaclab.cli.commands.install.run_command", side_effect=fake_run): + _torch_first_on_sys_path_is_prebundle("/fake/python", env={}) + + assert captured_cmd is not None + probe_script = captured_cmd[2] # [python_exe, "-c", probe] + assert "pip_prebundle" in probe_script, "Probe must check for 'pip_prebundle'" + assert "extsDeprecated" not in probe_script, "Probe must NOT check only for 'extsDeprecated'" diff --git a/source/isaaclab/test/controllers/test_differential_ik.py b/source/isaaclab/test/controllers/test_differential_ik.py index 65ce828129f3..47abc78e0de6 100644 --- a/source/isaaclab/test/controllers/test_differential_ik.py +++ b/source/isaaclab/test/controllers/test_differential_ik.py @@ -14,10 +14,10 @@ import pytest import torch - -from isaacsim.core.cloner import GridCloner +import warp as wp import isaaclab.sim as sim_utils +from isaaclab import cloner from isaaclab.assets import Articulation from isaaclab.controllers import DifferentialIKController, DifferentialIKControllerCfg @@ -41,7 +41,7 @@ def sim(): # Wait for spawning stage = sim_utils.create_new_stage() # Constants - num_envs = 128 + num_envs = 1 # Load kit helper sim_cfg = sim_utils.SimulationCfg(dt=0.01) sim = sim_utils.SimulationContext(sim_cfg) @@ -52,24 +52,21 @@ def sim(): cfg = sim_utils.GroundPlaneCfg() cfg.func("/World/GroundPlane", cfg) - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") - env_prim_paths = cloner.generate_paths("/World/envs/env", num_envs) + # Create environment clones using Isaac Lab's cloner utilities + env_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = cloner.grid_transforms(num_envs, spacing=2.0, device=sim.device) # create source prim stage.DefinePrim(env_prim_paths[0], "Xform") # clone the env xform - cloner.clone( - source_prim_path=env_prim_paths[0], - prim_paths=env_prim_paths, - replicate_physics=True, - ) + cloner.usd_replicate(stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) - # Define goals for the arm + # Define goals for the arm (x, y, z, qx, qy, qz, qw) ee_goals_set = [ - [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0], - [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0], - [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0], + [0.5, 0.5, 0.7, 0, 0.707, 0, 0.707], + [0.5, -0.4, 0.6, 0.707, 0, 0, 0.707], + [0.5, 0, 0.5, 1.0, 0.0, 0.0, 0.0], ] ee_pose_b_des_set = torch.tensor(ee_goals_set, device=sim.device) @@ -77,8 +74,6 @@ def sim(): # Cleanup sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -157,8 +152,8 @@ def _run_ik_controller( ee_pose_b_des = torch.zeros(num_envs, diff_ik_controller.action_dim, device=sim.device) ee_pose_b_des[:] = ee_pose_b_des_set[current_goal_idx] # Compute current pose of the end-effector - ee_pose_w = robot.data.body_pose_w[:, ee_frame_idx] - root_pose_w = robot.data.root_pose_w + ee_pose_w = robot.data.body_pose_w.torch[:, ee_frame_idx] + root_pose_w = robot.data.root_pose_w.torch ee_pos_b, ee_quat_b = subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] ) @@ -172,22 +167,22 @@ def _run_ik_controller( pos_error, rot_error = compute_pose_error( ee_pos_b, ee_quat_b, ee_pose_b_des[:, 0:3], ee_pose_b_des[:, 3:7] ) - pos_error_norm = torch.norm(pos_error, dim=-1) - rot_error_norm = torch.norm(rot_error, dim=-1) + pos_error_norm = torch.linalg.norm(pos_error, dim=-1) + rot_error_norm = torch.linalg.norm(rot_error, dim=-1) # desired error (zer) des_error = torch.zeros_like(pos_error_norm) # check convergence torch.testing.assert_close(pos_error_norm, des_error, rtol=0.0, atol=1e-3) torch.testing.assert_close(rot_error_norm, des_error, rtol=0.0, atol=1e-3) # reset joint state - joint_pos = robot.data.default_joint_pos.clone() - joint_vel = robot.data.default_joint_vel.clone() + joint_pos = robot.data.default_joint_pos.torch.clone() + joint_vel = robot.data.default_joint_vel.torch.clone() # joint_pos *= sample_uniform(0.9, 1.1, joint_pos.shape, joint_pos.device) robot.write_joint_state_to_sim(joint_pos, joint_vel) robot.set_joint_position_target(joint_pos) robot.write_data_to_sim() # randomize root state yaw, ik should work regardless base rotation - root_state = robot.data.root_state_w.clone() + root_state = robot.data.root_state_w.torch.clone() root_state[:, 3:7] = random_yaw_orientation(num_envs, sim.device) robot.write_root_pose_to_sim(root_state[:, :7]) robot.write_root_velocity_to_sim(root_state[:, 7:]) @@ -204,14 +199,14 @@ def _run_ik_controller( # at reset, the jacobians are not updated to the latest state # so we MUST skip the first step # obtain quantities from simulation - jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] - ee_pose_w = robot.data.body_pose_w[:, ee_frame_idx] - root_pose_w = robot.data.root_pose_w + jacobian = wp.to_torch(robot.root_view.get_jacobians())[:, ee_jacobi_idx, :, arm_joint_ids] + ee_pose_w = robot.data.body_pose_w.torch[:, ee_frame_idx] + root_pose_w = robot.data.root_pose_w.torch base_rot = root_pose_w[:, 3:7] base_rot_matrix = matrix_from_quat(quat_inv(base_rot)) jacobian[:, :3, :] = torch.bmm(base_rot_matrix, jacobian[:, :3, :]) jacobian[:, 3:, :] = torch.bmm(base_rot_matrix, jacobian[:, 3:, :]) - joint_pos = robot.data.joint_pos[:, arm_joint_ids] + joint_pos = robot.data.joint_pos.torch[:, arm_joint_ids] # compute frame in root frame ee_pos_b, ee_quat_b = subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] diff --git a/source/isaaclab/test/controllers/test_ik_configs/README.md b/source/isaaclab/test/controllers/test_ik_configs/README.md index ccbdae06b52e..e18623e4db2c 100644 --- a/source/isaaclab/test/controllers/test_ik_configs/README.md +++ b/source/isaaclab/test/controllers/test_ik_configs/README.md @@ -49,9 +49,9 @@ From `g1_locomanipulation_robot_cfg.py`: - **Joint positions**: Standing pose with slight knee bend ### EEF Pose Format -Each pose: `[x, y, z, qw, qx, qy, qz]` +Each pose: `[x, y, z, qx, qy, qz, qw]` - **Position**: Cartesian coordinates relative to robot base frame -- **Orientation**: Quaternion relative to the world. Typically you want this to start in the same orientation as robot base. (e.g. if robot base is reset to (0.7071, 0.0, 0.0, 0.7071), hand pose should be the same) +- **Orientation**: Quaternion relative to the world. Typically you want this to start in the same orientation as robot base. (e.g. if robot base is reset to (0.0, 0.0, 0.7071, 0.7071), hand pose should be the same) **Note**: The system automatically compensates for hand rotational offsets, so specify orientations relative to the robot's reset orientation. @@ -82,12 +82,12 @@ Common test types: ```json "horizontal_movement": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.28, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.28, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 100, "repeat": 2, @@ -98,9 +98,9 @@ Common test types: ## Pose Guidelines ### Orientation Examples -- **Default**: `[0.7071, 0.0, 0.0, 0.7071]` (90° around X-axis) -- **Z-rotation**: `[0.5, 0.0, 0.0, 0.866]` (60° around Z) -- **Y-rotation**: `[0.866, 0.0, 0.5, 0.0]` (60° around Y) +- **Default**: `[0.0, 0.0, 0.7071, 0.7071]` (90° around X-axis) +- **Z-rotation**: `[0.0, 0.0, 0.866, 0.5]` (60° around Z) +- **Y-rotation**: `[0.0, 0.5, 0.0, 0.866]` (60° around Y) ## Testing Process diff --git a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json index f5d0d60717da..165d0cba8f4b 100644 --- a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json +++ b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.json @@ -1,20 +1,20 @@ { "tolerances": { - "position": 0.003, + "position": 0.025, "pd_position": 0.002, - "rotation": 0.017, + "rotation": 0.030, "check_errors": true }, "allowed_steps_to_settle": 50, "tests": { "horizontal_movement": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.28, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.28, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.28, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 15, "repeat": 2, @@ -22,12 +22,12 @@ }, "horizontal_small_movement": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.19, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.19, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.19, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.19, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 15, "repeat": 2, @@ -35,12 +35,12 @@ }, "stay_still": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 20, "repeat": 4, @@ -48,16 +48,16 @@ }, "vertical_movement": { "left_hand_pose": [ - [-0.18, 0.15, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.15, 0.9, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.15, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.15, 0.85, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.15, 0.9, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.15, 0.85, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.15, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.15, 0.9, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.15, 0.85, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.15, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.15, 0.85, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.15, 0.9, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.15, 0.85, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 30, "repeat": 2, @@ -65,14 +65,14 @@ }, "forward_waist_bending_movement": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.2, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.18, 0.3, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.2, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.18, 0.3, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.2, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.18, 0.3, 0.8, 0.7071, 0.0, 0.0, 0.7071] + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.2, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.18, 0.3, 0.8, 0.0, 0.0, 0.7071, 0.7071] ], "allowed_steps_per_motion": 60, "repeat": 2, @@ -80,27 +80,27 @@ }, "rotation_movements": { "left_hand_pose": [ - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.2, 0.11, 0.8, 0.6946, 0.1325, 0.1325, 0.6946], - [-0.2, 0.11, 0.8, 0.6533, 0.2706, 0.2706, 0.6533], - [-0.2, 0.11, 0.8, 0.5848, 0.3975, 0.3975, 0.5848], + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.2, 0.11, 0.8, 0.1325, 0.1325, 0.6946, 0.6946], + [-0.2, 0.11, 0.8, 0.2706, 0.2706, 0.6533, 0.6533], + [-0.2, 0.11, 0.8, 0.3975, 0.3975, 0.5848, 0.5848], [-0.2, 0.11, 0.8, 0.5, 0.5, 0.5, 0.5], - [-0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [-0.2, 0.11, 0.8, 0.6946, -0.1325, -0.1325, 0.6946], - [-0.2, 0.11, 0.8, 0.6533, -0.2706, -0.2706, 0.6533], - [-0.2, 0.11, 0.8, 0.5848, -0.3975, -0.3975, 0.5848], - [-0.2, 0.11, 0.8, 0.5, -0.5, -0.5, 0.5] + [-0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [-0.2, 0.11, 0.8, -0.1325, -0.1325, 0.6946, 0.6946], + [-0.2, 0.11, 0.8, -0.2706, -0.2706, 0.6533, 0.6533], + [-0.2, 0.11, 0.8, -0.3975, -0.3975, 0.5848, 0.5848], + [-0.2, 0.11, 0.8, -0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.2, 0.11, 0.8, 0.6946, -0.1325, -0.1325, 0.6946], - [0.2, 0.11, 0.8, 0.6533, -0.2706, -0.2706, 0.6533], - [0.2, 0.11, 0.8, 0.5848, -0.3975, -0.3975, 0.5848], - [0.2, 0.11, 0.8, 0.5, -0.5, -0.5, 0.5], - [0.18, 0.1, 0.8, 0.7071, 0.0, 0.0, 0.7071], - [0.2, 0.11, 0.8, 0.6946, 0.1325, 0.1325, 0.6946], - [0.2, 0.11, 0.8, 0.6533, 0.2706, 0.2706, 0.6533], - [0.2, 0.11, 0.8, 0.5848, 0.3975, 0.3975, 0.5848], + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.2, 0.11, 0.8, -0.1325, -0.1325, 0.6946, 0.6946], + [0.2, 0.11, 0.8, -0.2706, -0.2706, 0.6533, 0.6533], + [0.2, 0.11, 0.8, -0.3975, -0.3975, 0.5848, 0.5848], + [0.2, 0.11, 0.8, -0.5, -0.5, 0.5, 0.5], + [0.18, 0.1, 0.8, 0.0, 0.0, 0.7071, 0.7071], + [0.2, 0.11, 0.8, 0.1325, 0.1325, 0.6946, 0.6946], + [0.2, 0.11, 0.8, 0.2706, 0.2706, 0.6533, 0.6533], + [0.2, 0.11, 0.8, 0.3975, 0.3975, 0.5848, 0.5848], [0.2, 0.11, 0.8, 0.5, 0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 25, diff --git a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json index be40d7cf7abc..c666198517e4 100644 --- a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json +++ b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.json @@ -9,12 +9,12 @@ "tests": { "vertical_movement": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.23, 0.32, 1.2, 0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.32, 1.2, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.23, 0.32, 1.2, 0.5, -0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 8, "repeat": 2, @@ -22,12 +22,12 @@ }, "stay_still": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 8, "repeat": 4, @@ -35,12 +35,12 @@ }, "horizontal_movement": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.13, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.13, 0.32, 1.1, 0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.13, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.13, 0.32, 1.1, 0.5, -0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 8, "repeat": 2, @@ -48,12 +48,12 @@ }, "horizontal_small_movement": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.22, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.22, 0.32, 1.1, 0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.22, 0.32, 1.1, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.22, 0.32, 1.1, 0.5, -0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 8, "repeat": 2, @@ -61,12 +61,12 @@ }, "forward_waist_bending_movement": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.5, 1.05, 0.5, 0.5, -0.5, 0.5] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.23, 0.5, 1.05, 0.5, -0.5, 0.5, 0.5] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.5, 1.05, 0.5, 0.5, -0.5, 0.5] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.23, 0.5, 1.05, 0.5, -0.5, 0.5, 0.5] ], "allowed_steps_per_motion": 25, "repeat": 3, @@ -74,16 +74,16 @@ }, "rotation_movements": { "left_hand_pose": [ - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.32, 1.1, 0.7071, 0.7071, 0.0, 0.0], - [-0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [-0.23, 0.32, 1.1, 0.0, 0.0, -0.7071, 0.7071] + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.23, 0.32, 1.1, 0.7071, 0.0, 0.0, 0.7071], + [-0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [-0.23, 0.32, 1.1, 0.0, -0.7071, 0.7071, 0.0] ], "right_hand_pose": [ - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.32, 1.1, 0.0, 0.0, -0.7071, 0.7071], - [0.23, 0.28, 1.1, 0.5, 0.5, -0.5, 0.5], - [0.23, 0.32, 1.1, 0.7071, 0.7071, 0.0, 0.0] + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.23, 0.32, 1.1, 0.0, -0.7071, 0.7071, 0.0], + [0.23, 0.28, 1.1, 0.5, -0.5, 0.5, 0.5], + [0.23, 0.32, 1.1, 0.7071, 0.0, 0.0, 0.7071] ], "allowed_steps_per_motion": 10, "repeat": 2, diff --git a/source/isaaclab/test/controllers/test_local_frame_task.py b/source/isaaclab/test/controllers/test_local_frame_task.py index 69790724248a..e3ebec56c27b 100644 --- a/source/isaaclab/test/controllers/test_local_frame_task.py +++ b/source/isaaclab/test/controllers/test_local_frame_task.py @@ -5,14 +5,6 @@ """Test cases for LocalFrameTask class.""" -# Import pinocchio in the main script to force the use of the dependencies installed -# by IsaacLab and not the one installed by Isaac Sim -# pinocchio is required by the Pink IK controller -import sys - -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch omniverse app @@ -24,8 +16,8 @@ import pinocchio as pin import pytest -from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration +from isaaclab.controllers.pink_ik.pink_tasks import LocalFrameTask # class TestLocalFrameTask: # """Test suite for LocalFrameTask class.""" diff --git a/source/isaaclab/test/controllers/test_null_space_posture_task.py b/source/isaaclab/test/controllers/test_null_space_posture_task.py index a1d0e1ac50d5..23e75937374b 100644 --- a/source/isaaclab/test/controllers/test_null_space_posture_task.py +++ b/source/isaaclab/test/controllers/test_null_space_posture_task.py @@ -4,18 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause """Launch Isaac Sim Simulator first.""" -# Import pinocchio in the main script to force the use of the dependencies installed -# by IsaacLab and not the one installed by Isaac Sim -# pinocchio is required by the Pink IK controller -import sys - -if sys.platform != "win32": - import pinocchio # noqa: F401 - import pinocchio as pin # noqa: F401 -else: - import pinocchio # noqa: F401 - import pinocchio as pin # noqa: F401 - from isaaclab.app import AppLauncher # launch omniverse app @@ -24,12 +12,14 @@ """Unit tests for NullSpacePostureTask with simplified robot configuration using Pink library directly.""" import numpy as np +import pinocchio as pin import pytest from pink.configuration import Configuration from pink.tasks import FrameTask from pinocchio.robot_wrapper import RobotWrapper from isaaclab.controllers.pink_ik.null_space_posture_task import NullSpacePostureTask +from isaaclab.controllers.pink_ik.pink_task_cfg import NullSpacePostureTaskCfg class TestNullSpacePostureTaskSimplifiedRobot: @@ -73,15 +63,17 @@ def tasks(self): return [ FrameTask("left_hand_pitch_link", position_cost=1.0, orientation_cost=1.0), NullSpacePostureTask( - cost=1.0, - controlled_frames=["left_hand_pitch_link"], - controlled_joints=[ - "waist_yaw_joint", - "waist_pitch_joint", - "waist_roll_joint", - "left_shoulder_pitch_joint", - "left_shoulder_roll_joint", - ], + NullSpacePostureTaskCfg( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=[ + "waist_yaw_joint", + "waist_pitch_joint", + "waist_roll_joint", + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + ], + ) ), ] @@ -96,7 +88,7 @@ def test_null_space_jacobian_zero_end_effector_velocity( frame_task = tasks[0] # Create pin.SE3 from position and quaternion position = np.array([0.5, 0.3, 0.8]) # x, y, z - quaternion = pin.Quaternion(1.0, 0.0, 0.0, 0.0) # w, x, y, z (identity quaternion) + quaternion = pin.Quaternion(0.0, 0.0, 0.0, 1.0) # x, y, z, w (identity quaternion) target_pose = pin.SE3(quaternion, position) frame_task.set_target(target_pose) @@ -160,7 +152,9 @@ def test_null_space_jacobian_identity_when_no_frame_tasks( ): """Test that null space Jacobian is identity when no frame tasks are defined.""" # Create null space task without frame task controlled joints - null_space_task = NullSpacePostureTask(cost=1.0, controlled_frames=[], controlled_joints=[]) + null_space_task = NullSpacePostureTask( + NullSpacePostureTaskCfg(cost=1.0, controlled_frames=[], controlled_joints=[]) + ) # Set specific joint configuration robot_configuration.q = joint_configurations["sequential"] @@ -193,7 +187,7 @@ def test_null_space_jacobian_consistency_across_configurations( frame_task = tasks[0] # Create pin.SE3 from position and quaternion position = np.array([0.3, 0.3, 0.5]) - quaternion = pin.Quaternion(1.0, 0.0, 0.0, 0.0) # w, x, y, z (identity quaternion) + quaternion = pin.Quaternion(0.0, 0.0, 0.0, 1.0) # x, y, z, w (identity quaternion) target_pose = pin.SE3(quaternion, position) frame_task.set_target(target_pose) @@ -223,9 +217,11 @@ def test_null_space_jacobian_consistency_across_configurations( def test_compute_error_without_target(self, robot_configuration, joint_configurations): """Test that compute_error raises ValueError when no target is set.""" null_space_task = NullSpacePostureTask( - cost=1.0, - controlled_frames=["left_hand_pitch_link"], - controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + NullSpacePostureTaskCfg( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + ) ) robot_configuration.q = joint_configurations["sequential"] @@ -241,7 +237,9 @@ def test_joint_masking(self, robot_configuration, joint_configurations, num_join # Create task with specific controlled joints null_space_task = NullSpacePostureTask( - cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=controlled_joint_names + NullSpacePostureTaskCfg( + cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=controlled_joint_names + ) ) # Find the joint indexes in robot_configuration.model.names.tolist()[1:] @@ -273,7 +271,7 @@ def test_joint_masking(self, robot_configuration, joint_configurations, num_join def test_empty_controlled_joints(self, robot_configuration, joint_configurations, num_joints): """Test behavior when controlled_joints is empty.""" null_space_task = NullSpacePostureTask( - cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=[] + NullSpacePostureTaskCfg(cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=[]) ) current_config = joint_configurations["sequential"] @@ -290,9 +288,11 @@ def test_empty_controlled_joints(self, robot_configuration, joint_configurations def test_set_target_from_configuration(self, robot_configuration, joint_configurations): """Test set_target_from_configuration method.""" null_space_task = NullSpacePostureTask( - cost=1.0, - controlled_frames=["left_hand_pitch_link"], - controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + NullSpacePostureTaskCfg( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + ) ) # Set a specific configuration @@ -310,9 +310,11 @@ def test_multiple_frame_tasks(self, robot_configuration, joint_configurations, n """Test null space projection with multiple frame tasks.""" # Create task with multiple controlled frames null_space_task = NullSpacePostureTask( - cost=1.0, - controlled_frames=["left_hand_pitch_link", "right_hand_pitch_link"], - controlled_joints=["waist_yaw_joint", "waist_pitch_joint", "waist_roll_joint"], + NullSpacePostureTaskCfg( + cost=1.0, + controlled_frames=["left_hand_pitch_link", "right_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint", "waist_roll_joint"], + ) ) current_config = joint_configurations["sequential"] diff --git a/source/isaaclab/test/controllers/test_operational_space.py b/source/isaaclab/test/controllers/test_operational_space.py index eeec14d877dc..63c1633c5f7c 100644 --- a/source/isaaclab/test/controllers/test_operational_space.py +++ b/source/isaaclab/test/controllers/test_operational_space.py @@ -14,15 +14,30 @@ import pytest import torch +import warp as wp +from flaky import flaky -from isaacsim.core.cloner import GridCloner - +import isaaclab.envs.mdp as mdp import isaaclab.sim as sim_utils +from isaaclab import cloner from isaaclab.assets import Articulation +from isaaclab.assets.articulation import ArticulationCfg from isaaclab.controllers import OperationalSpaceController, OperationalSpaceControllerCfg + +## +# Pre-defined configs +## +from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg +from isaaclab.envs.mdp.actions.actions_cfg import OperationalSpaceControllerActionCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg from isaaclab.markers import VisualizationMarkers from isaaclab.markers.config import FRAME_MARKER_CFG +from isaaclab.scene import InteractiveSceneCfg from isaaclab.sensors import ContactSensor, ContactSensorCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass as lab_configclass from isaaclab.utils.math import ( apply_delta_pose, combine_frame_transforms, @@ -33,10 +48,7 @@ subtract_frame_transforms, ) -## -# Pre-defined configs -## -from isaaclab_assets import FRANKA_PANDA_CFG # isort:skip +from isaaclab_assets import FRANKA_PANDA_CFG, G1_29DOF_CFG # isort:skip @pytest.fixture @@ -69,18 +81,15 @@ def sim(): translation=[0, 0, 1], ) - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") - env_prim_paths = cloner.generate_paths("/World/envs/env", num_envs) + # Create environment clones using Isaac Lab's cloner utilities + env_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = cloner.grid_transforms(num_envs, spacing=2.0, device=sim.device) # create source prim stage.DefinePrim(env_prim_paths[0], "Xform") # clone the env xform - cloner.clone( - source_prim_path=env_prim_paths[0], - prim_paths=env_prim_paths, - replicate_physics=True, - ) + cloner.usd_replicate(stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) robot_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/envs/env_.*/Robot") robot_cfg.actuators["panda_shoulder"].stiffness = 0.0 @@ -103,9 +112,9 @@ def sim(): ) ee_goal_abs_quad_set_b = torch.tensor( [ - [0.707, 0.0, 0.707, 0.0], - [0.707, 0.707, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], + [0.0, 0.707, 0.0, 0.707], + [0.707, 0.0, 0.0, 0.707], + [1.0, 0.0, 0.0, 0.0], ], device=sim.device, ) @@ -149,19 +158,21 @@ def sim(): ], device=sim.device, ) + # Format: [x, y, z, qx, qy, qz, qw, force_x, force_y, force_z, torque_x, torque_y, torque_z] ee_goal_hybrid_set_b = torch.tensor( [ - [0.6, 0.2, 0.5, 0.0, 0.707, 0.0, 0.707, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.6, -0.29, 0.6, 0.0, 0.707, 0.0, 0.707, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.6, 0.1, 0.8, 0.0, 0.5774, 0.0, 0.8165, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.6, 0.2, 0.5, 0.707, 0.0, 0.707, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.6, -0.29, 0.6, 0.707, 0.0, 0.707, 0.0, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.6, 0.1, 0.8, 0.5774, 0.0, 0.8165, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0], ], device=sim.device, ) + # Format: [x, y, z, qx, qy, qz, qw] - quaternions converted from wxyz to xyzw format ee_goal_pose_set_tilted_b = torch.tensor( [ - [0.6, 0.15, 0.3, 0.0, 0.92387953, 0.0, 0.38268343], - [0.6, -0.3, 0.3, 0.0, 0.92387953, 0.0, 0.38268343], - [0.8, 0.0, 0.5, 0.0, 0.92387953, 0.0, 0.38268343], + [0.6, 0.15, 0.3, 0.92387953, 0.0, 0.38268343, 0.0], + [0.6, -0.3, 0.3, 0.92387953, 0.0, 0.38268343, 0.0], + [0.8, 0.0, 0.5, 0.92387953, 0.0, 0.38268343, 0.0], ], device=sim.device, ) @@ -176,7 +187,7 @@ def sim(): # Define goals for the arm [xyz] target_abs_pos_set_b = ee_goal_abs_pos_set_b.clone() - # Define goals for the arm [xyz + quat_wxyz] + # Define goals for the arm [xyz + quat_xyzw] target_abs_pose_set_b = torch.cat([ee_goal_abs_pos_set_b, ee_goal_abs_quad_set_b], dim=-1) # Define goals for the arm [xyz] target_rel_pos_set = ee_goal_rel_pos_set.clone() @@ -184,15 +195,15 @@ def sim(): target_rel_pose_set_b = torch.cat([ee_goal_rel_pos_set, ee_goal_rel_axisangle_set], dim=-1) # Define goals for the arm [force_xyz + torque_xyz] target_abs_wrench_set = ee_goal_abs_wrench_set_b.clone() - # Define goals for the arm [xyz + quat_wxyz] and variable kp [kp_xyz + kp_rot_xyz] + # Define goals for the arm [xyz + quat_xyzw] and variable kp [kp_xyz + kp_rot_xyz] target_abs_pose_variable_kp_set = torch.cat([target_abs_pose_set_b, kp_set], dim=-1) - # Define goals for the arm [xyz + quat_wxyz] and the variable imp. [kp_xyz + kp_rot_xyz + d_xyz + d_rot_xyz] + # Define goals for the arm [xyz + quat_xyzw] and the variable imp. [kp_xyz + kp_rot_xyz + d_xyz + d_rot_xyz] target_abs_pose_variable_set = torch.cat([target_abs_pose_set_b, kp_set, d_ratio_set], dim=-1) - # Define goals for the arm pose [xyz + quat_wxyz] and wrench [force_xyz + torque_xyz] + # Define goals for the arm pose [xyz + quat_xyzw] and wrench [force_xyz + torque_xyz] target_hybrid_set_b = ee_goal_hybrid_set_b.clone() # Define goals for the arm pose, and wrench, and kp target_hybrid_variable_kp_set = torch.cat([target_hybrid_set_b, kp_set], dim=-1) - # Define goals for the arm pose [xyz + quat_wxyz] in root and and wrench [force_xyz + torque_xyz] in task frame + # Define goals for the arm pose [xyz + quat_xyzw] in root and and wrench [force_xyz + torque_xyz] in task frame target_hybrid_set_tilted = torch.cat([ee_goal_pose_set_tilted_b, ee_goal_wrench_set_tilted_task], dim=-1) # Reference frame for targets @@ -220,8 +231,6 @@ def sim(): # Cleanup sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -559,19 +568,19 @@ def test_franka_wrench_abs_open_loop(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(0.2, 0.0, 0.93), - orientation=(0.9848, 0.0, -0.1736, 0.0), + orientation=(0.0, -0.1736, 0.0, 0.9848), ) obstacle_spawn_cfg.func( "/World/envs/env_.*/obstacle2", obstacle_spawn_cfg, translation=(0.2, 0.35, 0.7), - orientation=(0.707, 0.707, 0.0, 0.0), + orientation=(0.707, 0.0, 0.0, 0.707), ) obstacle_spawn_cfg.func( "/World/envs/env_.*/obstacle3", obstacle_spawn_cfg, translation=(0.55, 0.0, 0.7), - orientation=(0.707, 0.0, 0.707, 0.0), + orientation=(0.0, 0.707, 0.0, 0.707), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -640,19 +649,19 @@ def test_franka_wrench_abs_closed_loop(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(0.2, 0.0, 0.93), - orientation=(0.9848, 0.0, -0.1736, 0.0), + orientation=(0.0, -0.1736, 0.0, 0.9848), ) obstacle_spawn_cfg.func( "/World/envs/env_.*/obstacle2", obstacle_spawn_cfg, translation=(0.2, 0.35, 0.7), - orientation=(0.707, 0.707, 0.0, 0.0), + orientation=(0.707, 0.0, 0.0, 0.707), ) obstacle_spawn_cfg.func( "/World/envs/env_.*/obstacle3", obstacle_spawn_cfg, translation=(0.55, 0.0, 0.7), - orientation=(0.707, 0.0, 0.707, 0.0), + orientation=(0.0, 0.707, 0.0, 0.707), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -729,7 +738,7 @@ def test_franka_hybrid_decoupled_motion(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(target_hybrid_set_b[0, 0] + 0.05, 0.0, 0.7), - orientation=(0.707, 0.0, 0.707, 0.0), + orientation=(0.0, 0.707, 0.0, 0.707), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -770,6 +779,7 @@ def test_franka_hybrid_decoupled_motion(sim): @pytest.mark.isaacsim_ci +@flaky(max_runs=3, min_passes=1) def test_franka_hybrid_variable_kp_impedance(sim): """Test hybrid control with variable kp impedance and inertial dynamics decoupling.""" ( @@ -805,7 +815,7 @@ def test_franka_hybrid_variable_kp_impedance(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(target_hybrid_set_b[0, 0] + 0.05, 0.0, 0.7), - orientation=(0.707, 0.0, 0.707, 0.0), + orientation=(0.0, 0.707, 0.0, 0.707), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -829,6 +839,7 @@ def test_franka_hybrid_variable_kp_impedance(sim): ) osc = OperationalSpaceController(osc_cfg, num_envs=num_envs, device=sim_context.device) + # Use more convergence steps for hybrid control which is less precise _run_op_space_controller( robot, osc, @@ -841,6 +852,7 @@ def test_franka_hybrid_variable_kp_impedance(sim): goal_marker, contact_forces, frame, + convergence_steps=750, ) @@ -983,7 +995,7 @@ def test_franka_taskframe_hybrid(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(target_hybrid_set_tilted[0, 0] + 0.085, 0.0, 0.3), - orientation=(0.9238795325, 0.0, -0.3826834324, 0.0), + orientation=(0.0, -0.3826834324, 0.0, 0.9238795325), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -1212,7 +1224,7 @@ def test_franka_taskframe_hybrid_with_nullspace_centering(sim): "/World/envs/env_.*/obstacle1", obstacle_spawn_cfg, translation=(target_hybrid_set_tilted[0, 0] + 0.085, 0.0, 0.3), - orientation=(0.9238795325, 0.0, -0.3826834324, 0.0), + orientation=(0.0, -0.3826834324, 0.0, 0.9238795325), ) contact_forces_cfg = ContactSensorCfg( prim_path="/World/envs/env_.*/obstacle.*", @@ -1253,6 +1265,146 @@ def test_franka_taskframe_hybrid_with_nullspace_centering(sim): ) +## +# Floating-base regression test configs (PR #5107) +## + +_G1_ARM_JOINT_NAMES = [ + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + "left_shoulder_yaw_joint", + "left_elbow_joint", +] + + +@lab_configclass +class _FloatingBaseOscSceneCfg(InteractiveSceneCfg): + """Minimal scene with a floating-base G1 humanoid.""" + + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False) + robot: ArticulationCfg = G1_29DOF_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + def __post_init__(self): + super().__post_init__() + self.robot.spawn.articulation_props.fix_root_link = False + self.robot.spawn.rigid_props.disable_gravity = True + + +@lab_configclass +class _FloatingBaseOscActionsCfg: + arm_action: OperationalSpaceControllerActionCfg = OperationalSpaceControllerActionCfg( + asset_name="robot", + joint_names=_G1_ARM_JOINT_NAMES, + body_name="left_elbow_link", + controller_cfg=OperationalSpaceControllerCfg( + target_types=["pose_abs"], + impedance_mode="fixed", + inertial_dynamics_decoupling=True, + gravity_compensation=False, + motion_stiffness_task=500.0, + motion_damping_ratio_task=1.0, + ), + ) + + +@lab_configclass +class _FloatingBaseOscObsCfg: + @lab_configclass + class _PolicyCfg(ObsGroup): + joint_pos = ObsTerm(func=mdp.joint_pos, params={"asset_cfg": SceneEntityCfg("robot")}) + + policy: _PolicyCfg = _PolicyCfg() + + +@lab_configclass +class _FloatingBaseOscEnvCfg(ManagerBasedEnvCfg): + scene: _FloatingBaseOscSceneCfg = _FloatingBaseOscSceneCfg(num_envs=4, env_spacing=4.0) + actions: _FloatingBaseOscActionsCfg = _FloatingBaseOscActionsCfg() + observations: _FloatingBaseOscObsCfg = _FloatingBaseOscObsCfg() + decimation: int = 1 + sim: sim_utils.SimulationCfg = sim_utils.SimulationCfg(dt=0.01) + + +@pytest.mark.isaacsim_ci +def test_floating_base_osc_action_term_indexing(): + """Regression test for #4999 / PR #5107: verify OperationalSpaceControllerAction uses correct + indices for mass matrix and gravity on floating-base robots. + + For floating-base robots, PhysX prepends 6 virtual DOFs to the generalized mass matrix and + gravity vectors. The action term's ``_compute_dynamic_quantities()`` must use + ``_jacobi_joint_idx`` (with +6 offset) instead of ``_joint_ids``. This test instantiates the + real action term via a ManagerBasedEnv, triggers ``_compute_dynamic_quantities()``, and verifies + the extracted mass matrix and gravity match a manual extraction using the correct PhysX indices. + + If someone reverts ``_jacobi_joint_idx`` back to ``_joint_ids`` in ``_compute_dynamic_quantities``, + this test will fail. + """ + env_cfg = _FloatingBaseOscEnvCfg() + env_cfg.sim.device = "cuda:0" + env = ManagerBasedEnv(cfg=env_cfg) + num_envs = env.num_envs + + try: + robot: Articulation = env.scene["robot"] + + # --- 1. Verify the robot is floating-base --- + assert not robot.is_fixed_base, "G1_29DOF_CFG must be floating-base for this test" + + # --- 2. Get the action term --- + action_term = env.action_manager._terms["arm_action"] + num_arm_joints = action_term._num_DoF + + # --- 3. Step the env to populate physics buffers --- + zero_actions = torch.zeros(num_envs, action_term.action_dim, device=env.device) + action_term.process_actions(zero_actions) + action_term.apply_actions() + + # --- 4. The action term's _mass_matrix and _gravity are now populated --- + term_mass = action_term._mass_matrix.clone() + term_gravity = action_term._gravity.clone() + + # --- 5. Manually extract using the CORRECT indices (what the fix does) --- + jacobi_joint_idx = action_term._jacobi_joint_idx + full_mass_matrix = wp.to_torch(robot.root_view.get_generalized_mass_matrices()) + full_gravity = wp.to_torch(robot.root_view.get_gravity_compensation_forces()) + + manual_mass = full_mass_matrix[:, jacobi_joint_idx, :][:, :, jacobi_joint_idx] + manual_gravity = full_gravity[:, jacobi_joint_idx] + + # --- 6. KEY ASSERTION: action term output must match manual extraction with correct indices --- + torch.testing.assert_close(term_mass, manual_mass, atol=1e-5, rtol=0) + torch.testing.assert_close(term_gravity, manual_gravity, atol=1e-5, rtol=0) + + # --- 7. Verify the full PhysX tensor has +6 virtual DOFs --- + expected_physx_dofs = robot.num_joints + 6 + assert full_mass_matrix.shape[1] == expected_physx_dofs, ( + f"Mass matrix should have {expected_physx_dofs} DOFs, got {full_mass_matrix.shape[1]}" + ) + + # --- 8. Verify correct indices differ from raw joint_ids (the old bug) --- + # Reconstruct the original joint_ids before any slice(None) optimization + original_joint_ids, _ = robot.find_joints(_G1_ARM_JOINT_NAMES) + buggy_mass = full_mass_matrix[:, original_joint_ids, :][:, :, original_joint_ids] + assert not torch.allclose(term_mass, buggy_mass, atol=1e-6), ( + "Action term mass matrix should NOT match extraction with raw joint_ids (no +6 offset)" + ) + + # --- 9. Verify physically reasonable values --- + diag = torch.diagonal(term_mass, dim1=-2, dim2=-1) + assert (diag > 0).all(), f"Mass matrix diagonal must be positive, got min={diag.min().item():.6f}" + assert diag.max().item() < 100.0, ( + f"Mass matrix diagonal too large ({diag.max().item():.1f}), possibly contaminated by base DOFs" + ) + assert torch.allclose(term_mass, term_mass.transpose(-2, -1), atol=1e-5), "Mass matrix should be symmetric" + + # --- 10. Verify shapes --- + assert term_mass.shape == (num_envs, num_arm_joints, num_arm_joints) + assert term_gravity.shape == (num_envs, num_arm_joints) + + finally: + env.close() + + def _run_op_space_controller( robot: Articulation, osc: OperationalSpaceController, @@ -1265,6 +1417,7 @@ def _run_op_space_controller( goal_marker: VisualizationMarkers, contact_forces: ContactSensor | None, frame: str, + convergence_steps: int = 500, ): """Run the operational space controller with the given parameters. @@ -1280,6 +1433,7 @@ def _run_op_space_controller( goal_marker (VisualizationMarkers): The goal marker. contact_forces (ContactSensor | None): The contact forces sensor. frame (str): The reference frame for targets. + convergence_steps (int): Number of simulation steps to run before checking convergence. Defaults to 500. """ # Initialize the masks for evaluating target convergence according to selection matrices pos_mask = torch.tensor(osc.cfg.motion_control_axes_task[:3], device=sim.device).view(1, 3) @@ -1302,7 +1456,7 @@ def _run_op_space_controller( robot.update(dt=sim_dt) # Get the center of the robot soft joint limits - joint_centers = torch.mean(robot.data.soft_joint_pos_limits[:, arm_joint_ids, :], dim=-1) + joint_centers = torch.mean(robot.data.soft_joint_pos_limits.torch[:, arm_joint_ids, :], dim=-1) # get the updated states ( @@ -1331,17 +1485,19 @@ def _run_op_space_controller( joint_efforts = torch.zeros(num_envs, len(arm_joint_ids), device=sim.device) # Now we are ready! - for count in range(1501): - # reset every 500 steps - if count % 500 == 0: + # Run for 3 target cycles plus 1 step to trigger final convergence check + total_steps = 3 * convergence_steps + 1 + for count in range(total_steps): + # reset every convergence_steps steps + if count % convergence_steps == 0: # check that we converged to the goal if count > 0: _check_convergence( osc, ee_pose_b, ee_target_pose_b, ee_force_b, command, pos_mask, rot_mask, force_mask, frame ) # reset joint state to default - default_joint_pos = robot.data.default_joint_pos.clone() - default_joint_vel = robot.data.default_joint_vel.clone() + default_joint_pos = robot.data.default_joint_pos.torch.clone() + default_joint_vel = robot.data.default_joint_vel.torch.clone() robot.write_joint_state_to_sim(default_joint_pos, default_joint_vel) robot.set_joint_effort_target(zero_joint_efforts) # Set zero torques in the initial step robot.write_data_to_sim() @@ -1434,29 +1590,29 @@ def _update_states( """ # obtain dynamics related quantities from simulation ee_jacobi_idx = ee_frame_idx - 1 - jacobian_w = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] - mass_matrix = robot.root_physx_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] - gravity = robot.root_physx_view.get_gravity_compensation_forces()[:, arm_joint_ids] + jacobian_w = wp.to_torch(robot.root_view.get_jacobians())[:, ee_jacobi_idx, :, arm_joint_ids] + mass_matrix = wp.to_torch(robot.root_view.get_generalized_mass_matrices())[:, arm_joint_ids, :][:, :, arm_joint_ids] + gravity = wp.to_torch(robot.root_view.get_gravity_compensation_forces())[:, arm_joint_ids] # Convert the Jacobian from world to root frame jacobian_b = jacobian_w.clone() - root_rot_matrix = matrix_from_quat(quat_inv(robot.data.root_quat_w)) + root_rot_matrix = matrix_from_quat(quat_inv(robot.data.root_quat_w.torch)) jacobian_b[:, :3, :] = torch.bmm(root_rot_matrix, jacobian_b[:, :3, :]) jacobian_b[:, 3:, :] = torch.bmm(root_rot_matrix, jacobian_b[:, 3:, :]) # Compute current pose of the end-effector - root_pose_w = robot.data.root_pose_w - ee_pose_w = robot.data.body_pose_w[:, ee_frame_idx] + root_pose_w = robot.data.root_pose_w.torch + ee_pose_w = robot.data.body_pose_w.torch[:, ee_frame_idx] ee_pos_b, ee_quat_b = subtract_frame_transforms( root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7] ) ee_pose_b = torch.cat([ee_pos_b, ee_quat_b], dim=-1) # Compute the current velocity of the end-effector - ee_vel_w = robot.data.body_vel_w[:, ee_frame_idx, :] # Extract end-effector velocity in the world frame - root_vel_w = robot.data.root_vel_w # Extract root velocity in the world frame + ee_vel_w = robot.data.body_vel_w.torch[:, ee_frame_idx, :] # Extract end-effector velocity in the world frame + root_vel_w = robot.data.root_vel_w.torch # Extract root velocity in the world frame relative_vel_w = ee_vel_w - root_vel_w # Compute the relative velocity in the world frame - ee_lin_vel_b = quat_apply_inverse(robot.data.root_quat_w, relative_vel_w[:, 0:3]) # From world to root frame - ee_ang_vel_b = quat_apply_inverse(robot.data.root_quat_w, relative_vel_w[:, 3:6]) + ee_lin_vel_b = quat_apply_inverse(robot.data.root_quat_w.torch, relative_vel_w[:, 0:3]) # From world to root frame + ee_ang_vel_b = quat_apply_inverse(robot.data.root_quat_w.torch, relative_vel_w[:, 3:6]) ee_vel_b = torch.cat([ee_lin_vel_b, ee_ang_vel_b], dim=-1) # Calculate the contact force @@ -1466,14 +1622,14 @@ def _update_states( contact_forces.update(sim_dt) # update contact sensor # Calculate the contact force by averaging over last four time steps (i.e., to smoothen) and # taking the max of three surfaces as only one should be the contact of interest - ee_force_w, _ = torch.max(torch.mean(contact_forces.data.net_forces_w_history, dim=1), dim=1) + ee_force_w, _ = torch.max(torch.mean(contact_forces.data.net_forces_w_history.torch, dim=1), dim=1) # This is a simplification, only for the sake of testing. ee_force_b = ee_force_w # Get joint positions and velocities - joint_pos = robot.data.joint_pos[:, arm_joint_ids] - joint_vel = robot.data.joint_vel[:, arm_joint_ids] + joint_pos = robot.data.joint_pos.torch[:, arm_joint_ids] + joint_vel = robot.data.joint_vel.torch[:, arm_joint_ids] return ( jacobian_b, @@ -1633,8 +1789,8 @@ def _check_convergence( pos_error, rot_error = compute_pose_error( ee_pose_b[:, 0:3], ee_pose_b[:, 3:7], ee_target_pose_b[:, 0:3], ee_target_pose_b[:, 3:7] ) - pos_error_norm = torch.norm(pos_error * pos_mask, dim=-1) - rot_error_norm = torch.norm(rot_error * rot_mask, dim=-1) + pos_error_norm = torch.linalg.norm(pos_error * pos_mask, dim=-1) + rot_error_norm = torch.linalg.norm(rot_error * rot_mask, dim=-1) # desired error (zer) des_error = torch.zeros_like(pos_error_norm) # check convergence @@ -1645,8 +1801,8 @@ def _check_convergence( pos_error, rot_error = compute_pose_error( ee_pose_b[:, 0:3], ee_pose_b[:, 3:7], ee_target_pose_b[:, 0:3], ee_target_pose_b[:, 3:7] ) - pos_error_norm = torch.norm(pos_error * pos_mask, dim=-1) - rot_error_norm = torch.norm(rot_error * rot_mask, dim=-1) + pos_error_norm = torch.linalg.norm(pos_error * pos_mask, dim=-1) + rot_error_norm = torch.linalg.norm(rot_error * rot_mask, dim=-1) # desired error (zer) des_error = torch.zeros_like(pos_error_norm) # check convergence @@ -1661,12 +1817,24 @@ def _check_convergence( R_task_b = matrix_from_quat(task_frame_pose_b[:, 3:]) force_target_b[:] = (R_task_b @ force_target_b[:].unsqueeze(-1)).squeeze(-1) force_error = ee_force_b - force_target_b - force_error_norm = torch.norm( + force_error_norm = torch.linalg.norm( force_error * force_mask, dim=-1 ) # ignore torque part as we cannot measure it - des_error = torch.zeros_like(force_error_norm) - # check convergence: big threshold here as the force control is not precise when the robot moves - torch.testing.assert_close(force_error_norm, des_error, rtol=0.0, atol=1.0) + # Check convergence using statistical thresholds instead of a blanket all-environments + # tolerance. Contact force steady-state is sensitive to physics engine internals (PhysX + # solver iterations, contact resolution, penetration depth) which causes outlier + # environments. A tight median check catches real controller regressions while a loose + # max check catches catastrophic failures without breaking on single-environment noise. + median_error = torch.median(force_error_norm).item() + max_error = torch.max(force_error_norm).item() + assert median_error < 5.0, ( + f"Median force error {median_error:.1f} N exceeds 5.0 N threshold" + f" (max: {max_error:.1f} N, per-env: {force_error_norm.tolist()})" + ) + assert max_error < 50.0, ( + f"Max force error {max_error:.1f} N exceeds 50.0 N sanity threshold" + f" (median: {median_error:.1f} N, per-env: {force_error_norm.tolist()})" + ) cmd_idx += 6 else: raise ValueError("Undefined target_type within _check_convergence().") diff --git a/source/isaaclab/test/controllers/test_pink_ik.py b/source/isaaclab/test/controllers/test_pink_ik.py index 43ab48ae0590..4c7669b1c72e 100644 --- a/source/isaaclab/test/controllers/test_pink_ik.py +++ b/source/isaaclab/test/controllers/test_pink_ik.py @@ -5,14 +5,6 @@ """Launch Isaac Sim Simulator first.""" -# Import pinocchio in the main script to force the use of the dependencies -# installed by IsaacLab and not the one installed by Isaac Sim -# pinocchio is required by the Pink IK controller -import sys - -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch omniverse app @@ -32,15 +24,14 @@ from pink.configuration import Configuration from pink.tasks import FrameTask -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.utils.math import axis_angle_from_quat, matrix_from_quat, quat_from_matrix, quat_inv import isaaclab_tasks # noqa: F401 -import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 -import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg +pytestmark = pytest.mark.isaacsim_ci + def load_test_config(env_name): """Load test configuration based on environment type.""" @@ -72,7 +63,7 @@ def create_test_env(env_name, num_envs): """Create a test environment with the Pink IK controller.""" device = "cuda:0" - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: env_cfg = parse_env_cfg(env_name, device=device, num_envs=num_envs) @@ -104,9 +95,10 @@ def env_and_cfg(request): env, env_cfg = create_test_env(env_name, num_envs=1) - # Get only the FrameTasks from variable_input_tasks + # Read instantiated task objects from the live action term/controller, not raw cfg wrappers. + action_term = env.action_manager.get_term(name="upper_body_ik") variable_input_tasks = [ - task for task in env_cfg.actions.upper_body_ik.controller.variable_input_tasks if isinstance(task, FrameTask) + task for task in action_term._ik_controllers[0].cfg.variable_input_tasks if isinstance(task, FrameTask) ] assert len(variable_input_tasks) == 2, "Expected exactly two FrameTasks (left and right hand)." frames = [task.frame for task in variable_input_tasks] @@ -297,7 +289,7 @@ def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): def get_link_pose(env, link_name): """Get the position and orientation of a link.""" link_index = env.scene["robot"].data.body_names.index(link_name) - link_states = env.scene._articulations["robot"]._data.body_link_state_w + link_states = env.scene._articulations["robot"].data.body_link_state_w.torch link_pose = link_states[:, link_index, :7] return link_pose[:, :3], link_pose[:, 3:7] @@ -350,7 +342,7 @@ def compute_errors( isaaclab_controlled_joint_ids = action_term._isaaclab_controlled_joint_ids # Get current and target positions for controlled joints only - curr_joints = articulation.data.joint_pos[:, isaaclab_controlled_joint_ids].cpu().numpy()[0] + curr_joints = articulation.data.joint_pos.torch[:, isaaclab_controlled_joint_ids].cpu().numpy()[0] target_joints = action_term.processed_actions[:, : len(isaaclab_controlled_joint_ids)].cpu().numpy()[0] # Reorder joints for Pink IK (using controlled joint ordering) @@ -397,7 +389,7 @@ def verify_errors(errors, test_setup, tolerances): for hand in ["left", "right"]: # Check PD controller errors - pd_error_norm = torch.norm(errors[f"{hand}_pd_error"], dim=1) + pd_error_norm = torch.linalg.norm(errors[f"{hand}_pd_error"], dim=1) torch.testing.assert_close( pd_error_norm, zero_tensor, @@ -410,7 +402,7 @@ def verify_errors(errors, test_setup, tolerances): ) # Check IK position errors - pos_error_norm = torch.norm(errors[f"{hand}_pos_error"], dim=1) + pos_error_norm = torch.linalg.norm(errors[f"{hand}_pos_error"], dim=1) torch.testing.assert_close( pos_error_norm, zero_tensor, diff --git a/source/isaaclab/test/controllers/test_pink_ik_components.py b/source/isaaclab/test/controllers/test_pink_ik_components.py index 6bde4c30a1b6..2533e78e0123 100644 --- a/source/isaaclab/test/controllers/test_pink_ik_components.py +++ b/source/isaaclab/test/controllers/test_pink_ik_components.py @@ -5,14 +5,6 @@ """Test cases for PinkKinematicsConfiguration class.""" -# Import pinocchio in the main script to force the use of the dependencies installed -# by IsaacLab and not the one installed by Isaac Sim -# pinocchio is required by the Pink IK controller -import sys - -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch omniverse app @@ -27,6 +19,8 @@ from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration +pytestmark = pytest.mark.isaacsim_ci + class TestPinkKinematicsConfiguration: """Test suite for PinkKinematicsConfiguration class.""" diff --git a/source/isaaclab/test/deps/isaacsim/check_camera.py b/source/isaaclab/test/deps/isaacsim/check_camera.py index 81f481f23e3f..ee2c9d5adf28 100644 --- a/source/isaaclab/test/deps/isaacsim/check_camera.py +++ b/source/isaaclab/test/deps/isaacsim/check_camera.py @@ -3,247 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -""" -This script shows the issue with renderer in Isaac Sim that affects episodic resets. +"""Historic camera episodic-reset reproducer (retired). -The first few images of every new episode are not updated. They take multiple steps to update -and have the same image as the previous episode for the first few steps. +This script previously depended on Isaac Sim core extensions that Isaac Sim now carries under +``source/deprecated`` in the Isaac Sim repository. It has been retired from Isaac Lab to avoid +depending on those extension IDs and import paths. -``` -# run with cube -_isaac_sim/python.sh source/isaaclab/test/deps/isaacsim/check_camera.py --scenario cube -# run with anymal -_isaac_sim/python.sh source/isaaclab/test/deps/isaacsim/check_camera.py --scenario anymal -``` +Use :class:`~isaaclab.sim.SimulationContext`, Isaac Lab sensors, and ``isaacsim.core.experimental.*`` +APIs when debugging rendering and camera pipelines. """ -"""Launch Isaac Sim Simulator first.""" - -import argparse - -# isaaclab -from isaaclab.app import AppLauncher - -# add argparse arguments -parser = argparse.ArgumentParser( - description="This script shows the issue with renderer in Isaac Sim that affects episodic resets." +raise RuntimeError( + "This standalone reproducer was retired; it depended on deprecated Isaac Sim core extensions. " + "Use Isaac Lab :mod:`isaaclab.sim` and ``isaacsim.core.experimental.*`` instead." ) -parser.add_argument("--gpu", action="store_true", default=False, help="Use GPU device for camera rendering output.") -parser.add_argument("--scenario", type=str, default="anymal", help="Scenario to load.", choices=["anymal", "cube"]) -# append AppLauncher cli args -AppLauncher.add_app_launcher_args(parser) -# parse the arguments -args_cli = parser.parse_args() - -# launch omniverse app -app_launcher = AppLauncher(args_cli) -simulation_app = app_launcher.app - -"""Rest everything follows.""" - -import os -import random - -import numpy as np -from PIL import Image, ImageChops - -import isaacsim.core.utils.nucleus as nucleus_utils -import isaacsim.core.utils.prims as prim_utils -import omni.replicator.core as rep -from isaacsim.core.api.world import World -from isaacsim.core.prims import Articulation, RigidPrim, SingleGeometryPrim, SingleRigidPrim -from isaacsim.core.utils.viewports import set_camera_view -from pxr import Gf, UsdGeom - -# check nucleus connection -if nucleus_utils.get_assets_root_path() is None: - msg = ( - "Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n" - "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus" - ) - raise RuntimeError(msg) - -ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac" -"""Path to the `Isaac` directory on the NVIDIA Nucleus Server.""" - - -def main(): - """Runs a camera sensor from isaaclab.""" - - # Load kit helper - world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu") - # Set main camera - set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) - - # Enable flatcache which avoids passing data over to USD structure - # this speeds up the read-write operation of GPU buffers - if world.get_physics_context().use_gpu_pipeline: - world.get_physics_context().enable_flatcache(True) - # Enable hydra scene-graph instancing - # this is needed to visualize the scene when flatcache is enabled - world._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - - # Populate scene - # Ground - world.scene.add_default_ground_plane() - # Lights-1 - prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0)) - # Lights-2 - prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0)) - # Xform to hold objects - if args_cli.scenario == "cube": - prim_utils.create_prim("/World/Objects", "Xform") - # Random objects - for i in range(8): - # sample random position - position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0]) - position *= np.asarray([1.5, 1.5, 0.5]) - # create prim - prim_type = random.choice(["Cube", "Sphere", "Cylinder"]) - _ = prim_utils.create_prim( - f"/World/Objects/Obj_{i:02d}", - prim_type, - translation=position, - scale=(0.25, 0.25, 0.25), - semantic_label=prim_type, - ) - # add rigid properties - SingleGeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) - rigid_obj = SingleRigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) - # cast to geom prim - geom_prim = getattr(UsdGeom, prim_type)(rigid_obj.prim) - # set random color - color = Gf.Vec3f(random.random(), random.random(), random.random()) - geom_prim.CreateDisplayColorAttr() - geom_prim.GetDisplayColorAttr().Set([color]) - # Setup camera sensor on the world - cam_prim_path = "/World/CameraSensor" - else: - # Robot - prim_utils.create_prim( - "/World/Robot", - usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_c/anymal_c.usd", - translation=(0.0, 0.0, 0.6), - ) - # Setup camera sensor on the robot - cam_prim_path = "/World/CameraSensor" - - # Create camera - cam_prim = prim_utils.create_prim( - cam_prim_path, - prim_type="Camera", - translation=(5.0, 5.0, 5.0), - orientation=(0.33985113, 0.17591988, 0.42470818, 0.82047324), - ) - _ = UsdGeom.Camera(cam_prim) - # Get render product - render_prod_path = rep.create.render_product(cam_prim_path, resolution=(640, 480)) - # create annotator node - rep_registry = {} - for name in ["rgb", "distance_to_image_plane"]: - # create annotator - rep_annotator = rep.AnnotatorRegistry.get_annotator(name, device="cpu") - rep_annotator.attach(render_prod_path) - # add to registry - rep_registry[name] = rep_annotator - - # Create replicator writer - output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera", args_cli.scenario) - os.makedirs(output_dir, exist_ok=True) - - # Create a view of the stuff we want to see - if args_cli.scenario == "cube": - view: RigidPrim = world.scene.add(RigidPrim("/World/Objects/.*", name="my_object")) - else: - view: Articulation = world.scene.add(Articulation("/World/Robot", name="my_object")) - # Play simulator - world.reset() - # Get initial state - if args_cli.scenario == "cube": - initial_pos, initial_quat = view.get_world_poses() - initial_joint_pos = None - initial_joint_vel = None - else: - initial_pos, initial_quat = view.get_world_poses() - initial_joint_pos = view.get_joint_positions() - initial_joint_vel = view.get_joint_velocities() - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - world.step(render=True) - - # Counter - count = 0 - prev_im = None - # make episode directory - episode_count = 0 - episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}") - os.makedirs(episode_dir, exist_ok=True) - # Simulate physics - while simulation_app.is_running(): - # If simulation is stopped, then exit. - if world.is_stopped(): - break - # If simulation is paused, then skip. - if not world.is_playing(): - world.step(render=False) - continue - # Reset on intervals - if count % 25 == 0: - # reset all the state - view.set_world_poses(initial_pos, initial_quat) - if initial_joint_pos is not None: - view.set_joint_positions(initial_joint_pos) - if initial_joint_vel is not None: - view.set_joint_velocities(initial_joint_vel) - # make a new episode directory - episode_dir = os.path.join(output_dir, f"episode_{episode_count:06d}") - os.makedirs(episode_dir, exist_ok=True) - # reset counters - count = 0 - episode_count += 1 - # Step simulation - for _ in range(15): - world.step(render=False) - world.render() - # Update camera data - rgb_data = rep_registry["rgb"].get_data() - depth_data = rep_registry["distance_to_image_plane"].get_data() - - # Show current image number - print(f"[Epi {episode_count:03d}] Current image number: {count:06d}") - # Save data - curr_im = Image.fromarray(rgb_data) - curr_im.save(os.path.join(episode_dir, f"{count:06d}_rgb.png")) - # Save diff - if prev_im is not None: - diff_im = ImageChops.difference(curr_im, prev_im) - # convert to grayscale and threshold - diff_im = diff_im.convert("L") - threshold = 30 - diff_im = diff_im.point(lambda p: p > threshold and 255) - # Save all of them together - dst_im = Image.new("RGB", (curr_im.width + prev_im.width + diff_im.width, diff_im.height)) - dst_im.paste(prev_im, (0, 0)) - dst_im.paste(curr_im, (prev_im.width, 0)) - dst_im.paste(diff_im, (2 * prev_im.width, 0)) - dst_im.save(os.path.join(episode_dir, f"{count:06d}_diff.png")) - - # Save to previous - prev_im = curr_im.copy() - # Update counter - count += 1 - - # Print camera info - print("Received shape of rgb image: ", rgb_data.shape) - print("Received shape of depth image: ", depth_data.shape) - print("-------------------------------") - - -if __name__ == "__main__": - # run the main function - main() - # close sim app - simulation_app.close() diff --git a/source/isaaclab/test/deps/isaacsim/check_floating_base_made_fixed.py b/source/isaaclab/test/deps/isaacsim/check_floating_base_made_fixed.py index 0be9a55bd4cb..f03012610e97 100644 --- a/source/isaaclab/test/deps/isaacsim/check_floating_base_made_fixed.py +++ b/source/isaaclab/test/deps/isaacsim/check_floating_base_made_fixed.py @@ -3,205 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""This script demonstrates how to make a floating robot fixed in Isaac Sim.""" - -"""Launch Isaac Sim Simulator first.""" - - -import argparse -import contextlib - -with contextlib.suppress(ModuleNotFoundError): - import isaacsim # noqa: F401 - -from isaacsim import SimulationApp - -# add argparse arguments -parser = argparse.ArgumentParser( - description="This script shows the issue in Isaac Sim with making a floating robot fixed." -) -parser.add_argument("--headless", action="store_true", help="Run in headless mode.") -parser.add_argument("--fix-base", action="store_true", help="Whether to fix the base of the robot.") -# parse the arguments -args_cli = parser.parse_args() - -# launch omniverse app -simulation_app = SimulationApp({"headless": args_cli.headless}) - -"""Rest everything follows.""" - -import logging - -import torch - -import isaacsim.core.utils.nucleus as nucleus_utils -import isaacsim.core.utils.prims as prim_utils -import isaacsim.core.utils.stage as stage_utils -import omni.kit.commands -import omni.physx -from isaacsim.core.api.world import World -from isaacsim.core.prims import Articulation -from isaacsim.core.utils.viewports import set_camera_view -from pxr import PhysxSchema, UsdPhysics - -# import logger -logger = logging.getLogger(__name__) - - -# check nucleus connection -if nucleus_utils.get_assets_root_path() is None: - msg = ( - "Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n" - "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus" - ) - logger.error(msg) - raise RuntimeError(msg) - - -ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac" -"""Path to the `Isaac` directory on the NVIDIA Nucleus Server.""" - -ISAACLAB_NUCLEUS_DIR = f"{ISAAC_NUCLEUS_DIR}/IsaacLab" -"""Path to the `Isaac/IsaacLab` directory on the NVIDIA Nucleus Server.""" +"""Historic floating-base / fixed-base reproducer (retired). +This script previously depended on Isaac Sim core extensions that Isaac Sim now carries under +``source/deprecated`` in the Isaac Sim repository. It has been retired from Isaac Lab to avoid +depending on those extension IDs and import paths. +Use :class:`~isaaclab.sim.SimulationContext` and ``isaacsim.core.experimental.prims.Articulation`` +for articulation-level debugging. """ -Main -""" - - -def main(): - """Spawns the ANYmal robot and makes it fixed.""" - # Load kit helper - world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu") - # Set main camera - set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) - - # Enable hydra scene-graph instancing - # this is needed to visualize the scene when flatcache is enabled - world._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - - # Spawn things into stage - # Ground-plane - world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0) - # Lights-1 - prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0)) - # Lights-2 - prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0)) - # -- Robot - # resolve asset - usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd" - root_prim_path = "/World/Robot/base" - # add asset - print("Loading robot from: ", usd_path) - prim_utils.create_prim( - "/World/Robot", - usd_path=usd_path, - translation=(0.0, 0.0, 0.6), - ) - # create fixed joint - if args_cli.fix_base: - # get all necessary information - stage = stage_utils.get_current_stage() - root_prim = stage.GetPrimAtPath(root_prim_path) - parent_prim = root_prim.GetParent() - - # here we assume that the root prim is a rigid body - # there is no clear way to deal with situation where the root prim is not a rigid body but has articulation api - # in that case, it is unclear how to get the link to the first link in the tree - if not root_prim.HasAPI(UsdPhysics.RigidBodyAPI): - raise RuntimeError("The root prim does not have the RigidBodyAPI applied.") - - # create fixed joint - omni.kit.commands.execute( - "CreateJointCommand", - stage=stage, - joint_type="Fixed", - from_prim=None, - to_prim=root_prim, - ) - - # move the root to the parent if this is a rigid body - # having a fixed joint on a rigid body makes physx treat it as a part of the maximal coordinate tree - # if we put to joint on the parent, physx parser treats it as a fixed base articulation - # get parent prim - parent_prim = root_prim.GetParent() - # apply api to parent - UsdPhysics.ArticulationRootAPI.Apply(parent_prim) - PhysxSchema.PhysxArticulationAPI.Apply(parent_prim) - - # copy the attributes - # -- usd attributes - root_usd_articulation_api = UsdPhysics.ArticulationRootAPI(root_prim) - for attr_name in root_usd_articulation_api.GetSchemaAttributeNames(): - attr = root_prim.GetAttribute(attr_name) - parent_prim.GetAttribute(attr_name).Set(attr.Get()) - # -- physx attributes - root_physx_articulation_api = PhysxSchema.PhysxArticulationAPI(root_prim) - for attr_name in root_physx_articulation_api.GetSchemaAttributeNames(): - attr = root_prim.GetAttribute(attr_name) - parent_prim.GetAttribute(attr_name).Set(attr.Get()) - # remove api from root - root_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) - root_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI) - - # rename root path to parent path - root_prim_path = parent_prim.GetPath().pathString - - # Setup robot - robot_view = Articulation(root_prim_path, name="ANYMAL") - world.scene.add(robot_view) - # Play the simulator - world.reset() - - # Now we are ready! - print("[INFO]: Setup complete...") - - # dummy actions - # actions = torch.zeros(robot.count, robot.num_actions, device=robot.device) - - init_root_pos_w, init_root_quat_w = robot_view.get_world_poses() - # Define simulation stepping - sim_dt = world.get_physics_dt() - # episode counter - sim_time = 0.0 - count = 0 - # Simulate physics - while simulation_app.is_running(): - # If simulation is stopped, then exit. - if world.is_stopped(): - break - # If simulation is paused, then skip. - if not world.is_playing(): - world.step(render=False) - continue - # do reset - if count % 20 == 0: - # reset - sim_time = 0.0 - count = 0 - # reset root state - root_pos_w = init_root_pos_w.clone() - root_pos_w[:, :2] += torch.rand_like(root_pos_w[:, :2]) * 0.5 - robot_view.set_world_poses(root_pos_w, init_root_quat_w) - # print if it is fixed base - print("Fixed base: ", robot_view._physics_view.shared_metatype.fixed_base) - print("Moving base to: ", root_pos_w[0].cpu().numpy()) - print("-" * 50) - - # apply random joint actions - actions = torch.rand_like(robot_view.get_joint_positions()) * 0.001 - robot_view.set_joint_efforts(actions) - # perform step - world.step() - # update sim-time - sim_time += sim_dt - count += 1 - - -if __name__ == "__main__": - # run the main function - main() - # close sim app - simulation_app.close() +raise RuntimeError( + "This standalone reproducer was retired; it depended on deprecated Isaac Sim core extensions. " + "Use Isaac Lab :mod:`isaaclab.sim` and ``isaacsim.core.experimental.*`` instead." +) diff --git a/source/isaaclab/test/deps/isaacsim/check_legged_robot_clone.py b/source/isaaclab/test/deps/isaacsim/check_legged_robot_clone.py index 57c016d7522d..54b13a2f0c1c 100644 --- a/source/isaaclab/test/deps/isaacsim/check_legged_robot_clone.py +++ b/source/isaaclab/test/deps/isaacsim/check_legged_robot_clone.py @@ -3,183 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -""" -This script demonstrates how to use the cloner API from Isaac Sim. - -Reference: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_gym_cloner.html -""" - -"""Launch Isaac Sim Simulator first.""" - - -import argparse -import contextlib - -with contextlib.suppress(ModuleNotFoundError): - import isaacsim # noqa: F401 - -from isaacsim import SimulationApp - -# add argparse arguments -parser = argparse.ArgumentParser( - description="This script shows the issue in Isaac Sim with GPU simulation of floating robots." -) -parser.add_argument("--num_robots", type=int, default=128, help="Number of robots to spawn.") -parser.add_argument( - "--asset", - type=str, - default="isaaclab", - help="The asset source location for the robot. Can be: isaaclab, oige, custom asset path.", -) -parser.add_argument("--headless", action="store_true", help="Run in headless mode.") -# parse the arguments -args_cli = parser.parse_args() - -# launch omniverse app -simulation_app = SimulationApp({"headless": args_cli.headless}) - -"""Rest everything follows.""" - -import logging -import os - -import torch - -import isaacsim.core.utils.nucleus as nucleus_utils -import isaacsim.core.utils.prims as prim_utils -from isaacsim.core.api.world import World -from isaacsim.core.cloner import GridCloner -from isaacsim.core.prims import Articulation -from isaacsim.core.utils.viewports import set_camera_view - -# import logger -logger = logging.getLogger(__name__) - -# check nucleus connection -if nucleus_utils.get_assets_root_path() is None: - msg = ( - "Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n" - "\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus" - ) - logger.error(msg) - raise RuntimeError(msg) - - -ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac" -"""Path to the `Isaac` directory on the NVIDIA Nucleus Server.""" - -ISAACLAB_NUCLEUS_DIR = f"{ISAAC_NUCLEUS_DIR}/IsaacLab" -"""Path to the `Isaac/IsaacLab` directory on the NVIDIA Nucleus Server.""" +"""Historic legged-robot cloning reproducer (retired). +This script previously depended on Isaac Sim core extensions that Isaac Sim now carries under +``source/deprecated`` in the Isaac Sim repository. It has been retired from Isaac Lab to avoid +depending on those extension IDs and import paths. +Use :class:`~isaaclab.sim.SimulationContext`, :class:`isaacsim.core.cloner.GridCloner`, and +``isaacsim.core.experimental.prims.Articulation`` for cloning workflows. """ -Main -""" - - -def main(): - """Spawns the ANYmal robot and clones it using Isaac Sim Cloner API.""" - - # Load kit helper - world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0") - # Set main camera - set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) - - # Enable hydra scene-graph instancing - # this is needed to visualize the scene when flatcache is enabled - world._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") - # Everything under the namespace "/World/envs/env_0" will be cloned - prim_utils.define_prim("/World/envs/env_0") - # Spawn things into stage - # Ground-plane - world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0) - # Lights-1 - prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0)) - # Lights-2 - prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0)) - # -- Robot - # resolve asset - if args_cli.asset == "isaaclab": - usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd" - root_prim_path = "/World/envs/env_.*/Robot/base" - elif args_cli.asset == "oige": - usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_c/anymal_c.usd" - root_prim_path = "/World/envs/env_.*/Robot" - elif os.path.exists(args_cli.asset): - usd_path = args_cli.asset - else: - raise ValueError(f"Invalid asset: {args_cli.asset}. Must be one of: isaaclab, oige.") - # add asset - print("Loading robot from: ", usd_path) - prim_utils.create_prim( - "/World/envs/env_0/Robot", - usd_path=usd_path, - translation=(0.0, 0.0, 0.6), - ) - - # Clone the scene - num_envs = args_cli.num_robots - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) - envs_positions = cloner.clone( - source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True - ) - # convert environment positions to torch tensor - envs_positions = torch.tensor(envs_positions, dtype=torch.float, device=world.device) - # filter collisions within each environment instance - physics_scene_path = world.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"] - ) - - # Resolve robot prim paths - if args_cli.asset == "isaaclab": - root_prim_path = "/World/envs/env_.*/Robot/base" - elif args_cli.asset == "oige": - root_prim_path = "/World/envs/env_.*/Robot" - elif os.path.exists(args_cli.asset): - usd_path = args_cli.asset - root_prim_path = "/World/envs/env_.*/Robot" - else: - raise ValueError(f"Invalid asset: {args_cli.asset}. Must be one of: isaaclab, oige.") - # Setup robot - robot_view = Articulation(root_prim_path, name="ANYMAL") - world.scene.add(robot_view) - # Play the simulator - world.reset() - - # Now we are ready! - print("[INFO]: Setup complete...") - - # dummy actions - # actions = torch.zeros(robot.count, robot.num_actions, device=robot.device) - - # Define simulation stepping - sim_dt = world.get_physics_dt() - # episode counter - sim_time = 0.0 - # Simulate physics - while simulation_app.is_running(): - # If simulation is stopped, then exit. - if world.is_stopped(): - break - # If simulation is paused, then skip. - if not world.is_playing(): - world.step(render=False) - continue - # perform step - world.step() - # update sim-time - sim_time += sim_dt - - -if __name__ == "__main__": - # run the main function - main() - # close sim app - simulation_app.close() +raise RuntimeError( + "This standalone reproducer was retired; it depended on deprecated Isaac Sim core extensions. " + "Use Isaac Lab :mod:`isaaclab.sim` and ``isaacsim.core.experimental.*`` instead." +) diff --git a/source/isaaclab/test/deps/isaacsim/check_ref_count.py b/source/isaaclab/test/deps/isaacsim/check_ref_count.py index 7927b8cb01a1..02465f722f3d 100644 --- a/source/isaaclab/test/deps/isaacsim/check_ref_count.py +++ b/source/isaaclab/test/deps/isaacsim/check_ref_count.py @@ -20,7 +20,6 @@ """Launch Isaac Sim Simulator first.""" - import contextlib with contextlib.suppress(ModuleNotFoundError): @@ -39,10 +38,12 @@ import torch # noqa: F401 -import isaacsim.core.utils.nucleus as nucleus_utils -import isaacsim.core.utils.prims as prim_utils -from isaacsim.core.api.simulation_context import SimulationContext -from isaacsim.core.prims import Articulation +import carb +from isaacsim.core.experimental.prims import Articulation + +import isaaclab.sim.utils.nucleus as nucleus_utils +import isaaclab.sim.utils.prims as prim_utils +from isaaclab.sim import SimulationCfg, SimulationContext # import logger logger = logging.getLogger(__name__) @@ -84,17 +85,13 @@ def __init__(self): # Resolve robot prim paths root_prim_path = "/World/Robot/base" # Setup robot - self.view = Articulation(root_prim_path, name="ANYMAL") + self.view = Articulation(root_prim_path) def __del__(self): """Delete the Anymal articulation class.""" print("Deleting the Anymal view.") self.view = None - def initialize(self): - """Initialize the Anymal view.""" - self.view.initialize() - """ Main @@ -104,12 +101,10 @@ def initialize(self): def main(): """Spawns the ANYmal robot and clones it using Isaac Sim Cloner API.""" - # Load kit helper - sim = SimulationContext(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cuda:0") + carb.settings.get_settings().set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - # Enable hydra scene-graph instancing - # this is needed to visualize the scene when flatcache is enabled - sim._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) + # Load kit helper + sim = SimulationContext(cfg=SimulationCfg(dt=0.005, device="cuda:0")) # Create a dummy tensor for testing # Uncommenting the following line will yield a reference count of 1 for the robot (as desired) @@ -129,8 +124,6 @@ def main(): print("Referrers of the robot view: ", gc.get_referrers(robot)) print("---" * 10) - robot.initialize() - print("Reference count of the robot view: ", ctypes.c_long.from_address(id(robot)).value) print("Referrers of the robot view: ", gc.get_referrers(robot)) print("---" * 10) @@ -143,7 +136,7 @@ def main(): print("---" * 10) # Clean up - sim.clear() + SimulationContext.clear_instance() print("Reference count of the robot view: ", ctypes.c_long.from_address(id(robot)).value) print("Referrers of the robot view: ", gc.get_referrers(robot)) diff --git a/source/isaaclab/test/deps/isaacsim/check_rep_texture_randomizer.py b/source/isaaclab/test/deps/isaacsim/check_rep_texture_randomizer.py index 170bf1b9683c..dd3875994e38 100644 --- a/source/isaaclab/test/deps/isaacsim/check_rep_texture_randomizer.py +++ b/source/isaaclab/test/deps/isaacsim/check_rep_texture_randomizer.py @@ -3,162 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -""" -This script shows how to use replicator to randomly change the textures of a USD scene. - -Note: - Currently this script fails since cloner does not support changing textures of cloned - USD prims. This is because the prims are cloned using `Sdf.ChangeBlock` which does not - allow individual texture changes. - -Usage: - -.. code-block:: bash +"""Historic replicator texture randomization reproducer (retired). - ./isaaclab.sh -p source/isaaclab/test/deps/isaacsim/check_rep_texture_randomizer.py +This script previously depended on Isaac Sim core extensions that Isaac Sim now carries under +``source/deprecated`` in the Isaac Sim repository. It has been retired from Isaac Lab to avoid +depending on those extension IDs and import paths. +Use :class:`~isaaclab.sim.SimulationContext`, :class:`isaacsim.core.cloner.GridCloner`, and +``isaacsim.core.experimental.*`` when experimenting with Omniverse Replicator workflows. """ -"""Launch Isaac Sim Simulator first.""" - -import argparse - -# isaaclab -from isaaclab.app import AppLauncher - -# add argparse arguments -parser = argparse.ArgumentParser( - description="This script shows how to use replicator to randomly change the textures of a USD scene." +raise RuntimeError( + "This standalone reproducer was retired; it depended on deprecated Isaac Sim core extensions. " + "Use Isaac Lab :mod:`isaaclab.sim` and ``isaacsim.core.experimental.*`` instead." ) -# append AppLauncher cli args -AppLauncher.add_app_launcher_args(parser) -# parse the arguments -args_cli = parser.parse_args() - -# launch omniverse app -app_launcher = AppLauncher(args_cli) -simulation_app = app_launcher.app - - -"""Rest everything follows.""" - -import numpy as np -import torch - -import isaacsim.core.utils.prims as prim_utils -import omni.replicator.core as rep -from isaacsim.core.api.objects import DynamicSphere -from isaacsim.core.api.simulation_context import SimulationContext -from isaacsim.core.cloner import GridCloner -from isaacsim.core.prims import RigidPrim -from isaacsim.core.utils.viewports import set_camera_view - - -def main(): - """Spawn a bunch of balls and randomly change their textures.""" - - # Load kit helper - sim_params = { - "use_gpu": True, - "use_gpu_pipeline": True, - "use_flatcache": True, # deprecated from Isaac Sim 2023.1 onwards - "use_fabric": True, # used from Isaac Sim 2023.1 onwards - "enable_scene_query_support": True, - } - sim = SimulationContext( - physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0, sim_params=sim_params, backend="torch", device="cuda:0" - ) - # Set main camera - set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5]) - - # Parameters - num_balls = 128 - - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") - # Everything under the namespace "/World/envs/env_0" will be cloned - prim_utils.define_prim("/World/envs/env_0") - - # Define the scene - # -- Ball - DynamicSphere(prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25) - - # Clone the scene - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls) - env_positions = cloner.clone( - source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True, copy_from_source=True - ) - physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] - ) - - # Use replicator to randomize color on the spheres - with rep.new_layer(): - # Define a function to get all the shapes - def get_shapes(): - shapes = rep.get.prims(path_pattern="/World/envs/env_.*/ball") - with shapes: - rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1))) - return shapes.node - - # Register the function - rep.randomizer.register(get_shapes) - # Specify the frequency of randomization - with rep.trigger.on_frame(): - rep.randomizer.get_shapes() - - # Set ball positions over terrain origins - # Create a view over all the balls - ball_view = RigidPrim("/World/envs/env_.*/ball", reset_xform_properties=False) - # cache initial state of the balls - ball_initial_positions = torch.tensor(env_positions, dtype=torch.float, device=sim.device) - ball_initial_positions[:, 2] += 5.0 - # set initial poses - # note: setting here writes to USD :) - ball_view.set_world_poses(positions=ball_initial_positions) - - # Play simulator - sim.reset() - # Step replicator to randomize colors - rep.orchestrator.step(pause_timeline=False) - # Stop replicator to prevent further randomization - rep.orchestrator.stop() - # Pause simulator at the beginning for inspection - sim.pause() - - # Initialize the ball views for physics simulation - ball_view.initialize() - ball_initial_velocities = ball_view.get_velocities() - - # Create a counter for resetting the scene - step_count = 0 - # Simulate physics - while simulation_app.is_running(): - # If simulation is stopped, then exit. - if sim.is_stopped(): - break - # If simulation is paused, then skip. - if not sim.is_playing(): - sim.step() - continue - # Reset the scene - if step_count % 500 == 0: - # reset the balls - ball_view.set_world_poses(positions=ball_initial_positions) - ball_view.set_velocities(ball_initial_velocities) - # reset the counter - step_count = 0 - # Step simulation - sim.step() - # Update counter - step_count += 1 - - -if __name__ == "__main__": - # run the main function - main() - # close sim app - simulation_app.close() diff --git a/source/isaaclab/test/devices/check_keyboard.py b/source/isaaclab/test/devices/check_keyboard.py index c1a8b07bef82..439aeb92795a 100644 --- a/source/isaaclab/test/devices/check_keyboard.py +++ b/source/isaaclab/test/devices/check_keyboard.py @@ -21,7 +21,7 @@ """Rest everything follows.""" -import ctypes +import sys from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg from isaaclab.sim import SimulationCfg, SimulationContext @@ -51,9 +51,9 @@ def main(): print("Press 'L' to print a message. Press 'ESC' to quit.") - # Check that boundedness of articulation is correct - if ctypes.c_long.from_address(id(teleop_interface)).value != 1: - raise RuntimeError("Teleoperation interface is not bounded to a single instance.") + # Check that the framework doesn't hold excessive strong references. + if sys.getrefcount(teleop_interface) >= 10: + raise RuntimeError("Possible reference leak for teleoperation interface.") # Reset interface internals teleop_interface.reset() diff --git a/source/isaaclab/test/devices/test_device_constructors.py b/source/isaaclab/test/devices/test_device_constructors.py index fb1bded4d61a..c05652a2411c 100644 --- a/source/isaaclab/test/devices/test_device_constructors.py +++ b/source/isaaclab/test/devices/test_device_constructors.py @@ -14,18 +14,14 @@ import importlib import json -from typing import cast import pytest import torch # Import device classes to test from isaaclab.devices import ( - DeviceCfg, HaplyDevice, HaplyDeviceCfg, - OpenXRDevice, - OpenXRDeviceCfg, Se2Gamepad, Se2GamepadCfg, Se2Keyboard, @@ -39,11 +35,6 @@ Se3SpaceMouse, Se3SpaceMouseCfg, ) -from isaaclab.devices.openxr import XrCfg -from isaaclab.devices.openxr.retargeters import GripperRetargeterCfg, Se3AbsRetargeterCfg - -# Import teleop device factory for testing -from isaaclab.devices.teleop_device_factory import create_teleop_device @pytest.fixture @@ -71,24 +62,10 @@ def mock_environment(mocker): carb_mock.input.KeyboardEventType.KEY_PRESS = 1 carb_mock.input.KeyboardEventType.KEY_RELEASE = 2 - # Mock carb events used by OpenXRDevice - events_mock = mocker.MagicMock() - events_mock.type_from_string.return_value = 0 - carb_mock.events = events_mock - # Mock the SpaceMouse hid_mock.enumerate.return_value = [{"product_string": "SpaceMouse Compact", "vendor_id": 123, "product_id": 456}] hid_mock.device.return_value = device_mock - # Mock OpenXR - # xr_core_mock = mocker.MagicMock() - message_bus_mock = mocker.MagicMock() - singleton_mock = mocker.MagicMock() - omni_mock.kit.xr.core.XRCore.get_singleton.return_value = singleton_mock - singleton_mock.get_message_bus.return_value = message_bus_mock - omni_mock.kit.xr.core.XRPoseValidityFlags.POSITION_VALID = 1 - omni_mock.kit.xr.core.XRPoseValidityFlags.ORIENTATION_VALID = 2 - # Mock Haply WebSocket websockets_mock = mocker.MagicMock() websocket_mock = mocker.MagicMock() @@ -278,67 +255,6 @@ def test_se3spacemouse_constructors(mock_environment, mocker): assert result.shape == (7,) # (pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, gripper) -""" -Test OpenXR devices. -""" - - -def test_openxr_constructors(mock_environment, mocker): - """Test constructor for OpenXRDevice.""" - # Test config-based constructor with custom XrCfg - xr_cfg = XrCfg( - anchor_pos=(1.0, 2.0, 3.0), - anchor_rot=(0.0, 0.1, 0.2, 0.3), - near_plane=0.2, - ) - config = OpenXRDeviceCfg(xr_cfg=xr_cfg) - - # Create mock retargeters - mock_controller_retargeter = mocker.MagicMock() - mock_head_retargeter = mocker.MagicMock() - retargeters = [mock_controller_retargeter, mock_head_retargeter] - - device_mod = importlib.import_module("isaaclab.devices.openxr.openxr_device") - mocker.patch.dict( - "sys.modules", - { - "carb": mock_environment["carb"], - "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, - "isaacsim.core.prims": mocker.MagicMock(), - }, - ) - mocker.patch.object(device_mod, "carb", mock_environment["carb"]) - mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) - mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) - mock_single_xform = mocker.patch.object(device_mod, "SingleXFormPrim") - - # Configure the mock to return a string for prim_path - mock_instance = mock_single_xform.return_value - mock_instance.prim_path = "/XRAnchor" - - # Create the device using the factory - device = OpenXRDevice(config) - - # Verify the device was created successfully - assert device._xr_cfg == xr_cfg - - # Test with retargeters - device = OpenXRDevice(cfg=config, retargeters=retargeters) - - # Verify retargeters were correctly assigned as a list - assert device._retargeters == retargeters - - # Test with config and retargeters - device = OpenXRDevice(cfg=config, retargeters=retargeters) - - # Verify both config and retargeters were correctly assigned - assert device._xr_cfg == xr_cfg - assert device._retargeters == retargeters - - # Test reset functionality - device.reset() - - """ Test Haply devices. """ @@ -424,7 +340,7 @@ def test_haply_constructors(mock_environment, mocker): haply.running = True haply.cached_data = { "position": torch.tensor([0.1, 0.2, 0.3], dtype=torch.float32).numpy(), - "quaternion": torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32).numpy(), + "quaternion": torch.tensor([0.0, 0.0, 1.0, 0.0], dtype=torch.float32).numpy(), "buttons": {"a": False, "b": False, "c": False}, "inverse3_connected": True, "versegrip_connected": True, @@ -476,138 +392,3 @@ def test_haply_constructors(mock_environment, mocker): # Test reset functionality haply.reset() assert haply.feedback_force == {"x": 0.0, "y": 0.0, "z": 0.0} - - -""" -Test teleop device factory. -""" - - -def test_create_teleop_device_basic(mock_environment, mocker): - """Test creating devices using the teleop device factory.""" - # Create device configuration - keyboard_cfg = Se3KeyboardCfg(pos_sensitivity=0.8, rot_sensitivity=1.2) - - # Create devices configuration dictionary - devices_cfg: dict[str, DeviceCfg] = {"test_keyboard": keyboard_cfg} - - # Mock Se3Keyboard class - device_mod = importlib.import_module("isaaclab.devices.keyboard.se3_keyboard") - mocker.patch.dict("sys.modules", {"carb": mock_environment["carb"], "omni": mock_environment["omni"]}) - mocker.patch.object(device_mod, "carb", mock_environment["carb"]) - mocker.patch.object(device_mod, "omni", mock_environment["omni"]) - - # Create the device using the factory - device = create_teleop_device("test_keyboard", devices_cfg) - - # Verify the device was created correctly - assert isinstance(device, Se3Keyboard) - assert device.pos_sensitivity == 0.8 - assert device.rot_sensitivity == 1.2 - - -def test_create_teleop_device_with_callbacks(mock_environment, mocker): - """Test creating device with callbacks.""" - # Create device configuration - xr_cfg = XrCfg(anchor_pos=(0.0, 0.0, 0.0), anchor_rot=(1.0, 0.0, 0.0, 0.0), near_plane=0.15) - openxr_cfg = OpenXRDeviceCfg(xr_cfg=xr_cfg) - - # Create devices configuration dictionary - devices_cfg: dict[str, DeviceCfg] = {"test_xr": openxr_cfg} - - # Create mock callbacks - button_a_callback = mocker.MagicMock() - button_b_callback = mocker.MagicMock() - callbacks = {"button_a": button_a_callback, "button_b": button_b_callback} - - # Mock OpenXRDevice class and dependencies - device_mod = importlib.import_module("isaaclab.devices.openxr.openxr_device") - mocker.patch.dict( - "sys.modules", - { - "carb": mock_environment["carb"], - "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, - "isaacsim.core.prims": mocker.MagicMock(), - }, - ) - mocker.patch.object(device_mod, "carb", mock_environment["carb"]) - mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) - mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) - mock_single_xform = mocker.patch.object(device_mod, "SingleXFormPrim") - - # Configure the mock to return a string for prim_path - mock_instance = mock_single_xform.return_value - mock_instance.prim_path = "/XRAnchor" - - # Create the device using the factory - device = create_teleop_device("test_xr", devices_cfg, callbacks) - - # Verify the device was created correctly - assert isinstance(device, OpenXRDevice) - - # Verify callbacks were registered by the factory - assert set(device._additional_callbacks.keys()) == {"button_a", "button_b"} - - -def test_create_teleop_device_with_retargeters(mock_environment, mocker): - """Test creating device with retargeters.""" - # Create retargeter configurations - retargeter_cfg1 = Se3AbsRetargeterCfg() - retargeter_cfg2 = GripperRetargeterCfg() - - # Create device configuration with retargeters - xr_cfg = XrCfg() - device_cfg = OpenXRDeviceCfg(xr_cfg=xr_cfg, retargeters=[retargeter_cfg1, retargeter_cfg2]) - - # Create devices configuration dictionary - devices_cfg: dict[str, DeviceCfg] = {"test_xr": device_cfg} - - # Mock OpenXRDevice class and dependencies - device_mod = importlib.import_module("isaaclab.devices.openxr.openxr_device") - mocker.patch.dict( - "sys.modules", - { - "carb": mock_environment["carb"], - "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, - "isaacsim.core.prims": mocker.MagicMock(), - }, - ) - mocker.patch.object(device_mod, "carb", mock_environment["carb"]) - mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) - mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) - mock_single_xform = mocker.patch.object(device_mod, "SingleXFormPrim") - - # Configure the mock to return a string for prim_path - mock_instance = mock_single_xform.return_value - mock_instance.prim_path = "/XRAnchor" - - # Create the device using the factory - device = create_teleop_device("test_xr", devices_cfg) - - # Verify retargeters were created - assert len(device._retargeters) == 2 - - -def test_create_teleop_device_device_not_found(): - """Test error when device name is not found in configuration.""" - # Create devices configuration dictionary - devices_cfg: dict[str, DeviceCfg] = {"keyboard": Se3KeyboardCfg()} - - # Try to create a non-existent device - with pytest.raises(ValueError, match="Device 'gamepad' not found"): - create_teleop_device("gamepad", devices_cfg) - - -def test_create_teleop_device_unsupported_config(): - """Test error when device configuration type is not supported.""" - - # Create a custom unsupported configuration class - class UnsupportedCfg: - pass - - # Create devices configuration dictionary with unsupported config - devices_cfg: dict[str, DeviceCfg] = cast(dict[str, DeviceCfg], {"unsupported": UnsupportedCfg()}) - - # Try to create a device with unsupported configuration - with pytest.raises(ValueError, match="does not declare class_type"): - create_teleop_device("unsupported", devices_cfg) diff --git a/source/isaaclab/test/envs/check_manager_based_env_anymal_locomotion.py b/source/isaaclab/test/envs/check_manager_based_env_anymal_locomotion.py index c6169c94d197..07dc3d73bd14 100644 --- a/source/isaaclab/test/envs/check_manager_based_env_anymal_locomotion.py +++ b/source/isaaclab/test/envs/check_manager_based_env_anymal_locomotion.py @@ -48,7 +48,7 @@ from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, NVIDIA_NUCLEUS_DIR, check_file_path, read_file -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise ## # Pre-defined configs diff --git a/source/isaaclab/test/envs/check_manager_based_env_floating_cube.py b/source/isaaclab/test/envs/check_manager_based_env_floating_cube.py index fb7622ae67cd..4762cc855cc5 100644 --- a/source/isaaclab/test/envs/check_manager_based_env_floating_cube.py +++ b/source/isaaclab/test/envs/check_manager_based_env_floating_cube.py @@ -128,8 +128,8 @@ def process_actions(self, actions: torch.Tensor): def apply_actions(self): # implement a PD controller to track the target position - pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins) - vel_error = -self._asset.data.root_lin_vel_w + pos_error = self._processed_actions - (self._asset.data.root_pos_w.torch - self._env.scene.env_origins) + vel_error = -self._asset.data.root_lin_vel_w.torch # set velocity targets self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error self._asset.write_root_velocity_to_sim(self._vel_command) @@ -151,7 +151,7 @@ def base_position(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tens """Root linear velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_pos_w - env.scene.env_origins + return asset.data.root_pos_w.torch - env.scene.env_origins ## @@ -253,7 +253,7 @@ def main(): # step env obs, _ = env.step(target_position) # print mean squared position error between target and current position - error = torch.norm(obs["policy"] - target_position).mean().item() + error = torch.linalg.norm(obs["policy"] - target_position).mean().item() print(f"[Step: {count:04d}]: Mean position error: {error:.4f}") # update counter count += 1 diff --git a/source/isaaclab/test/envs/test_action_state_recorder_term.py b/source/isaaclab/test/envs/test_action_state_recorder_term.py index 16ae866dfce2..9e3a27115aa7 100644 --- a/source/isaaclab/test/envs/test_action_state_recorder_term.py +++ b/source/isaaclab/test/envs/test_action_state_recorder_term.py @@ -20,9 +20,8 @@ import pytest import torch -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg import isaaclab_tasks # noqa: F401 @@ -31,9 +30,8 @@ @pytest.fixture(scope="session", autouse=True) def setup_carb_settings(): - """Set up carb settings to prevent simulation getting stuck.""" - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + """Set up settings to prevent simulation getting stuck.""" + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) @pytest.fixture @@ -90,7 +88,7 @@ def check_initial_state_recorder_term(env): @pytest.mark.parametrize("num_envs", [1, 2]) def test_action_state_recorder_terms(task_name, device, num_envs, temp_dir): """Check action state recorder terms.""" - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() dummy_dataset_filename = f"{uuid.uuid4()}.hdf5" diff --git a/source/isaaclab/test/envs/test_color_randomization.py b/source/isaaclab/test/envs/test_color_randomization.py index 619c7b3368fc..a7edda7036c1 100644 --- a/source/isaaclab/test/envs/test_color_randomization.py +++ b/source/isaaclab/test/envs/test_color_randomization.py @@ -22,9 +22,8 @@ import pytest import torch -import omni.usd - import isaaclab.envs.mdp as mdp +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup @@ -143,7 +142,7 @@ def test_color_randomization(device): pytest.skip("Color randomization test hangs in this version of Isaac Sim") # Create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: # Set the arguments @@ -170,4 +169,4 @@ def test_color_randomization(device): env.close() finally: # Clean up stage - omni.usd.get_context().close_stage() + sim_utils.close_stage() diff --git a/source/isaaclab/test/envs/test_direct_marl_env.py b/source/isaaclab/test/envs/test_direct_marl_env.py index d7ebd04610b4..981ebd72203a 100644 --- a/source/isaaclab/test/envs/test_direct_marl_env.py +++ b/source/isaaclab/test/envs/test_direct_marl_env.py @@ -19,8 +19,7 @@ import pytest -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import DirectMARLEnv, DirectMARLEnvCfg from isaaclab.scene import InteractiveSceneCfg from isaaclab.utils import configclass @@ -57,7 +56,7 @@ class EmptyEnvCfg(DirectMARLEnvCfg): def test_initialization(device): """Test initialization of DirectMARLEnv.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: # create environment env = DirectMARLEnv(cfg=get_empty_base_env_cfg(device=device)) diff --git a/source/isaaclab/test/envs/test_env_rendering_logic.py b/source/isaaclab/test/envs/test_env_rendering_logic.py index 70f0a01f212a..59299e2e93ee 100644 --- a/source/isaaclab/test/envs/test_env_rendering_logic.py +++ b/source/isaaclab/test/envs/test_env_rendering_logic.py @@ -7,7 +7,7 @@ from isaaclab.app import AppLauncher -# launch omniverse app +# launch Kit app # need to set "enable_cameras" true to be able to do rendering tests simulation_app = AppLauncher(headless=True, enable_cameras=True).app @@ -15,9 +15,10 @@ import pytest import torch +from isaaclab_physx.physics import IsaacEvents +from isaaclab_visualizers.kit import KitVisualizer, KitVisualizerCfg -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import ( DirectRLEnv, DirectRLEnvCfg, @@ -30,6 +31,8 @@ from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.utils import configclass +pytestmark = pytest.mark.isaacsim_ci + @configclass class EmptyManagerCfg: @@ -47,7 +50,9 @@ class EnvCfg(ManagerBasedEnvCfg): decimation: int = 4 episode_length_s: float = 100.0 - sim: SimulationCfg = SimulationCfg(dt=0.005, render_interval=render_interval) + sim: SimulationCfg = SimulationCfg( + dt=0.005, render_interval=render_interval, visualizer_cfgs=KitVisualizerCfg() + ) scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=1, env_spacing=1.0) actions: EmptyManagerCfg = EmptyManagerCfg() observations: EmptyManagerCfg = EmptyManagerCfg() @@ -64,7 +69,9 @@ class EnvCfg(ManagerBasedRLEnvCfg): decimation: int = 4 episode_length_s: float = 100.0 - sim: SimulationCfg = SimulationCfg(dt=0.005, render_interval=render_interval) + sim: SimulationCfg = SimulationCfg( + dt=0.005, render_interval=render_interval, visualizer_cfgs=KitVisualizerCfg() + ) scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=1, env_spacing=1.0) actions: EmptyManagerCfg = EmptyManagerCfg() observations: EmptyManagerCfg = EmptyManagerCfg() @@ -74,18 +81,29 @@ class EnvCfg(ManagerBasedRLEnvCfg): return ManagerBasedRLEnv(cfg=EnvCfg()) -def create_direct_rl_env(render_interval: int): - """Create a direct RL environment.""" +def create_direct_rl_env(render_interval: int, episode_length_steps: int | None = None): + """Create a direct RL environment. + + Args: + render_interval: Render interval in physics steps. + episode_length_steps: If provided, the episode terminates (via time-out) after this + many *env* steps. Useful for testing post-reset re-render paths. + """ + # Compute episode_length_s from step count when requested + _dt = 0.005 + _decimation = 4 + _step_dt = _dt * _decimation + _episode_length_s = (episode_length_steps * _step_dt) if episode_length_steps is not None else 100.0 @configclass class EnvCfg(DirectRLEnvCfg): """Configuration for the test environment.""" - decimation: int = 4 + decimation: int = _decimation action_space: int = 0 observation_space: int = 0 - episode_length_s: float = 100.0 - sim: SimulationCfg = SimulationCfg(dt=0.005, render_interval=render_interval) + episode_length_s: float = _episode_length_s + sim: SimulationCfg = SimulationCfg(dt=_dt, render_interval=render_interval, visualizer_cfgs=KitVisualizerCfg()) scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=1, env_spacing=1.0) class Env(DirectRLEnv): @@ -104,7 +122,8 @@ def _get_rewards(self): return {} def _get_dones(self): - return torch.zeros(1, dtype=torch.bool), torch.zeros(1, dtype=torch.bool) + time_out = self.episode_length_buf >= self.max_episode_length + return torch.zeros_like(time_out), time_out return Env(cfg=EnvCfg()) @@ -129,24 +148,30 @@ def render_callback(): render_time = 0.0 num_render_steps = 0 - def callback(event): + def callback(dt): nonlocal render_time, num_render_steps - render_time += event.payload["dt"] + render_time += dt num_render_steps += 1 return callback, lambda: (render_time, num_render_steps) @pytest.mark.parametrize("env_type", ["manager_based_env", "manager_based_rl_env", "direct_rl_env"]) -@pytest.mark.parametrize("render_interval", [1, 2, 4, 8, 10]) +@pytest.mark.parametrize("render_interval", [1, 4, 10]) def test_env_rendering_logic(env_type, render_interval, physics_callback, render_callback): """Test the rendering logic of the different environment workflows.""" physics_cb, get_physics_stats = physics_callback render_cb, get_render_stats = render_callback - # create a new stage - omni.usd.get_context().new_stage() + env = None + physics_handle = None + original_step = None + viz = None + try: + # create a new stage + sim_utils.create_new_stage() + # create environment if env_type == "manager_based_env": env = create_manager_based_env(render_interval) @@ -154,55 +179,307 @@ def test_env_rendering_logic(env_type, render_interval, physics_callback, render env = create_manager_based_rl_env(render_interval) else: env = create_direct_rl_env(render_interval) - except Exception as e: - if "env" in locals() and hasattr(env, "_is_closed"): + + # enable the flag to render the environment + # note: this is only done for the unit testing to "fake" camera rendering. + # normally this is set to True when cameras are created. + env.sim.set_setting("/isaaclab/render/rtx_sensors", True) + + # disable the app from shutting down when the environment is closed + # FIXME: Why is this needed in this test but not in the other tests? + # Without it, the test will exit after the environment is closed + env.sim._app_control_on_stop_handle = None # type: ignore + + # Reset to initialize visualizers (they're created lazily in reset()) + env.reset() + + # Ensure the default Kit visualizer is active for rendering callbacks. + assert isinstance(env.sim.visualizers[0], KitVisualizer) + + # add physics callback via physics manager (IsaacEvents is PhysX-specific) + physics_handle = env.sim.physics_manager.register_callback( + physics_cb, IsaacEvents.POST_PHYSICS_STEP, name="physics_step" + ) + + # Wrap visualizer step to track render calls + viz = env.sim.visualizers[0] + original_step = viz.step + render_dt = env.cfg.sim.dt * env.cfg.sim.render_interval + + def wrapped_step(dt): + original_step(dt) + render_cb(render_dt) + + viz.step = wrapped_step + + # create a zero action tensor for stepping the environment + actions = torch.zeros((env.num_envs, 0), device=env.device) + + # run the environment and check the rendering logic + for i in range(10): + # apply zero actions + env.step(action=actions) + + # check that we have completed the correct number of physics steps + _, num_physics_steps = get_physics_stats() + assert num_physics_steps == (i + 1) * env.cfg.decimation, "Physics steps mismatch" + # check that we have simulated physics for the correct amount of time + physics_time, _ = get_physics_stats() + assert abs(physics_time - num_physics_steps * env.cfg.sim.dt) < 1e-6, "Physics time mismatch" + + # check that we have completed the correct number of rendering steps + _, num_render_steps = get_render_stats() + assert num_render_steps == (i + 1) * env.cfg.decimation // env.cfg.sim.render_interval, ( + "Render steps mismatch" + ) + # check that we have rendered for the correct amount of time + render_time, _ = get_render_stats() + assert abs(render_time - num_render_steps * env.cfg.sim.dt * env.cfg.sim.render_interval) < 1e-6, ( + "Render time mismatch" + ) + + finally: + # Restore original step method + if viz is not None and original_step is not None: + viz.step = original_step + # Deregister physics callback + if physics_handle is not None: + physics_handle.deregister() + # Close environment (this also clears SimulationContext) + if env is not None: + env.close() + else: + # If env creation failed, still clear the singleton + SimulationContext.clear_instance() + + +@pytest.mark.parametrize("env_type", ["manager_based_env", "manager_based_rl_env", "direct_rl_env"]) +def test_env_render_false_skips_rendering(env_type, physics_callback, render_callback): + """Test that setting render_enabled=False skips all rendering while physics continues.""" + physics_cb, get_physics_stats = physics_callback + render_cb, get_render_stats = render_callback + + env = None + physics_handle = None + original_step = None + viz = None + + try: + # create a new stage + sim_utils.create_new_stage() + + # create environment with render_interval=1 so rendering would happen every physics step + if env_type == "manager_based_env": + env = create_manager_based_env(render_interval=1) + elif env_type == "manager_based_rl_env": + env = create_manager_based_rl_env(render_interval=1) + else: + env = create_direct_rl_env(render_interval=1) + + # enable the flag to render the environment + env.sim.set_setting("/isaaclab/render/rtx_sensors", True) + + # disable the app from shutting down when the environment is closed + env.sim._app_control_on_stop_handle = None # type: ignore + + # Reset to initialize visualizers + env.reset() + + # Ensure the default Kit visualizer is active for rendering callbacks. + assert isinstance(env.sim.visualizers[0], KitVisualizer) + + # add physics callback + physics_handle = env.sim.physics_manager.register_callback( + physics_cb, IsaacEvents.POST_PHYSICS_STEP, name="physics_step" + ) + + # Wrap visualizer step to track render calls + viz = env.sim.visualizers[0] + original_step = viz.step + render_dt = env.cfg.sim.dt * env.cfg.sim.render_interval + + def wrapped_step(dt): + original_step(dt) + render_cb(render_dt) + + viz.step = wrapped_step + + # create a zero action tensor for stepping the environment + actions = torch.zeros((env.num_envs, 0), device=env.device) + + # Step with render_enabled=False for several steps + env.render_enabled = False + for i in range(10): + env.step(action=actions) + + # Physics should still advance normally + _, num_physics_steps = get_physics_stats() + assert num_physics_steps == (i + 1) * env.cfg.decimation, "Physics steps mismatch with render_enabled=False" + + # No rendering should have occurred + _, num_render_steps = get_render_stats() + assert num_render_steps == 0, f"Expected 0 render steps with render_enabled=False, got {num_render_steps}" + + finally: + if viz is not None and original_step is not None: + viz.step = original_step + if physics_handle is not None: + physics_handle.deregister() + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +@pytest.mark.parametrize("env_type", ["manager_based_env", "manager_based_rl_env", "direct_rl_env"]) +def test_env_render_flag_mixed_steps(env_type, physics_callback, render_callback): + """Test that render_enabled can be toggled between steps and rendering counts are correct.""" + physics_cb, get_physics_stats = physics_callback + render_cb, get_render_stats = render_callback + + env = None + physics_handle = None + original_step = None + viz = None + + try: + # create a new stage + sim_utils.create_new_stage() + + # create environment with render_interval=1 so every decimation step renders + if env_type == "manager_based_env": + env = create_manager_based_env(render_interval=1) + elif env_type == "manager_based_rl_env": + env = create_manager_based_rl_env(render_interval=1) + else: + env = create_direct_rl_env(render_interval=1) + + # enable the flag to render the environment + env.sim.set_setting("/isaaclab/render/rtx_sensors", True) + + # disable the app from shutting down when the environment is closed + env.sim._app_control_on_stop_handle = None # type: ignore + + # Reset to initialize visualizers + env.reset() + + # Ensure the default Kit visualizer is active for rendering callbacks. + assert isinstance(env.sim.visualizers[0], KitVisualizer) + + # add physics callback + physics_handle = env.sim.physics_manager.register_callback( + physics_cb, IsaacEvents.POST_PHYSICS_STEP, name="physics_step" + ) + + # Wrap visualizer step to track render calls + viz = env.sim.visualizers[0] + original_step = viz.step + render_dt = env.cfg.sim.dt * env.cfg.sim.render_interval + + def wrapped_step(dt): + original_step(dt) + render_cb(render_dt) + + viz.step = wrapped_step + + # create a zero action tensor for stepping the environment + actions = torch.zeros((env.num_envs, 0), device=env.device) + + expected_render_steps = 0 + + # Step 5 times with render_enabled=True, then 5 with render_enabled=False + for i in range(10): + should_render = i < 5 + env.render_enabled = should_render + env.step(action=actions) + + # Physics always advances + _, num_physics_steps = get_physics_stats() + assert num_physics_steps == (i + 1) * env.cfg.decimation, "Physics steps mismatch in mixed test" + + # Rendering only happens in the first 5 steps + if should_render: + expected_render_steps += env.cfg.decimation # render_interval=1, so renders every decimation step + + _, num_render_steps = get_render_stats() + assert num_render_steps == expected_render_steps, ( + f"Render steps mismatch at step {i}: expected {expected_render_steps}, got {num_render_steps}" + ) + + finally: + if viz is not None and original_step is not None: + viz.step = original_step + if physics_handle is not None: + physics_handle.deregister() + if env is not None: env.close() else: - if hasattr(e, "obj") and hasattr(e.obj, "_is_closed"): - e.obj.close() - pytest.fail(f"Failed to set-up the environment {env_type}. Error: {e}") - - # enable the flag to render the environment - # note: this is only done for the unit testing to "fake" camera rendering. - # normally this is set to True when cameras are created. - env.sim.set_setting("/isaaclab/render/rtx_sensors", True) - - # disable the app from shutting down when the environment is closed - # FIXME: Why is this needed in this test but not in the other tests? - # Without it, the test will exit after the environment is closed - env.sim._app_control_on_stop_handle = None # type: ignore - - # check that we are in partial rendering mode for the environment - # this is enabled due to app launcher setting "enable_cameras=True" - assert env.sim.render_mode == SimulationContext.RenderMode.PARTIAL_RENDERING - - # add physics and render callbacks - env.sim.add_physics_callback("physics_step", physics_cb) - env.sim.add_render_callback("render_step", render_cb) - - # create a zero action tensor for stepping the environment - actions = torch.zeros((env.num_envs, 0), device=env.device) - - # run the environment and check the rendering logic - for i in range(50): - # apply zero actions - env.step(action=actions) - - # check that we have completed the correct number of physics steps - _, num_physics_steps = get_physics_stats() - assert num_physics_steps == (i + 1) * env.cfg.decimation, "Physics steps mismatch" - # check that we have simulated physics for the correct amount of time - physics_time, _ = get_physics_stats() - assert abs(physics_time - num_physics_steps * env.cfg.sim.dt) < 1e-6, "Physics time mismatch" - - # check that we have completed the correct number of rendering steps - _, num_render_steps = get_render_stats() - assert num_render_steps == (i + 1) * env.cfg.decimation // env.cfg.sim.render_interval, "Render steps mismatch" - # check that we have rendered for the correct amount of time - render_time, _ = get_render_stats() - assert abs(render_time - num_render_steps * env.cfg.sim.dt * env.cfg.sim.render_interval) < 1e-6, ( - "Render time mismatch" + SimulationContext.clear_instance() + + +def test_env_render_false_with_resets(physics_callback, render_callback): + """Test that render_enabled=False skips post-reset re-renders during short episodes. + + Uses a direct RL env with a 3-step episode so that resets occur during the + test loop, exercising the post-reset re-render gate. + """ + physics_cb, get_physics_stats = physics_callback + render_cb, get_render_stats = render_callback + + env = None + physics_handle = None + original_step = None + viz = None + + try: + sim_utils.create_new_stage() + + # 3-step episode: resets will occur at steps 3, 6, 9, ... + env = create_direct_rl_env(render_interval=1, episode_length_steps=3) + + env.sim.set_setting("/isaaclab/render/rtx_sensors", True) + env.sim._app_control_on_stop_handle = None # type: ignore + + env.reset() + assert isinstance(env.sim.visualizers[0], KitVisualizer) + + physics_handle = env.sim.physics_manager.register_callback( + physics_cb, IsaacEvents.POST_PHYSICS_STEP, name="physics_step" ) - # close the environment - env.close() + viz = env.sim.visualizers[0] + original_step = viz.step + render_dt = env.cfg.sim.dt * env.cfg.sim.render_interval + + def wrapped_step(dt): + original_step(dt) + render_cb(render_dt) + + viz.step = wrapped_step + + actions = torch.zeros((env.num_envs, 0), device=env.device) + + # Disable rendering and run past several episode boundaries + env.render_enabled = False + for i in range(10): + env.step(action=actions) + + # Physics always advances + _, num_physics_steps = get_physics_stats() + assert num_physics_steps == (i + 1) * env.cfg.decimation, f"Physics steps mismatch at step {i} with resets" + + # No Kit rendering should occur — including post-reset re-renders + _, num_render_steps = get_render_stats() + assert num_render_steps == 0, ( + f"Expected 0 render steps with render_enabled=False and resets, got {num_render_steps} at step {i}" + ) + + finally: + if viz is not None and original_step is not None: + viz.step = original_step + if physics_handle is not None: + physics_handle.deregister() + if env is not None: + env.close() + else: + SimulationContext.clear_instance() diff --git a/source/isaaclab/test/envs/test_manager_based_env.py b/source/isaaclab/test/envs/test_manager_based_env.py index 7ec9ef2d43f8..153129a39263 100644 --- a/source/isaaclab/test/envs/test_manager_based_env.py +++ b/source/isaaclab/test/envs/test_manager_based_env.py @@ -20,8 +20,7 @@ import pytest import torch -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm @@ -115,7 +114,7 @@ def __post_init__(self): def test_initialization(device): """Test initialization of ManagerBasedEnv.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # create environment env = ManagerBasedEnv(cfg=get_empty_base_env_cfg(device=device)) # check size of action manager terms @@ -143,7 +142,7 @@ def test_observation_history_changes_only_after_step(device): The history buffer should only change after a step is taken. """ # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # create environment with history length of 5 env = ManagerBasedEnv(cfg=get_empty_base_env_cfg_with_history(device=device)) diff --git a/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py b/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py index 72525ddb8e03..8eaeb88f0de5 100644 --- a/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py +++ b/source/isaaclab/test/envs/test_manager_based_rl_env_obs_spaces.py @@ -15,8 +15,7 @@ import pytest import torch -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedRLEnv from isaaclab_tasks.manager_based.classic.cartpole.cartpole_camera_env_cfg import ( @@ -38,7 +37,7 @@ def test_non_concatenated_obs_groups_contain_all_terms(device): ) # new USD stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # configure the stack env - it has multiple non-concatenated observation groups env_cfg = FrankaCubeStackEnvCfg() @@ -116,12 +115,14 @@ def test_non_concatenated_obs_groups_contain_all_terms(device): ) @pytest.mark.parametrize("device", ["cpu", "cuda"]) def test_obs_space_follows_clip_contraint(env_cfg_cls, device): - """Ensure curriculum terms apply correctly after the fallback and replacement.""" + """Ensure observation space bounds reflect the clip constraint on each term.""" # new USD stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() + + # configure the env -- resolve Hydra presets so _Preset fields become plain values + from isaaclab_tasks.utils.hydra import resolve_presets - # configure the cartpole env - env_cfg = env_cfg_cls() + env_cfg = resolve_presets(env_cfg_cls()) env_cfg.scene.num_envs = 2 # keep num_envs small for testing env_cfg.observations.policy.concatenate_terms = False env_cfg.sim.device = device diff --git a/source/isaaclab/test/envs/test_manager_based_rl_env_ui.py b/source/isaaclab/test/envs/test_manager_based_rl_env_ui.py index f35c11a1c401..2a6157120848 100644 --- a/source/isaaclab/test/envs/test_manager_based_rl_env_ui.py +++ b/source/isaaclab/test/envs/test_manager_based_rl_env_ui.py @@ -16,10 +16,9 @@ """Rest everything follows.""" -import carb -import omni.usd -from isaacsim.core.utils.extensions import enable_extension +from isaacsim.core.experimental.utils.app import enable_extension +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.envs.ui import ManagerBasedRLEnvWindow from isaaclab.scene import InteractiveSceneCfg @@ -78,9 +77,11 @@ def test_ui_window(): """Test UI window of ManagerBasedRLEnv.""" device = "cuda:0" # override sim setting to enable UI - carb.settings.get_settings().set_bool("/app/window/enabled", True) + from isaaclab.app.settings_manager import get_settings_manager + + get_settings_manager().set_bool("/app/window/enabled", True) # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # create environment env = ManagerBasedRLEnv(cfg=get_empty_base_env_cfg(device=device)) # close the environment diff --git a/source/isaaclab/test/envs/test_modify_env_param_curr_term.py b/source/isaaclab/test/envs/test_modify_env_param_curr_term.py index a23a29f38606..683b6eae02c7 100644 --- a/source/isaaclab/test/envs/test_modify_env_param_curr_term.py +++ b/source/isaaclab/test/envs/test_modify_env_param_curr_term.py @@ -13,9 +13,8 @@ import pytest import torch -import omni.usd - import isaaclab.envs.mdp as mdp +import isaaclab.sim as sim_utils from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers import CurriculumTermCfg as CurrTerm @@ -68,7 +67,7 @@ class CurriculumsCfg: def test_curriculum_modify_env_param(device): """Ensure curriculum terms apply correctly after the fallback and replacement.""" # new USD stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # configure the cartpole env env_cfg = CartpoleEnvCfg() @@ -89,14 +88,14 @@ def test_curriculum_modify_env_param(device): # test before curriculum kicks in, value agrees with default configuration joint_ids = env.event_manager.cfg.reset_cart_position.params["asset_cfg"].joint_ids assert env.observation_manager.cfg.policy.joint_pos_rel.func == mdp.joint_pos_rel - assert torch.any(robot.data.joint_pos[:, joint_ids] != 0.0) + assert torch.any(robot.data.joint_pos.torch[:, joint_ids] != 0.0) assert env.max_episode_length_s == env_cfg.episode_length_s if count == 2: # test after curriculum makes effect, value agrees with new values assert env.observation_manager.cfg.policy.joint_pos_rel.func == mdp.joint_pos joint_ids = env.event_manager.cfg.reset_cart_position.params["asset_cfg"].joint_ids - assert torch.all(robot.data.joint_pos[:, joint_ids] == 0.0) + assert torch.all(robot.data.joint_pos.torch[:, joint_ids] == 0.0) assert env.max_episode_length_s == 20 env.step(actions) diff --git a/source/isaaclab/test/envs/test_scale_randomization.py b/source/isaaclab/test/envs/test_scale_randomization.py index 282c6b2a3d85..af5dc220e63f 100644 --- a/source/isaaclab/test/envs/test_scale_randomization.py +++ b/source/isaaclab/test/envs/test_scale_randomization.py @@ -23,7 +23,6 @@ import pytest import torch -import omni.usd from pxr import Sdf import isaaclab.envs.mdp as mdp @@ -102,8 +101,8 @@ def process_actions(self, actions: torch.Tensor): def apply_actions(self): # implement a PD controller to track the target position - pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins) - vel_error = -self._asset.data.root_lin_vel_w + pos_error = self._processed_actions - (self._asset.data.root_pos_w.torch - self._env.scene.env_origins) + vel_error = -self._asset.data.root_lin_vel_w.torch # set velocity targets self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error self._asset.write_root_velocity_to_sim(self._vel_command) @@ -131,7 +130,7 @@ def base_position(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tens """Root linear velocity in the asset's root frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return asset.data.root_pos_w - env.scene.env_origins + return asset.data.root_pos_w.torch - env.scene.env_origins ## @@ -262,6 +261,7 @@ class CubeEnvCfg(ManagerBasedEnvCfg): """Configuration for the locomotion velocity-tracking environment.""" # Scene settings + # Note: replicate_physics=False is required for prestartup events (scale randomization) scene: MySceneCfg = MySceneCfg(num_envs=10, env_spacing=2.5, replicate_physics=False) # Basic settings observations: ObservationsCfg = ObservationsCfg() @@ -282,7 +282,7 @@ def __post_init__(self): def test_scale_randomization(device): """Test scale randomization for cube environment.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # set the device env_cfg = CubeEnvCfg() @@ -305,7 +305,7 @@ def test_scale_randomization(device): prim_paths = sim_utils.find_matching_prim_paths("/World/envs/env_.*/cube1") # get the stage - stage = omni.usd.get_context().get_stage() + stage = sim_utils.get_current_stage() # check if the scale values are truly random for i in range(3): @@ -339,7 +339,7 @@ def test_scale_randomization(device): def test_scale_randomization_failure_replicate_physics(): """Test scale randomization failure when replicate physics is set to True.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # set the arguments cfg_failure = CubeEnvCfg() cfg_failure.scene.replicate_physics = True diff --git a/source/isaaclab/test/envs/test_texture_randomization.py b/source/isaaclab/test/envs/test_texture_randomization.py index e2cbe7d54486..b5fe287c48ce 100644 --- a/source/isaaclab/test/envs/test_texture_randomization.py +++ b/source/isaaclab/test/envs/test_texture_randomization.py @@ -22,9 +22,8 @@ import pytest import torch -import omni.usd - import isaaclab.envs.mdp as mdp +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup @@ -185,7 +184,7 @@ def __post_init__(self): def test_texture_randomization(device): """Test texture randomization for cartpole environment.""" # Create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: # Set the arguments @@ -212,13 +211,13 @@ def test_texture_randomization(device): env.close() finally: # Clean up stage - omni.usd.get_context().close_stage() + sim_utils.close_stage() def test_texture_randomization_failure_replicate_physics(): """Test texture randomization failure when replicate physics is set to True.""" # Create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: # Set the arguments @@ -232,4 +231,4 @@ def test_texture_randomization_failure_replicate_physics(): env.close() finally: # Clean up stage - omni.usd.get_context().close_stage() + sim_utils.close_stage() diff --git a/source/isaaclab/test/envs/test_video_recorder.py b/source/isaaclab/test/envs/test_video_recorder.py new file mode 100644 index 000000000000..91af56632d6b --- /dev/null +++ b/source/isaaclab/test/envs/test_video_recorder.py @@ -0,0 +1,433 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +"""Unit tests for VideoRecorder.""" + +import math +import sys +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import numpy as np +import pytest + +from isaaclab.envs.utils import video_recorder as _video_recorder_module +from isaaclab.envs.utils.video_recorder import VideoRecorder, _resolve_video_backend, _sync_camera_from_visualizer + +pytestmark = pytest.mark.isaacsim_ci +_BLANK_720p = np.zeros((720, 1280, 3), dtype=np.uint8) +_DEFAULT_CFG = dict( + env_render_mode="rgb_array", + eye=(7.5, 7.5, 7.5), + lookat=(0.0, 0.0, 0.0), + backend_source="visualizer", + window_width=1280, + window_height=720, +) + + +def _create_recorder(**kw): + """Return a VideoRecorder with ``__init__`` bypassed and all deps mocked out.""" + backend = kw.pop("_backend", None) + matched_visualizer = kw.pop("_matched_visualizer", None) + live_visualizer = kw.pop("_live_visualizer", None) + recorder = object.__new__(VideoRecorder) + recorder.cfg = SimpleNamespace(**{**_DEFAULT_CFG, **kw}) + recorder._scene = MagicMock() + recorder._scene.sensors = {} + recorder._scene._sensor_renderer_types = MagicMock(return_value=[]) + recorder._scene.sim.visualizers = [] + recorder._backend = backend + recorder._matched_visualizer = matched_visualizer + recorder._live_visualizer = live_visualizer + cap = MagicMock() + cap.render_rgb_array = MagicMock(return_value=_BLANK_720p) + recorder._capture = cap if backend else None + return recorder + + +def test_init_perspective_mode_creates_kit_capture(): + """With kit backend, __init__ builds Isaac Sim Kit perspective capture.""" + scene = MagicMock() + scene.sensors = {} + scene.num_envs = 1 + cfg = SimpleNamespace(**_DEFAULT_CFG) + fake_capture = MagicMock() + kit_mod = MagicMock() + kit_mod.create_isaacsim_kit_perspective_video = MagicMock(return_value=fake_capture) + with patch.object(_video_recorder_module, "_resolve_video_backend", return_value=("kit", None)): + with patch.object(_video_recorder_module, "_sync_camera_from_visualizer"): + with patch.dict( + sys.modules, + { + "isaaclab_physx.video_recording": MagicMock(), + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video": kit_mod, + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video_cfg": MagicMock(), + }, + ): + vr = VideoRecorder(cfg, scene) + kit_mod.create_isaacsim_kit_perspective_video.assert_called_once() + assert vr._capture is fake_capture + assert vr._matched_visualizer is None + + +def test_init_newton_backend_creates_newton_capture(): + """With newton_gl backend, __init__ builds Newton GL perspective capture.""" + scene = MagicMock() + cfg = SimpleNamespace(**_DEFAULT_CFG) + fake_capture = MagicMock() + newton_mod = MagicMock() + newton_mod.create_newton_gl_perspective_video = MagicMock(return_value=fake_capture) + with patch.object(_video_recorder_module, "_resolve_video_backend", return_value=("newton_gl", "newton")): + with patch.object(_video_recorder_module, "_sync_camera_from_visualizer"): + with patch.dict( + sys.modules, + { + "pyglet": MagicMock(), + "isaaclab_newton.video_recording": MagicMock(), + "isaaclab_newton.video_recording.newton_gl_perspective_video": newton_mod, + "isaaclab_newton.video_recording.newton_gl_perspective_video_cfg": MagicMock(), + }, + ): + vr = VideoRecorder(cfg, scene) + newton_mod.create_newton_gl_perspective_video.assert_called_once() + assert vr._capture is fake_capture + assert vr._matched_visualizer == "newton" + + +def test_init_kit_from_visualizer_syncs_camera(): + """When backend comes from a visualizer, _sync_camera_from_visualizer is called.""" + scene = MagicMock() + cfg = SimpleNamespace(**_DEFAULT_CFG) + with patch.object(_video_recorder_module, "_resolve_video_backend", return_value=("kit", "kit")): + with patch.object(_video_recorder_module, "_sync_camera_from_visualizer") as mock_sync: + with patch.dict( + sys.modules, + { + "isaaclab_physx.video_recording": MagicMock(), + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video": MagicMock(), + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video_cfg": MagicMock(), + }, + ): + VideoRecorder(cfg, scene) + mock_sync.assert_called_once_with(scene, "kit", cfg) + + +def test_init_no_visualizer_skips_camera_sync(): + """When backend comes from physics/renderer stack, camera sync is skipped.""" + scene = MagicMock() + cfg = SimpleNamespace(**_DEFAULT_CFG) + with patch.object(_video_recorder_module, "_resolve_video_backend", return_value=("kit", None)): + with patch.object(_video_recorder_module, "_sync_camera_from_visualizer") as mock_sync: + with patch.dict( + sys.modules, + { + "isaaclab_physx.video_recording": MagicMock(), + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video": MagicMock(), + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video_cfg": MagicMock(), + }, + ): + VideoRecorder(cfg, scene) + mock_sync.assert_not_called() + + +def _make_scene(visualizer_types, physics_name="PhysxPhysicsManager", renderer_types=None): + scene = MagicMock() + scene.sim.resolve_visualizer_types.return_value = visualizer_types + scene.sim.physics_manager.__name__ = physics_name + scene._sensor_renderer_types.return_value = renderer_types or [] + return scene + + +def test_resolve_backend_prefers_kit_visualizer(): + """When 'kit' visualizer is active, backend is 'kit' with matched type 'kit'.""" + scene = _make_scene(["kit"]) + backend, matched = _resolve_video_backend(scene) + assert backend == "kit" + assert matched == "kit" + + +def test_resolve_backend_prefers_newton_visualizer(): + """When 'newton' visualizer is active, backend is 'newton_gl' with matched type 'newton'.""" + scene = _make_scene(["newton"], physics_name="NewtonPhysicsManager") + backend, matched = _resolve_video_backend(scene) + assert backend == "newton_gl" + assert matched == "newton" + + +def test_resolve_backend_renderer_source_ignores_visualizer(): + """When backend_source is 'renderer', active visualizers do not drive backend selection.""" + scene = _make_scene(["newton"], physics_name="PhysxPhysicsManager") + backend, matched = _resolve_video_backend(scene, backend_source="renderer") + assert backend == "kit" + assert matched is None + + +def test_resolve_backend_kit_wins_over_newton_visualizer(): + """When both kit and newton visualizers are active, kit takes priority.""" + scene = _make_scene(["newton", "kit"]) + backend, matched = _resolve_video_backend(scene) + assert backend == "kit" + assert matched == "kit" + + +def test_resolve_backend_unsupported_visualizer_falls_through(): + """viser/rerun visualizers fall through to physics stack detection.""" + scene = _make_scene(["viser"], physics_name="PhysxPhysicsManager") + backend, matched = _resolve_video_backend(scene) + assert backend == "kit" + assert matched is None + + +def test_resolve_backend_fallback_physx_returns_none_matched(): + """Physics/renderer fallback returns None as matched visualizer.""" + scene = _make_scene([], physics_name="PhysxPhysicsManager") + backend, matched = _resolve_video_backend(scene) + assert backend == "kit" + assert matched is None + + +def test_resolve_backend_fallback_newton_physics_returns_none_matched(): + """Newton physics fallback returns None as matched visualizer.""" + scene = _make_scene([], physics_name="NewtonPhysicsManager") + backend, matched = _resolve_video_backend(scene) + assert backend == "newton_gl" + assert matched is None + + +def test_resolve_backend_raises_when_no_supported_backend(): + """RuntimeError when no supported backend can be detected.""" + scene = _make_scene([], physics_name="UnknownManager") + with pytest.raises(RuntimeError, match="No supported backend detected"): + _resolve_video_backend(scene) + + +def test_resolve_backend_raises_for_invalid_backend_source(): + """Only 'visualizer' and 'renderer' are valid backend source modes.""" + scene = _make_scene([]) + with pytest.raises(ValueError, match="backend_source"): + _resolve_video_backend(scene, backend_source="invalid") + + +def _make_visualizer_cfg(visualizer_type, eye=None, lookat=None): + return SimpleNamespace(visualizer_type=visualizer_type, eye=eye, lookat=lookat) + + +def test_sync_camera_overwrites_cfg_from_visualizer(): + """Visualizer cfg eye/lookat are written into VideoRecorderCfg.""" + scene = MagicMock() + scene.sim._resolve_visualizer_cfgs.return_value = [ + _make_visualizer_cfg("newton", eye=(1.0, 2.0, 3.0), lookat=(4.0, 5.0, 6.0)), + ] + cfg = SimpleNamespace(**_DEFAULT_CFG) + _sync_camera_from_visualizer(scene, "newton", cfg) + assert cfg.eye == (1.0, 2.0, 3.0) + assert cfg.lookat == (4.0, 5.0, 6.0) + + +def test_sync_camera_skips_wrong_visualizer_type(): + """Only the matching visualizer type updates the cfg.""" + scene = MagicMock() + scene.sim._resolve_visualizer_cfgs.return_value = [ + _make_visualizer_cfg("kit", eye=(9.0, 9.0, 9.0), lookat=(1.0, 1.0, 1.0)), + ] + cfg = SimpleNamespace(**_DEFAULT_CFG) + original_eye = cfg.eye + _sync_camera_from_visualizer(scene, "newton", cfg) + assert cfg.eye == original_eye # unchanged + + +def test_sync_camera_handles_missing_camera_fields(): + """If visualizer cfg has no camera fields, existing cfg values are kept.""" + scene = MagicMock() + vcfg = _make_visualizer_cfg("newton", eye=None, lookat=None) + scene.sim._resolve_visualizer_cfgs.return_value = [vcfg] + cfg = SimpleNamespace(**_DEFAULT_CFG) + original_eye = cfg.eye + _sync_camera_from_visualizer(scene, "newton", cfg) + assert cfg.eye == original_eye + + +def test_sync_camera_handles_resolve_exception(): + """If _resolve_visualizer_cfgs raises, no exception propagates and cfg is unchanged.""" + scene = MagicMock() + scene.sim._resolve_visualizer_cfgs.side_effect = RuntimeError("boom") + cfg = SimpleNamespace(**_DEFAULT_CFG) + original_eye = cfg.eye + _sync_camera_from_visualizer(scene, "newton", cfg) + assert cfg.eye == original_eye + + +def test_render_rgb_array_delegates_to_capture(): + """render_rgb_array returns capture.render_rgb_array().""" + recorder = _create_recorder(_backend="kit") + result = recorder.render_rgb_array() + recorder._capture.render_rgb_array.assert_called_once() + assert result.shape == (720, 1280, 3) + + +def test_render_rgb_array_none_when_no_backend(): + """Without rgb_array env_render_mode, _capture is None and render returns None.""" + recorder = _create_recorder(env_render_mode=None) + recorder._backend = None + recorder._capture = None + assert recorder.render_rgb_array() is None + + +def test_capture_exception_propagates(): + """Failures in backend capture propagate.""" + recorder = _create_recorder(_backend="newton_gl") + recorder._capture.render_rgb_array.side_effect = RuntimeError("fail") + with pytest.raises(RuntimeError, match="fail"): + recorder.render_rgb_array() + + +def test_render_rgb_array_calls_capture_each_step(): + """Each render_rgb_array call hits the backend capture.""" + recorder = _create_recorder(_backend="kit") + for _ in range(3): + recorder.render_rgb_array() + assert recorder._capture.render_rgb_array.call_count == 3 + + +def test_render_rgb_array_calls_sync_newton_camera_when_newton_visualizer(): + """render_rgb_array triggers _sync_newton_camera when matched_visualizer is 'newton'.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + with patch.object(recorder, "_sync_newton_camera") as mock_sync: + recorder.render_rgb_array() + mock_sync.assert_called_once() + + +def test_render_rgb_array_skips_sync_for_kit_visualizer(): + """render_rgb_array does NOT call _sync_newton_camera for kit backend.""" + recorder = _create_recorder(_backend="kit", _matched_visualizer="kit") + with patch.object(recorder, "_sync_newton_camera") as mock_sync: + recorder.render_rgb_array() + mock_sync.assert_not_called() + + +def test_render_rgb_array_skips_sync_when_no_visualizer(): + """render_rgb_array does NOT call _sync_newton_camera when using physics/renderer stack.""" + recorder = _create_recorder(_backend="kit", _matched_visualizer=None) + with patch.object(recorder, "_sync_newton_camera") as mock_sync: + recorder.render_rgb_array() + mock_sync.assert_not_called() + + +def _make_newton_visualizer(pos=(1.0, 2.0, 3.0), yaw_deg=45.0, pitch_deg=30.0): + """Return a mock that quacks like a NewtonVisualizer with a live camera.""" + viz = MagicMock() + viz.cfg.visualizer_type = "newton" + cam = MagicMock() + cam.pos = pos + cam.yaw = yaw_deg + cam.pitch = pitch_deg + viz._viewer = MagicMock() + viz._viewer.camera = cam + return viz + + +def test_sync_newton_camera_lazy_lookup_finds_visualizer(): + """_sync_newton_camera resolves the Newton visualizer on the first call.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + newton_viz = _make_newton_visualizer() + recorder._scene.sim.visualizers = [newton_viz] + + recorder._sync_newton_camera() + + assert recorder._live_visualizer is newton_viz + recorder._capture.update_camera.assert_called_once() + + +def test_sync_newton_camera_uses_cached_visualizer(): + """_sync_newton_camera uses the cached _live_visualizer and skips the list walk.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + newton_viz = _make_newton_visualizer() + # pre-cache the visualizer + recorder._live_visualizer = newton_viz + + other_viz = _make_newton_visualizer(pos=(99.0, 99.0, 99.0)) + # replace sim.visualizers with a second Newton visualizer + # if the cache is bypassed the recorder would use this one instead. + recorder._scene.sim.visualizers = [other_viz] + recorder._sync_newton_camera() + position = recorder._capture.update_camera.call_args[0][0] + assert position != (99.0, 99.0, 99.0) + recorder._capture.update_camera.assert_called_once() + + +def test_sync_newton_camera_correct_position_forwarded(): + """_sync_newton_camera reads cam.pos and passes it as position to update_camera.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + newton_viz = _make_newton_visualizer(pos=(10.0, 20.0, 30.0), yaw_deg=0.0, pitch_deg=0.0) + recorder._live_visualizer = newton_viz + + recorder._sync_newton_camera() + + args = recorder._capture.update_camera.call_args + position = args[0][0] + assert position == (10.0, 20.0, 30.0) + + +def test_sync_newton_camera_target_derived_from_pitch_yaw(): + """Target is reconstructed from pitch/yaw and is unit-distance from position.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + pos = (0.0, 0.0, 0.0) + yaw_deg, pitch_deg = 0.0, 0.0 # looking along +X at horizon + newton_viz = _make_newton_visualizer(pos=pos, yaw_deg=yaw_deg, pitch_deg=pitch_deg) + recorder._live_visualizer = newton_viz + + recorder._sync_newton_camera() + + args = recorder._capture.update_camera.call_args[0] + position, target = args + dx = target[0] - position[0] + dy = target[1] - position[1] + dz = target[2] - position[2] + dist = math.sqrt(dx**2 + dy**2 + dz**2) + assert abs(dist - 1.0) < 1e-6 + assert abs(dx - 1.0) < 1e-6 + assert abs(dy) < 1e-6 + assert abs(dz) < 1e-6 + + +def test_sync_newton_camera_no_visualizer_does_not_raise(): + """_sync_newton_camera silently skips when no Newton visualizer is registered.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + recorder._scene.sim.visualizers = [] + recorder._sync_newton_camera() # must not raise + recorder._capture.update_camera.assert_not_called() + + +def test_sync_newton_camera_skips_non_newton_visualizers(): + """_sync_newton_camera ignores visualizers whose type is not 'newton'.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + kit_viz = MagicMock() + kit_viz.cfg.visualizer_type = "kit" + recorder._scene.sim.visualizers = [kit_viz] + recorder._sync_newton_camera() + recorder._capture.update_camera.assert_not_called() + + +def test_sync_newton_camera_skips_when_viewer_is_none(): + """_sync_newton_camera skips camera update when _viewer is None (headless fallback).""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + viz = MagicMock() + viz.cfg.visualizer_type = "newton" + viz._viewer = None + recorder._live_visualizer = viz + recorder._sync_newton_camera() + recorder._capture.update_camera.assert_not_called() + + +def test_sync_newton_camera_called_per_frame(): + """_sync_newton_camera (and thus update_camera) is called on every render step.""" + recorder = _create_recorder(_backend="newton_gl", _matched_visualizer="newton") + newton_viz = _make_newton_visualizer() + recorder._live_visualizer = newton_viz + + for _ in range(4): + recorder.render_rgb_array() + + assert recorder._capture.update_camera.call_count == 4 diff --git a/source/isaaclab/test/install_ci/conftest.py b/source/isaaclab/test/install_ci/conftest.py new file mode 100644 index 000000000000..c4bbc94ab200 --- /dev/null +++ b/source/isaaclab/test/install_ci/conftest.py @@ -0,0 +1,141 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared pytest fixtures and configuration for installation CI tests.""" + +from __future__ import annotations + +import os +import platform +import subprocess +import sys +from pathlib import Path + +import pytest +import utils as _utils +from utils import find_isaaclab_root, run_cmd # noqa: F401 – re-exported for tests + +_CYAN_BRIGHT = "\033[96m" +_RESET = "\033[0m" + +_EXECUTION_ENVIRONMENT_KEY = pytest.StashKey[_utils.ExecutionEnvironment]() + + +# Fixtures + + +@pytest.fixture(scope="session") +def isaaclab_root() -> Path: + """Resolved absolute path to the IsaacLab repository root.""" + return find_isaaclab_root() + + +@pytest.fixture +def tmp_venv(tmp_path: Path): + """Create a temporary Python virtual-environment and tear it down after the test. + + Yields a dict with: + ``path`` – Path to the venv directory + ``python`` – Path to the venv's python executable + ``pip`` – Path to the venv's pip executable + """ + venv_dir = tmp_path / "venv" + subprocess.check_call([sys.executable, "-m", "venv", str(venv_dir)]) + + if platform.system() == "Windows": + python_exe = venv_dir / "Scripts" / "python.exe" + pip_exe = venv_dir / "Scripts" / "pip.exe" + else: + python_exe = venv_dir / "bin" / "python" + pip_exe = venv_dir / "bin" / "pip" + + # Upgrade pip inside the venv to avoid old-pip issues + subprocess.check_call([str(pip_exe), "install", "--upgrade", "pip"], timeout=120) + + yield {"path": venv_dir, "python": python_exe, "pip": pip_exe} + + # Cleanup is handled by tmp_path (pytest removes it automatically) + + +@pytest.fixture(scope="session") +def wheel_path() -> Path | None: + """Path to a pre-built isaaclab wheel, or None. + + Set the ``ISAACLAB_WHEEL`` environment variable to the wheel file path + before running tests. + """ + value = os.environ.get("ISAACLAB_WHEEL") + if value: + p = Path(value).resolve() + if not p.exists(): + pytest.fail(f"ISAACLAB_WHEEL points to non-existent file: {p}") + return p + return None + + +# Markers + + +def pytest_configure(config: pytest.Config) -> None: + config.addinivalue_line("markers", "bug: bug-regression tests (use bug id as argument)") + config.addinivalue_line("markers", "gpu: tests that require a GPU") + config.addinivalue_line("markers", "docker: tests that only run inside Docker") + config.addinivalue_line("markers", "native: tests that only run natively (not in Docker)") + config.addinivalue_line("markers", "slow: tests that take a long time") + config.addinivalue_line("markers", "uv: tests that require the uv package manager") + + try: + config.stash[_EXECUTION_ENVIRONMENT_KEY] = _utils.detect_execution_environment() + except ValueError as exc: + raise pytest.UsageError(str(exc)) from exc + + # Enable real-time output when pytest capture is disabled (-s) + capture = config.getoption("capture", default="fd") + _utils.stream_output = capture == "no" + + +def pytest_report_header(config: pytest.Config) -> str: + """Show the detected install_ci execution environment in the test header.""" + return f"install_ci execution environment: {config.stash[_EXECUTION_ENVIRONMENT_KEY]}" + + +def pytest_runtest_logreport(report: pytest.TestReport) -> None: + """Print a newline after the PASSED/FAILED/SKIPPED result.""" + if report.when == "call" or (report.when == "setup" and report.skipped): + sys.stdout.write("\n") + + +@pytest.hookimpl(tryfirst=True) +def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None: + """Map dynamic bug markers and skip items with mismatched env markers. + + This allows filtering by bug ID natively in pytest: `-m "nvbugs_5968136"` + instead of the (unsupported natively) `-m "bug('nvbugs_5968136')"`. + """ + execution_environment = config.stash[_EXECUTION_ENVIRONMENT_KEY] + known_bugs = set() + for item in items: + for mark in item.iter_markers(name="bug"): + for arg in mark.args: + if isinstance(arg, str): + known_bugs.add(arg) + + for bug in known_bugs: + config.addinivalue_line("markers", f"{bug}: dynamically generated bug marker") + + for item in items: + for mark in item.iter_markers(name="bug"): + for arg in mark.args: + if isinstance(arg, str): + item.add_marker(arg) + + marker_names = {mark.name for mark in item.iter_markers()} + try: + skip_reason = _utils.get_execution_environment_skip_reason(marker_names, execution_environment) + except ValueError as exc: + raise pytest.UsageError(f"{item.nodeid}: {exc}") from exc + + if skip_reason: + item.add_marker(pytest.mark.skip(reason=skip_reason)) diff --git a/source/isaaclab/test/install_ci/pytest.ini b/source/isaaclab/test/install_ci/pytest.ini new file mode 100644 index 000000000000..67eac9b17ca7 --- /dev/null +++ b/source/isaaclab/test/install_ci/pytest.ini @@ -0,0 +1,15 @@ +[pytest] +testpaths = . +python_files = + test_*.py + *.test.py + *_test.py + *_tests.py +markers = + bug: bug-regression tests (use bug id as argument) + gpu: tests that require a GPU + docker: tests that only run inside Docker + native: tests that only run natively (not in Docker) + slow: tests that take a long time + uv: tests that require the uv package manager +timeout = 1200 diff --git a/source/isaaclab/test/install_ci/test_environment_markers.py b/source/isaaclab/test/install_ci/test_environment_markers.py new file mode 100644 index 000000000000..c002353b5c3e --- /dev/null +++ b/source/isaaclab/test/install_ci/test_environment_markers.py @@ -0,0 +1,77 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for install_ci execution-environment marker handling.""" + +from __future__ import annotations + +import pytest +from utils import ( + detect_execution_environment, + get_execution_environment_skip_reason, +) + + +class TestDetectExecutionEnvironment: + """Tests for detect_execution_environment().""" + + def test_uses_override(self, tmp_path): + environment = detect_execution_environment( + environ={"ISAACLAB_INSTALL_CI_ENV": "docker"}, + filesystem_root=tmp_path, + ) + + assert environment == "docker" + + def test_detects_marker_file(self, tmp_path): + (tmp_path / ".dockerenv").touch() + + environment = detect_execution_environment(environ={}, filesystem_root=tmp_path) + + assert environment == "docker" + + def test_detects_cgroup_hint(self, tmp_path): + cgroup_path = tmp_path / "proc" / "self" + cgroup_path.mkdir(parents=True) + (cgroup_path / "cgroup").write_text("0::/docker/container-id\n", encoding="utf-8") + + environment = detect_execution_environment(environ={}, filesystem_root=tmp_path) + + assert environment == "docker" + + def test_defaults_to_native(self, tmp_path): + environment = detect_execution_environment(environ={}, filesystem_root=tmp_path) + + assert environment == "native" + + def test_rejects_invalid_override(self, tmp_path): + with pytest.raises(ValueError, match="ISAACLAB_INSTALL_CI_ENV"): + detect_execution_environment( + environ={"ISAACLAB_INSTALL_CI_ENV": "virtual-machine"}, + filesystem_root=tmp_path, + ) + + +class TestGetExecutionEnvironmentSkipReason: + """Tests for get_execution_environment_skip_reason().""" + + @pytest.mark.parametrize( + ("marker_names", "execution_environment", "expected_reason"), + [ + ({"docker"}, "native", "requires Docker execution environment, detected native"), + ({"native"}, "docker", "requires native execution environment, detected docker"), + ({"docker"}, "docker", None), + ({"native"}, "native", None), + (set(), "native", None), + ], + ) + def test_skip_reason(self, marker_names, execution_environment, expected_reason): + skip_reason = get_execution_environment_skip_reason(marker_names, execution_environment) + + assert skip_reason == expected_reason + + def test_rejects_conflicting_markers(self): + with pytest.raises(ValueError, match="docker"): + get_execution_environment_skip_reason({"docker", "native"}, "native") diff --git a/source/isaaclab/test/install_ci/test_isaaclabx_i_newton.py b/source/isaaclab/test/install_ci/test_isaaclabx_i_newton.py new file mode 100644 index 000000000000..4f7740dfe6fd --- /dev/null +++ b/source/isaaclab/test/install_ci/test_isaaclabx_i_newton.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test installing isaaclab_newton and running its test suite.""" + +from __future__ import annotations + +import shutil + +import pytest +from utils import UV_Mixin, find_isaaclab_root + + +class Test_Install_Newton(UV_Mixin): + """Install ./isaaclab.sh -i newton and run the isaaclab_newton test suite.""" + + @classmethod + def setup_class(cls): + # check if uv is available + if not shutil.which("uv"): + pytest.skip("uv is not available") + + # check if isaacsim is importable + # or "_isaac_sim" link is present + try: + import isaacsim # noqa: F401 + except ImportError: + print("[DEBUG] Module isaacsim is not importable") + isaac_sim_link = find_isaaclab_root() / "_isaac_sim" + if not isaac_sim_link.exists(): + print(f'[DEBUG] Link "{isaac_sim_link}" does not exist') + pytest.skip("isaacsim is not importable and _isaac_sim link not found, skipping") + + @pytest.mark.uv + @pytest.mark.gpu + @pytest.mark.slow + @pytest.mark.native + @pytest.mark.timeout(3600) + def test_install_newton_and_run_tests(self, isaaclab_root): + """Install newton extension and run the isaaclab_newton test suite.""" + + try: + self.create_uv_env(isaaclab_root) + + # ./isaaclab.sh -i newton + result = self.run_in_uv_env([str(self.cli_script), "-i", "newton"], cwd=isaaclab_root) + assert result.returncode == 0, f"isaaclab -i newton failed:\n{result.stdout}\n{result.stderr}" + + # Run isaaclab_newton test suite + test_dir = str(isaaclab_root / "source" / "isaaclab_newton" / "test") + result = self.run_in_uv_env( + ["python", "-m", "pytest", test_dir, "-sv", "--tb=short"], + cwd=isaaclab_root, + ) + output = result.stdout + result.stderr + assert result.returncode == 0, f"isaaclab_newton tests failed (rc={result.returncode}):\n{output}" + + finally: + self.destroy_uv_env() diff --git a/source/isaaclab/test/install_ci/test_isaaclabx_i_physx.py b/source/isaaclab/test/install_ci/test_isaaclabx_i_physx.py new file mode 100644 index 000000000000..04bf2b346b23 --- /dev/null +++ b/source/isaaclab/test/install_ci/test_isaaclabx_i_physx.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test installing isaaclab_physx and running its test suite.""" + +from __future__ import annotations + +import shutil + +import pytest +from utils import UV_Mixin, find_isaaclab_root + + +class Test_Install_Physx(UV_Mixin): + """Install ./isaaclab.sh -i physx and run the isaaclab_physx test suite.""" + + @classmethod + def setup_class(cls): + # check if uv is available + if not shutil.which("uv"): + pytest.skip("uv is not available") + + # check if isaacsim is importable + # or "_isaac_sim" link is present + try: + import isaacsim # noqa: F401 + except ImportError: + print("[DEBUG] Module isaacsim is not importable") + isaac_sim_link = find_isaaclab_root() / "_isaac_sim" + if not isaac_sim_link.exists(): + print(f'[DEBUG] Link "{isaac_sim_link}" does not exist') + pytest.skip("isaacsim is not importable and _isaac_sim link not found, skipping") + + @pytest.mark.uv + @pytest.mark.gpu + @pytest.mark.slow + @pytest.mark.native + @pytest.mark.timeout(3600) + def test_install_physx_and_run_tests(self, isaaclab_root): + """Install physx extension and run the isaaclab_physx test suite.""" + + try: + self.create_uv_env(isaaclab_root) + + # ./isaaclab.sh -i physx + result = self.run_in_uv_env([str(self.cli_script), "-i", "physx"], cwd=isaaclab_root) + assert result.returncode == 0, f"isaaclab -i physx failed:\n{result.stdout}\n{result.stderr}" + + # Run isaaclab_physx test suite + test_dir = str(isaaclab_root / "source" / "isaaclab_physx" / "test") + result = self.run_in_uv_env( + ["python", "-m", "pytest", test_dir, "-sv", "--tb=short"], + cwd=isaaclab_root, + ) + output = result.stdout + result.stderr + assert result.returncode == 0, f"isaaclab_physx tests failed (rc={result.returncode}):\n{output}" + + finally: + self.destroy_uv_env() diff --git a/source/isaaclab/test/install_ci/test_isaaclabx_uv_smoke.py b/source/isaaclab/test/install_ci/test_isaaclabx_uv_smoke.py new file mode 100644 index 000000000000..8bff8426e1fe --- /dev/null +++ b/source/isaaclab/test/install_ci/test_isaaclabx_uv_smoke.py @@ -0,0 +1,73 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test uv-based installation scenarios for isaaclab.""" + +from __future__ import annotations + +import shutil + +import pytest +from utils import UV_Mixin + + +class Test_UV_Env_Smoke(UV_Mixin): + """Test ./isaaclab.x -u, then validate with some quick checks.""" + + @classmethod + def setup_class(cls): + if not shutil.which("uv"): + pytest.skip("uv is not available") + + @pytest.mark.uv + @pytest.mark.timeout(10) + def test_isaaclab_sh_uv_creates_env_with_python_312(self, isaaclab_root): + """Run ./isaaclab.x -u and verify the created env has Python 3.12.""" + + try: + self.create_uv_env(isaaclab_root) + # python --version + version_output = self.run_in_uv_env(["python", "--version"]).stdout.strip() + assert "3.12" in version_output, f"Expected Python 3.12, got: {version_output}" + finally: + self.destroy_uv_env() + + @pytest.mark.uv + @pytest.mark.timeout(200) + def test_isaaclab_install_assets(self, isaaclab_root): + """Run ./isaaclab.x -i 'assets' and verify isaaclab_assets is importable.""" + + try: + self.create_uv_env(isaaclab_root) + + # ./isaaclab.x -i assets + result = self.run_in_uv_env([str(self.cli_script), "-i", "assets"], cwd=isaaclab_root) + assert result.returncode == 0, f"isaaclab -i assets failed:\n{result.stdout}\n{result.stderr}" + + # import isaaclab_assets + result = self.run_in_uv_env(["python", "-c", "import isaaclab_assets; print(isaaclab_assets.__version__)"]) + assert result.returncode == 0, f"import isaaclab_assets failed:\n{result.stdout}\n{result.stderr}" + + finally: + self.destroy_uv_env() + + @pytest.mark.uv + @pytest.mark.timeout(300) + def test_isaaclab_newton_installs_isaaclab_newton(self, isaaclab_root): + """Run ./isaaclab.x -i 'newton' and verify isaaclab_newton is importable.""" + + try: + self.create_uv_env(isaaclab_root) + + # ./isaaclab.x -i newton + result = self.run_in_uv_env([str(self.cli_script), "-i", "newton"], cwd=isaaclab_root) + assert result.returncode == 0, f"isaaclab -i newton failed:\n{result.stdout}\n{result.stderr}" + + # import isaaclab_newton + result = self.run_in_uv_env(["python", "-c", "import isaaclab_newton; print(isaaclab_newton.__version__)"]) + assert result.returncode == 0, f"import isaaclab_newton failed:\n{result.stdout}\n{result.stderr}" + + finally: + self.destroy_uv_env() diff --git a/source/isaaclab/test/install_ci/test_isaaclabx_uv_training.py b/source/isaaclab/test/install_ci/test_isaaclabx_uv_training.py new file mode 100644 index 000000000000..9bd2bfa40f32 --- /dev/null +++ b/source/isaaclab/test/install_ci/test_isaaclabx_uv_training.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Heavy uv-based installation and training tests for isaaclab.""" + +from __future__ import annotations + +import shutil + +import pytest +from utils import UV_Mixin + + +class Test_UV_Env_Heavy(UV_Mixin): + """Test ./isaaclab.x -u, then run heavy training.""" + + @classmethod + def setup_class(cls): + if not shutil.which("uv"): + pytest.skip("uv is not available") + + @pytest.mark.uv + @pytest.mark.slow + @pytest.mark.gpu + @pytest.mark.bug("nvbugs_5968136") + @pytest.mark.skip(reason="Cartpole training fails in MuJoCo stiffness conversion.") + @pytest.mark.timeout(1200) + def test_install_and_train_cartpole(self, isaaclab_root): + """`isaaclab.x -i assets,tasks,rl[all],physx,newton,contrib` then train Isaac-Cartpole-Direct-v0""" + + try: + self.create_uv_env(isaaclab_root) + + # Install assets, tasks, rl[all], physx, newton, contrib + result = self.run_in_uv_env( + [str(self.cli_script), "-i", "assets,tasks,rl[all],physx,newton,contrib"], cwd=isaaclab_root + ) + assert result.returncode == 0, f"isaaclab -i failed:\n{result.stdout}\n{result.stderr}" + + # Run a short training + result = self.run_in_uv_env( + [ + str(self.cli_script), + "-p", + "scripts/reinforcement_learning/rsl_rl/train.py", + "--task", + "Isaac-Cartpole-Direct-v0", + "--num_envs", + "4096", + "presets=newton_mjwarp", + "--max_iterations", + "5", + ], + cwd=isaaclab_root, + ) + output = result.stdout + result.stderr + assert result.returncode == 0, f"Training failed (rc={result.returncode}):\n{output}" + assert "Traceback (most recent call last):" not in output, ( + f"Training produced a Python traceback:\n{output}" + ) + finally: + self.destroy_uv_env() diff --git a/source/isaaclab/test/install_ci/test_wheel_builder.py b/source/isaaclab/test/install_ci/test_wheel_builder.py new file mode 100644 index 000000000000..51b93a6a4234 --- /dev/null +++ b/source/isaaclab/test/install_ci/test_wheel_builder.py @@ -0,0 +1,106 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test wheel build and install scenarios for isaaclab.""" + +from __future__ import annotations + +import glob +import shutil + +import pytest +from utils import UV_Mixin, run_cmd + + +class Test_Wheel_Builder(UV_Mixin): + """Test building the isaaclab wheel and installing it in a uv environment.""" + + @classmethod + def setup_class(cls): + if not shutil.which("uv"): + pytest.skip("uv is not available") + + @pytest.fixture(autouse=True, scope="class") + def _build_and_install_wheel(self, isaaclab_root): + """Build the wheel and install it in a uv environment once for all tests.""" + + cls = self.__class__ + build_script = isaaclab_root / "tools" / "wheel_builder" / "build.sh" + dist_dir = isaaclab_root / "tools" / "wheel_builder" / "build" / "dist" + + # Build the wheel + result = run_cmd(["bash", str(build_script)], cwd=isaaclab_root) + assert result.returncode == 0, f"build.sh failed:\n{result.stdout}\n{result.stderr}" + + # Find the built wheel + wheels = glob.glob(str(dist_dir / "isaaclab-*.whl")) + assert len(wheels) == 1, f"Expected exactly 1 wheel in {dist_dir}, found: {wheels}" + cls.wheel_path = wheels[0] + + # Create uv environment and install the wheel + self.create_uv_env(isaaclab_root) + + # Share env state with all test instances via the class + cls.env_path = self.env_path + cls.python = self.python + cls.cli_script = self.cli_script + result = self.run_in_uv_env(["uv", "pip", "install", cls.wheel_path + "[all]"]) + assert result.returncode == 0, f"uv pip install wheel failed:\n{result.stdout}\n{result.stderr}" + + yield + + self.destroy_uv_env() + + # import isaaclab + def test_import_isaaclab(self): + """Verify 'isaaclab' is importable.""" + result = self.run_in_uv_env(["python", "-c", "import isaaclab;"]) + assert result.returncode == 0, f"import isaaclab failed:\n{result.stdout}\n{result.stderr}" + + # from isaaclab import __version__; print(__version__) + def test_version_matches_wheel(self): + """Verify isaaclab.__version__ matches the wheel version.""" + result = self.run_in_uv_env(["python", "-c", "from isaaclab import __version__; print(__version__)"]) + imported_version = result.stdout.strip() + expected_version = self.wheel_path.split("/")[-1].split("-")[1] + assert imported_version == expected_version, ( + f"isaaclab.__version__ mismatch: expected {expected_version}, got {imported_version}" + ) + + # from isaaclab.app import AppLauncher + def test_import_isaaclab_app(self): + """Verify isaaclab.app and AppLauncher are importable.""" + result = self.run_in_uv_env(["python", "-c", "from isaaclab.app import AppLauncher"]) + assert result.returncode == 0, f"import isaaclab.app failed:\n{result.stdout}\n{result.stderr}" + + # from isaaclab.envs import ViewerCfg + def test_import_isaaclab_envs(self): + """Verify isaaclab.envs is importable.""" + result = self.run_in_uv_env(["python", "-c", "from isaaclab.envs import ViewerCfg"]) + assert result.returncode == 0, f"import isaaclab.envs failed:\n{result.stdout}\n{result.stderr}" + + # from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG + def test_import_isaaclab_assets(self): + """Verify isaaclab_assets is importable.""" + result = self.run_in_uv_env(["python", "-c", "from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG"]) + assert result.returncode == 0, f"import isaaclab_assets failed:\n{result.stdout}\n{result.stderr}" + + # from isaaclab.scene import InteractiveSceneCfg + def test_import_isaaclab_scene(self): + """Verify isaaclab.scene and InteractiveSceneCfg are importable.""" + result = self.run_in_uv_env(["python", "-c", "from isaaclab.scene import InteractiveSceneCfg"]) + assert result.returncode == 0, f"import isaaclab.scene failed:\n{result.stdout}\n{result.stderr}" + + # python -m isaaclab --help + def test_cli_help(self): + """Verify the isaaclab CLI is functional.""" + result = self.run_in_uv_env(["python", "-m", "isaaclab", "--help"]) + assert result.returncode == 0, f"isaaclab CLI help failed:\n{result.stdout}\n{result.stderr}" + + # import pinocchio as pin; print(pin.__version__) + def test_pinocchio_import(self): + """Verify pinocchio is importable and has the expected version.""" + result = self.run_in_uv_env(["python", "-c", "import pinocchio as pin; print(pin.__version__)"]) + assert result.returncode == 0, f"import pinocchio failed:\n{result.stdout}\n{result.stderr}" diff --git a/source/isaaclab/test/install_ci/utils.py b/source/isaaclab/test/install_ci/utils.py new file mode 100644 index 000000000000..85055f26c0bc --- /dev/null +++ b/source/isaaclab/test/install_ci/utils.py @@ -0,0 +1,244 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared utilities for Isaac Lab installation CI tests.""" + +from __future__ import annotations + +import os +import platform +import shlex +import shutil +import subprocess +import sys +import time +from pathlib import Path +from typing import Literal + +_DIM = "\033[2m" +_MAGENTA = "\033[95m" +_RESET = "\033[0m" + +# Controls whether run_cmd() streams output by default. +# Set to True by conftest.py when pytest runs with -s / --capture=no. +stream_output: bool = False + +# ISAACLAB_INSTALL_CI_ENV can be set to override execution +# environment detection in install_ci tests +# (for testing the testing while testing). + +ExecutionEnvironment = Literal["docker", "native"] + + +def detect_execution_environment( + environ: dict[str, str] | None = None, + filesystem_root: Path | None = None, +) -> ExecutionEnvironment: + """Detect whether install_ci tests are running in Docker or natively.""" + env = environ if environ is not None else os.environ + root = filesystem_root if filesystem_root is not None else Path("/") + + override = env.get("ISAACLAB_INSTALL_CI_ENV") + if override: + cleaned = override.strip().lower() + if cleaned not in ("docker", "native"): + raise ValueError(f"ISAACLAB_INSTALL_CI_ENV must be 'docker' or 'native', got: {override!r}") + return cleaned # type: ignore[return-value] + + if (root / ".dockerenv").exists() or (root / "run" / ".containerenv").exists(): + return "docker" + + for cgroup_path in (root / "proc" / "1" / "cgroup", root / "proc" / "self" / "cgroup"): + try: + cgroup_text = cgroup_path.read_text(encoding="utf-8", errors="ignore") + except OSError: + continue + if any( + hint in cgroup_text + for hint in ( + "docker", + "containerd", + "kubepods", + "libpod", + "podman", + ) + ): + return "docker" + + if env.get("container"): + return "docker" + + return "native" + + +def get_execution_environment_skip_reason( + marker_names: set[str], + execution_environment: ExecutionEnvironment, +) -> str | None: + """Return a skip reason when environment markers do not match the runtime.""" + has_docker = "docker" in marker_names + has_native = "native" in marker_names + + if has_docker and has_native: + raise ValueError("tests cannot be marked with both 'docker' and 'native'") + + if has_docker and execution_environment != "docker": + return f"requires Docker execution environment, detected {execution_environment}" + + if has_native and execution_environment != "native": + return f"requires native execution environment, detected {execution_environment}" + + return None + + +def find_isaaclab_root() -> Path: + """Walk up from this file to find the repo root (contains isaaclab.sh).""" + here = Path(__file__).resolve() + for parent in [here] + list(here.parents): + if (parent / "isaaclab.sh").exists(): + return parent + raise FileNotFoundError("Could not locate IsaacLab repository root (no isaaclab.sh found)") + + +def run_cmd( + args: list[str], + *, + cwd: str | Path | None = None, + env: dict[str, str] | None = None, + timeout: int = 600, + err_on_err: bool = False, + stream: bool | None = None, +) -> subprocess.CompletedProcess: + """Run a command, merging *env* into the current environment. + + Args: + args: Command and arguments to run. + cwd: Working directory for the subprocess. + env: Extra environment variables merged into the current environment. + timeout: Timeout in seconds. + err_on_err: Raise CalledProcessError on non-zero exit. + stream: When True, stream stdout/stderr to the console in + real time instead of capturing them. Defaults to True when + pytest is invoked with ``-s`` (``--capture=no``). + + Returns: + The CompletedProcess; raises CalledProcessError when *check* is + True and return code != 0. + """ + if stream is None: + stream = stream_output + merged_env = {**os.environ, **(env or {})} + cmd_str = " ".join(str(a) for a in args) + if stream: + sys.stdout.write(f"{_MAGENTA}[COMMAND] {cmd_str}{_RESET}\n") + sys.stdout.flush() + # Stream output to console in real time. + t0 = time.monotonic() + proc = subprocess.Popen( + [str(a) for a in args], + cwd=str(cwd) if cwd else None, + env=merged_env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + assert proc.stdout is not None + lines: list[str] = [] + try: + for line in proc.stdout: + lines.append(line) + sys.stdout.write(f"{_DIM}{line}{_RESET}") + sys.stdout.flush() + except Exception: + proc.kill() + raise + try: + proc.wait(timeout=timeout) + except subprocess.TimeoutExpired: + proc.kill() + proc.wait() + raise + elapsed = time.monotonic() - t0 + sys.stdout.write(f"{_MAGENTA}[{elapsed:.1f}s]{_RESET}\n") + sys.stdout.flush() + result = subprocess.CompletedProcess( + args=proc.args, + returncode=proc.returncode, + stdout="".join(lines), + stderr="", + ) + if err_on_err and result.returncode != 0: + raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) + return result + return subprocess.run( + [str(a) for a in args], + cwd=str(cwd) if cwd else None, + env=merged_env, + capture_output=True, + text=True, + timeout=timeout, + check=err_on_err, + ) + + +_IS_WINDOWS = platform.system() == "Windows" + + +class UV_Mixin: + """Mixin providing uv virtual-environment helpers for test classes.""" + + env_path: Path + python: Path + cli_script: Path + + def create_uv_env(self, isaaclab_root: Path, env_name: str = "") -> None: + """Create a uv environment and store info on self. + + Sets ``self.env_path``, ``self.python``, and ``self.cli_script``. + + Args: + isaaclab_root: Path to the IsaacLab repository root. + env_name: Name for the venv directory. A random name is + generated when empty. + """ + env_name = env_name if env_name else f"_isaaclab_install_ci_{os.urandom(4).hex()}" + + self.env_path = isaaclab_root / env_name + self.cli_script = isaaclab_root / ("isaaclab.bat" if _IS_WINDOWS else "isaaclab.sh") + + if self.env_path.exists(): + shutil.rmtree(self.env_path) + + result = run_cmd([str(self.cli_script), "-u", env_name], cwd=isaaclab_root, err_on_err=False) + assert result.returncode == 0, f"uv env creation failed:\n{result.stdout}\n{result.stderr}" + assert self.env_path.exists(), f"Expected env directory {self.env_path} was not created" + + # Prevent the venv from being tracked by git. + (self.env_path / ".gitignore").write_text("*\n") + + self.python = (self.env_path / "Scripts" / "python.exe") if _IS_WINDOWS else (self.env_path / "bin" / "python") + assert self.python.exists(), f"Python executable not found at {self.python}" + + def destroy_uv_env(self) -> None: + """Remove the uv environment directory if it exists.""" + if hasattr(self, "env_path") and self.env_path.exists(): + shutil.rmtree(self.env_path) + + def run_in_uv_env(self, cmd: list[str], **kwargs) -> subprocess.CompletedProcess: + """Run a command inside the activated venv by sourcing the activate script. + + Args: + cmd: Command and arguments to run inside the venv. + **kwargs: Extra keyword arguments forwarded to :func:`run_cmd`. + """ + escaped = " ".join(shlex.quote(str(a)) for a in cmd) + if _IS_WINDOWS: + activate = str(self.env_path / "Scripts" / "activate.bat") + shell_cmd = f'call "{activate}" && {escaped}' + return run_cmd(["cmd", "/c", shell_cmd], **kwargs) + else: + activate = shlex.quote(str(self.env_path / "bin" / "activate")) + shell_cmd = f"source {activate} && {escaped}" + return run_cmd(["bash", "-c", shell_cmd], **kwargs) diff --git a/source/isaaclab/test/managers/test_event_manager.py b/source/isaaclab/test/managers/test_event_manager.py index 171cc8be65e9..362e5c85c3e6 100644 --- a/source/isaaclab/test/managers/test_event_manager.py +++ b/source/isaaclab/test/managers/test_event_manager.py @@ -84,7 +84,7 @@ def __call__( @pytest.fixture def env(): - num_envs = 32 + num_envs = 2 device = "cpu" # create dummy tensors dummy1 = torch.zeros((num_envs, 2), device=device) diff --git a/source/isaaclab/test/managers/test_manager_base.py b/source/isaaclab/test/managers/test_manager_base.py new file mode 100644 index 000000000000..87d068a300de --- /dev/null +++ b/source/isaaclab/test/managers/test_manager_base.py @@ -0,0 +1,318 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +"""Tests for recursive _process_term_cfg_at_play / _resolve_param_value. + +These tests exercise ManagerBase's parameter resolution logic and do NOT +require an Isaac Sim launch, so they can run without AppLauncher. +""" + +from collections import namedtuple +from collections.abc import Sequence +from unittest.mock import MagicMock + +import pytest +import torch + +from isaaclab.envs import ManagerBasedEnv +from isaaclab.managers import ManagerTermBase, ManagerTermBaseCfg +from isaaclab.managers.manager_base import ManagerBase + +DummyEnv = namedtuple("ManagerBasedRLEnv", ["num_envs", "dt", "device", "sim", "dummy1", "dummy2"]) +"""Dummy environment for testing.""" + + +class SimpleManager(ManagerBase): + """Minimal concrete ManagerBase for testing term resolution.""" + + def __init__(self, cfg: dict, env): + self._term_cfgs: list[tuple[str, ManagerTermBaseCfg]] = [] + super().__init__(cfg, env) + + @property + def active_terms(self) -> list[str]: + return [name for name, _ in self._term_cfgs] + + def _prepare_terms(self): + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + for term_name, term_cfg in cfg_items: + if term_cfg is None: + continue + self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) + self._term_cfgs.append((term_name, term_cfg)) + + def apply(self, env_ids: torch.Tensor): + """Call each registered term with the environment.""" + for _, term_cfg in self._term_cfgs: + term_cfg.func(self._env, env_ids, **term_cfg.params) + + +def increment_dummy1_by_one(env, env_ids: torch.Tensor): + env.dummy1[env_ids] += 1 + + +def change_dummy1_by_value(env, env_ids: torch.Tensor, value: int): + env.dummy1[env_ids] += value + + +def reset_dummy2_to_zero(env, env_ids: torch.Tensor): + env.dummy2[env_ids] = 0 + + +class reset_dummy2_to_zero_class(ManagerTermBase): + def __init__(self, cfg: ManagerTermBaseCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + pass + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + ) -> None: + env.dummy2[env_ids] = 0 + + +class chained_terms_class(ManagerTermBase): + """A class-based term whose params contain nested ManagerTermBaseCfg dicts.""" + + def __init__(self, cfg: ManagerTermBaseCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self.sub_terms: dict[str, ManagerTermBaseCfg] = cfg.params["terms"] + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + pass + + def __call__(self, env: ManagerBasedEnv, env_ids: torch.Tensor, terms: dict) -> None: + for term_cfg in terms.values(): + term_cfg.func(env, env_ids, **term_cfg.params) + + +class list_terms_class(ManagerTermBase): + """A class-based term whose params contain a list of nested ManagerTermBaseCfg.""" + + def __init__(self, cfg: ManagerTermBaseCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self.sub_terms: list[ManagerTermBaseCfg] = cfg.params["term_list"] + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + pass + + def __call__(self, env: ManagerBasedEnv, env_ids: torch.Tensor, term_list: list) -> None: + for term_cfg in term_list: + term_cfg.func(env, env_ids, **term_cfg.params) + + +@pytest.fixture +def env(): + num_envs = 2 + device = "cpu" + dummy1 = torch.zeros((num_envs, 2), device=device) + dummy2 = torch.zeros((num_envs, 10), device=device) + sim = MagicMock() + sim.is_playing.return_value = True + return DummyEnv(num_envs, 0.01, device, sim, dummy1, dummy2) + + +def test_nested_term_cfg_in_dict_params(env): + """Test that nested ManagerTermBaseCfg inside a dict param are recursively resolved.""" + cfg = { + "chained": ManagerTermBaseCfg( + func=chained_terms_class, + params={ + "terms": { + "step_a": ManagerTermBaseCfg(func=increment_dummy1_by_one), + "step_b": ManagerTermBaseCfg(func=change_dummy1_by_value, params={"value": 5}), + } + }, + ), + } + manager = SimpleManager(cfg, env) + + # The outer chained_terms_class should be instantiated (class -> instance). + outer_cfg = manager._term_cfgs[0][1] + assert isinstance(outer_cfg.func, chained_terms_class) + + # Inner terms should have their func resolved to callables (not strings). + inner_terms = outer_cfg.params["terms"] + assert callable(inner_terms["step_a"].func), "Nested func should be resolved to a callable" + assert callable(inner_terms["step_b"].func), "Nested func should be resolved to a callable" + assert inner_terms["step_a"].func is increment_dummy1_by_one + assert inner_terms["step_b"].func is change_dummy1_by_value + + # Functionally: applying the chained term should run both inner terms. + manager.apply(torch.arange(env.num_envs, device=env.device)) + # increment_dummy1_by_one adds 1, then change_dummy1_by_value adds 5 -> total 6 + torch.testing.assert_close(env.dummy1, 6 * torch.ones_like(env.dummy1)) + + +def test_nested_term_cfg_in_list_params(env): + """Test that nested ManagerTermBaseCfg inside a list param are recursively resolved.""" + cfg = { + "list_chained": ManagerTermBaseCfg( + func=list_terms_class, + params={ + "term_list": [ + ManagerTermBaseCfg(func=increment_dummy1_by_one), + ManagerTermBaseCfg(func=change_dummy1_by_value, params={"value": 3}), + ] + }, + ), + } + manager = SimpleManager(cfg, env) + + # Inner terms in the list should have callable funcs. + outer_cfg = manager._term_cfgs[0][1] + term_list = outer_cfg.params["term_list"] + assert isinstance(term_list, list) + assert callable(term_list[0].func) + assert callable(term_list[1].func) + + # Apply and verify: +1 then +3 -> total 4 + manager.apply(torch.arange(env.num_envs, device=env.device)) + torch.testing.assert_close(env.dummy1, 4 * torch.ones_like(env.dummy1)) + + +def test_string_func_in_nested_term_cfg(env): + """Test that string-based func references inside nested term cfgs are resolved.""" + this_module = __name__ + + cfg = { + "chained": ManagerTermBaseCfg( + func=chained_terms_class, + params={ + "terms": { + "step_a": ManagerTermBaseCfg( + func=f"{this_module}:increment_dummy1_by_one", + ), + "step_b": ManagerTermBaseCfg( + func=f"{this_module}:change_dummy1_by_value", + params={"value": 10}, + ), + } + }, + ), + } + manager = SimpleManager(cfg, env) + + # String funcs in nested terms should be resolved to actual callables. + outer_cfg = manager._term_cfgs[0][1] + inner_terms = outer_cfg.params["terms"] + assert inner_terms["step_a"].func is increment_dummy1_by_one + assert inner_terms["step_b"].func is change_dummy1_by_value + + # Apply and verify: +1 then +10 -> 11 + manager.apply(torch.arange(env.num_envs, device=env.device)) + torch.testing.assert_close(env.dummy1, 11 * torch.ones_like(env.dummy1)) + + +def test_string_func_top_level_class_term(env): + """Test that a top-level string-based func pointing to a class is properly instantiated.""" + this_module = __name__ + + cfg = { + "term_class_str": ManagerTermBaseCfg( + func=f"{this_module}:reset_dummy2_to_zero_class", + ), + } + manager = SimpleManager(cfg, env) + + # The string func should be resolved and the class instantiated. + outer_cfg = manager._term_cfgs[0][1] + assert isinstance(outer_cfg.func, reset_dummy2_to_zero_class) + + # Verify it works: set dummy2 to non-zero, apply, check it's zero. + env.dummy2[:] = 42.0 + manager.apply(torch.arange(env.num_envs, device=env.device)) + torch.testing.assert_close(env.dummy2, torch.zeros_like(env.dummy2)) + + +def test_deeply_nested_dict_in_params(env): + """Test that term cfgs are resolved even when nested inside dict values.""" + cfg = { + "chained": ManagerTermBaseCfg( + func=chained_terms_class, + params={ + "terms": { + "only": ManagerTermBaseCfg( + func=change_dummy1_by_value, + params={"value": 7}, + ), + } + }, + ), + } + manager = SimpleManager(cfg, env) + + outer_cfg = manager._term_cfgs[0][1] + inner_cfg = outer_cfg.params["terms"]["only"] + assert callable(inner_cfg.func) + assert inner_cfg.params == {"value": 7} + + manager.apply(torch.arange(env.num_envs, device=env.device)) + torch.testing.assert_close(env.dummy1, 7 * torch.ones_like(env.dummy1)) + + +def test_chained_containing_chained_and_list(env): + """Test multi-level nesting: a chained term whose children are chained and list terms.""" + cfg = { + "outer": ManagerTermBaseCfg( + func=chained_terms_class, + params={ + "terms": { + "inner_chain": ManagerTermBaseCfg( + func=chained_terms_class, + params={ + "terms": { + "add_1": ManagerTermBaseCfg(func=increment_dummy1_by_one), + "add_2": ManagerTermBaseCfg(func=change_dummy1_by_value, params={"value": 2}), + } + }, + ), + "inner_list": ManagerTermBaseCfg( + func=list_terms_class, + params={ + "term_list": [ + ManagerTermBaseCfg(func=change_dummy1_by_value, params={"value": 10}), + ManagerTermBaseCfg(func=change_dummy1_by_value, params={"value": 20}), + ] + }, + ), + } + }, + ), + } + manager = SimpleManager(cfg, env) + + # Outer term should be instantiated. + outer_cfg = manager._term_cfgs[0][1] + assert isinstance(outer_cfg.func, chained_terms_class) + + # Mid-level terms should also be instantiated class instances. + inner_chain_cfg = outer_cfg.params["terms"]["inner_chain"] + inner_list_cfg = outer_cfg.params["terms"]["inner_list"] + assert isinstance(inner_chain_cfg.func, chained_terms_class) + assert isinstance(inner_list_cfg.func, list_terms_class) + + # Leaf-level funcs inside the inner chain should be resolved callables. + leaf_terms = inner_chain_cfg.params["terms"] + assert leaf_terms["add_1"].func is increment_dummy1_by_one + assert leaf_terms["add_2"].func is change_dummy1_by_value + + # Leaf-level funcs inside the inner list should be resolved callables. + leaf_list = inner_list_cfg.params["term_list"] + assert leaf_list[0].func is change_dummy1_by_value + assert leaf_list[1].func is change_dummy1_by_value + + # Apply and verify: inner_chain adds (1 + 2) = 3, inner_list adds (10 + 20) = 30 -> total 33 + manager.apply(torch.arange(env.num_envs, device=env.device)) + torch.testing.assert_close(env.dummy1, 33 * torch.ones_like(env.dummy1)) diff --git a/source/isaaclab/test/managers/test_recorder_manager.py b/source/isaaclab/test/managers/test_recorder_manager.py index 8a8e8c78a9d2..f85a1a191da6 100644 --- a/source/isaaclab/test/managers/test_recorder_manager.py +++ b/source/isaaclab/test/managers/test_recorder_manager.py @@ -26,8 +26,7 @@ import pytest import torch -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg from isaaclab.managers import DatasetExportMode, RecorderManager, RecorderManagerBaseCfg, RecorderTerm, RecorderTermCfg from isaaclab.scene import InteractiveSceneCfg @@ -198,6 +197,14 @@ def dataset_dir(): shutil.rmtree(test_dir) +@pytest.fixture(autouse=True) +def cleanup_simulation_context(): + """Fixture to ensure SimulationContext is cleared after each test.""" + yield + # Cleanup after test + SimulationContext.clear_instance() + + def test_str(dataset_dir): """Test the string representation of the recorder manager.""" # create recorder manager @@ -260,7 +267,7 @@ def test_close(device, dataset_dir): """Test whether data is correctly exported in the close function when fully integrated with ManagerBasedEnv and `export_in_close` is True.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # create environment env_cfg = get_empty_base_env_cfg(device=device, num_envs=2) cfg = DummyRecorderManagerCfg() diff --git a/source/isaaclab/test/markers/check_markers_visibility.py b/source/isaaclab/test/markers/check_markers_visibility.py index 98dbee8ddcd7..eadc8af82a53 100644 --- a/source/isaaclab/test/markers/check_markers_visibility.py +++ b/source/isaaclab/test/markers/check_markers_visibility.py @@ -96,14 +96,14 @@ def run_simulator( count = 0 # reset the scene entities # root state - root_state = scene["robot"].data.default_root_state.clone() + root_state = scene["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins scene["robot"].write_root_pose_to_sim(root_state[:, :7]) scene["robot"].write_root_velocity_to_sim(root_state[:, 7:]) # set joint positions with some noise joint_pos, joint_vel = ( - scene["robot"].data.default_joint_pos.clone(), - scene["robot"].data.default_joint_vel.clone(), + scene["robot"].data.default_joint_pos.torch.clone(), + scene["robot"].data.default_joint_vel.torch.clone(), ) scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) # clear internal buffers diff --git a/source/isaaclab/test/markers/test_visualization_markers.py b/source/isaaclab/test/markers/test_visualization_markers.py index ebc183b804b8..906c14ccb7d5 100644 --- a/source/isaaclab/test/markers/test_visualization_markers.py +++ b/source/isaaclab/test/markers/test_visualization_markers.py @@ -126,6 +126,7 @@ def test_multiple_prototypes_marker(sim): sim.step() +@pytest.mark.flaky(max_runs=3, min_passes=1) def test_visualization_time_based_on_prototypes(sim): """Test with time taken when number of prototypes is increased.""" # create a marker diff --git a/source/isaaclab/test/performance/test_kit_startup_performance.py b/source/isaaclab/test/performance/test_kit_startup_performance.py index f4134d04ae14..0a39f23ea4aa 100644 --- a/source/isaaclab/test/performance/test_kit_startup_performance.py +++ b/source/isaaclab/test/performance/test_kit_startup_performance.py @@ -20,4 +20,4 @@ def test_kit_start_up_time(): end_time = time.time() elapsed_time = end_time - start_time # we are doing some more imports on the automate side - will investigate using warp instead of numba cuda - assert elapsed_time <= 12.0 + assert elapsed_time <= 15.0 diff --git a/source/isaaclab/test/performance/test_robot_load_performance.py b/source/isaaclab/test/performance/test_robot_load_performance.py index 42d5f1c4fffb..a298c8b746e5 100644 --- a/source/isaaclab/test/performance/test_robot_load_performance.py +++ b/source/isaaclab/test/performance/test_robot_load_performance.py @@ -14,39 +14,62 @@ simulation_app = AppLauncher(headless=True).app import pytest +import torch +from isaaclab_physx.cloner import physx_replicate -import omni -from isaacsim.core.cloner import GridCloner - +import isaaclab.sim.utils as sim_utils +from isaaclab import cloner from isaaclab.assets import Articulation from isaaclab.sim import build_simulation_context from isaaclab.utils.timer import Timer from isaaclab_assets import ANYMAL_D_CFG, CARTPOLE_CFG +NUM_ENVS = 4096 +SPACING = 2.0 + @pytest.mark.parametrize( "test_config,device", [ - ({"name": "Cartpole", "robot_cfg": CARTPOLE_CFG, "expected_load_time": 10.0}, "cuda:0"), - ({"name": "Cartpole", "robot_cfg": CARTPOLE_CFG, "expected_load_time": 10.0}, "cpu"), - ({"name": "Anymal_D", "robot_cfg": ANYMAL_D_CFG, "expected_load_time": 40.0}, "cuda:0"), - ({"name": "Anymal_D", "robot_cfg": ANYMAL_D_CFG, "expected_load_time": 40.0}, "cpu"), + # TODO: regression - this used to be 10 + ({"name": "Cartpole", "robot_cfg": CARTPOLE_CFG, "expected_load_time": 15.0}, "cuda:0"), + ({"name": "Cartpole", "robot_cfg": CARTPOLE_CFG, "expected_load_time": 15.0}, "cpu"), + # TODO: regression - this used to be 40 + ({"name": "Anymal_D", "robot_cfg": ANYMAL_D_CFG, "expected_load_time": 60.0}, "cuda:0"), + ({"name": "Anymal_D", "robot_cfg": ANYMAL_D_CFG, "expected_load_time": 60.0}, "cpu"), ], ) def test_robot_load_performance(test_config, device): """Test robot load time.""" with build_simulation_context(device=device) as sim: sim._app_control_on_stop_handle = None - cloner = GridCloner(spacing=2) - target_paths = cloner.generate_paths("/World/Robots", 4096) - omni.usd.get_context().get_stage().DefinePrim(target_paths[0], "Xform") - _ = cloner.clone( - source_prim_path=target_paths[0], - prim_paths=target_paths, - replicate_physics=False, - copy_from_source=True, + stage = sim_utils.get_current_stage() + + # Generate grid positions for environments + positions, _ = cloner.grid_transforms(NUM_ENVS, SPACING, device=device) + + # Create environment prims using USD replicate + env_paths = [f"/World/Robots_{i}" for i in range(NUM_ENVS)] + stage.DefinePrim(env_paths[0], "Xform") + cloner.usd_replicate( + stage=stage, + sources=[env_paths[0]], + destinations=["/World/Robots_{}"], + env_ids=torch.arange(NUM_ENVS), + positions=positions, ) + + # Replicate physics - mapping is (num_sources, num_envs) bool mask + physx_replicate( + stage=stage, + sources=[env_paths[0]], + destinations=["/World/Robots_{}"], + env_ids=torch.arange(NUM_ENVS), + mapping=torch.ones(1, NUM_ENVS, dtype=torch.bool), # 1 source -> all envs + device=device, + ) + with Timer(f"{test_config['name']} load time for device {device}") as timer: robot = Articulation(test_config["robot_cfg"].replace(prim_path="/World/Robots_.*/Robot")) # noqa: F841 sim.reset() diff --git a/source/isaaclab/test/renderers/test_camera_output_contract.py b/source/isaaclab/test/renderers/test_camera_output_contract.py new file mode 100644 index 000000000000..2d6087d29708 --- /dev/null +++ b/source/isaaclab/test/renderers/test_camera_output_contract.py @@ -0,0 +1,228 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the renderer→camera output contract.""" + +import warnings + +import pytest +import torch + +pytest.importorskip("isaaclab_physx") + +from isaaclab.sensors.camera import CameraCfg, TiledCameraCfg +from isaaclab.sensors.camera.camera_data import CameraData, RenderBufferKind, RenderBufferSpec +from isaaclab.sim import PinholeCameraCfg + +_SPAWN = PinholeCameraCfg( + focal_length=24.0, + focus_distance=400.0, + horizontal_aperture=20.955, + clipping_range=(0.1, 1.0e5), +) + + +@pytest.mark.parametrize( + "field_name,deprecated_value", + [ + ("colorize_semantic_segmentation", False), + ("colorize_instance_segmentation", False), + ("colorize_instance_id_segmentation", False), + ("semantic_filter", ["class"]), + ("semantic_segmentation_mapping", {"class:cube": (1, 2, 3, 4)}), + ("depth_clipping_behavior", "max"), + ], +) +def test_camera_cfg_forwards_deprecated_fields_to_renderer_cfg(field_name, deprecated_value): + """Deprecated CameraCfg field is forwarded to renderer_cfg with a warning.""" + kwargs = { + "height": 64, + "width": 64, + "prim_path": "/World/Camera", + "spawn": _SPAWN, + "data_types": ["rgb"], + field_name: deprecated_value, + } + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + cfg = CameraCfg(**kwargs) + + deprecation_warnings = [w for w in caught if issubclass(w.category, DeprecationWarning)] + assert any(f"CameraCfg.{field_name}" in str(w.message) for w in deprecation_warnings) + assert getattr(cfg.renderer_cfg, field_name) == deprecated_value + + +def test_camera_cfg_default_does_not_warn_or_forward(): + """Default-valued deprecated fields stay silent.""" + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + cfg = CameraCfg( + height=64, + width=64, + prim_path="/World/Camera", + spawn=_SPAWN, + data_types=["rgb"], + ) + + deprecation_warnings = [ + w for w in caught if issubclass(w.category, DeprecationWarning) and "CameraCfg." in str(w.message) + ] + assert deprecation_warnings == [] + assert cfg.renderer_cfg.colorize_semantic_segmentation is True + + +def test_camera_cfg_post_construction_mutation_is_silent_no_op(): + """Mutating a deprecated field after construction does not propagate to renderer_cfg.""" + cfg = CameraCfg( + height=64, + width=64, + prim_path="/World/Camera", + spawn=_SPAWN, + data_types=["rgb"], + ) + assert cfg.renderer_cfg.colorize_semantic_segmentation is True + cfg.colorize_semantic_segmentation = False + assert cfg.renderer_cfg.colorize_semantic_segmentation is True + + +def test_tiled_camera_cfg_does_not_forward_deprecated_fields(): + """TiledCameraCfg skips CameraCfg's per-field forwarder.""" + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + cfg = TiledCameraCfg( + height=64, + width=64, + prim_path="/World/Camera", + spawn=_SPAWN, + data_types=["rgb"], + colorize_semantic_segmentation=False, + ) + + tiled_warnings = [ + w for w in caught if issubclass(w.category, DeprecationWarning) and "TiledCameraCfg" in str(w.message) + ] + assert tiled_warnings + + field_warnings = [ + w for w in caught if issubclass(w.category, DeprecationWarning) and "CameraCfg.colorize_" in str(w.message) + ] + assert field_warnings == [] + + assert cfg.renderer_cfg.colorize_semantic_segmentation is True + + +def test_newton_warp_supported_output_types_key_set(): + """NewtonWarpRenderer publishes the documented key set.""" + pytest.importorskip("isaaclab_newton") + pytest.importorskip("newton") + from isaaclab_newton.renderers.newton_warp_renderer import NewtonWarpRenderer + from isaaclab_newton.renderers.newton_warp_renderer_cfg import NewtonWarpRendererCfg + + renderer = NewtonWarpRenderer.__new__(NewtonWarpRenderer) + renderer.cfg = NewtonWarpRendererCfg() + specs = renderer.supported_output_types() + + assert set(specs.keys()) == { + RenderBufferKind.RGB, + RenderBufferKind.RGBA, + RenderBufferKind.ALBEDO, + RenderBufferKind.DEPTH, + RenderBufferKind.NORMALS, + RenderBufferKind.INSTANCE_SEGMENTATION_FAST, + } + + +def _make_camera_cfg(data_types: list[str]) -> CameraCfg: + return CameraCfg( + height=8, + width=16, + prim_path="/World/Camera", + spawn=_SPAWN, + data_types=data_types, + ) + + +def test_camera_data_allocates_supported_subset_and_aliases_rgb(): + """CameraData allocates the intersection of requested + supported and aliases rgb into rgba.""" + cfg = _make_camera_cfg(["rgb", "rgba", "depth"]) + specs = { + RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), + RenderBufferKind.NORMALS: RenderBufferSpec(3, torch.float32), + } + data = CameraData.allocate( + data_types=cfg.data_types, height=8, width=16, num_views=2, device="cpu", supported_specs=specs + ) + + assert set(data.output.keys()) == {"rgba", "rgb", "depth"} + assert data.output["rgba"].shape == (2, 8, 16, 4) + assert data.output["rgba"].dtype == torch.uint8 + assert data.output["depth"].shape == (2, 8, 16, 1) + assert data.output["depth"].dtype == torch.float32 + assert data.output["rgb"].data_ptr() == data.output["rgba"].data_ptr() + assert data.image_shape == (8, 16) + assert data.info == {"rgba": None, "rgb": None, "depth": None} + + +def test_camera_data_drops_requested_types_not_in_supported_specs(): + """Requested types absent from supported_specs are absent from data.output.""" + cfg = _make_camera_cfg(["rgb", "normals"]) + specs = { + RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + } + data = CameraData.allocate( + data_types=cfg.data_types, height=4, width=4, num_views=1, device="cpu", supported_specs=specs + ) + + assert "normals" not in data.output + assert {"rgb", "rgba"} <= set(data.output.keys()) + + +def test_camera_data_no_arg_construction_yields_empty_container(): + """Bare CameraData() produces an all-None container.""" + data = CameraData() + assert data.pos_w is None + assert data.quat_w_world is None + assert data.intrinsic_matrices is None + assert data.output is None + assert data.info is None + assert data.image_shape is None + + +def test_camera_data_segmentation_dtype_follows_supported_spec(): + """CameraData consumes the layout dtype declared by the renderer spec.""" + cfg = _make_camera_cfg(["instance_segmentation_fast"]) + raw_specs = {RenderBufferKind.INSTANCE_SEGMENTATION_FAST: RenderBufferSpec(1, torch.int32)} + colorized_specs = {RenderBufferKind.INSTANCE_SEGMENTATION_FAST: RenderBufferSpec(4, torch.uint8)} + + raw = CameraData.allocate( + data_types=cfg.data_types, height=4, width=4, num_views=1, device="cpu", supported_specs=raw_specs + ) + colorized = CameraData.allocate( + data_types=cfg.data_types, height=4, width=4, num_views=1, device="cpu", supported_specs=colorized_specs + ) + + assert raw.output["instance_segmentation_fast"].dtype == torch.int32 + assert raw.output["instance_segmentation_fast"].shape == (1, 4, 4, 1) + assert colorized.output["instance_segmentation_fast"].dtype == torch.uint8 + assert colorized.output["instance_segmentation_fast"].shape == (1, 4, 4, 4) + + +def test_camera_data_allocate_raises_on_unknown_name(): + """An unknown data_types name raises ValueError naming the offender.""" + supported_specs = {RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8)} + with pytest.raises(ValueError) as exc_info: + CameraData.allocate( + data_types=["not_a_real_type"], + height=4, + width=4, + num_views=1, + device="cpu", + supported_specs=supported_specs, + ) + assert "not_a_real_type" in str(exc_info.value) + assert "RenderBufferKind" in str(exc_info.value) diff --git a/source/isaaclab/test/renderers/test_renderer_factory.py b/source/isaaclab/test/renderers/test_renderer_factory.py new file mode 100644 index 000000000000..66bd76fba598 --- /dev/null +++ b/source/isaaclab/test/renderers/test_renderer_factory.py @@ -0,0 +1,98 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the Renderer factory.""" + +from unittest.mock import Mock + +import pytest + +from isaaclab.renderers import Renderer +from isaaclab.renderers.base_renderer import BaseRenderer + +pytest.importorskip("isaaclab_physx") +pytest.importorskip("isaaclab_newton") +pytest.importorskip("isaaclab_ov") + +from isaaclab_newton.renderers import NewtonWarpRendererCfg +from isaaclab_ov.renderers import OVRTXRendererCfg +from isaaclab_physx.renderers import IsaacRtxRendererCfg + + +def _make_mock_renderer_class(name: str): + """Create a minimal concrete BaseRenderer subclass for testing.""" + + class MockRenderer(BaseRenderer): + def __init__(self, cfg=None): + pass + + def supported_output_types(self): + return {} + + def prepare_stage(self, stage, num_envs): + pass + + def create_render_data(self, spec): + return None + + def set_outputs(self, render_data, output_data): + pass + + def update_transforms(self): + pass + + def update_camera(self, render_data, positions, orientations, intrinsics): + pass + + def render(self, render_data): + pass + + def read_output(self, render_data, camera_data): + pass + + def cleanup(self, render_data): + pass + + MockRenderer.__name__ = name + return MockRenderer + + +def test_renderer_factory_backend_mapping(): + """Renderer._get_backend maps config renderer_type to correct backend.""" + assert Renderer._get_backend(IsaacRtxRendererCfg()) == "physx" + assert Renderer._get_backend(NewtonWarpRendererCfg()) == "newton" + assert Renderer._get_backend(OVRTXRendererCfg()) == "ov" + + # If someone decide to hack and specify renderer_type it should default to physx + assert Renderer._get_backend(Mock(renderer_type="unknown")) == "physx" + + +@pytest.mark.parametrize( + "cfg_cls,expected_class_name", + [ + (IsaacRtxRendererCfg, "IsaacRtxRenderer"), + (NewtonWarpRendererCfg, "NewtonWarpRenderer"), + (OVRTXRendererCfg, "OVRTXRenderer"), + ], + ids=["IsaacRtxRendererCfg", "NewtonWarpRendererCfg", "OVRTXRendererCfg"], +) +def test_renderer_factory_returns_correct_backend(cfg_cls, expected_class_name): + """Renderer(cfg) returns instance of correct class when registry is populated.""" + cfg = cfg_cls() + backend = Renderer._get_backend(cfg) + + mock_cls = _make_mock_renderer_class(expected_class_name) + original = Renderer._registry.get(backend) + Renderer._registry[backend] = mock_cls + + try: + renderer = Renderer(cfg) + assert type(renderer).__name__ == expected_class_name + assert isinstance(renderer, BaseRenderer) + finally: + if original is not None: + Renderer._registry[backend] = original + else: + Renderer._registry.pop(backend, None) diff --git a/source/isaaclab/test/renderers/test_simulation_render_context.py b/source/isaaclab/test/renderers/test_simulation_render_context.py new file mode 100644 index 000000000000..905643eefed1 --- /dev/null +++ b/source/isaaclab/test/renderers/test_simulation_render_context.py @@ -0,0 +1,207 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for :class:`~isaaclab.renderers.render_context.RenderContext`.""" + +from __future__ import annotations + +from collections.abc import Generator +from typing import Any, cast +from unittest.mock import patch + +import pytest +import torch + +from isaaclab.renderers.base_renderer import BaseRenderer +from isaaclab.renderers.output_contract import RenderBufferKind, RenderBufferSpec +from isaaclab.renderers.render_context import RenderContext +from isaaclab.renderers.renderer_cfg import RendererCfg +from isaaclab.sensors.camera.camera_data import CameraData + +pytest.importorskip("isaaclab_physx") +pytest.importorskip("isaaclab_newton") +pytest.importorskip("isaaclab_ov") + +from isaaclab_newton.renderers import NewtonWarpRendererCfg +from isaaclab_physx.renderers import IsaacRtxRendererCfg + + +class _FakeBackend(BaseRenderer): + """Test double for :class:`BaseRenderer`; does not load PhysX/Newton/OV renderer classes.""" + + __slots__ = ("_prepare_hits", "_update_transforms_hits", "_event_log") + + def __init__( + self, + *, + prepare_hits: list[int] | None = None, + update_transforms_hits: list[int] | None = None, + event_log: list[str] | None = None, + ) -> None: + super().__init__() + self._prepare_hits = prepare_hits + self._update_transforms_hits = update_transforms_hits + self._event_log = event_log + + def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: + return {} + + def prepare_stage(self, stage: Any, num_envs: int) -> None: + if self._prepare_hits is not None: + self._prepare_hits.append(1) + + def create_render_data(self, spec: Any) -> Any: + return object() + + def set_outputs(self, render_data: Any, output_data: dict[str, torch.Tensor]) -> None: + pass + + def update_transforms(self) -> None: + if self._update_transforms_hits is not None: + self._update_transforms_hits.append(1) + if self._event_log is not None: + self._event_log.append("ut") + + def update_camera( + self, + render_data: Any, + positions: torch.Tensor, + orientations: torch.Tensor, + intrinsics: torch.Tensor, + ) -> None: + pass + + def render(self, render_data: Any) -> None: + if self._event_log is not None: + self._event_log.append("render") + + def read_output(self, render_data: Any, camera_data: CameraData) -> None: + if self._event_log is not None: + self._event_log.append("read") + + def cleanup(self, render_data: Any) -> None: + pass + + +def _set_entries(ctx: RenderContext, *cfg_backend_pairs: tuple[RendererCfg, BaseRenderer]) -> None: + ctx._renderer_entries = list(cfg_backend_pairs) # type: ignore[assignment] # noqa: SLF001 + + +@pytest.fixture(autouse=True) +def _patch_renderer_factory() -> Generator[None, None, None]: + """Never construct :class:`~isaaclab.renderers.renderer.Renderer` (real backends) in this module.""" + + with patch( + "isaaclab.renderers.render_context.Renderer", + side_effect=lambda *_args, **_kwargs: _FakeBackend(), + ): + yield + + +def test_get_renderer_returns_equal_cfg_singleton(): + ctx = RenderContext() + cfg = IsaacRtxRendererCfg() + r1 = ctx.get_renderer(cfg) + r2 = ctx.get_renderer(cfg) + assert r1 is r2 + + +def test_get_renderer_two_different_concrete_types_coexist(): + """Different renderer_cfg concrete classes register distinct backends (no error).""" + + ctx = RenderContext() + rtx = ctx.get_renderer(IsaacRtxRendererCfg()) + nw = ctx.get_renderer(NewtonWarpRendererCfg()) + assert rtx is not nw + + +def test_ensure_prepare_stage_idempotent(): + """Second ``ensure_prepare_stage`` with same args does not call ``prepare_stage`` again.""" + + ctx = RenderContext() + prepares: list[int] = [] + cfg = IsaacRtxRendererCfg() + _set_entries(ctx, (cfg, _FakeBackend(prepare_hits=prepares))) + + ctx.ensure_prepare_stage(None, 4) + ctx.ensure_prepare_stage(None, 4) + assert len(prepares) == 1 + + +def test_ensure_prepare_stage_num_envs_mismatch(): + ctx = RenderContext() + cfg = IsaacRtxRendererCfg() + _set_entries(ctx, (cfg, _FakeBackend())) + + ctx.ensure_prepare_stage(None, 4) + with pytest.raises(RuntimeError, match="different num_envs"): + ctx.ensure_prepare_stage(None, 8) + + +def test_update_transforms_dedupes_per_physics_step(): + """All backends' update_transforms run once per physics step index.""" + + ctx = RenderContext() + hits: list[int] = [] + cfg = NewtonWarpRendererCfg() + _set_entries(ctx, (cfg, _FakeBackend(update_transforms_hits=hits))) + + ctx.update_transforms(1) + ctx.update_transforms(1) + assert len(hits) == 1 + + ctx.update_transforms(2) + assert len(hits) == 2 + + +def test_render_into_camera_calls_update_render_read_order(): + """render_into_camera runs update_transforms then render then read_output; dedupes UT per step.""" + ctx = RenderContext() + events: list[str] = [] + cfg = IsaacRtxRendererCfg() + fake = _FakeBackend(event_log=events) + _set_entries(ctx, (cfg, fake)) + + rd = object() + cam_data = CameraData() + ctx.render_into_camera(cast(BaseRenderer, fake), rd, cam_data, physics_step_count=1) + assert events == ["ut", "render", "read"] + + ctx.render_into_camera(cast(BaseRenderer, fake), rd, cam_data, physics_step_count=1) + assert events == ["ut", "render", "read", "render", "read"] + + +def test_reset_stage_prepare_flag_allows_second_prepare_stage(): + """After reset_stage_prepare_flag, ensure_prepare_stage invokes prepare_stage again.""" + ctx = RenderContext() + prepares: list[int] = [] + cfg = IsaacRtxRendererCfg() + _set_entries(ctx, (cfg, _FakeBackend(prepare_hits=prepares))) + + ctx.ensure_prepare_stage(None, 4) + assert len(prepares) == 1 + ctx.ensure_prepare_stage(None, 4) + assert len(prepares) == 1 + + ctx.reset_stage_prepare_flag() + ctx.ensure_prepare_stage(None, 4) + assert len(prepares) == 2 + + +def test_reset_transform_cadence_allows_repeat_update_transforms_same_step(): + """reset_transform_cadence clears step dedupe so the same physics_step_count can sync again.""" + ctx = RenderContext() + hits: list[int] = [] + cfg = IsaacRtxRendererCfg() + _set_entries(ctx, (cfg, _FakeBackend(update_transforms_hits=hits))) + + ctx.update_transforms(1) + assert len(hits) == 1 + ctx.update_transforms(1) + assert len(hits) == 1 + + ctx.reset_transform_cadence() + ctx.update_transforms(1) + assert len(hits) == 2 diff --git a/source/isaaclab/test/scene/check_interactive_scene.py b/source/isaaclab/test/scene/check_interactive_scene.py index 5b2463b315a9..df0c4a12a945 100644 --- a/source/isaaclab/test/scene/check_interactive_scene.py +++ b/source/isaaclab/test/scene/check_interactive_scene.py @@ -103,8 +103,8 @@ def main(): print("[INFO]: Setup complete...") # default joint targets - robot_1_actions = scene.articulations["robot_1"].data.default_joint_pos.clone() - robot_2_actions = scene.articulations["robot_2"].data.default_joint_pos.clone() + robot_1_actions = scene.articulations["robot_1"].data.default_joint_pos.torch.clone() + robot_2_actions = scene.articulations["robot_2"].data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() sim_time = 0.0 @@ -124,10 +124,10 @@ def main(): sim_time = 0.0 count = 0 # reset root state - root_state = scene.articulations["robot_1"].data.default_root_state.clone() + root_state = scene.articulations["robot_1"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot_1"].data.default_joint_pos - joint_vel = scene.articulations["robot_1"].data.default_joint_vel + joint_pos = scene.articulations["robot_1"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot_1"].data.default_joint_vel.torch # -- set root state # -- robot 1 scene.articulations["robot_1"].write_root_pose_to_sim(root_state[:, :7]) diff --git a/source/isaaclab/test/scene/test_interactive_scene.py b/source/isaaclab/test/scene/test_interactive_scene.py index 1a42a340baa1..31b577db634f 100644 --- a/source/isaaclab/test/scene/test_interactive_scene.py +++ b/source/isaaclab/test/scene/test_interactive_scene.py @@ -12,14 +12,17 @@ """Rest everything follows.""" +import contextlib +from types import SimpleNamespace + import pytest import torch import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg -from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab.assets import ArticulationCfg, RigidObjectCfg +from isaaclab.physics.scene_data_requirements import SceneDataRequirement from isaaclab.scene import InteractiveScene, InteractiveSceneCfg -from isaaclab.sensors import ContactSensorCfg from isaaclab.sim import build_simulation_context from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR @@ -33,7 +36,7 @@ class MySceneCfg(InteractiveSceneCfg): robot = ArticulationCfg( prim_path="/World/envs/env_.*/Robot", spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd" + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd", ), actuators={ "joint": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=100.0, damping=1.0), @@ -66,56 +69,7 @@ def make_scene(num_envs: int, env_spacing: float = 1.0): return scene_cfg yield make_scene, sim - sim.stop() - sim.clear() - sim.clear_all_callbacks() - sim.clear_instance() - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -def test_scene_entity_isolation(device, setup_scene): - """Tests that multiple instances of InteractiveScene do not share any data. - - In this test, two InteractiveScene instances are created in a loop and added to a list. - The scene at index 0 of the list will have all of its entities cleared manually, and - the test compares that the data held in the scene at index 1 remained intact. - """ - make_scene, sim = setup_scene - scene_cfg = make_scene(num_envs=1) - # set additional light to test 'extras' attribute of the scene - setattr( - scene_cfg, - "light", - AssetBaseCfg( - prim_path="/World/light", - spawn=sim_utils.DistantLightCfg(), - ), - ) - # set additional sensor to test 'sensors' attribute of the scene - setattr(scene_cfg, "sensor", ContactSensorCfg(prim_path="/World/envs/env_.*/Robot")) - - scene_list = [] - # create two InteractiveScene instances - for _ in range(2): - with build_simulation_context(device=device, dt=sim.get_physics_dt()) as _: - scene = InteractiveScene(scene_cfg) - scene_list.append(scene) - scene_0 = scene_list[0] - scene_1 = scene_list[1] - # clear entities for scene_0 - this should not affect any data in scene_1 - scene_0.articulations.clear() - scene_0.rigid_objects.clear() - scene_0.sensors.clear() - scene_0.extras.clear() - # check that scene_0 and scene_1 do not share entity data via dictionary comparison - assert scene_0.articulations == dict() - assert scene_0.articulations != scene_1.articulations - assert scene_0.rigid_objects == dict() - assert scene_0.rigid_objects != scene_1.rigid_objects - assert scene_0.sensors == dict() - assert scene_0.sensors != scene_1.sensors - assert scene_0.extras == dict() - assert scene_0.extras != scene_1.extras + # Note: cleanup is handled by build_simulation_context's finally block @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @@ -131,7 +85,8 @@ def test_relative_flag(device, setup_scene): # test is relative == False prev_state = scene.get_state(is_relative=False) scene["robot"].write_joint_state_to_sim( - position=torch.rand_like(scene["robot"].data.joint_pos), velocity=torch.rand_like(scene["robot"].data.joint_pos) + position=torch.rand_like(scene["robot"].data.joint_pos.torch), + velocity=torch.rand_like(scene["robot"].data.joint_pos.torch), ) next_state = scene.get_state(is_relative=False) assert_state_different(prev_state, next_state) @@ -141,7 +96,8 @@ def test_relative_flag(device, setup_scene): # test is relative == True prev_state = scene.get_state(is_relative=True) scene["robot"].write_joint_state_to_sim( - position=torch.rand_like(scene["robot"].data.joint_pos), velocity=torch.rand_like(scene["robot"].data.joint_pos) + position=torch.rand_like(scene["robot"].data.joint_pos.torch), + velocity=torch.rand_like(scene["robot"].data.joint_pos.torch), ) next_state = scene.get_state(is_relative=True) assert_state_different(prev_state, next_state) @@ -159,19 +115,136 @@ def test_reset_to_env_ids_input_types(device, setup_scene): # test env_ids = None prev_state = scene.get_state() scene["robot"].write_joint_state_to_sim( - position=torch.rand_like(scene["robot"].data.joint_pos), velocity=torch.rand_like(scene["robot"].data.joint_pos) + position=torch.rand_like(scene["robot"].data.joint_pos.torch), + velocity=torch.rand_like(scene["robot"].data.joint_pos.torch), ) scene.reset_to(prev_state, env_ids=None) assert_state_equal(prev_state, scene.get_state()) # test env_ids = torch tensor scene["robot"].write_joint_state_to_sim( - position=torch.rand_like(scene["robot"].data.joint_pos), velocity=torch.rand_like(scene["robot"].data.joint_pos) + position=torch.rand_like(scene["robot"].data.joint_pos.torch), + velocity=torch.rand_like(scene["robot"].data.joint_pos.torch), ) - scene.reset_to(prev_state, env_ids=torch.arange(scene.num_envs, device=scene.device)) + scene.reset_to(prev_state, env_ids=torch.arange(scene.num_envs, device=scene.device, dtype=torch.int32)) assert_state_equal(prev_state, scene.get_state()) +def test_clone_environments_non_cfg_publishes_clone_plans(monkeypatch: pytest.MonkeyPatch): + """Non-cfg clone path must dispatch physics + USD replicate and publish a ``ClonePlan``. + + Replaces the old test that asserted a per-call visualizer clone callback was invoked. The + visualizer-fn callback was removed in favor of providers reading + :meth:`SimulationContext.get_clone_plans`; this test asserts the new contract: even + without prototype templates, the scene synthesizes a single trivial ClonePlan. + """ + from isaaclab.cloner import ClonePlan + + scene = object.__new__(InteractiveScene) + scene.cfg = SimpleNamespace(replicate_physics=False, num_envs=3) + scene.stage = object() + scene.physics_backend = "physx" + scene._sensors = {} + + set_plans_calls: list = [] + sim_state: dict = {"plans": {}} + + def _set_clone_plans(plans): + sim_state["plans"] = plans + set_plans_calls.append(plans) + + scene.sim = SimpleNamespace( + get_scene_data_requirements=lambda: SceneDataRequirement(), + update_scene_data_requirements=lambda requirements: None, + set_clone_plans=_set_clone_plans, + get_clone_plans=lambda: sim_state["plans"], + ) + scene.env_fmt = "/World/envs/env_{}" + scene._ALL_INDICES = torch.arange(3, dtype=torch.long) + scene._default_env_origins = torch.zeros((3, 3), dtype=torch.float32) + scene._is_scene_setup_from_cfg = lambda: False + + # Avoid binding this unit test to global SimulationContext singleton state. + monkeypatch.setattr(InteractiveScene, "device", property(lambda self: "cpu")) + + # ``disabled_fabric_change_notifies`` resolves the stage via UsdUtils.StageCache and would + # crash on the bare ``object()`` mocked above. This unit test exercises clone-dispatch + # logic only; the fabric notice path has its own coverage in ``test_cloner.py``. + @contextlib.contextmanager + def _noop_fabric_notices(stage, *, restore=True): + yield + + monkeypatch.setattr("isaaclab.scene.interactive_scene.cloner.disabled_fabric_change_notifies", _noop_fabric_notices) + + physics_calls = [] + usd_calls = [] + + def _physics_clone_fn(stage, *args, **kwargs): + physics_calls.append((stage, args, kwargs)) + + def _usd_replicate(stage, *args, **kwargs): + usd_calls.append((stage, args, kwargs)) + + scene.cloner_cfg = SimpleNamespace( + device="cpu", + physics_clone_fn=_physics_clone_fn, + clone_usd=True, + ) + monkeypatch.setattr("isaaclab.scene.interactive_scene.cloner.usd_replicate", _usd_replicate) + + scene.clone_environments(copy_from_source=False) + assert len(physics_calls) == 1 + assert len(usd_calls) == 1 + mapping = physics_calls[0][1][3] + assert mapping.dtype == torch.bool + assert mapping.shape == (1, scene.num_envs) + # Plans are published once per clone, regardless of physics/usd flag combinations. + assert len(set_plans_calls) == 1 + plans = set_plans_calls[-1] + assert set(plans.keys()) == {scene.env_fmt} + plan = plans[scene.env_fmt] + assert isinstance(plan, ClonePlan) + assert plan.dest_template == scene.env_fmt + assert plan.prototype_paths == [scene.env_fmt.format(0)] + assert plan.clone_mask.shape == (1, scene.num_envs) + assert scene.clone_plans is plans + + physics_calls.clear() + usd_calls.clear() + set_plans_calls.clear() + scene.clone_environments(copy_from_source=True) + assert len(physics_calls) == 0 + assert len(usd_calls) == 1 + assert len(set_plans_calls) == 1 + + +def test_aggregate_scene_data_requirements_merges_visualizers_and_renderers(monkeypatch: pytest.MonkeyPatch): + """Scene aggregation must OR visualizer and sensor-renderer requirements onto sim context. + + Replaces the old test that asserted a clone-time visualizer hook was installed from + requirements. The hook is gone; the only remaining behavior is publishing the merged + :class:`SceneDataRequirement` to the simulation context. + """ + scene = object.__new__(InteractiveScene) + scene.physics_backend = "physx" + scene.stage = object() + scene._sensors = { + "cam": SimpleNamespace(cfg=SimpleNamespace(renderer_cfg=SimpleNamespace(renderer_type="newton_warp"))) + } + + posted: list = [] + scene.sim = SimpleNamespace( + get_scene_data_requirements=lambda: SceneDataRequirement(), + update_scene_data_requirements=posted.append, + ) + + scene._aggregate_scene_data_requirements({"rerun"}) + + assert len(posted) == 1 + merged = posted[0] + assert merged.requires_newton_model + + def assert_state_equal(s1: dict, s2: dict, path=""): """ Recursively assert that s1 and s2 have the same nested keys diff --git a/source/isaaclab/test/sensors/check_multi_mesh_ray_caster.py b/source/isaaclab/test/sensors/check_multi_mesh_ray_caster.py index 73750d0de874..eed776431279 100644 --- a/source/isaaclab/test/sensors/check_multi_mesh_ray_caster.py +++ b/source/isaaclab/test/sensors/check_multi_mesh_ray_caster.py @@ -46,10 +46,9 @@ import torch -from isaacsim.core.cloner import GridCloner - import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen +from isaaclab import cloner as lab_cloner from isaaclab.assets import RigidObject, RigidObjectCfg from isaaclab.sensors.ray_caster import MultiMeshRayCaster, MultiMeshRayCasterCfg, patterns from isaaclab.sim import SimulationCfg, SimulationContext @@ -63,8 +62,10 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048): """Design the scene.""" # Create interface to clone the scene - cloner = GridCloner(spacing=10.0) - cloner.define_base_env("/World/envs") + # Create environment clones using Lab's cloner utilities + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_envs, spacing=10.0, device=sim.device) # Everything under the namespace "/World/envs/env_0" will be cloned sim.stage.DefinePrim("/World/envs/env_0", "Xform") # Define the scene @@ -99,12 +100,11 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048): ) # Clone the scene - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) - cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + lab_cloner.usd_replicate(sim.stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] + lab_cloner.filter_collisions( + sim.stage, physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] ) @@ -142,7 +142,7 @@ def main(): prim_path="/World/envs/env_.*/ball", mesh_prim_paths=mesh_targets, pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(1.6, 1.0)), - attach_yaw_only=True, + ray_alignment="yaw", debug_vis=not args_cli.headless, ) ray_caster = MultiMeshRayCaster(cfg=ray_caster_cfg) @@ -164,8 +164,8 @@ def main(): print(ray_caster) # Get the initial positions of the balls - ball_initial_poses = balls.data.root_pose_w.clone() - ball_initial_velocities = balls.data.root_vel_w.clone() + ball_initial_poses = balls.data.root_pose_w.torch.clone() + ball_initial_velocities = balls.data.root_vel_w.torch.clone() # Create a counter for resetting the scene step_count = 0 diff --git a/source/isaaclab/test/sensors/check_ray_caster.py b/source/isaaclab/test/sensors/check_ray_caster.py index 78f314fdebd6..821fc247743e 100644 --- a/source/isaaclab/test/sensors/check_ray_caster.py +++ b/source/isaaclab/test/sensors/check_ray_caster.py @@ -41,10 +41,9 @@ import torch -from isaacsim.core.cloner import GridCloner - import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen +from isaaclab import cloner as lab_cloner from isaaclab.assets import RigidObject, RigidObjectCfg from isaaclab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns from isaaclab.sim import SimulationCfg, SimulationContext @@ -57,8 +56,10 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048): """Design the scene.""" # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") + # Create environment clones using Lab's cloner utilities + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_envs, spacing=2.0, device=sim.device) # Everything under the namespace "/World/envs/env_0" will be cloned sim.stage.DefinePrim("/World/envs/env_0", "Xform") # Define the scene @@ -75,12 +76,11 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048): ) cfg.func("/World/envs/env_0/ball", cfg, translation=(0.0, 0.0, 5.0)) # Clone the scene - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) - cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + lab_cloner.usd_replicate(sim.stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] + lab_cloner.filter_collisions( + sim.stage, physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] ) @@ -133,8 +133,8 @@ def main(): print(ray_caster) # Get the initial positions of the balls - ball_initial_poses = balls.data.root_pose_w.clone() - ball_initial_velocities = balls.data.root_vel_w.clone() + ball_initial_poses = balls.data.root_pose_w.torch.clone() + ball_initial_velocities = balls.data.root_vel_w.torch.clone() # Create a counter for resetting the scene step_count = 0 diff --git a/source/isaaclab/test/sensors/test_camera.py b/source/isaaclab/test/sensors/test_camera.py index 584394bfd54f..daed8e95773d 100644 --- a/source/isaaclab/test/sensors/test_camera.py +++ b/source/isaaclab/test/sensors/test_camera.py @@ -16,7 +16,6 @@ """Rest everything follows.""" import copy -import os import random import numpy as np @@ -25,20 +24,19 @@ import torch import omni.replicator.core as rep -from isaacsim.core.prims import SingleGeometryPrim, SingleRigidPrim from pxr import Gf, Usd, UsdGeom import isaaclab.sim as sim_utils from isaaclab.sensors.camera import Camera, CameraCfg -from isaaclab.utils import convert_dict_to_backend -from isaaclab.utils.math import convert_quat -from isaaclab.utils.timer import Timer + +pytestmark = pytest.mark.isaacsim_ci # sample camera poses POSITION = (2.5, 2.5, 2.5) -QUAT_ROS = (-0.17591989, 0.33985114, 0.82047325, -0.42470819) -QUAT_OPENGL = (0.33985113, 0.17591988, 0.42470818, 0.82047324) -QUAT_WORLD = (-0.3647052, -0.27984815, -0.1159169, 0.88047623) +# Quaternions in xyzw format +QUAT_ROS = (0.33985114, 0.82047325, -0.42470819, -0.17591989) +QUAT_OPENGL = (0.17591988, 0.42470818, 0.82047324, 0.33985113) +QUAT_WORLD = (-0.27984815, -0.1159169, 0.88047623, -0.3647052) # NOTE: setup and teardown are own function to allow calling them in the tests @@ -77,10 +75,8 @@ def teardown(sim: sim_utils.SimulationContext): # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() + sim.stop() # clear the stage - sim.clear_all_callbacks() sim.clear_instance() @@ -99,7 +95,7 @@ def test_camera_init(setup_sim_camera): # Create camera camera = Camera(camera_cfg) # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() + assert sim.get_setting("/isaaclab/render/rtx_sensors") # Play sim sim.reset() # Check if camera is initialized @@ -108,12 +104,6 @@ def test_camera_init(setup_sim_camera): assert camera._sensor_prims[0].GetPath().pathString == camera_cfg.prim_path assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # Check buffers that exist and have correct shapes assert camera.data.pos_w.shape == (1, 3) assert camera.data.quat_w_ros.shape == (1, 4) @@ -121,7 +111,7 @@ def test_camera_init(setup_sim_camera): assert camera.data.quat_w_opengl.shape == (1, 4) assert camera.data.intrinsic_matrices.shape == (1, 3, 3) assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - assert camera.data.info == [{camera_cfg.data_types[0]: None}] + assert camera.data.info == {camera_cfg.data_types[0]: None} # Simulate physics for _ in range(10): @@ -185,28 +175,23 @@ def test_camera_init_offset(setup_sim_camera): np.testing.assert_allclose(prim_tf_ros[0:3, 3], cam_cfg_offset_ros.offset.pos) np.testing.assert_allclose(prim_tf_opengl[0:3, 3], cam_cfg_offset_opengl.offset.pos) np.testing.assert_allclose(prim_tf_world[0:3, 3], cam_cfg_offset_world.offset.pos) + # scipy's as_quat() returns xyzw format, which matches our config format np.testing.assert_allclose( - convert_quat(tf.Rotation.from_matrix(prim_tf_ros[:3, :3]).as_quat(), "wxyz"), + tf.Rotation.from_matrix(prim_tf_ros[:3, :3]).as_quat(), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) np.testing.assert_allclose( - convert_quat(tf.Rotation.from_matrix(prim_tf_opengl[:3, :3]).as_quat(), "wxyz"), + tf.Rotation.from_matrix(prim_tf_opengl[:3, :3]).as_quat(), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) np.testing.assert_allclose( - convert_quat(tf.Rotation.from_matrix(prim_tf_world[:3, :3]).as_quat(), "wxyz"), + tf.Rotation.from_matrix(prim_tf_world[:3, :3]).as_quat(), cam_cfg_offset_opengl.offset.rot, rtol=1e-5, ) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # check if transform correctly set in output np.testing.assert_allclose(camera_ros.data.pos_w[0].cpu().numpy(), cam_cfg_offset_ros.offset.pos, rtol=1e-5) np.testing.assert_allclose(camera_ros.data.quat_w_ros[0].cpu().numpy(), QUAT_ROS, rtol=1e-5) @@ -230,11 +215,6 @@ def test_multi_camera_init(setup_sim_camera): # play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() # Simulate physics for _ in range(10): # perform rendering @@ -266,11 +246,6 @@ def test_multi_camera_with_different_resolution(setup_sim_camera): # play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() # perform rendering sim.step() # update camera @@ -350,12 +325,6 @@ def test_camera_set_world_poses(setup_sim_camera): # set new pose camera.set_world_poses(position.clone(), orientation.clone(), convention="world") - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # check if transform correctly set in output torch.testing.assert_close(camera.data.pos_w, position) torch.testing.assert_close(camera.data.quat_w_world, orientation) @@ -378,12 +347,6 @@ def test_camera_set_world_poses_from_view(setup_sim_camera): # set new pose camera.set_world_poses_from_view(eyes.clone(), targets.clone()) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # check if transform correctly set in output torch.testing.assert_close(camera.data.pos_w, eyes) torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt) @@ -404,12 +367,6 @@ def test_intrinsic_matrix(setup_sim_camera): # Set matrix into simulator camera.set_intrinsic_matrices(rs_intrinsic_matrix.clone()) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # Simulate physics for _ in range(10): # perform rendering @@ -434,7 +391,7 @@ def test_depth_clipping(setup_sim_camera): sim, _, dt = setup_sim_camera camera_cfg_zero = CameraCfg( prim_path="/World/CameraZero", - offset=CameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(0.362, 0.873, -0.302, -0.125), convention="ros"), spawn=sim_utils.PinholeCameraCfg().from_intrinsic_matrix( focal_length=38.0, intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], @@ -451,22 +408,17 @@ def test_depth_clipping(setup_sim_camera): camera_cfg_none = copy.deepcopy(camera_cfg_zero) camera_cfg_none.prim_path = "/World/CameraNone" - camera_cfg_none.depth_clipping_behavior = "none" + camera_cfg_none.renderer_cfg.depth_clipping_behavior = "none" camera_none = Camera(camera_cfg_none) camera_cfg_max = copy.deepcopy(camera_cfg_zero) camera_cfg_max.prim_path = "/World/CameraMax" - camera_cfg_max.depth_clipping_behavior = "max" + camera_cfg_max.renderer_cfg.depth_clipping_behavior = "max" camera_max = Camera(camera_cfg_max) # Play sim sim.reset() - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - camera_zero.update(dt) camera_none.update(dt) camera_max.update(dt) @@ -542,6 +494,7 @@ def test_camera_resolution_all_colorize(setup_sim_camera): camera_cfg.data_types = [ "rgb", "rgba", + "albedo", "depth", "distance_to_camera", "distance_to_image_plane", @@ -551,20 +504,15 @@ def test_camera_resolution_all_colorize(setup_sim_camera): "instance_segmentation_fast", "instance_id_segmentation_fast", ] - camera_cfg.colorize_instance_id_segmentation = True - camera_cfg.colorize_instance_segmentation = True - camera_cfg.colorize_semantic_segmentation = True + camera_cfg.renderer_cfg.colorize_instance_id_segmentation = True + camera_cfg.renderer_cfg.colorize_instance_segmentation = True + camera_cfg.renderer_cfg.colorize_semantic_segmentation = True # Create camera camera = Camera(camera_cfg) # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # expected sizes @@ -576,6 +524,7 @@ def test_camera_resolution_all_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].shape == hw_3c_shape assert output["rgba"].shape == hw_4c_shape + assert output["albedo"].shape == hw_4c_shape assert output["depth"].shape == hw_1c_shape assert output["distance_to_camera"].shape == hw_1c_shape assert output["distance_to_image_plane"].shape == hw_1c_shape @@ -589,6 +538,7 @@ def test_camera_resolution_all_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].dtype == torch.uint8 assert output["rgba"].dtype == torch.uint8 + assert output["albedo"].dtype == torch.uint8 assert output["depth"].dtype == torch.float assert output["distance_to_camera"].dtype == torch.float assert output["distance_to_image_plane"].dtype == torch.float @@ -606,6 +556,7 @@ def test_camera_resolution_no_colorize(setup_sim_camera): camera_cfg.data_types = [ "rgb", "rgba", + "albedo", "depth", "distance_to_camera", "distance_to_image_plane", @@ -615,19 +566,14 @@ def test_camera_resolution_no_colorize(setup_sim_camera): "instance_segmentation_fast", "instance_id_segmentation_fast", ] - camera_cfg.colorize_instance_id_segmentation = False - camera_cfg.colorize_instance_segmentation = False - camera_cfg.colorize_semantic_segmentation = False + camera_cfg.renderer_cfg.colorize_instance_id_segmentation = False + camera_cfg.renderer_cfg.colorize_instance_segmentation = False + camera_cfg.renderer_cfg.colorize_semantic_segmentation = False # Create camera camera = Camera(camera_cfg) # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(12): - sim.step() camera.update(dt) # expected sizes @@ -639,6 +585,7 @@ def test_camera_resolution_no_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].shape == hw_3c_shape assert output["rgba"].shape == hw_4c_shape + assert output["albedo"].shape == hw_4c_shape assert output["depth"].shape == hw_1c_shape assert output["distance_to_camera"].shape == hw_1c_shape assert output["distance_to_image_plane"].shape == hw_1c_shape @@ -652,6 +599,7 @@ def test_camera_resolution_no_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].dtype == torch.uint8 assert output["rgba"].dtype == torch.uint8 + assert output["albedo"].dtype == torch.uint8 assert output["depth"].dtype == torch.float assert output["distance_to_camera"].dtype == torch.float assert output["distance_to_image_plane"].dtype == torch.float @@ -669,6 +617,7 @@ def test_camera_large_resolution_all_colorize(setup_sim_camera): camera_cfg.data_types = [ "rgb", "rgba", + "albedo", "depth", "distance_to_camera", "distance_to_image_plane", @@ -678,9 +627,9 @@ def test_camera_large_resolution_all_colorize(setup_sim_camera): "instance_segmentation_fast", "instance_id_segmentation_fast", ] - camera_cfg.colorize_instance_id_segmentation = True - camera_cfg.colorize_instance_segmentation = True - camera_cfg.colorize_semantic_segmentation = True + camera_cfg.renderer_cfg.colorize_instance_id_segmentation = True + camera_cfg.renderer_cfg.colorize_instance_segmentation = True + camera_cfg.renderer_cfg.colorize_semantic_segmentation = True camera_cfg.width = 512 camera_cfg.height = 512 # Create camera @@ -689,11 +638,6 @@ def test_camera_large_resolution_all_colorize(setup_sim_camera): # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # expected sizes @@ -705,6 +649,7 @@ def test_camera_large_resolution_all_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].shape == hw_3c_shape assert output["rgba"].shape == hw_4c_shape + assert output["albedo"].shape == hw_4c_shape assert output["depth"].shape == hw_1c_shape assert output["distance_to_camera"].shape == hw_1c_shape assert output["distance_to_image_plane"].shape == hw_1c_shape @@ -718,6 +663,7 @@ def test_camera_large_resolution_all_colorize(setup_sim_camera): output = camera.data.output assert output["rgb"].dtype == torch.uint8 assert output["rgba"].dtype == torch.uint8 + assert output["albedo"].dtype == torch.uint8 assert output["depth"].dtype == torch.float assert output["distance_to_camera"].dtype == torch.float assert output["distance_to_image_plane"].dtype == torch.float @@ -739,11 +685,6 @@ def test_camera_resolution_rgb_only(setup_sim_camera): # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # expected sizes @@ -766,11 +707,6 @@ def test_camera_resolution_rgba_only(setup_sim_camera): # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # expected sizes @@ -782,6 +718,54 @@ def test_camera_resolution_rgba_only(setup_sim_camera): assert output["rgba"].dtype == torch.uint8 +def test_camera_resolution_albedo_only(setup_sim_camera): + """Test camera resolution is correctly set for albedo only.""" + # Add all types + sim, camera_cfg, dt = setup_sim_camera + camera_cfg.data_types = ["albedo"] + # Create camera + camera = Camera(camera_cfg) + + # Play sim + sim.reset() + + camera.update(dt) + + # expected sizes + hw_4c_shape = (1, camera_cfg.height, camera_cfg.width, 4) + # access image data and compare shapes + output = camera.data.output + assert output["albedo"].shape == hw_4c_shape + # access image data and compare dtype + assert output["albedo"].dtype == torch.uint8 + + +@pytest.mark.parametrize( + "data_type", + ["simple_shading_constant_diffuse", "simple_shading_diffuse_mdl", "simple_shading_full_mdl"], +) +def test_camera_resolution_simple_shading_only(setup_sim_camera, data_type): + """Test camera resolution is correctly set for simple shading only.""" + # Add all types + sim, camera_cfg, dt = setup_sim_camera + camera_cfg.data_types = [data_type] + # Create camera + camera = Camera(camera_cfg) + + # Play sim + sim.reset() + + camera.update(dt) + + # expected sizes + hw_3c_shape = (1, camera_cfg.height, camera_cfg.width, 3) + # access image data and compare shapes + output = camera.data.output + assert output[data_type].shape == hw_3c_shape + # access image data and compare dtype + assert output[data_type].dtype == torch.uint8 + + def test_camera_resolution_depth_only(setup_sim_camera): """Test camera resolution is correctly set for depth only.""" # Add all types @@ -793,11 +777,6 @@ def test_camera_resolution_depth_only(setup_sim_camera): # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # expected sizes @@ -809,68 +788,377 @@ def test_camera_resolution_depth_only(setup_sim_camera): assert output["depth"].dtype == torch.float -def test_throughput(setup_sim_camera): - """Checks that the single camera gets created properly with a rig.""" - # Create directory temp dir to dump the results - file_dir = os.path.dirname(os.path.realpath(__file__)) - temp_dir = os.path.join(file_dir, "output", "camera", "throughput") - os.makedirs(temp_dir, exist_ok=True) - # Create replicator writer - rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3) - # create camera +def test_sensor_print(setup_sim_camera): + """Test sensor print is working correctly.""" + # Create sensor sim, camera_cfg, dt = setup_sim_camera - camera_cfg.height = 480 - camera_cfg.width = 640 + sensor = Camera(cfg=camera_cfg) + # Play sim + sim.reset() + # print info + print(sensor) + + +def setup_with_device(device) -> tuple[sim_utils.SimulationContext, CameraCfg, float]: + camera_cfg = CameraCfg( + height=128, + width=256, + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 1.0, 0.0, 0.0), convention="ros"), + prim_path="/World/Camera", + update_period=0, + data_types=["rgb", "distance_to_camera"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) + ), + ) + sim_utils.create_new_stage() + dt = 0.01 + sim_cfg = sim_utils.SimulationCfg(dt=dt, device=device) + sim = sim_utils.SimulationContext(sim_cfg) + _populate_scene() + sim_utils.update_stage() + return sim, camera_cfg, dt + + +@pytest.fixture(scope="function") +def setup_camera_device(device): + """Fixture with explicit device parametrization for GPU/CPU testing.""" + sim, camera_cfg, dt = setup_with_device(device) + yield sim, camera_cfg, dt + teardown(sim) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_camera_multi_regex_init(setup_camera_device, device): + """Test multi-camera initialization with regex prim paths and content validation.""" + sim, camera_cfg, dt = setup_camera_device + + num_cameras = 9 + for i in range(num_cameras): + sim_utils.create_prim(f"/World/Origin_{i}", "Xform") + + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" camera = Camera(camera_cfg) - # Play simulator sim.reset() - # Set camera pose - eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device) - targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device) - camera.set_world_poses_from_view(eyes, targets) + assert camera.is_initialized + assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" + assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): + assert camera.data.pos_w.shape == (num_cameras, 3) + assert camera.data.quat_w_ros.shape == (num_cameras, 4) + assert camera.data.quat_w_world.shape == (num_cameras, 4) + assert camera.data.quat_w_opengl.shape == (num_cameras, 4) + assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) + assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) + + for _ in range(10): sim.step() - # Simulate physics + camera.update(dt) + for im_type, im_data in camera.data.output.items(): + if im_type == "rgb": + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) + for i in range(4): + assert (im_data[i] / 255.0).mean() > 0.0 + elif im_type == "distance_to_camera": + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) + for i in range(4): + assert im_data[i].mean() > 0.0 + del camera + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_camera_all_annotators(setup_camera_device, device): + """Test all supported annotators produce correct shapes, dtypes, content, and info.""" + sim, camera_cfg, dt = setup_camera_device + all_annotator_types = [ + "rgb", + "rgba", + "albedo", + "depth", + "distance_to_camera", + "distance_to_image_plane", + "normals", + "motion_vectors", + "semantic_segmentation", + "instance_segmentation_fast", + "instance_id_segmentation_fast", + ] + + num_cameras = 9 + for i in range(num_cameras): + sim_utils.create_prim(f"/World/Origin_{i}", "Xform") + + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.data_types = all_annotator_types + camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" + camera = Camera(camera_cfg) + + sim.reset() + + assert camera.is_initialized + assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) + + for _ in range(10): + sim.step() + camera.update(dt) + for data_type, im_data in camera.data.output.items(): + if data_type in ["rgb", "normals"]: + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) + elif data_type in [ + "rgba", + "albedo", + "semantic_segmentation", + "instance_segmentation_fast", + "instance_id_segmentation_fast", + ]: + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) + for i in range(num_cameras): + assert (im_data[i] / 255.0).mean() > 0.0 + elif data_type in ["motion_vectors"]: + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) + for i in range(num_cameras): + assert im_data[i].mean() != 0.0 + elif data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) + for i in range(num_cameras): + assert im_data[i].mean() > 0.0 + + output = camera.data.output + info = camera.data.info + assert output["rgb"].dtype == torch.uint8 + assert output["rgba"].dtype == torch.uint8 + assert output["albedo"].dtype == torch.uint8 + assert output["depth"].dtype == torch.float + assert output["distance_to_camera"].dtype == torch.float + assert output["distance_to_image_plane"].dtype == torch.float + assert output["normals"].dtype == torch.float + assert output["motion_vectors"].dtype == torch.float + assert output["semantic_segmentation"].dtype == torch.uint8 + assert output["instance_segmentation_fast"].dtype == torch.uint8 + assert output["instance_id_segmentation_fast"].dtype == torch.uint8 + assert isinstance(info["semantic_segmentation"], dict) + assert isinstance(info["instance_segmentation_fast"], dict) + assert isinstance(info["instance_id_segmentation_fast"], dict) + + del camera + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_camera_segmentation_non_colorize(setup_camera_device, device): + """Test segmentation outputs with colorization disabled produce correct dtypes and info.""" + sim, camera_cfg, dt = setup_camera_device + num_cameras = 9 + for i in range(num_cameras): + sim_utils.create_prim(f"/World/Origin_{i}", "Xform") + + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.data_types = ["semantic_segmentation", "instance_segmentation_fast", "instance_id_segmentation_fast"] + camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" + camera_cfg.renderer_cfg.colorize_semantic_segmentation = False + camera_cfg.renderer_cfg.colorize_instance_segmentation = False + camera_cfg.renderer_cfg.colorize_instance_id_segmentation = False + camera = Camera(camera_cfg) + + sim.reset() + for _ in range(5): - # perform rendering sim.step() - # update camera - with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): - camera.update(dt) - # Save images - with Timer(f"Time taken for writing data with shape {camera.image_shape} "): - # Pack data back into replicator format to save them using its writer - rep_output = {"annotators": {}} - camera_data = convert_dict_to_backend({k: v[0] for k, v in camera.data.output.items()}, backend="numpy") - for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()): - if info is not None: - rep_output["annotators"][key] = {"render_product": {"data": data, **info}} - else: - rep_output["annotators"][key] = {"render_product": {"data": data}} - # Save images - rep_output["trigger_outputs"] = {"on_time": camera.frame[0]} - rep_writer.write(rep_output) - print("----------------------------------------") - # Check image data - for im_data in camera.data.output.values(): - assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1) + camera.update(dt) + for seg_type in camera_cfg.data_types: + assert camera.data.output[seg_type].shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) + assert camera.data.output[seg_type].dtype == torch.int32 + assert isinstance(camera.data.info[seg_type], dict) + + del camera + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_camera_normals_unit_length(setup_camera_device, device): + """Test that normals output vectors have approximately unit length.""" + sim, camera_cfg, dt = setup_camera_device + num_cameras = 9 + for i in range(num_cameras): + sim_utils.create_prim(f"/World/Origin_{i}", "Xform") + + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.data_types = ["normals"] + camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" + camera = Camera(camera_cfg) -def test_sensor_print(setup_sim_camera): - """Test sensor print is working correctly.""" - # Create sensor - sim, camera_cfg, dt = setup_sim_camera - sensor = Camera(cfg=camera_cfg) - # Play sim sim.reset() - # print info - print(sensor) + + for _ in range(10): + sim.step() + camera.update(dt) + im_data = camera.data.output["normals"] + assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) + for i in range(4): + assert im_data[i].mean() > 0.0 + norms = torch.linalg.norm(im_data, dim=-1) + assert torch.allclose(norms, torch.ones_like(norms), atol=1e-9) + + assert camera.data.output["normals"].dtype == torch.float + del camera + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_camera_data_types_ordering(setup_camera_device, device): + """Test that requesting specific data types produces the expected output keys.""" + sim, camera_cfg, dt = setup_camera_device + camera_cfg_distance = copy.deepcopy(camera_cfg) + camera_cfg_distance.data_types = ["distance_to_camera"] + camera_cfg_distance.prim_path = "/World/CameraDistance" + camera_distance = Camera(camera_cfg_distance) + + camera_cfg_depth = copy.deepcopy(camera_cfg) + camera_cfg_depth.data_types = ["depth"] + camera_cfg_depth.prim_path = "/World/CameraDepth" + camera_depth = Camera(camera_cfg_depth) + + camera_cfg_both = copy.deepcopy(camera_cfg) + camera_cfg_both.data_types = ["distance_to_camera", "depth"] + camera_cfg_both.prim_path = "/World/CameraBoth" + camera_both = Camera(camera_cfg_both) + + sim.reset() + + assert camera_distance.is_initialized + assert camera_depth.is_initialized + assert camera_both.is_initialized + assert list(camera_distance.data.output.keys()) == ["distance_to_camera"] + assert list(camera_depth.data.output.keys()) == ["depth"] + assert list(camera_both.data.output.keys()) == ["depth", "distance_to_camera"] + + del camera_distance + del camera_depth + del camera_both + + +@pytest.mark.parametrize("device", ["cuda:0"]) +def test_camera_frame_offset(setup_camera_device, device): + """Test that camera reflects scene color changes without frame-offset lag.""" + sim, camera_cfg, dt = setup_camera_device + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.height = 480 + camera_cfg.width = 480 + camera = Camera(camera_cfg) + + stage = sim_utils.get_current_stage() + for i in range(10): + prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") + color = Gf.Vec3f(1, 1, 1) + UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([color]) + + sim.reset() + + for _ in range(100): + sim.step() + camera.update(dt) + + image_before = camera.data.output["rgb"].clone() / 255.0 + + for i in range(10): + prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") + color = Gf.Vec3f(0, 0, 0) + UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([color]) + + sim.step() + camera.update(dt) + + image_after = camera.data.output["rgb"].clone() / 255.0 + + assert torch.abs(image_after - image_before).mean() > 0.01 + + del camera + + +def test_camera_warns_once_on_unsupported_data_types(setup_sim_camera, caplog): + """Test Camera warns once and drops data types its renderer cannot produce.""" + import logging + + from isaaclab.renderers import Renderer + from isaaclab.renderers.base_renderer import BaseRenderer + + sim, camera_cfg, dt = setup_sim_camera + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.data_types = ["rgba", "depth", "normals"] + + from isaaclab.sensors.camera.camera_data import RenderBufferKind, RenderBufferSpec + + class _PartialRenderer(BaseRenderer): + """Publishes only ``rgba`` in its supported-output contract.""" + + def __init__(self, cfg=None): + self.cfg = cfg + + def supported_output_types(self): + return {RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8)} + + def prepare_stage(self, stage, num_envs): + pass + + def create_render_data(self, sensor): + return object() + + def set_outputs(self, render_data, output_data): + pass + + def update_transforms(self): + pass + + def update_camera(self, render_data, positions, orientations, intrinsics): + pass + + def render(self, render_data): + pass + + def read_output(self, render_data, camera_data): + pass + + def cleanup(self, render_data): + pass + + backend = Renderer._get_backend(camera_cfg.renderer_cfg) + original = Renderer._registry.get(backend) + Renderer._registry[backend] = _PartialRenderer + try: + camera = Camera(camera_cfg) + caplog.clear() + with caplog.at_level(logging.WARNING, logger="isaaclab.sensors.camera.camera"): + sim.reset() + # Step a few frames and confirm the warning is emitted once at init. + for _ in range(3): + sim.step() + camera.update(dt) + + warning_records = [ + r for r in caplog.records if r.levelno == logging.WARNING and "does not support" in r.getMessage() + ] + assert len(warning_records) == 1, ( + f"Expected exactly one 'does not support' warning, got {len(warning_records)}:" + f" {[r.getMessage() for r in warning_records]}" + ) + msg = warning_records[0].getMessage() + assert "_PartialRenderer" in msg + assert "depth" in msg + assert "normals" in msg + assert "rgba" not in msg + + # Only the supported subset is in ``data.output``; the rest were dropped. + assert set(camera.data.output.keys()) == {"rgba"} + # ``data.info`` mirrors the ``data.output`` keys. + assert set(camera.data.info.keys()) == {"rgba"} + + del camera + finally: + if original is not None: + Renderer._registry[backend] = original + else: + Renderer._registry.pop(backend, None) def _populate_scene(): @@ -903,6 +1191,8 @@ def _populate_scene(): color = Gf.Vec3f(random.random(), random.random(), random.random()) geom_prim.CreateDisplayColorAttr() geom_prim.GetDisplayColorAttr().Set([color]) - # add rigid properties - SingleGeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) - SingleRigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) + # add rigid body and collision properties using Isaac Lab schemas + prim_path = f"/World/Objects/Obj_{i:02d}" + sim_utils.define_rigid_body_properties(prim_path, sim_utils.RigidBodyPropertiesCfg()) + sim_utils.define_mass_properties(prim_path, sim_utils.MassPropertiesCfg(mass=5.0)) + sim_utils.define_collision_properties(prim_path, sim_utils.CollisionPropertiesCfg()) diff --git a/source/isaaclab/test/sensors/test_first_frame_textured_rendering.py b/source/isaaclab/test/sensors/test_first_frame_textured_rendering.py new file mode 100644 index 000000000000..1c2ef0ec7b00 --- /dev/null +++ b/source/isaaclab/test/sensors/test_first_frame_textured_rendering.py @@ -0,0 +1,161 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True, enable_cameras=True).app + +"""Rest everything follows.""" + +import pytest +import torch + +import omni.replicator.core as rep + +import isaaclab.sim as sim_utils +from isaaclab.sensors.camera import Camera, CameraCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + +# resolution +HEIGHT = 256 +WIDTH = 256 + +# grey default-material detection: channels within this tolerance and mean below threshold +GREY_CHANNEL_TOLERANCE = 3.0 +GREY_MEAN_THRESHOLD = 85.0 + +# number of extra sim steps before capturing the stabilised reference frame +STABILISATION_STEPS = 5 + +# max allowed per-channel mean difference between first and stabilised frames +FRAME_CONSISTENCY_THRESHOLD = 15.0 + +# scene: dome light +DOME_LIGHT_INTENSITY = 3000.0 + +# scene: textured cube pose +CUBE_TRANSLATION = (0.0, 0.0, 0.6) +CUBE_ORIENTATION = (0.7071, 0.0, 0.7071, 0.0) # rotate DexCube with its yellow "E" face texture up +CUBE_SCALE = (0.9, 0.9, 0.9) + + +def _is_grey(mean_rgb: torch.Tensor) -> bool: + """Return True if mean_rgb looks like the grey default material.""" + channels_equal = (mean_rgb[1] - mean_rgb[0]).abs() < GREY_CHANNEL_TOLERANCE and ( + mean_rgb[2] - mean_rgb[0] + ).abs() < GREY_CHANNEL_TOLERANCE + all_low = mean_rgb.mean() < GREY_MEAN_THRESHOLD + return bool(channels_equal and all_low) + + +@pytest.fixture(scope="function") +def setup_sim(device): + """Fixture to set up and tear down the textured rendering test environment.""" + # Create a new stage + sim_utils.create_new_stage() + # Simulation time-step + dt = 0.01 + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(dt=dt, device=device) + sim = sim_utils.SimulationContext(sim_cfg) + # populate scene + _populate_scene() + # load stage + sim_utils.update_stage() + yield sim, dt + # Teardown + rep.vp_manager.destroy_hydra_textures("Replicator") + sim.stop() + sim.clear_instance() + + +def _assert_first_frame_textured(first_frame: torch.Tensor, stable_frame: torch.Tensor): + """Verify that first_frame shows loaded textures and is consistent with stable_frame.""" + mean_first = first_frame.mean(dim=(0, 1)) + mean_stable = stable_frame.mean(dim=(0, 1)) + # Guard 1: not the grey default material + assert not _is_grey(mean_first), ( + f"First frame looks like the grey default material " + f"(mean RGB: {mean_first[0]:.1f}, {mean_first[1]:.1f}, {mean_first[2]:.1f}). " + "The renderer's streaming wait (ensure_isaac_rtx_render_update) " + "may not have completed texture loading before the first capture." + ) + + # Guard 2: first frame and stabilised frame are broadly consistent + per_channel_diff = (mean_first - mean_stable).abs() + assert per_channel_diff.max().item() < FRAME_CONSISTENCY_THRESHOLD, ( + f"First and stabilised frames differ too much per-channel " + f"(max delta {per_channel_diff.max():.1f}, means: " + f"first=({mean_first[0]:.1f}, {mean_first[1]:.1f}, {mean_first[2]:.1f}), " + f"stable=({mean_stable[0]:.1f}, {mean_stable[1]:.1f}, {mean_stable[2]:.1f})). " + "The first frame may not be fully textured." + ) + + +@pytest.mark.parametrize("device", ["cuda:0"]) +@pytest.mark.isaacsim_ci +def test_first_frame_is_textured_camera(setup_sim, device): + """First RTX frame from a USD Camera must show loaded textures, not a grey placeholder.""" + sim, dt = setup_sim + camera_cfg = CameraCfg( + height=HEIGHT, + width=WIDTH, + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.75), rot=(0.0, 1.0, 0.0, 0.0), convention="ros"), + prim_path="/World/Camera", + update_period=0, + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, + focus_distance=400.0, + horizontal_aperture=20.955, + clipping_range=(0.1, 1.0e5), + ), + ) + # Create camera + camera = Camera(camera_cfg) + + sim.reset() + + # The first sim step + camera update should produce textured output + sim.step() + camera.update(dt) + first_frame = camera.data.output["rgb"][0].clone().to(dtype=torch.float32) + + # Let the renderer stabilise, then capture the reference frame + for _ in range(STABILISATION_STEPS): + sim.step() + camera.update(dt) + stable_frame = camera.data.output["rgb"][0].clone().to(dtype=torch.float32) + + del camera + + _assert_first_frame_textured(first_frame, stable_frame) + + +""" +Helper functions. +""" + + +def _populate_scene(): + """Add prims to the scene.""" + # Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # Lights + cfg = sim_utils.DomeLightCfg(intensity=DOME_LIGHT_INTENSITY, color=(1.0, 1.0, 1.0)) + cfg.func("/World/Light/Dome", cfg) + # Textured cube rotated so yellow "E" face is visible + sim_utils.create_prim( + "/World/Objects/ReferenceCube", + "Xform", + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + translation=CUBE_TRANSLATION, + orientation=CUBE_ORIENTATION, + scale=CUBE_SCALE, + ) diff --git a/source/isaaclab/test/sensors/test_multi_mesh_ray_caster.py b/source/isaaclab/test/sensors/test_multi_mesh_ray_caster.py index c27b25b53b79..37e4818c7991 100644 --- a/source/isaaclab/test/sensors/test_multi_mesh_ray_caster.py +++ b/source/isaaclab/test/sensors/test_multi_mesh_ray_caster.py @@ -91,7 +91,7 @@ def test_raycast_multi_cubes(device, trimesh_box, rays): return_normal=True, return_face_id=True, mesh_positions_w=torch.tensor([[[0, 0, 0], [0, 2, 0]]], dtype=torch.float32, device=device), - mesh_orientations_w=torch.tensor([[[1, 0, 0, 0], [1, 0, 0, 0]]], dtype=torch.float32, device=device), + mesh_orientations_w=torch.tensor([[[0, 0, 0, 1], [0, 0, 0, 1]]], dtype=torch.float32, device=device), return_mesh_id=True, ) diff --git a/source/isaaclab/test/sensors/test_multi_mesh_ray_caster_camera.py b/source/isaaclab/test/sensors/test_multi_mesh_ray_caster_camera.py index 6e30a5fcdc98..8657c938c691 100644 --- a/source/isaaclab/test/sensors/test_multi_mesh_ray_caster_camera.py +++ b/source/isaaclab/test/sensors/test_multi_mesh_ray_caster_camera.py @@ -16,7 +16,6 @@ """Rest everything follows.""" import copy -import os import numpy as np import pytest @@ -31,14 +30,12 @@ from isaaclab.sim import PinholeCameraCfg from isaaclab.terrains.trimesh.utils import make_plane from isaaclab.terrains.utils import create_prim_from_mesh -from isaaclab.utils import convert_dict_to_backend -from isaaclab.utils.timer import Timer -# sample camera poses +# sample camera poses (quaternions in xyzw format) POSITION = [2.5, 2.5, 2.5] -QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819] -QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324] -QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623] +QUAT_ROS = [0.33985114, 0.82047325, -0.42470819, -0.17591989] +QUAT_OPENGL = [0.17591988, 0.42470818, 0.82047324, 0.33985113] +QUAT_WORLD = [-0.27984815, -0.1159169, 0.88047623, -0.3647052] @pytest.fixture(scope="function") @@ -61,7 +58,7 @@ def setup_simulation(): prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), debug_vis=False, pattern_cfg=patterns.PinholeCameraPatternCfg( focal_length=24.0, @@ -81,10 +78,8 @@ def setup_simulation(): # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() + sim.stop() # clear the stage - sim.clear_all_callbacks() sim.clear_instance() @@ -136,11 +131,6 @@ def test_camera_init(setup_simulation): sim.reset() # Check if camera is initialized assert camera.is_initialized - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() # Check buffers that exists and have correct shapes assert camera.data.pos_w.shape == (1, 3) assert camera.data.quat_w_ros.shape == (1, 4) @@ -148,7 +138,7 @@ def test_camera_init(setup_simulation): assert camera.data.quat_w_opengl.shape == (1, 4) assert camera.data.intrinsic_matrices.shape == (1, 3, 3) assert camera.data.image_shape == (camera_cfg.pattern_cfg.height, camera_cfg.pattern_cfg.width) - assert camera.data.info == [{camera_cfg.data_types[0]: None}] + assert camera.data.info == {camera_cfg.data_types[0]: None} # Simulate physics for _ in range(10): # perform rendering @@ -171,11 +161,6 @@ def test_camera_resolution(setup_simulation): camera = MultiMeshRayCasterCamera(cfg=camera_cfg) # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() camera.update(dt) # access image data and compare shapes for im_data in camera.data.output.values(): @@ -200,7 +185,7 @@ def test_camera_init_intrinsic_matrix(setup_simulation): prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), debug_vis=False, pattern_cfg=patterns.PinholeCameraPatternCfg.from_intrinsic_matrix( intrinsic_matrix=intrinsic_matrix, @@ -256,11 +241,6 @@ def test_multi_camera_init(setup_simulation): # play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() # Simulate physics for _ in range(10): # perform rendering @@ -338,11 +318,6 @@ def test_intrinsic_matrix(setup_simulation, height, width): rs_intrinsic_matrix = torch.tensor(rs_intrinsic_matrix, device=camera.device).reshape(3, 3).unsqueeze(0) # Set matrix into simulator camera.set_intrinsic_matrices(rs_intrinsic_matrix.clone()) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() # Simulate physics for _ in range(10): # perform rendering @@ -355,64 +330,6 @@ def test_intrinsic_matrix(setup_simulation, height, width): del camera -@pytest.mark.isaacsim_ci -def test_throughput(setup_simulation): - """Test camera throughput for different image sizes.""" - sim, dt, camera_cfg = setup_simulation - - # Create directory temp dir to dump the results - file_dir = os.path.dirname(os.path.realpath(__file__)) - temp_dir = os.path.join(file_dir, "output", "camera", "throughput") - os.makedirs(temp_dir, exist_ok=True) - # Create replicator writer - rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3) - # create camera - camera_cfg_copy = copy.deepcopy(camera_cfg) - camera_cfg_copy.pattern_cfg.height = 480 - camera_cfg_copy.pattern_cfg.width = 640 - camera = MultiMeshRayCasterCamera(camera_cfg_copy) - - # Play simulator - sim.reset() - - # Set camera pose - eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device) - targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device) - camera.set_world_poses_from_view(eyes, targets) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # Simulate physics - for _ in range(5): - # perform rendering - sim.step() - # update camera - with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): - camera.update(dt) - # Save images - with Timer(f"Time taken for writing data with shape {camera.image_shape} "): - # Pack data back into replicator format to save them using its writer - rep_output = {"annotators": {}} - camera_data = convert_dict_to_backend(camera.data.output, backend="numpy") - for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()): - if info is not None: - rep_output["annotators"][key] = {"render_product": {"data": data, **info}} - else: - rep_output["annotators"][key] = {"render_product": {"data": data}} - # Save images - rep_output["trigger_outputs"] = {"on_time": camera.frame[0]} - rep_writer.write(rep_output) - print("----------------------------------------") - # Check image data - for im_data in camera.data.output.values(): - assert im_data.shape == (1, camera_cfg_copy.pattern_cfg.height, camera_cfg_copy.pattern_cfg.width, 1) - - del camera - - @pytest.mark.parametrize( "data_types", [ @@ -437,7 +354,7 @@ def test_output_equal_to_usdcamera(setup_simulation, data_types): prim_path="/World/Camera_warp", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), + offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=data_types, @@ -520,7 +437,7 @@ def test_output_equal_to_usdcamera(setup_simulation, data_types): def test_output_equal_to_usdcamera_offset(setup_simulation): """Test that ray caster camera output equals USD camera output with offset.""" sim, dt, camera_cfg = setup_simulation - offset_rot = (-0.1251, 0.3617, 0.8731, -0.3020) + offset_rot = (0.3617, 0.8731, -0.3020, -0.1251) camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, @@ -598,12 +515,12 @@ def test_output_equal_to_usdcamera_prim_offset(setup_simulation): under an XForm prim that is translated and rotated from the world origin.""" sim, dt, camera_cfg = setup_simulation - offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020] + offset_rot = [0.3617, 0.8731, -0.3020, -0.1251] - # gf quat + # gf quat (QUAT_OPENGL is xyzw, Gf.Quatd uses wxyz) gf_quatf = Gf.Quatd() - gf_quatf.SetReal(QUAT_OPENGL[0]) - gf_quatf.SetImaginary(tuple(QUAT_OPENGL[1:])) + gf_quatf.SetReal(QUAT_OPENGL[3]) + gf_quatf.SetImaginary(tuple(QUAT_OPENGL[:3])) camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, @@ -696,7 +613,7 @@ def test_output_equal_to_usd_camera_intrinsics(setup_simulation, height, width): sim, dt, camera_cfg = setup_simulation # create cameras - offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020] + offset_rot = [0.3617, 0.8731, -0.3020, -0.1251] offset_pos = (2.5, 2.5, 4.0) intrinsics = [380.0831, 0.0, width / 2, 0.0, 380.0831, height / 2, 0.0, 0.0, 1.0] sim_utils.create_prim("/World/Camera_warp", "Xform") @@ -799,7 +716,7 @@ def test_output_equal_to_usd_camera_when_intrinsics_set(setup_simulation): prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), + offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_camera"], @@ -860,3 +777,35 @@ def test_output_equal_to_usd_camera_when_intrinsics_set(setup_simulation): ) del camera_usd, camera_warp + + +@pytest.mark.isaacsim_ci +def test_image_mesh_ids_identifies_hit_mesh(setup_simulation): + """image_mesh_ids must contain 0 for ground-plane hits (only one mesh registered).""" + sim, dt, camera_cfg = setup_simulation + + cfg = copy.deepcopy(camera_cfg) + cfg.update_mesh_ids = True + cfg.data_types = ["distance_to_camera"] + + camera = MultiMeshRayCasterCamera(cfg=cfg) + sim.reset() + camera.update(dt) + + mesh_ids = camera.data.image_mesh_ids # shape (N, H, W, 1), dtype torch.int16 + assert mesh_ids is not None, "image_mesh_ids should not be None when update_mesh_ids=True" + assert mesh_ids.shape[-1] == 1 + assert mesh_ids.dtype == torch.int16 + + # Identify actual hits via distance < inf. This relies on depth_clipping_behavior="none" + # (the default), which leaves missed rays at the Warp-kernel fill value of inf. + # Under "max" clipping, missed rays would be clamped to a finite max_distance, making + # the inf comparison incorrect. + hit_mask = camera.data.output["distance_to_camera"][0, :, :, 0] < float("inf") + assert hit_mask.any(), "Expected at least some rays to hit the ground plane" + + # All hits against the single registered mesh must carry mesh_id=0 (first mesh index). + hit_mesh_ids = mesh_ids[0, :, :, 0][hit_mask] + assert torch.all(hit_mesh_ids == 0), ( + f"All hits against the single ground mesh must have mesh_id=0, got: {hit_mesh_ids.unique()}" + ) diff --git a/source/isaaclab/test/sensors/test_multi_tiled_camera.py b/source/isaaclab/test/sensors/test_multi_tiled_camera.py index a1fb9a178351..113524cfe934 100644 --- a/source/isaaclab/test/sensors/test_multi_tiled_camera.py +++ b/source/isaaclab/test/sensors/test_multi_tiled_camera.py @@ -24,12 +24,15 @@ from flaky import flaky import omni.replicator.core as rep -from isaacsim.core.prims import SingleGeometryPrim, SingleRigidPrim from pxr import Gf, UsdGeom import isaaclab.sim as sim_utils from isaaclab.sensors.camera import TiledCamera, TiledCameraCfg +# Deprecation warnings from TiledCamera/TiledCameraCfg are expected in this file; +# the deprecation mechanism itself is validated in test_tiled_camera.py. +pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning") + @pytest.fixture() def setup_camera(): @@ -37,7 +40,7 @@ def setup_camera(): camera_cfg = TiledCameraCfg( height=128, width=256, - offset=TiledCameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 0.0, 1.0, 0.0), convention="ros"), + offset=TiledCameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 1.0, 0.0, 0.0), convention="ros"), prim_path="/World/Camera", update_period=0, data_types=["rgb", "distance_to_camera"], @@ -60,10 +63,8 @@ def setup_camera(): # Teardown rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() + sim.stop() # clear the stage - sim.clear_all_callbacks() sim.clear_instance() @@ -86,7 +87,7 @@ def test_multi_tiled_camera_init(setup_camera): tiled_cameras.append(camera) # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() + assert sim.get_setting("/isaaclab/render/rtx_sensors") # Play sim sim.reset() @@ -98,12 +99,6 @@ def test_multi_tiled_camera_init(setup_camera): assert camera._sensor_prims[1].GetPath().pathString == f"/World/Origin_{i}_1/CameraSensor" assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - for camera in tiled_cameras: # Check buffers that exists and have correct shapes assert camera.data.pos_w.shape == (num_cameras_per_tiled_camera, 3) @@ -156,6 +151,7 @@ def test_all_annotators_multi_tiled_camera(setup_camera): all_annotator_types = [ "rgb", "rgba", + "albedo", "depth", "distance_to_camera", "distance_to_image_plane", @@ -182,7 +178,7 @@ def test_all_annotators_multi_tiled_camera(setup_camera): tiled_cameras.append(camera) # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() + assert sim.get_setting("/isaaclab/render/rtx_sensors") # Play sim sim.reset() @@ -195,12 +191,6 @@ def test_all_annotators_multi_tiled_camera(setup_camera): assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - for camera in tiled_cameras: # Check buffers that exists and have correct shapes assert camera.data.pos_w.shape == (num_cameras_per_tiled_camera, 3) @@ -223,6 +213,7 @@ def test_all_annotators_multi_tiled_camera(setup_camera): assert im_data.shape == (num_cameras_per_tiled_camera, camera.cfg.height, camera.cfg.width, 3) elif data_type in [ "rgba", + "albedo", "semantic_segmentation", "instance_segmentation_fast", "instance_id_segmentation_fast", @@ -245,6 +236,7 @@ def test_all_annotators_multi_tiled_camera(setup_camera): info = camera.data.info assert output["rgb"].dtype == torch.uint8 assert output["rgba"].dtype == torch.uint8 + assert output["albedo"].dtype == torch.uint8 assert output["depth"].dtype == torch.float assert output["distance_to_camera"].dtype == torch.float assert output["distance_to_image_plane"].dtype == torch.float @@ -283,7 +275,7 @@ def test_different_resolution_multi_tiled_camera(setup_camera): tiled_cameras.append(camera) # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() + assert sim.get_setting("/isaaclab/render/rtx_sensors") # Play sim sim.reset() @@ -295,12 +287,6 @@ def test_different_resolution_multi_tiled_camera(setup_camera): assert camera._sensor_prims[1].GetPath().pathString == f"/World/Origin_{i}_1/CameraSensor" assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - for camera in tiled_cameras: # Check buffers that exists and have correct shapes assert camera.data.pos_w.shape == (num_cameras_per_tiled_camera, 3) @@ -335,6 +321,7 @@ def test_different_resolution_multi_tiled_camera(setup_camera): @pytest.mark.isaacsim_ci +@flaky(max_runs=3, min_passes=1) def test_frame_offset_multi_tiled_camera(setup_camera): """Test frame offset issue with multiple tiled cameras""" camera_cfg, sim, dt = setup_camera @@ -407,7 +394,7 @@ def test_frame_different_poses_multi_tiled_camera(setup_camera): num_tiled_cameras = 3 num_cameras_per_tiled_camera = 4 positions = [(0.0, 0.0, 4.0), (0.0, 0.0, 2.0), (0.0, 0.0, 3.0)] - rotations = [(0.0, 0.0, 1.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, 0.0, 1.0, 0.0)] + rotations = [(0.0, 1.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0)] tiled_cameras = [] for i in range(num_tiled_cameras): @@ -424,12 +411,6 @@ def test_frame_different_poses_multi_tiled_camera(setup_camera): # Play sim sim.reset() - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # Simulate physics for _ in range(10): # Initialize data arrays @@ -503,6 +484,8 @@ def _populate_scene(): color = Gf.Vec3f(random.random(), random.random(), random.random()) geom_prim.CreateDisplayColorAttr() geom_prim.GetDisplayColorAttr().Set([color]) - # add rigid properties - SingleGeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) - SingleRigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) + # add rigid body and collision properties using Isaac Lab schemas + prim_path = f"/World/Objects/Obj_{i:02d}" + sim_utils.define_rigid_body_properties(prim_path, sim_utils.RigidBodyPropertiesCfg()) + sim_utils.define_mass_properties(prim_path, sim_utils.MassPropertiesCfg(mass=5.0)) + sim_utils.define_collision_properties(prim_path, sim_utils.CollisionPropertiesCfg()) diff --git a/source/isaaclab/test/sensors/test_outdated_sensor.py b/source/isaaclab/test/sensors/test_outdated_sensor.py index ac0c989c6839..3c07151ea275 100644 --- a/source/isaaclab/test/sensors/test_outdated_sensor.py +++ b/source/isaaclab/test/sensors/test_outdated_sensor.py @@ -19,8 +19,8 @@ import pytest import torch -import carb -import omni.usd +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager import isaaclab_tasks # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg @@ -31,8 +31,7 @@ def temp_dir(): """Fixture to create and clean up a temporary directory for test datasets.""" # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) # create a temporary directory to store the test datasets temp_dir = tempfile.mkdtemp() yield temp_dir @@ -46,7 +45,7 @@ def temp_dir(): @pytest.mark.isaacsim_ci def test_action_state_recorder_terms(temp_dir, task_name, device, num_envs): """Check FrameTransformer values after reset.""" - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab/test/sensors/test_ray_caster.py b/source/isaaclab/test/sensors/test_ray_caster.py index 01b2dde1ae2a..6ac63cf0d041 100644 --- a/source/isaaclab/test/sensors/test_ray_caster.py +++ b/source/isaaclab/test/sensors/test_ray_caster.py @@ -18,7 +18,9 @@ # Import after app launch import warp as wp -from isaaclab.utils.math import matrix_from_quat, quat_from_euler_xyz, random_orientation +from isaaclab.sensors.ray_caster.kernels import quat_yaw_only as _quat_yaw_only_func +from isaaclab.utils.math import matrix_from_quat, quat_from_euler_xyz, random_orientation, yaw_quat +from isaaclab.utils.warp.kernels import raycast_mesh_masked_kernel as _raycast_mesh_masked_kernel from isaaclab.utils.warp.ops import convert_to_warp_mesh, raycast_dynamic_meshes, raycast_mesh @@ -97,7 +99,7 @@ def test_raycast_multi_cubes(raycast_setup): return_normal=True, return_face_id=True, mesh_positions_w=torch.tensor([[[0, 0, 0], [0, 2, 0]]], dtype=torch.float32, device=device), - mesh_orientations_w=torch.tensor([[[1, 0, 0, 0], [1, 0, 0, 0]]], dtype=torch.float32, device=device), + mesh_orientations_w=torch.tensor([[[0, 0, 0, 1], [0, 0, 0, 1]]], dtype=torch.float32, device=device), return_mesh_id=True, ) @@ -239,3 +241,282 @@ def test_raycast_random_cube(raycast_setup): torch.testing.assert_close(ray_distance, ray_distance_m) torch.testing.assert_close(ray_normal, ray_normal_m) torch.testing.assert_close(ray_face_id, ray_face_id_m) + + +## +# RayCaster sensor-level tests +## + + +def test_raycaster_offset_does_not_affect_pos_w(): + """Verify that cfg.offset.pos shifts ray starts but NOT data.pos_w. + + data.pos_w must reflect the parent body position so that downstream + observations like height_scan (pos_w_z - hit_z - 0.5) produce values + relative to the body, not relative to the offset sensor frame. + + Regression test: previously the offset was baked into the FrameView's + Xform local transform, causing data.pos_w to include the 20m offset + and breaking height-scan observations during training. + """ + import isaaclab.sim as sim_utils + from isaaclab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns + from isaaclab.terrains.trimesh.utils import make_plane + from isaaclab.terrains.utils import create_prim_from_mesh + + sim_utils.create_new_stage() + + # ground plane at z=0 + mesh = make_plane(size=(100, 100), height=0.0, center_zero=True) + create_prim_from_mesh("/World/ground", mesh) + + # parent body at known position + body_pos = (0.0, 0.0, 0.6) + sim_utils.create_prim("/World/Robot", "Xform", translation=body_pos) + + # large z-offset to make the regression obvious + offset_z = 20.0 + cfg = RayCasterCfg( + prim_path="/World/Robot", + offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, offset_z)), + mesh_prim_paths=["/World/ground"], + pattern_cfg=patterns.GridPatternCfg(resolution=0.5, size=[1.0, 1.0]), + ray_alignment="yaw", + ) + + dt = 0.01 + sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=dt)) + + sensor = RayCaster(cfg) + sim.reset() + sensor.update(dt) + + # data.pos_w / data.ray_hits_w return ProxyArray wrappers; use .torch for tensor indexing. + pos_w = sensor.data.pos_w.torch[0].cpu() + + # pos_w.z should be near the body height, NOT body_height + offset + assert abs(pos_w[2].item() - body_pos[2]) < 1.0, ( + f"data.pos_w.z = {pos_w[2].item():.2f}, expected near body height {body_pos[2]}." + f" If pos_w.z ≈ {body_pos[2] + offset_z}, the offset was incorrectly baked into the FrameView." + ) + + # ray_hits should be near z=0 (ground plane) + hits_z = sensor.data.ray_hits_w.torch[0, :, 2].cpu() + valid = hits_z[~torch.isinf(hits_z)] + if len(valid) > 0: + assert valid.abs().max().item() < 2.0, ( + f"Ray hits z range [{valid.min().item():.2f}, {valid.max().item():.2f}] — expected near ground (z≈0)." + ) + + # height_scan observation: pos_w_z - hit_z - 0.5 should be small, not ~20 + if len(valid) > 0: + height_obs = pos_w[2].item() - valid.mean().item() - 0.5 + assert abs(height_obs) < 5.0, ( + f"height_scan observation = {height_obs:.2f}, expected near 0." + f" If ≈{offset_z}, the offset leaked into data.pos_w." + ) + + sim.stop() + sim.clear_instance() + + +# --------------------------------------------------------------------------- +# Tests for raycast_mesh_masked_kernel (new kernel in utils/warp/kernels.py) +# --------------------------------------------------------------------------- + +_SENTINEL = -2.0 # value pre-filled into output buffers; chosen outside [-1, 1] so it cannot +# equal any component of a unit-length surface normal, making "not written" assertions unambiguous. + + +def _make_masked_buffers(device, n_envs, n_rays): + """Allocate all warp buffers needed by raycast_mesh_masked_kernel. + + ray_dist_w and ray_normal_w are pre-filled with _SENTINEL so that tests can + meaningfully assert those buffers were *not* written when the corresponding + return flag is 0. + """ + ray_starts_w = wp.zeros((n_envs, n_rays), dtype=wp.vec3f, device=device) + ray_dirs_w = wp.zeros((n_envs, n_rays), dtype=wp.vec3f, device=device) + ray_hits_w = wp.zeros((n_envs, n_rays), dtype=wp.vec3f, device=device) + ray_dist_w = wp.zeros((n_envs, n_rays), dtype=wp.float32, device=device) + wp.to_torch(ray_dist_w).fill_(_SENTINEL) + ray_normal_w = wp.zeros((n_envs, n_rays), dtype=wp.vec3f, device=device) + wp.to_torch(ray_normal_w).fill_(_SENTINEL) + return ray_starts_w, ray_dirs_w, ray_hits_w, ray_dist_w, ray_normal_w + + +def test_raycast_mesh_masked_kernel_hits_only(raycast_setup): + """return_distance=0, return_normal=0: only ray_hits are written on a hit.""" + device = raycast_setup["device"] + mesh_id = raycast_setup["single_mesh_id"] + expected_hits = raycast_setup["expected_ray_hits"] # shape (1, 2, 3) + + n_envs, n_rays = 1, 2 + ray_starts_w, ray_dirs_w, ray_hits_w, ray_dist_w, ray_normal_w = _make_masked_buffers(device, n_envs, n_rays) + env_mask = wp.array([True], dtype=wp.bool, device=device) + + wp.to_torch(ray_starts_w)[:] = torch.tensor([[[0, -0.35, -5], [0.25, 0.35, -5]]], device=device) + wp.to_torch(ray_dirs_w)[:] = torch.tensor([[[0, 0, 1], [0, 0, 1]]], device=device) + wp.to_torch(ray_hits_w).fill_(float("inf")) + + wp.launch( + _raycast_mesh_masked_kernel, + dim=(n_envs, n_rays), + inputs=[mesh_id, env_mask, ray_starts_w, ray_dirs_w, float(1e6), 0, 0, ray_hits_w, ray_dist_w, ray_normal_w], + device=device, + ) + + torch.testing.assert_close(wp.to_torch(ray_hits_w), expected_hits) + assert torch.all(wp.to_torch(ray_dist_w) == _SENTINEL), "Distance buffer must not be written when return_distance=0" + assert torch.all(wp.to_torch(ray_normal_w) == _SENTINEL), "Normal buffer must not be written when return_normal=0" + + +def test_raycast_mesh_masked_kernel_with_distance(raycast_setup): + """return_distance=1: distances are written in addition to hits.""" + device = raycast_setup["device"] + mesh_id = raycast_setup["single_mesh_id"] + + n_envs, n_rays = 1, 2 + ray_starts_w, ray_dirs_w, ray_hits_w, ray_dist_w, ray_normal_w = _make_masked_buffers(device, n_envs, n_rays) + env_mask = wp.array([True], dtype=wp.bool, device=device) + + wp.to_torch(ray_starts_w)[:] = torch.tensor([[[0, -0.35, -5], [0.25, 0.35, -5]]], device=device) + wp.to_torch(ray_dirs_w)[:] = torch.tensor([[[0, 0, 1], [0, 0, 1]]], device=device) + wp.to_torch(ray_hits_w).fill_(float("inf")) + + wp.launch( + _raycast_mesh_masked_kernel, + dim=(n_envs, n_rays), + inputs=[mesh_id, env_mask, ray_starts_w, ray_dirs_w, float(1e6), 1, 0, ray_hits_w, ray_dist_w, ray_normal_w], + device=device, + ) + + # Cube bottom at z=-0.5, rays start at z=-5 going +z, distance = 4.5 + torch.testing.assert_close(wp.to_torch(ray_dist_w), torch.tensor([[4.5, 4.5]], device=device)) + assert torch.all(wp.to_torch(ray_normal_w) == _SENTINEL), "Normal buffer must not be written when return_normal=0" + + +def test_raycast_mesh_masked_kernel_with_normal(raycast_setup): + """return_distance=1, return_normal=1: both distances and surface normals are written.""" + device = raycast_setup["device"] + mesh_id = raycast_setup["single_mesh_id"] + + n_envs, n_rays = 1, 2 + ray_starts_w, ray_dirs_w, ray_hits_w, ray_dist_w, ray_normal_w = _make_masked_buffers(device, n_envs, n_rays) + env_mask = wp.array([True], dtype=wp.bool, device=device) + + wp.to_torch(ray_starts_w)[:] = torch.tensor([[[0, -0.35, -5], [0.25, 0.35, -5]]], device=device) + wp.to_torch(ray_dirs_w)[:] = torch.tensor([[[0, 0, 1], [0, 0, 1]]], device=device) + wp.to_torch(ray_hits_w).fill_(float("inf")) + + wp.launch( + _raycast_mesh_masked_kernel, + dim=(n_envs, n_rays), + inputs=[mesh_id, env_mask, ray_starts_w, ray_dirs_w, float(1e6), 1, 1, ray_hits_w, ray_dist_w, ray_normal_w], + device=device, + ) + + # Cube bottom at z=-0.5, rays start at z=-5, distance = 4.5 + torch.testing.assert_close(wp.to_torch(ray_dist_w), torch.tensor([[4.5, 4.5]], device=device)) + torch.testing.assert_close( + wp.to_torch(ray_normal_w), + torch.tensor([[[0, 0, -1], [0, 0, -1]]], device=device, dtype=torch.float32), + ) + + +def test_raycast_mesh_masked_kernel_env_mask(raycast_setup): + """Masked-out environments must not be written.""" + device = raycast_setup["device"] + mesh_id = raycast_setup["single_mesh_id"] + + n_envs, n_rays = 2, 2 + ray_starts_w, ray_dirs_w, ray_hits_w, ray_dist_w, ray_normal_w = _make_masked_buffers(device, n_envs, n_rays) + env_mask = wp.array([True, False], dtype=wp.bool, device=device) + + starts = torch.tensor([[[0, -0.35, -5], [0.25, 0.35, -5]], [[0, -0.35, -5], [0.25, 0.35, -5]]], device=device) + dirs = torch.tensor([[[0, 0, 1], [0, 0, 1]], [[0, 0, 1], [0, 0, 1]]], device=device) + wp.to_torch(ray_starts_w)[:] = starts + wp.to_torch(ray_dirs_w)[:] = dirs + wp.to_torch(ray_hits_w).fill_(float("inf")) + + wp.launch( + _raycast_mesh_masked_kernel, + dim=(n_envs, n_rays), + inputs=[mesh_id, env_mask, ray_starts_w, ray_dirs_w, float(1e6), 1, 0, ray_hits_w, ray_dist_w, ray_normal_w], + device=device, + ) + + hits = wp.to_torch(ray_hits_w) + dist = wp.to_torch(ray_dist_w) + + assert not torch.isinf(hits[0]).any(), "Active env 0 should have valid hits" + torch.testing.assert_close(dist[0], torch.tensor([4.5, 4.5], device=device)) + assert torch.isinf(hits[1]).all(), "Masked env 1 hits must remain inf" + assert torch.all(dist[1] == _SENTINEL), "Masked env 1 distances must remain at sentinel" + assert torch.all(wp.to_torch(ray_normal_w) == _SENTINEL), "Normal buffer must not be written when return_normal=0" + + +# --------------------------------------------------------------------------- +# Test quat_yaw_only correctness (regression for atan2-based fix) +# --------------------------------------------------------------------------- + + +@wp.kernel(enable_backward=False) +def _call_quat_yaw_only(q_in: wp.array(dtype=wp.quatf), q_out: wp.array(dtype=wp.quatf)): + i = wp.tid() + q_out[i] = _quat_yaw_only_func(q_in[i]) + + +def test_quat_yaw_only_pure_yaw(): + """Pure yaw: quat_yaw_only should match the yaw_quat() reference for all yaw angles.""" + device = "cuda" if torch.cuda.is_available() else "cpu" + yaw_angles = torch.tensor([0.0, 0.5, 1.2, -0.8, np.pi], device=device) + + for yaw in yaw_angles: + q_torch = quat_from_euler_xyz( + torch.tensor([0.0], device=device), + torch.tensor([0.0], device=device), + yaw.unsqueeze(0), + ) # shape (1, 4), xyzw + + expected = yaw_quat(q_torch) # shape (1, 4) + + q_in = wp.from_torch(q_torch.contiguous(), dtype=wp.quatf) + q_out = wp.zeros(1, dtype=wp.quatf, device=device) + wp.launch(_call_quat_yaw_only, dim=1, inputs=[q_in, q_out], device=device) + result = wp.to_torch(q_out) # shape (1, 4) + + torch.testing.assert_close(result, expected, atol=1e-5, rtol=1e-5) + + +def test_quat_yaw_only_with_pitch_roll(): + """Non-zero pitch and roll: only the yaw component should be preserved. + + This is the regression test for the old bug where simply zeroing qx/qy and + renormalizing gave the wrong answer when pitch or roll was non-zero. + """ + device = "cuda" if torch.cuda.is_available() else "cpu" + + # Several combined pitch+roll+yaw orientations: (roll, pitch, yaw) + test_cases = [ + (0.3, 0.4, 1.2), + (0.5, 0.0, 0.7), + (-0.2, 0.6, -1.0), + (1.0, 1.0, 0.0), # heavy pitch+roll, zero yaw → result should be identity + ] + + for roll, pitch, yaw in test_cases: + q_torch = quat_from_euler_xyz( + torch.tensor([roll], device=device), + torch.tensor([pitch], device=device), + torch.tensor([yaw], device=device), + ) # shape (1, 4), xyzw + + expected = yaw_quat(q_torch) # shape (1, 4) + + q_in = wp.from_torch(q_torch.contiguous(), dtype=wp.quatf) + q_out = wp.zeros(1, dtype=wp.quatf, device=device) + wp.launch(_call_quat_yaw_only, dim=1, inputs=[q_in, q_out], device=device) + result = wp.to_torch(q_out) + + torch.testing.assert_close(result, expected, atol=1e-5, rtol=1e-5) diff --git a/source/isaaclab/test/sensors/test_ray_caster_camera.py b/source/isaaclab/test/sensors/test_ray_caster_camera.py index 8b4c2f4a973a..a913d38dd833 100644 --- a/source/isaaclab/test/sensors/test_ray_caster_camera.py +++ b/source/isaaclab/test/sensors/test_ray_caster_camera.py @@ -31,14 +31,12 @@ from isaaclab.sim import PinholeCameraCfg from isaaclab.terrains.trimesh.utils import make_plane from isaaclab.terrains.utils import create_prim_from_mesh -from isaaclab.utils import convert_dict_to_backend -from isaaclab.utils.timer import Timer # sample camera poses POSITION = [2.5, 2.5, 2.5] -QUAT_ROS = [-0.17591989, 0.33985114, 0.82047325, -0.42470819] -QUAT_OPENGL = [0.33985113, 0.17591988, 0.42470818, 0.82047324] -QUAT_WORLD = [-0.3647052, -0.27984815, -0.1159169, 0.88047623] +QUAT_ROS = [0.33985114, 0.82047325, -0.42470819, -0.17591989] +QUAT_OPENGL = [0.17591988, 0.42470818, 0.82047324, 0.33985113] +QUAT_WORLD = [-0.27984815, -0.1159169, 0.88047623, -0.3647052] DEBUG_PLOTS = False @@ -55,7 +53,7 @@ def setup() -> tuple[sim_utils.SimulationContext, RayCasterCameraCfg, float]: prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=[ @@ -71,9 +69,13 @@ def setup() -> tuple[sim_utils.SimulationContext, RayCasterCameraCfg, float]: # Load kit helper sim_cfg = sim_utils.SimulationCfg(dt=dt) sim = sim_utils.SimulationContext(sim_cfg) - # Ground-plane + # Ground-plane with visual material for RTX rendering compatibility mesh = make_plane(size=(100, 100), height=0.0, center_zero=True) - create_prim_from_mesh("/World/defaultGroundPlane", mesh) + visual_material = sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.5, 0.5)) + create_prim_from_mesh("/World/defaultGroundPlane", mesh, visual_material=visual_material) + # Add lighting for RTX rendering + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0) + light_cfg.func("/World/Light", light_cfg) # load stage sim_utils.update_stage() return sim, camera_cfg, dt @@ -84,10 +86,8 @@ def teardown(sim: sim_utils.SimulationContext): # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() + sim.stop() # clear the stage - sim.clear_all_callbacks() sim.clear_instance() @@ -116,7 +116,7 @@ def test_camera_init(setup_sim): assert camera.data.quat_w_opengl.shape == (1, 4) assert camera.data.intrinsic_matrices.shape == (1, 3, 3) assert camera.data.image_shape == (camera_cfg.pattern_cfg.height, camera_cfg.pattern_cfg.width) - assert camera.data.info == [{camera_cfg.data_types[0]: None}] + assert camera.data.info == {camera_cfg.data_types[0]: None} # Simulate physics for _ in range(10): sim.step() @@ -167,7 +167,7 @@ def test_depth_clipping(setup_sim): camera_cfg_zero = RayCasterCameraCfg( prim_path="/World/CameraZero", mesh_prim_paths=["/World/defaultGroundPlane"], - offset=RayCasterCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(0.9914449, 0.0, 0.1305, 0.0), convention="world"), + offset=RayCasterCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(0.0, 0.1305, 0.0, 0.9914449), convention="world"), pattern_cfg=patterns.PinholeCameraPatternCfg().from_intrinsic_matrix( focal_length=38.0, intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], @@ -313,7 +313,7 @@ def test_camera_init_intrinsic_matrix(setup_sim): prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), debug_vis=False, pattern_cfg=patterns.PinholeCameraPatternCfg.from_intrinsic_matrix( intrinsic_matrix=intrinsic_matrix, @@ -450,58 +450,6 @@ def test_intrinsic_matrix(setup_sim): torch.testing.assert_close(rs_intrinsic_matrix, camera.data.intrinsic_matrices) -@pytest.mark.isaacsim_ci -def test_throughput(setup_sim): - """Checks that the single camera gets created properly with a rig.""" - sim, camera_cfg, dt = setup_sim - # Create directory temp dir to dump the results - file_dir = os.path.dirname(os.path.realpath(__file__)) - temp_dir = os.path.join(file_dir, "output", "camera", "throughput") - os.makedirs(temp_dir, exist_ok=True) - # Create replicator writer - rep_writer = rep.BasicWriter(output_dir=temp_dir, frame_padding=3) - # create camera - camera_cfg.pattern_cfg.height = 480 - camera_cfg.pattern_cfg.width = 640 - camera = RayCasterCamera(camera_cfg) - - # Play simulator - sim.reset() - - # Set camera pose - eyes = torch.tensor([[2.5, 2.5, 2.5]], dtype=torch.float32, device=camera.device) - targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device) - camera.set_world_poses_from_view(eyes, targets) - - # Simulate for a few steps - for _ in range(5): - sim.step() - # Simulate physics - for _ in range(5): - # perform rendering - sim.step() - # update camera - with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): - camera.update(dt) - # Save images - with Timer(f"Time taken for writing data with shape {camera.image_shape} "): - # Pack data back into replicator format to save them using its writer - rep_output = {"annotators": {}} - camera_data = convert_dict_to_backend({k: v[0] for k, v in camera.data.output.items()}, backend="numpy") - for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()): - if info is not None: - rep_output["annotators"][key] = {"render_product": {"data": data, **info}} - else: - rep_output["annotators"][key] = {"render_product": {"data": data}} - # Save images - rep_output["trigger_outputs"] = {"on_time": camera.frame[0]} - rep_writer.write(rep_output) - print("----------------------------------------") - # Check image data - for im_data in camera.data.output.values(): - assert im_data.shape == (1, camera_cfg.pattern_cfg.height, camera_cfg.pattern_cfg.width, 1) - - @pytest.mark.isaacsim_ci def test_output_equal_to_usdcamera(setup_sim): sim, camera_cfg, dt = setup_sim @@ -516,7 +464,7 @@ def test_output_equal_to_usdcamera(setup_sim): prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), + offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_image_plane", "distance_to_camera", "normals"], @@ -534,7 +482,7 @@ def test_output_equal_to_usdcamera(setup_sim): spawn=PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1e-4, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), ) camera_usd = Camera(camera_cfg_usd) @@ -581,6 +529,8 @@ def test_output_equal_to_usdcamera(setup_sim): torch.testing.assert_close( camera_usd.data.output["distance_to_image_plane"], camera_warp.data.output["distance_to_image_plane"], + rtol=1e-5, + atol=1e-4, ) torch.testing.assert_close( camera_usd.data.output["distance_to_camera"], @@ -602,7 +552,7 @@ def test_output_equal_to_usdcamera(setup_sim): @pytest.mark.isaacsim_ci def test_output_equal_to_usdcamera_offset(setup_sim): sim, camera_cfg, dt = setup_sim - offset_rot = [-0.1251, 0.3617, 0.8731, -0.3020] + offset_rot = [0.3617, 0.8731, -0.3020, -0.1251] camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, @@ -681,12 +631,12 @@ def test_output_equal_to_usdcamera_prim_offset(setup_sim): under an XForm prim that is translated and rotated from the world origin .""" sim, camera_cfg, dt = setup_sim - offset_rot = (-0.1251, 0.3617, 0.8731, -0.3020) + offset_rot = (0.3617, 0.8731, -0.3020, -0.1251) # gf quat gf_quatf = Gf.Quatd() - gf_quatf.SetReal(QUAT_OPENGL[0]) - gf_quatf.SetImaginary(tuple(QUAT_OPENGL[1:])) + gf_quatf.SetReal(QUAT_OPENGL[3]) + gf_quatf.SetImaginary(tuple(QUAT_OPENGL[:3])) camera_pattern_cfg = patterns.PinholeCameraPatternCfg( focal_length=24.0, @@ -779,7 +729,7 @@ def test_output_equal_to_usd_camera_intrinsics(setup_sim, focal_length): sim, camera_cfg, dt = setup_sim # create cameras - offset_rot = (-0.1251, 0.3617, 0.8731, -0.3020) + offset_rot = (0.3617, 0.8731, -0.3020, -0.1251) offset_pos = (2.5, 2.5, 4.0) intrinsics = [380.0831, 0.0, 480.0, 0.0, 380.0831, 270.0, 0.0, 0.0, 1.0] sim_utils.create_prim("/World/Camera_warp", "Xform") @@ -912,7 +862,7 @@ def test_output_equal_to_usd_camera_when_intrinsics_set(setup_sim, focal_length_ prim_path="/World/Camera", mesh_prim_paths=["/World/defaultGroundPlane"], update_period=0, - offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), + offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), debug_vis=False, pattern_cfg=camera_pattern_cfg, data_types=["distance_to_camera"], @@ -948,11 +898,11 @@ def test_output_equal_to_usd_camera_when_intrinsics_set(setup_sim, focal_length_ # set camera position camera_warp.set_world_poses_from_view( - eyes=torch.tensor([[0.0, 0.0, 5.0]], device=camera_warp.device), + eyes=torch.tensor([[0.001, 0.0, 5.0]], device=camera_warp.device), targets=torch.tensor([[0.0, 0.0, 0.0]], device=camera_warp.device), ) camera_usd.set_world_poses_from_view( - eyes=torch.tensor([[0.0, 0.0, 5.0]], device=camera_usd.device), + eyes=torch.tensor([[0.001, 0.0, 5.0]], device=camera_usd.device), targets=torch.tensor([[0.0, 0.0, 0.0]], device=camera_usd.device), ) @@ -1012,3 +962,142 @@ def test_sensor_print(setup_sim): sim.reset() # print info print(sensor) + + +@pytest.mark.isaacsim_ci +def test_depth_clipping_d2ip_and_d2c_are_independent(setup_sim): + """Clipping distance_to_image_plane must not corrupt distance_to_camera and vice versa. + + Both are derived from the same raw ray_distance buffer. If that buffer is modified + in-place by one clipping pass it would corrupt the other. This test verifies that + requesting both data types simultaneously gives results consistent with requesting + each one alone. + """ + sim, camera_cfg, dt = setup_sim + + base_cfg = RayCasterCameraCfg( + prim_path="/World/Camera", + mesh_prim_paths=["/World/defaultGroundPlane"], + offset=RayCasterCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(0.0, 0.1305, 0.0, 0.9914449), convention="world"), + pattern_cfg=patterns.PinholeCameraPatternCfg.from_intrinsic_matrix( + focal_length=38.0, + intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], + height=540, + width=960, + ), + max_distance=5.0, + data_types=["distance_to_image_plane", "distance_to_camera"], + depth_clipping_behavior="max", + update_period=0, + ) + + # Camera requesting both data types simultaneously + sim_utils.create_prim("/World/CameraJoint", "Xform") + cfg_joint = copy.deepcopy(base_cfg) + cfg_joint.prim_path = "/World/CameraJoint" + cam_joint = RayCasterCamera(cfg_joint) + + # Camera requesting only d2ip + sim_utils.create_prim("/World/CameraD2IP", "Xform") + cfg_d2ip = copy.deepcopy(base_cfg) + cfg_d2ip.prim_path = "/World/CameraD2IP" + cfg_d2ip.data_types = ["distance_to_image_plane"] + cam_d2ip = RayCasterCamera(cfg_d2ip) + + # Camera requesting only d2c + sim_utils.create_prim("/World/CameraD2C", "Xform") + cfg_d2c = copy.deepcopy(base_cfg) + cfg_d2c.prim_path = "/World/CameraD2C" + cfg_d2c.data_types = ["distance_to_camera"] + cam_d2c = RayCasterCamera(cfg_d2c) + + sim.reset() + + cam_joint.update(dt) + cam_d2ip.update(dt) + cam_d2c.update(dt) + + d2ip_joint = cam_joint.data.output["distance_to_image_plane"] + d2c_joint = cam_joint.data.output["distance_to_camera"] + d2ip_solo = cam_d2ip.data.output["distance_to_image_plane"] + d2c_solo = cam_d2c.data.output["distance_to_camera"] + + # Joint camera must match solo cameras (clipping one must not affect the other) + torch.testing.assert_close(d2ip_joint, d2ip_solo, atol=1e-5, rtol=1e-5) + torch.testing.assert_close(d2c_joint, d2c_solo, atol=1e-5, rtol=1e-5) + + # Both should be clipped to max_distance (camera is 6 m above ground, max_distance=5 m) + assert d2ip_joint.max().item() <= base_cfg.max_distance + 1e-4 + assert d2c_joint.max().item() <= base_cfg.max_distance + 1e-4 + + +@pytest.mark.isaacsim_ci +def test_frame_counter_increments_per_update(setup_sim): + """frame counter must increment by exactly 1 per update() call and reset to 0 on reset().""" + sim, camera_cfg, dt = setup_sim + camera = RayCasterCamera(cfg=camera_cfg) + sim.reset() + + assert torch.all(camera.frame == 0), "Frame must start at 0" + + n_steps = 7 + for step in range(1, n_steps + 1): + sim.step() + camera.update(dt, force_recompute=True) + assert camera.frame[0].item() == step, f"Frame must be {step} after {step} update(s)" + + # Partial reset: only env 0 (single-env camera, but API accepts env_ids) + camera.reset(env_ids=[0]) + assert camera.frame[0].item() == 0, "Frame must be 0 after reset(env_ids=[0])" + + # Full reset + for _ in range(3): + sim.step() + camera.update(dt, force_recompute=True) + camera.reset() + assert torch.all(camera.frame == 0), "Frame must be 0 after full reset()" + + +@pytest.mark.isaacsim_ci +def test_set_intrinsic_matrices_updates_output(setup_sim): + """Depth output must change when intrinsics are updated via set_intrinsic_matrices(). + + This tests that the warp view refresh in set_intrinsic_matrices() actually takes + effect: stale warp views would cause subsequent images to use the old ray pattern. + """ + sim, camera_cfg, dt = setup_sim + + # Place camera looking straight down at the ground + camera_cfg = copy.deepcopy(camera_cfg) + camera_cfg.offset = RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 5.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world") + camera_cfg.data_types = ["distance_to_camera"] + camera = RayCasterCamera(cfg=camera_cfg) + sim.reset() + + # Capture output with default focal length (24 mm → 20.955 mm aperture) + for _ in range(3): + sim.step() + camera.update(dt) + output_before = camera.data.output["distance_to_camera"].clone() + + # Change to a very different focal length (longer → tighter FOV → depth values differ at edges) + new_matrix = torch.tensor( + [[200.0, 0.0, 320.0], [0.0, 200.0, 240.0], [0.0, 0.0, 1.0]], + device=camera.device, + ).unsqueeze(0) + camera.set_intrinsic_matrices(new_matrix, focal_length=1.0) + + for _ in range(3): + sim.step() + camera.update(dt) + output_after = camera.data.output["distance_to_camera"].clone() + + # Outputs must differ after intrinsics change (different ray angles → different depths) + assert not torch.allclose(output_before, output_after, atol=1e-3), ( + "Depth output must change when intrinsic matrix is updated; unchanged output indicates stale warp ray buffers." + ) + # With depth_clipping_behavior="none" (default), missed rays produce inf — that is valid. + # No NaN values must appear; where rays hit, depth must be positive. + assert not torch.any(torch.isnan(output_after)), "Expected no NaN values in depth output after intrinsics update" + if torch.any(torch.isfinite(output_after)): + assert output_after[torch.isfinite(output_after)].min() > 0 diff --git a/source/isaaclab/test/sensors/test_ray_caster_integration.py b/source/isaaclab/test/sensors/test_ray_caster_integration.py new file mode 100644 index 000000000000..36b6d8ff4692 --- /dev/null +++ b/source/isaaclab/test/sensors/test_ray_caster_integration.py @@ -0,0 +1,439 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none + +"""Integration tests for ray caster sensor view paths, env_mask, and intrinsics. + +These tests require Isaac Sim (AppLauncher). They cover the integration-level +items from ``TODO_ray_caster_kernel_tests.md``: + +- ``_get_view_transforms_wp`` ArticulationView and RigidBodyView paths +- ``MultiMeshRayCaster`` env_mask behavior +- ``MultiMeshRayCasterCamera.set_intrinsic_matrices`` propagation +- ``_update_mesh_transforms`` non-identity orientation offset (known bug, xfail) +- Depth clipping ordering for ``MultiMeshRayCasterCamera`` +""" + +from isaaclab.app import AppLauncher + +simulation_app = AppLauncher(headless=True, enable_cameras=True).app + +import copy + +import numpy as np +import pytest +import torch +import warp as wp + +from pxr import UsdGeom, UsdPhysics + +import isaaclab.sim as sim_utils +from isaaclab.sensors.ray_caster import ( + MultiMeshRayCaster, + MultiMeshRayCasterCamera, + MultiMeshRayCasterCameraCfg, + MultiMeshRayCasterCfg, + RayCaster, + RayCasterCfg, + patterns, +) +from isaaclab.terrains.trimesh.utils import make_plane +from isaaclab.terrains.utils import create_prim_from_mesh + +_GROUND_PATH = "/World/Ground" +_DT = 0.01 + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_sim_and_ground(): + """Create a blank stage with a flat ground plane at z=0.""" + sim_utils.create_new_stage() + sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=_DT)) + mesh = make_plane(size=(100, 100), height=0.0, center_zero=True) + create_prim_from_mesh(_GROUND_PATH, mesh) + sim_utils.update_stage() + return sim + + +def _single_downward_ray_cfg(prim_path: str) -> RayCasterCfg: + """RayCasterCfg with a single downward ray, no offset, world alignment.""" + return RayCasterCfg( + prim_path=prim_path, + mesh_prim_paths=[_GROUND_PATH], + update_period=0, + offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + debug_vis=False, + pattern_cfg=patterns.GridPatternCfg(resolution=1.0, size=(0.0, 0.0), direction=(0.0, 0.0, -1.0)), + ray_alignment="world", + ) + + +@pytest.fixture +def sim_ground(): + sim = _make_sim_and_ground() + yield sim + sim.stop() + sim.clear_instance() + + +# --------------------------------------------------------------------------- +# _get_view_transforms_wp: ArticulationView path +# --------------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_articulation_view_path(sim_ground): + """Mount a ray caster on a prim with ArticulationRootAPI. + + Verifies that sensor pos_w matches the prim's initial position and that + the downward ray hits the ground plane. This exercises the + ``ArticulationView.get_root_transforms()`` quaternion-convention path in + :meth:`_get_view_transforms_wp`. + """ + sim = sim_ground + expected_pos = (3.0, 4.0, 5.0) + + prim_path = "/World/ArticulatedBody" + sim_utils.create_prim(prim_path, "Xform", translation=expected_pos) + stage = sim_utils.get_current_stage() + prim = stage.GetPrimAtPath(prim_path) + UsdPhysics.RigidBodyAPI.Apply(prim) + UsdPhysics.ArticulationRootAPI.Apply(prim) + # Mass is needed for physics; collision is needed for PhysX to track the body. + mass_api = UsdPhysics.MassAPI.Apply(prim) + mass_api.CreateMassAttr().Set(1.0) + # Create a small collision cube so PhysX treats this as a real body. + cube_path = f"{prim_path}/CollisionCube" + cube_geom = UsdGeom.Cube.Define(stage, cube_path) + cube_geom.CreateSizeAttr().Set(0.1) + UsdPhysics.CollisionAPI.Apply(stage.GetPrimAtPath(cube_path)) + sim_utils.update_stage() + + sensor = RayCaster(_single_downward_ray_cfg(prim_path)) + sim.reset() + sensor.update(_DT) + + pos_w = sensor.data.pos_w.torch[0].cpu().numpy() + np.testing.assert_allclose( + pos_w, + expected_pos, + atol=0.15, + err_msg="ArticulationView: sensor pos_w must match initial prim position", + ) + + hits = sensor.data.ray_hits_w.torch[0, 0].cpu().numpy() + assert abs(hits[2]) < 0.5, f"ArticulationView: downward ray should hit near z=0, got z={hits[2]}" + + +# --------------------------------------------------------------------------- +# _get_view_transforms_wp: RigidBodyView path +# --------------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_rigid_body_view_path(sim_ground): + """Mount a ray caster on a prim with RigidBodyAPI (no ArticulationRootAPI). + + Exercises the ``RigidBodyView.get_transforms()`` path in + :meth:`_get_view_transforms_wp`. + """ + sim = sim_ground + expected_pos = (1.0, 2.0, 6.0) + + prim_path = "/World/RigidBody" + sim_utils.create_prim(prim_path, "Xform", translation=expected_pos) + stage = sim_utils.get_current_stage() + prim = stage.GetPrimAtPath(prim_path) + UsdPhysics.RigidBodyAPI.Apply(prim) + mass_api = UsdPhysics.MassAPI.Apply(prim) + mass_api.CreateMassAttr().Set(1.0) + cube_path = f"{prim_path}/CollisionCube" + cube_geom = UsdGeom.Cube.Define(stage, cube_path) + cube_geom.CreateSizeAttr().Set(0.1) + UsdPhysics.CollisionAPI.Apply(stage.GetPrimAtPath(cube_path)) + sim_utils.update_stage() + + sensor = RayCaster(_single_downward_ray_cfg(prim_path)) + sim.reset() + sensor.update(_DT) + + pos_w = sensor.data.pos_w.torch[0].cpu().numpy() + np.testing.assert_allclose( + pos_w, + expected_pos, + atol=0.15, + err_msg="RigidBodyView: sensor pos_w must match initial prim position", + ) + + hits = sensor.data.ray_hits_w.torch[0, 0].cpu().numpy() + assert abs(hits[2]) < 0.5, f"RigidBodyView: downward ray should hit near z=0, got z={hits[2]}" + + +# --------------------------------------------------------------------------- +# MultiMeshRayCasterCamera.set_intrinsic_matrices +# --------------------------------------------------------------------------- + + +@pytest.fixture +def sim_ground_camera(): + """Fixture providing sim + a base MultiMeshRayCasterCameraCfg.""" + sim = _make_sim_and_ground() + + camera_cfg = MultiMeshRayCasterCameraCfg( + prim_path="/World/Camera", + mesh_prim_paths=[_GROUND_PATH], + update_period=0, + offset=MultiMeshRayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 5.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), + debug_vis=False, + pattern_cfg=patterns.PinholeCameraPatternCfg( + focal_length=24.0, + horizontal_aperture=20.955, + height=480, + width=640, + ), + data_types=["distance_to_camera"], + ) + + sim_utils.create_prim("/World/Camera", "Xform") + + yield sim, camera_cfg + + sim.stop() + sim.clear_instance() + + +@pytest.mark.isaacsim_ci +def test_multi_mesh_camera_set_intrinsic_matrices(sim_ground_camera): + """Depth output must change when intrinsics are updated on MultiMeshRayCasterCamera. + + The multi-mesh variant overrides ``_initialize_rays_impl`` without calling + ``super()``, so the warp view refresh path may differ from RayCasterCamera. + This test verifies that ``set_intrinsic_matrices`` actually takes effect. + """ + sim, camera_cfg = sim_ground_camera + + camera = MultiMeshRayCasterCamera(cfg=camera_cfg) + sim.reset() + + # Capture output with default intrinsics + for _ in range(3): + sim.step() + camera.update(_DT) + output_before = camera.data.output["distance_to_camera"].clone() + + # Change to a very different intrinsic matrix (different FOV) + new_matrix = torch.tensor( + [[200.0, 0.0, 320.0], [0.0, 200.0, 240.0], [0.0, 0.0, 1.0]], + device=camera.device, + ).unsqueeze(0) + camera.set_intrinsic_matrices(new_matrix, focal_length=1.0) + + for _ in range(3): + sim.step() + camera.update(_DT) + output_after = camera.data.output["distance_to_camera"].clone() + + assert not torch.allclose(output_before, output_after, atol=1e-3), ( + "MultiMeshRayCasterCamera: depth output must change after set_intrinsic_matrices; " + "unchanged output indicates stale warp ray buffers." + ) + assert not torch.any(torch.isnan(output_after)), "No NaN values expected after intrinsics update" + + +# --------------------------------------------------------------------------- +# Depth clipping ordering for MultiMeshRayCasterCamera +# --------------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_multi_mesh_camera_d2ip_and_d2c_independent(sim_ground_camera): + """Requesting both d2ip and d2c simultaneously must produce correct independent results. + + The ``distance_to_image_plane`` computation reads ``_ray_distance`` before + ``distance_to_camera`` clips it in-place. This test verifies the two data + types do not interfere with each other. + """ + sim, base_cfg = sim_ground_camera + + joint_cfg = copy.deepcopy(base_cfg) + joint_cfg.prim_path = "/World/CameraJoint" + joint_cfg.data_types = ["distance_to_image_plane", "distance_to_camera"] + joint_cfg.max_distance = 4.5 # camera is 5 m up, so some rays should be clipped + joint_cfg.depth_clipping_behavior = "max" + sim_utils.create_prim("/World/CameraJoint", "Xform") + cam_joint = MultiMeshRayCasterCamera(joint_cfg) + + d2ip_cfg = copy.deepcopy(base_cfg) + d2ip_cfg.prim_path = "/World/CameraD2IP" + d2ip_cfg.data_types = ["distance_to_image_plane"] + d2ip_cfg.max_distance = 4.5 + d2ip_cfg.depth_clipping_behavior = "max" + sim_utils.create_prim("/World/CameraD2IP", "Xform") + cam_d2ip = MultiMeshRayCasterCamera(d2ip_cfg) + + d2c_cfg = copy.deepcopy(base_cfg) + d2c_cfg.prim_path = "/World/CameraD2C" + d2c_cfg.data_types = ["distance_to_camera"] + d2c_cfg.max_distance = 4.5 + d2c_cfg.depth_clipping_behavior = "max" + sim_utils.create_prim("/World/CameraD2C", "Xform") + cam_d2c = MultiMeshRayCasterCamera(d2c_cfg) + + sim.reset() + + cam_joint.update(_DT) + cam_d2ip.update(_DT) + cam_d2c.update(_DT) + + d2ip_joint = cam_joint.data.output["distance_to_image_plane"] + d2c_joint = cam_joint.data.output["distance_to_camera"] + d2ip_solo = cam_d2ip.data.output["distance_to_image_plane"] + d2c_solo = cam_d2c.data.output["distance_to_camera"] + + # Joint camera must match solo cameras (clipping one must not corrupt the other) + torch.testing.assert_close(d2ip_joint, d2ip_solo, atol=1e-5, rtol=1e-5) + torch.testing.assert_close(d2c_joint, d2c_solo, atol=1e-5, rtol=1e-5) + + +# --------------------------------------------------------------------------- +# MultiMeshRayCaster env_mask behavior +# --------------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_multi_mesh_env_mask_preserves_masked_buffers(sim_ground): + """Masked environments must retain their pre-update buffer values. + + Creates a single-env MultiMeshRayCaster, captures output after one update, + then calls ``_update_buffers_impl`` with the environment masked out and + verifies the output buffers are unchanged. + """ + sim = sim_ground + + prim_path = "/World/Sensor" + sim_utils.create_prim(prim_path, "Xform", translation=(0.0, 0.0, 3.0)) + + cfg = MultiMeshRayCasterCfg( + prim_path=prim_path, + mesh_prim_paths=[_GROUND_PATH], + update_period=0, + offset=MultiMeshRayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + debug_vis=False, + pattern_cfg=patterns.GridPatternCfg(resolution=1.0, size=(0.0, 0.0), direction=(0.0, 0.0, -1.0)), + ray_alignment="world", + ) + sensor = MultiMeshRayCaster(cfg) + sim.reset() + + # First update: populate buffers with real values + sensor.update(_DT) + hits_before = sensor.data.ray_hits_w.torch.clone() + + # Second update with env masked out: buffers must not change + mask_all_false = wp.array([False], dtype=wp.bool, device=sensor.device) + sensor._update_buffers_impl(mask_all_false) + + hits_after = sensor.data.ray_hits_w.torch + torch.testing.assert_close( + hits_after, + hits_before, + atol=0.0, + rtol=0.0, + msg="Masked env: ray_hits_w must be unchanged after update with env masked out", + ) + + +# --------------------------------------------------------------------------- +# _update_mesh_transforms: non-identity orientation offset +# --------------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_update_mesh_transforms_non_identity_offset(sim_ground): + """Tracked mesh position must account for body orientation when applying offset. + + Setup: a kinematic rigid body at (0, 0, 2) rotated 90 deg around Z, with a + child mesh offset by (1, 0, 0) in the body's local frame. + + Correct world position of mesh = body_pos + rotate(body_ori, local_offset) + = (0, 0, 2) + rotate(90degZ, (1, 0, 0)) + = (0, 0, 2) + (0, 1, 0) + = (0, 1, 2) + + Naive subtraction (the old bug) would give: body_pos - offset = (-1, 0, 2). + """ + sim = sim_ground + + from isaaclab.utils.math import quat_from_euler_xyz + + # 90 deg yaw quaternion in xyzw + yaw90 = quat_from_euler_xyz(torch.tensor([0.0]), torch.tensor([0.0]), torch.tensor([torch.pi / 2])) + yaw90_xyzw = tuple(yaw90[0].tolist()) + + # Create a kinematic rigid body at (0, 0, 2) rotated 90 deg around Z + body_path = "/World/DynamicBody" + sim_utils.create_prim(body_path, "Xform", translation=(0.0, 0.0, 2.0), orientation=yaw90_xyzw) + stage = sim_utils.get_current_stage() + body_prim = stage.GetPrimAtPath(body_path) + UsdPhysics.RigidBodyAPI.Apply(body_prim) + mass_api = UsdPhysics.MassAPI.Apply(body_prim) + mass_api.CreateMassAttr().Set(1.0) + body_prim.GetAttribute("physics:kinematicEnabled").Set(True) + + # Create a child Xform offset by (1, 0, 0) in the body's local frame, + # then place mesh geometry under it. The Xform translation is the offset + # that _obtain_trackable_prim_view / resolve_prim_pose will discover. + child_mesh_path = f"{body_path}/OffsetMesh" + sim_utils.create_prim(child_mesh_path, "Xform", translation=(1.0, 0.0, 0.0)) + mesh_data = make_plane(size=(2, 2), height=0.0, center_zero=True) + create_prim_from_mesh(f"{child_mesh_path}/Plane", mesh_data) + # Add collision so PhysX tracks the body + col_path = f"{body_path}/CollisionCube" + cube_geom = UsdGeom.Cube.Define(stage, col_path) + cube_geom.CreateSizeAttr().Set(0.1) + UsdPhysics.CollisionAPI.Apply(stage.GetPrimAtPath(col_path)) + sim_utils.update_stage() + + # Create a sensor prim to mount the MultiMeshRayCaster on + sensor_path = "/World/SensorMount" + sim_utils.create_prim(sensor_path, "Xform", translation=(0.0, 0.0, 5.0)) + + # Configure MultiMeshRayCaster to track the child mesh + cfg = MultiMeshRayCasterCfg( + prim_path=sensor_path, + mesh_prim_paths=[ + MultiMeshRayCasterCfg.RaycastTargetCfg( + prim_expr=child_mesh_path, + track_mesh_transforms=True, + ), + ], + update_period=0, + offset=MultiMeshRayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + debug_vis=False, + pattern_cfg=patterns.GridPatternCfg(resolution=1.0, size=(0.0, 0.0), direction=(0.0, 0.0, -1.0)), + ray_alignment="world", + ) + sensor = MultiMeshRayCaster(cfg) + sim.reset() + sensor.update(_DT) + + # Verify mesh position: body at (0,0,2) rotated 90deg Z, child offset (1,0,0) local + # Expected: (0, 0, 2) + rotate(90degZ, (1,0,0)) = (0, 0, 2) + (0, 1, 0) = (0, 1, 2) + mesh_pos = sensor._mesh_positions_w_torch.clone() + np.testing.assert_allclose( + mesh_pos[0, 0].cpu().numpy(), + [0.0, 1.0, 2.0], + atol=0.15, + err_msg=( + "Mesh position should be (0, 1, 2) via proper frame decomposition: " + "body_pos + rotate(body_ori, local_offset). " + "If this fails, the offset is not being rotated by the body orientation." + ), + ) diff --git a/source/isaaclab/test/sensors/test_ray_caster_kernels.py b/source/isaaclab/test/sensors/test_ray_caster_kernels.py new file mode 100644 index 000000000000..cc57e4f1eec5 --- /dev/null +++ b/source/isaaclab/test/sensors/test_ray_caster_kernels.py @@ -0,0 +1,577 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for ray caster kernels. + +Tests for kernels in ``sensors/ray_caster/kernels.py`` and +``utils/warp/kernels.py``. Exercised directly with hand-crafted warp arrays +and analytically computed expected outputs. No simulation, no stage, no +AppLauncher -- just warp and numpy on CPU (or CUDA when available). + +See ``test_update_ray_caster_kernel.py`` for tests of +:func:`update_ray_caster_kernel`. +""" + +from __future__ import annotations + +import importlib.util +import math +import os + +import numpy as np +import pytest +import warp as wp + +# --------------------------------------------------------------------------- +# Import kernel modules directly (avoids Isaac Sim / Omniverse dependencies) +# --------------------------------------------------------------------------- + +_SENSOR_KERNEL_PATH = os.path.join( + os.path.dirname(__file__), + os.pardir, + os.pardir, + "isaaclab", + "sensors", + "ray_caster", + "kernels.py", +) +_spec = importlib.util.spec_from_file_location("ray_caster_kernels", os.path.normpath(_SENSOR_KERNEL_PATH)) +_sensor_mod = importlib.util.module_from_spec(_spec) +_spec.loader.exec_module(_sensor_mod) + +_WARP_KERNEL_PATH = os.path.join( + os.path.dirname(__file__), + os.pardir, + os.pardir, + "isaaclab", + "utils", + "warp", + "kernels.py", +) +_warp_spec = importlib.util.spec_from_file_location("warp_kernels", os.path.normpath(_WARP_KERNEL_PATH)) +_warp_mod = importlib.util.module_from_spec(_warp_spec) +_warp_spec.loader.exec_module(_warp_mod) + +compute_distance_to_image_plane_masked_kernel = _sensor_mod.compute_distance_to_image_plane_masked_kernel +apply_depth_clipping_masked_kernel = _sensor_mod.apply_depth_clipping_masked_kernel +apply_z_drift_kernel = _sensor_mod.apply_z_drift_kernel +quat_yaw_only = _sensor_mod.quat_yaw_only + +raycast_dynamic_meshes_kernel = _warp_mod.raycast_dynamic_meshes_kernel + +# --------------------------------------------------------------------------- +# Constants & setup +# --------------------------------------------------------------------------- + +wp.init() +DEVICE = "cuda:0" if wp.is_cuda_available() else "cpu" +ATOL = 1e-5 + + +# --------------------------------------------------------------------------- +# Wrapper kernel for quat_yaw_only (@wp.func cannot be launched directly) +# --------------------------------------------------------------------------- + + +@wp.kernel(enable_backward=False) +def _quat_yaw_only_test_kernel( + q_in: wp.array(dtype=wp.quatf), + q_out: wp.array(dtype=wp.quatf), +): + tid = wp.tid() + q_out[tid] = quat_yaw_only(q_in[tid]) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _euler_to_quat_xyzw(roll: float, pitch: float, yaw: float) -> tuple[float, float, float, float]: + """Euler angles (intrinsic XYZ) to quaternion in xyzw convention.""" + cr, sr = math.cos(roll / 2), math.sin(roll / 2) + cp, sp = math.cos(pitch / 2), math.sin(pitch / 2) + cy, sy = math.cos(yaw / 2), math.sin(yaw / 2) + qx = sr * cp * cy - cr * sp * sy + qy = cr * sp * cy + sr * cp * sy + qz = cr * cp * sy - sr * sp * cy + qw = cr * cp * cy + sr * sp * sy + return (qx, qy, qz, qw) + + +def _make_flat_mesh(size: float = 4.0) -> wp.Mesh: + """Create a flat square mesh in the XY plane at z=0, centered at origin.""" + half = size / 2.0 + vertices = np.array( + [[-half, -half, 0.0], [half, -half, 0.0], [half, half, 0.0], [-half, half, 0.0]], + dtype=np.float32, + ) + indices = np.array([0, 1, 2, 0, 2, 3], dtype=np.int32) + return wp.Mesh( + points=wp.array(vertices, dtype=wp.vec3, device=DEVICE), + indices=wp.array(indices, dtype=wp.int32, device=DEVICE), + ) + + +def _to_numpy(a: wp.array) -> np.ndarray: + """Convert a warp array to numpy, handling GPU arrays transparently.""" + return a.numpy() + + +# --------------------------------------------------------------------------- +# Tests: raycast_dynamic_meshes_kernel +# --------------------------------------------------------------------------- + + +class TestRaycastDynamicMeshesKernel: + """Tests for :func:`raycast_dynamic_meshes_kernel` from ``utils/warp/kernels.py``. + + Each test creates trivial warp meshes (flat quads) and verifies raycasting + results against analytical expectations. + """ + + IDENT_Q = [0.0, 0.0, 0.0, 1.0] + + @staticmethod + def _launch( + num_envs: int, + num_meshes: int, + num_rays: int, + env_mask: np.ndarray, + mesh_ids: np.ndarray, + ray_starts: np.ndarray, + ray_dirs: np.ndarray, + mesh_pos: np.ndarray, + mesh_rot: np.ndarray, + max_dist: float = 1e6, + sentinel: float | None = None, + ) -> dict[str, np.ndarray]: + """Build warp arrays, launch kernel, return outputs as numpy dicts.""" + env_mask_wp = wp.array(env_mask.astype(np.bool_), dtype=wp.bool, device=DEVICE) + mesh_wp = wp.array(mesh_ids, dtype=wp.uint64, device=DEVICE) + starts_wp = wp.array(ray_starts, dtype=wp.vec3f, device=DEVICE) + dirs_wp = wp.array(ray_dirs, dtype=wp.vec3f, device=DEVICE) + mpos_wp = wp.array(mesh_pos, dtype=wp.vec3f, device=DEVICE) + mrot_wp = wp.array(mesh_rot, dtype=wp.quatf, device=DEVICE) + + fill = sentinel if sentinel is not None else float("inf") + + hits_np = np.full((num_envs, num_rays, 3), fill, dtype=np.float32) + ray_hits = wp.array(hits_np, dtype=wp.vec3f, device=DEVICE) + + dist_np = np.full((num_envs, num_rays), fill, dtype=np.float32) + ray_distance = wp.array(dist_np, dtype=wp.float32, device=DEVICE) + + normal_np = np.full((num_envs, num_rays, 3), fill, dtype=np.float32) + ray_normal = wp.array(normal_np, dtype=wp.vec3f, device=DEVICE) + + face_np = np.full((num_envs, num_rays), -1, dtype=np.int32) + ray_face_id = wp.array(face_np, dtype=wp.int32, device=DEVICE) + + mesh_id_np = np.full((num_envs, num_rays), -1, dtype=np.int16) + ray_mesh_id = wp.array(mesh_id_np, dtype=wp.int16, device=DEVICE) + + wp.launch( + raycast_dynamic_meshes_kernel, + dim=(num_meshes, num_envs, num_rays), + inputs=[ + env_mask_wp, + mesh_wp, + starts_wp, + dirs_wp, + ray_hits, + ray_distance, + ray_normal, + ray_face_id, + ray_mesh_id, + mpos_wp, + mrot_wp, + max_dist, + 1, # return_normal + 1, # return_face_id + 1, # return_mesh_id + ], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + + return { + "hits": _to_numpy(ray_hits), + "distance": _to_numpy(ray_distance), + "normal": _to_numpy(ray_normal), + "face_id": _to_numpy(ray_face_id), + "mesh_id": _to_numpy(ray_mesh_id), + } + + def test_env_mask_skipping(self): + """Env 0 masked out -- verify output buffers retain sentinel values.""" + mesh = _make_flat_mesh() + iq = self.IDENT_Q + out = self._launch( + num_envs=2, + num_meshes=1, + num_rays=1, + env_mask=np.array([False, True]), + mesh_ids=np.array([[mesh.id], [mesh.id]], dtype=np.uint64), + ray_starts=np.array([[[0, 0, 10]], [[0, 0, 10]]], dtype=np.float32), + ray_dirs=np.array([[[0, 0, -1]], [[0, 0, -1]]], dtype=np.float32), + mesh_pos=np.array([[[0, 0, 2]], [[0, 0, 2]]], dtype=np.float32), + mesh_rot=np.array([[iq], [iq]], dtype=np.float32), + sentinel=999.0, + ) + + # Env 0 (masked): all outputs retain sentinel / initial fill + np.testing.assert_allclose(out["hits"][0, 0], [999, 999, 999], atol=ATOL) + assert out["distance"][0, 0] == pytest.approx(999.0, abs=ATOL) + np.testing.assert_allclose(out["normal"][0, 0], [999, 999, 999], atol=ATOL) + assert out["face_id"][0, 0] == -1 + assert out["mesh_id"][0, 0] == -1 + + # Env 1 (active): should have hit the mesh at z=2, distance 8 + np.testing.assert_allclose(out["hits"][1, 0], [0, 0, 2], atol=ATOL) + assert out["distance"][1, 0] == pytest.approx(8.0, abs=ATOL) + assert out["mesh_id"][1, 0] == 0 + + def test_closest_hit_overlapping_meshes(self): + """Two meshes at different distances -- closer hit wins. + + Mesh A at z=2 (farther), Mesh B at z=4 (closer to ray origin at z=10). + Ray from (0,0,10) going (0,0,-1). Expected: hit Mesh B at distance 6. + """ + mesh_a = _make_flat_mesh() + mesh_b = _make_flat_mesh() + iq = self.IDENT_Q + + out = self._launch( + num_envs=1, + num_meshes=2, + num_rays=1, + env_mask=np.array([True]), + mesh_ids=np.array([[mesh_a.id, mesh_b.id]], dtype=np.uint64), + ray_starts=np.array([[[0, 0, 10]]], dtype=np.float32), + ray_dirs=np.array([[[0, 0, -1]]], dtype=np.float32), + mesh_pos=np.array([[[0, 0, 2], [0, 0, 4]]], dtype=np.float32), + mesh_rot=np.array([[iq, iq]], dtype=np.float32), + ) + + np.testing.assert_allclose(out["hits"][0, 0], [0, 0, 4], atol=ATOL) + assert out["distance"][0, 0] == pytest.approx(6.0, abs=ATOL) + np.testing.assert_allclose(out["normal"][0, 0], [0, 0, 1], atol=ATOL) + assert out["mesh_id"][0, 0] == 1 # mesh_b is closer + + def test_mesh_transform_application(self): + """Mesh translated/rotated -- verify hits in correct world-space coordinates. + + Mesh: flat XY quad at z=0 (local), placed at world (5,0,0) with 90 deg + Y rotation. This turns it into a vertical plane at x=5. + Ray from (10,0,0) going (-1,0,0) should hit at (5,0,0), distance=5. + World-space normal: local (0,0,1) rotated by 90 deg Y = (1,0,0). + """ + mesh = _make_flat_mesh() + rot90y = [0.0, math.sin(math.pi / 4), 0.0, math.cos(math.pi / 4)] + + out = self._launch( + num_envs=1, + num_meshes=1, + num_rays=1, + env_mask=np.array([True]), + mesh_ids=np.array([[mesh.id]], dtype=np.uint64), + ray_starts=np.array([[[10, 0, 0]]], dtype=np.float32), + ray_dirs=np.array([[[-1, 0, 0]]], dtype=np.float32), + mesh_pos=np.array([[[5, 0, 0]]], dtype=np.float32), + mesh_rot=np.array([[rot90y]], dtype=np.float32), + ) + + np.testing.assert_allclose(out["hits"][0, 0], [5, 0, 0], atol=ATOL) + assert out["distance"][0, 0] == pytest.approx(5.0, abs=ATOL) + np.testing.assert_allclose(out["normal"][0, 0], [1, 0, 0], atol=ATOL) + + def test_equidistant_meshes(self): + """Two meshes at exact same distance -- hit position is always correct. + + Known limitation (warp#1058): when two meshes are equidistant, the + ``atomic_min`` + equality-check pattern is not fully thread-safe. + Normals, face_ids, and mesh_ids may come from either mesh. The hit + *position* is always correct because both threads compute the same + world-space point. + """ + mesh_a = _make_flat_mesh() + mesh_b = _make_flat_mesh() + iq = self.IDENT_Q + + out = self._launch( + num_envs=1, + num_meshes=2, + num_rays=1, + env_mask=np.array([True]), + mesh_ids=np.array([[mesh_a.id, mesh_b.id]], dtype=np.uint64), + ray_starts=np.array([[[0, 0, 10]]], dtype=np.float32), + ray_dirs=np.array([[[0, 0, -1]]], dtype=np.float32), + mesh_pos=np.array([[[0, 0, 3], [0, 0, 3]]], dtype=np.float32), + mesh_rot=np.array([[iq, iq]], dtype=np.float32), + ) + + # Position and distance are always correct, even under the race + np.testing.assert_allclose(out["hits"][0, 0], [0, 0, 3], atol=ATOL) + assert out["distance"][0, 0] == pytest.approx(7.0, abs=ATOL) + # mesh_id can be 0 or 1 -- both are valid under the race condition + assert out["mesh_id"][0, 0] in (0, 1) + + +# --------------------------------------------------------------------------- +# Tests: compute_distance_to_image_plane_masked_kernel +# --------------------------------------------------------------------------- + + +class TestComputeDistanceToImagePlaneMaskedKernel: + """Tests for :func:`compute_distance_to_image_plane_masked_kernel`.""" + + @staticmethod + def _launch( + quat_xyzw: list[float], + ray_distance: list[list[float]], + ray_dirs: list[list[list[float]]], + env_mask: list[bool] | None = None, + ) -> np.ndarray: + """Launch kernel and return distance_to_image_plane as numpy.""" + num_envs = len(ray_distance) + num_rays = len(ray_distance[0]) + if env_mask is None: + env_mask = [True] * num_envs + + mask_wp = wp.array(np.array(env_mask, dtype=np.bool_), dtype=wp.bool, device=DEVICE) + quat_np = np.array([quat_xyzw] * num_envs, dtype=np.float32) + quat_wp = wp.array(quat_np, dtype=wp.quatf, device=DEVICE) + ray_dist_wp = wp.array(np.array(ray_distance, dtype=np.float32), dtype=wp.float32, device=DEVICE) + dirs_wp = wp.array(np.array(ray_dirs, dtype=np.float32), dtype=wp.vec3f, device=DEVICE) + out_wp = wp.zeros((num_envs, num_rays), dtype=wp.float32, device=DEVICE) + + wp.launch( + compute_distance_to_image_plane_masked_kernel, + dim=(num_envs, num_rays), + inputs=[mask_wp, quat_wp, ray_dist_wp, dirs_wp], + outputs=[out_wp], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + return _to_numpy(out_wp) + + def test_known_camera_orientation(self): + """Identity camera, ray along +X at distance 5 -- d2ip equals 5.""" + result = self._launch( + quat_xyzw=[0, 0, 0, 1], + ray_distance=[[5.0]], + ray_dirs=[[[1, 0, 0]]], + ) + assert result[0, 0] == pytest.approx(5.0, abs=ATOL) + + def test_off_axis_camera(self): + """Camera pitched 45 deg around Y, ray going world -Z. + + Camera forward (+X_cam) in world = (cos45, 0, -sin45). + Displacement = 10 * (0, 0, -1) = (0, 0, -10). + Projection onto camera forward = dot((0,0,-10), (cos45,0,-sin45)) + = 10 * sin(45 deg). + """ + pitch45 = list(_euler_to_quat_xyzw(0, math.pi / 4, 0)) + result = self._launch( + quat_xyzw=pitch45, + ray_distance=[[10.0]], + ray_dirs=[[[0, 0, -1]]], + ) + expected = 10.0 * math.sin(math.pi / 4) + assert result[0, 0] == pytest.approx(expected, abs=ATOL) + + def test_inf_distance(self): + """Inf distance produces NaN through the projection (inf * 0 = NaN). + + When a ray misses, ray_distance is inf. Multiplying inf by zero-valued + ray-direction components yields NaN (IEEE 754), which propagates through + the quaternion rotation. The downstream + :func:`apply_depth_clipping_masked_kernel` handles NaN correctly via + ``wp.isnan()``, so the overall pipeline is sound. + """ + result = self._launch( + quat_xyzw=[0, 0, 0, 1], + ray_distance=[[float("inf")]], + ray_dirs=[[[1, 0, 0]]], + ) + assert np.isnan(result[0, 0]), f"Expected NaN from inf*0 contamination, got {result[0, 0]}" + + +# --------------------------------------------------------------------------- +# Tests: apply_depth_clipping_masked_kernel +# --------------------------------------------------------------------------- + + +class TestApplyDepthClippingMaskedKernel: + """Tests for :func:`apply_depth_clipping_masked_kernel`.""" + + @staticmethod + def _launch( + depth_values: list[list[float]], + max_dist: float, + fill_val: float, + env_mask: list[bool] | None = None, + ) -> np.ndarray: + """Launch kernel and return clipped depth as numpy.""" + num_envs = len(depth_values) + num_rays = len(depth_values[0]) + if env_mask is None: + env_mask = [True] * num_envs + + mask_wp = wp.array(np.array(env_mask, dtype=np.bool_), dtype=wp.bool, device=DEVICE) + depth_wp = wp.array(np.array(depth_values, dtype=np.float32), dtype=wp.float32, device=DEVICE) + + wp.launch( + apply_depth_clipping_masked_kernel, + dim=(num_envs, num_rays), + inputs=[mask_wp, max_dist, fill_val], + outputs=[depth_wp], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + return _to_numpy(depth_wp) + + def test_boundary_at_max_dist(self): + """Value at exactly max_dist is preserved (not clipped).""" + result = self._launch([[10.0]], max_dist=10.0, fill_val=0.0) + assert result[0, 0] == pytest.approx(10.0, abs=ATOL) + + def test_above_max_dist(self): + """Value above max_dist is replaced with fill_val.""" + result = self._launch([[10.001]], max_dist=10.0, fill_val=0.0) + assert result[0, 0] == pytest.approx(0.0, abs=ATOL) + + def test_nan_value(self): + """NaN value is replaced with fill_val.""" + result = self._launch([[float("nan")]], max_dist=10.0, fill_val=0.0) + assert result[0, 0] == pytest.approx(0.0, abs=ATOL) + + def test_inf_value(self): + """Inf is clipped (inf > max_dist is true).""" + result = self._launch([[float("inf")]], max_dist=10.0, fill_val=0.0) + assert result[0, 0] == pytest.approx(0.0, abs=ATOL) + + def test_negative_depth(self): + """Negative depth passes through unclipped (valid for distance-to-image-plane).""" + result = self._launch([[-3.5]], max_dist=10.0, fill_val=0.0) + assert result[0, 0] == pytest.approx(-3.5, abs=ATOL) + + def test_env_mask(self): + """Masked env retains original value -- clipping is not applied.""" + result = self._launch( + depth_values=[[15.0], [15.0]], + max_dist=10.0, + fill_val=0.0, + env_mask=[False, True], + ) + # Env 0 (masked): unchanged + assert result[0, 0] == pytest.approx(15.0, abs=ATOL) + # Env 1 (active): clipped + assert result[1, 0] == pytest.approx(0.0, abs=ATOL) + + def test_fill_val_zero_vs_max(self): + """fill_val=0.0 and fill_val=max_dist produce correct replacements.""" + max_dist = 10.0 + + result_zero = self._launch([[15.0]], max_dist=max_dist, fill_val=0.0) + assert result_zero[0, 0] == pytest.approx(0.0, abs=ATOL) + + result_max = self._launch([[15.0]], max_dist=max_dist, fill_val=max_dist) + assert result_max[0, 0] == pytest.approx(max_dist, abs=ATOL) + + +# --------------------------------------------------------------------------- +# Tests: apply_z_drift_kernel +# --------------------------------------------------------------------------- + + +class TestApplyZDriftKernel: + """Tests for :func:`apply_z_drift_kernel`.""" + + @staticmethod + def _launch( + hits: list[list[list[float]]], + drift: list[list[float]], + env_mask: list[bool] | None = None, + ) -> np.ndarray: + """Launch kernel and return modified ray_hits as numpy.""" + num_envs = len(hits) + num_rays = len(hits[0]) + if env_mask is None: + env_mask = [True] * num_envs + + mask_wp = wp.array(np.array(env_mask, dtype=np.bool_), dtype=wp.bool, device=DEVICE) + drift_wp = wp.array(np.array(drift, dtype=np.float32), dtype=wp.vec3f, device=DEVICE) + hits_wp = wp.array(np.array(hits, dtype=np.float32), dtype=wp.vec3f, device=DEVICE) + + wp.launch( + apply_z_drift_kernel, + dim=(num_envs, num_rays), + inputs=[mask_wp, drift_wp], + outputs=[hits_wp], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + return _to_numpy(hits_wp) + + def test_known_drift(self): + """ray_cast_drift = (0, 0, 1.5) shifts ray hit z by exactly 1.5.""" + result = self._launch( + hits=[[[3.0, 4.0, 5.0]]], + drift=[[0.0, 0.0, 1.5]], + ) + np.testing.assert_allclose(result[0, 0], [3.0, 4.0, 6.5], atol=ATOL) + + def test_only_z_component(self): + """Only z-component of drift is applied; x and y are unchanged.""" + result = self._launch( + hits=[[[3.0, 4.0, 5.0]]], + drift=[[0.5, 0.3, 1.0]], + ) + np.testing.assert_allclose(result[0, 0], [3.0, 4.0, 6.0], atol=ATOL) + + +# --------------------------------------------------------------------------- +# Tests: quat_yaw_only +# --------------------------------------------------------------------------- + + +class TestQuatYawOnly: + """Tests for :func:`quat_yaw_only` (a ``@wp.func`` tested via wrapper kernel).""" + + def test_gimbal_lock(self): + """At pitch = +/-pi/2, atan2 is near-degenerate but should produce a + finite, unit-norm, pure-yaw quaternion (only z and w components). + """ + q_down = _euler_to_quat_xyzw(0, math.pi / 2, 0) # pitch = +pi/2 + q_up = _euler_to_quat_xyzw(0, -math.pi / 2, 0) # pitch = -pi/2 + + q_in_np = np.array([list(q_down), list(q_up)], dtype=np.float32) + q_in = wp.array(q_in_np, dtype=wp.quatf, device=DEVICE) + q_out = wp.zeros(2, dtype=wp.quatf, device=DEVICE) + + wp.launch( + _quat_yaw_only_test_kernel, + dim=2, + inputs=[q_in], + outputs=[q_out], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + + result = _to_numpy(q_out) + + for i in range(2): + qx, qy, qz, qw = result[i] + # Must be finite (no NaN / inf) + assert np.isfinite(result[i]).all(), f"Non-finite output at index {i}: {result[i]}" + # Must be a pure-yaw quaternion: x ~ 0, y ~ 0 + assert abs(qx) < ATOL, f"x-component should be ~0 at gimbal lock, got {qx}" + assert abs(qy) < ATOL, f"y-component should be ~0 at gimbal lock, got {qy}" + # Must be unit-norm + norm = math.sqrt(float(qx) ** 2 + float(qy) ** 2 + float(qz) ** 2 + float(qw) ** 2) + assert norm == pytest.approx(1.0, abs=ATOL), f"Non-unit quaternion at index {i}: norm={norm}" diff --git a/source/isaaclab/test/sensors/test_ray_caster_patterns.py b/source/isaaclab/test/sensors/test_ray_caster_patterns.py index cab9e4af7245..5edcbe55d7c6 100644 --- a/source/isaaclab/test/sensors/test_ray_caster_patterns.py +++ b/source/isaaclab/test/sensors/test_ray_caster_patterns.py @@ -172,7 +172,7 @@ def test_lidar_pattern_basic_properties(self, device): ray_starts, ray_directions = patterns.lidar_pattern(cfg, device) # Check that all directions are unit vectors - norms = torch.norm(ray_directions, dim=1) + norms = torch.linalg.norm(ray_directions, dim=1) torch.testing.assert_close(norms, torch.ones_like(norms), rtol=1e-5, atol=1e-5) # All rays should start from origin @@ -302,7 +302,7 @@ def test_bpearl_pattern_basic_properties(self, device): ray_starts, ray_directions = patterns.bpearl_pattern(cfg, device) # Check that all directions are unit vectors - norms = torch.norm(ray_directions, dim=1) + norms = torch.linalg.norm(ray_directions, dim=1) torch.testing.assert_close(norms, torch.ones_like(norms), rtol=1e-5, atol=1e-5) # All rays should start from origin @@ -375,7 +375,7 @@ def test_pinhole_camera_pattern_basic_properties(self, device): ray_starts, ray_directions = patterns.pinhole_camera_pattern(cfg, intrinsic_matrix, device) # Check that all directions are unit vectors - norms = torch.norm(ray_directions, dim=2) + norms = torch.linalg.norm(ray_directions, dim=2) torch.testing.assert_close(norms, torch.ones_like(norms), rtol=1e-5, atol=1e-5) # All rays should start from origin diff --git a/source/isaaclab/test/sensors/test_ray_caster_sensor.py b/source/isaaclab/test/sensors/test_ray_caster_sensor.py new file mode 100644 index 000000000000..3326c524f958 --- /dev/null +++ b/source/isaaclab/test/sensors/test_ray_caster_sensor.py @@ -0,0 +1,271 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none + +"""Tests for RayCaster sensor behavior: alignment modes and reset.""" + +from isaaclab.app import AppLauncher + +simulation_app = AppLauncher(headless=True).app + +import numpy as np +import pytest +import torch + +import isaaclab.sim as sim_utils +from isaaclab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns +from isaaclab.terrains.trimesh.utils import make_plane +from isaaclab.terrains.utils import create_prim_from_mesh +from isaaclab.utils.math import quat_from_euler_xyz + +# ------------------------------------------------------------------- +# Helpers +# ------------------------------------------------------------------- + +_GROUND_PATH = "/World/Ground" + + +def _make_sim_and_ground(): + """Create a blank stage with a flat ground plane at z=0 and return the SimulationContext.""" + sim_utils.create_new_stage() + dt = 0.01 + sim_cfg = sim_utils.SimulationCfg(dt=dt) + sim = sim_utils.SimulationContext(sim_cfg) + mesh = make_plane(size=(100, 100), height=0.0, center_zero=True) + create_prim_from_mesh(_GROUND_PATH, mesh) + sim_utils.update_stage() + return sim + + +def _ray_caster_cfg(prim_path: str, alignment: str) -> RayCasterCfg: + """Single downward ray, no offset from prim.""" + return RayCasterCfg( + prim_path=prim_path, + mesh_prim_paths=[_GROUND_PATH], + update_period=0, + offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + debug_vis=False, + pattern_cfg=patterns.GridPatternCfg(resolution=1.0, size=(0.0, 0.0), direction=(0.0, 0.0, -1.0)), + ray_alignment=alignment, + ) + + +@pytest.fixture +def sim_ground(): + sim = _make_sim_and_ground() + yield sim + sim.stop() + sim.clear_instance() + + +# ------------------------------------------------------------------- +# Alignment mode tests +# ------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_world_alignment_ignores_sensor_pitch(sim_ground): + """In 'world' alignment, ray direction is always (0,0,-1) regardless of sensor pitch. + + Two sensors at the same location: one upright (identity), one pitched 30°. + World-mode sensors must produce the same hit position (straight below at z=0). + """ + sim = sim_ground + + # Upright sensor: identity orientation + sim_utils.create_prim("/World/SensorUpright", "Xform", translation=(0.0, 0.0, 2.0)) + # Pitched 30° sensor — orientation=(x,y,z,w) per Isaac Lab convention + pitch_quat = quat_from_euler_xyz( + torch.tensor([0.0]), torch.tensor([np.pi / 6]), torch.tensor([0.0]) + ) # shape (1, 4), xyzw + sim_utils.create_prim( + "/World/SensorPitched", + "Xform", + translation=(0.0, 0.0, 2.0), + orientation=tuple(pitch_quat[0].tolist()), # xyzw + ) + + sensor_upright = RayCaster(_ray_caster_cfg("/World/SensorUpright", "world")) + sensor_pitched = RayCaster(_ray_caster_cfg("/World/SensorPitched", "world")) + sim.reset() + + dt = 0.01 + sensor_upright.update(dt) + sensor_pitched.update(dt) + + # ray_hits_w returns a ProxyArray; use .torch for tensor indexing. + hits_upright = sensor_upright.data.ray_hits_w.torch # (1, 1, 3) + hits_pitched = sensor_pitched.data.ray_hits_w.torch + + # Both must hit z=0 (straight down, world frame direction) + assert abs(hits_upright[0, 0, 2].item()) < 0.02, ( + f"Upright world sensor must hit z≈0, got {hits_upright[0, 0, 2].item()}" + ) + assert abs(hits_pitched[0, 0, 2].item()) < 0.02, ( + f"Pitched world sensor must hit z≈0, got {hits_pitched[0, 0, 2].item()}" + ) + # Lateral positions must agree (same start at [0,0,2] + same direction [0,0,-1]) + torch.testing.assert_close(hits_upright, hits_pitched, atol=0.02, rtol=0) + + +@pytest.mark.isaacsim_ci +def test_base_alignment_rotates_ray_direction(sim_ground): + """In 'base' alignment, ray direction follows the full sensor orientation. + + A sensor pitched +30° around Y (quat_from_euler_xyz(pitch=pi/6)): + - Rotates (0,0,-1) to (-sin(30°), 0, -cos(30°)) = (-0.5, 0, -0.866) + - world mode → ray still goes straight down, hits x≈0, z≈0 + - base mode → ray tilts, hits at x ≈ -2*tan(30°) ≈ -1.155 + """ + sim = sim_ground + + pitch_quat = quat_from_euler_xyz( + torch.tensor([0.0]), torch.tensor([np.pi / 6]), torch.tensor([0.0]) + ) # shape (1, 4), xyzw + orientation = tuple(pitch_quat[0].tolist()) + + sim_utils.create_prim("/World/SensorWorld", "Xform", translation=(0.0, 0.0, 2.0), orientation=orientation) + sim_utils.create_prim("/World/SensorBase", "Xform", translation=(0.0, 0.0, 2.0), orientation=orientation) + + sensor_world = RayCaster(_ray_caster_cfg("/World/SensorWorld", "world")) + sensor_base = RayCaster(_ray_caster_cfg("/World/SensorBase", "base")) + sim.reset() + + dt = 0.01 + sensor_world.update(dt) + sensor_base.update(dt) + + hits_world = sensor_world.data.ray_hits_w.torch # (1, 1, 3) + hits_base = sensor_base.data.ray_hits_w.torch + + # World mode: ray still hits directly below (x≈0, y≈0, z≈0) + assert abs(hits_world[0, 0, 0].item()) < 0.05, f"World mode hit x must be near 0, got {hits_world[0, 0, 0].item()}" + assert abs(hits_world[0, 0, 2].item()) < 0.05, f"World mode must hit z≈0, got {hits_world[0, 0, 2].item()}" + + # Base mode: pitch +30° around Y rotates (0,0,-1) to (-0.5, 0, -0.866). + # From height 2, the ray hits x = -2 * tan(30°) ≈ -1.155. + expected_x = -2.0 * np.tan(np.pi / 6) + assert abs(hits_base[0, 0, 0].item() - expected_x) < 0.05, ( + f"Base mode hit x should be ≈{expected_x:.3f}, got {hits_base[0, 0, 0].item():.3f}" + ) + assert abs(hits_base[0, 0, 2].item()) < 0.05, f"Base mode must hit ground (z≈0), got {hits_base[0, 0, 2].item()}" + + +@pytest.mark.isaacsim_ci +def test_yaw_alignment_direction_unchanged(sim_ground): + """In 'yaw' alignment, ray directions stay world-down despite pitch+roll. + + Setup: sensor at (0,0,2), pitched 30° and yawed 45°; pattern has a single ray + at local offset (+1, 0, 0). + + - world mode: start = sensor_pos + (1,0,0) (no rotation applied to offset) + - yaw mode: start = sensor_pos + yaw_rot(45°) @ (1,0,0) = (cos45°, sin45°, 0) + + Both modes fire the ray straight down (direction unchanged), so both hit z=0. + The hit x-coordinate differs between modes, confirming the yaw-only rotation of + start positions is applied in 'yaw' mode but not in 'world' mode. + """ + sim = sim_ground + + combined_quat = quat_from_euler_xyz( + torch.tensor([0.0]), + torch.tensor([np.pi / 6]), # 30° pitch + torch.tensor([np.pi / 4]), # 45° yaw + ) # shape (1, 4), xyzw + orientation = tuple(combined_quat[0].tolist()) + + sim_utils.create_prim("/World/SensorWorldY", "Xform", translation=(0.0, 0.0, 2.0), orientation=orientation) + sim_utils.create_prim("/World/SensorYaw", "Xform", translation=(0.0, 0.0, 2.0), orientation=orientation) + + # Use a single ray at local offset (+1, 0, 0), still pointing down + def _cfg_with_offset(prim_path, alignment): + return RayCasterCfg( + prim_path=prim_path, + mesh_prim_paths=[_GROUND_PATH], + update_period=0, + offset=RayCasterCfg.OffsetCfg(pos=(1.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + debug_vis=False, + pattern_cfg=patterns.GridPatternCfg(resolution=1.0, size=(0.0, 0.0), direction=(0.0, 0.0, -1.0)), + ray_alignment=alignment, + ) + + sensor_world = RayCaster(_cfg_with_offset("/World/SensorWorldY", "world")) + sensor_yaw = RayCaster(_cfg_with_offset("/World/SensorYaw", "yaw")) + sim.reset() + + dt = 0.01 + sensor_world.update(dt) + sensor_yaw.update(dt) + + hits_world = sensor_world.data.ray_hits_w.torch # (1, 1, 3) + hits_yaw = sensor_yaw.data.ray_hits_w.torch + + # Both modes must hit the ground (direction unchanged = straight down in both modes) + assert abs(hits_world[0, 0, 2].item()) < 0.05, "World mode must hit z≈0" + assert abs(hits_yaw[0, 0, 2].item()) < 0.05, "Yaw mode must hit z≈0 (direction straight down)" + + # world mode: offset (1,0,0) not rotated → ray starts at sensor_pos+(1,0,0) → hits x≈1 + assert abs(hits_world[0, 0, 0].item() - 1.0) < 0.05, ( + f"World mode: hit x should be ≈1.0 (unrotated offset), got {hits_world[0, 0, 0].item():.3f}" + ) + + # yaw mode: offset (1,0,0) rotated by 45° yaw → starts at sensor_pos+(cos45°, sin45°, 0) → hits x≈cos45° + expected_x_yaw = np.cos(np.pi / 4) # ≈ 0.707 + assert abs(hits_yaw[0, 0, 0].item() - expected_x_yaw) < 0.05, ( + f"Yaw mode: hit x should be ≈{expected_x_yaw:.3f} (yaw-rotated offset), got {hits_yaw[0, 0, 0].item():.3f}" + ) + # Confirm they differ — if they were the same, the test would not cover the yaw rotation + assert not torch.allclose(hits_world, hits_yaw, atol=0.1), ( + "Yaw and world modes must produce different hit positions for non-zero lateral offset" + ) + + +# ------------------------------------------------------------------- +# Reset / drift test +# ------------------------------------------------------------------- + + +@pytest.mark.isaacsim_ci +def test_ray_caster_reset_resamples_drift(sim_ground): + """reset() resamples drift values within the configured drift_range.""" + sim = sim_ground + + sim_utils.create_prim("/World/Sensor", "Xform", translation=(0.0, 0.0, 2.0)) + cfg = _ray_caster_cfg("/World/Sensor", "world") + cfg.drift_range = (0.01, 0.05) # force non-zero drift + sensor = RayCaster(cfg) + sim.reset() + # sim.reset() initializes the sensor with zero drift; call sensor.reset() to resample + # from the configured drift_range before we capture the baseline. + sensor.reset() + + dt = 0.01 + sensor.update(dt) + drift_before = sensor.drift.clone() # (1, 3) torch tensor + + lo, hi = cfg.drift_range + + # After sensor.reset(), drift should be within the configured range + assert drift_before.shape == (1, 3), f"Drift shape should be (1, 3), got {drift_before.shape}" + assert (drift_before >= lo - 1e-6).all() and (drift_before <= hi + 1e-6).all(), ( + f"Initial drift must be in [{lo}, {hi}], got [{drift_before.min():.4f}, {drift_before.max():.4f}]" + ) + + # reset() resamples drift; values should remain within the configured range + # Call reset() multiple times until we get a different sample (probability of same is near zero + # for continuous uniform distribution, but we retry to avoid flakiness). + for _ in range(5): + sensor.reset() + drift_after = sensor.drift.clone() + if not torch.allclose(drift_after, drift_before): + break + assert drift_after.shape == drift_before.shape, "Drift shape must be preserved after reset" + assert (drift_after >= lo - 1e-6).all() and (drift_after <= hi + 1e-6).all(), ( + f"Drift after reset must be in [{lo}, {hi}], got [{drift_after.min():.4f}, {drift_after.max():.4f}]" + ) + assert not torch.allclose(drift_after, drift_before), ( + "reset() must resample drift; values must change from initial sample" + ) diff --git a/source/isaaclab/test/sensors/test_sensor_base.py b/source/isaaclab/test/sensors/test_sensor_base.py index 1f41ba4ab4e5..34a865ff96f1 100644 --- a/source/isaaclab/test/sensors/test_sensor_base.py +++ b/source/isaaclab/test/sensors/test_sensor_base.py @@ -19,6 +19,7 @@ import pytest import torch +import warp as wp import isaaclab.sim as sim_utils from isaaclab.sensors import SensorBase, SensorBaseCfg @@ -46,13 +47,18 @@ def data(self): # return the data (where `_data` is the data for the sensor) return self._data - def _update_buffers_impl(self, env_ids: Sequence[int]): + def _update_buffers_impl(self, env_mask: wp.array | None = None): + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return self._data.count[env_ids] += 1 - def reset(self, env_ids: Sequence[int] | None = None): - super().reset(env_ids=env_ids) + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + super().reset(env_ids=env_ids, env_mask=env_mask) # Resolve sensor ids - if env_ids is None: + if env_ids is None and env_mask is not None: + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + elif env_ids is None: env_ids = slice(None) self._data.count[env_ids] = 0 @@ -93,7 +99,7 @@ def create_dummy_sensor(request, device): # Simulation time-step dt = 0.01 # Load kit helper - sim_cfg = sim_utils.SimulationCfg(dt=dt, device=device) + sim_cfg = sim_utils.SimulationCfg(device=device, dt=dt) sim = sim_utils.SimulationContext(sim_cfg) # create sensor @@ -105,11 +111,8 @@ def create_dummy_sensor(request, device): yield sensor_cfg, sim, dt - # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() - # clear the stage - sim.clear_all_callbacks() + # stop simulation and clean up + sim.stop() sim.clear_instance() diff --git a/source/isaaclab/test/sensors/test_tiled_camera.py b/source/isaaclab/test/sensors/test_tiled_camera.py index f160ef35df84..4ce62cd5336f 100644 --- a/source/isaaclab/test/sensors/test_tiled_camera.py +++ b/source/isaaclab/test/sensors/test_tiled_camera.py @@ -6,6 +6,14 @@ # ignore private usage of variables warning # pyright: reportPrivateUsage=none +"""Tests for the deprecated TiledCamera / TiledCameraCfg aliases. + +TiledCamera is now a thin subclass of Camera that emits a DeprecationWarning. +All substantive Camera tests live in ``test_camera.py``. This file only verifies +that the deprecation mechanism works correctly and that TiledCamera remains a +functional Camera alias. +""" + """Launch Isaac Sim Simulator first.""" from isaaclab.app import AppLauncher @@ -15,30 +23,27 @@ """Rest everything follows.""" -import copy import random +import warnings import numpy as np import pytest import torch import omni.replicator.core as rep -from isaacsim.core.prims import SingleGeometryPrim, SingleRigidPrim from pxr import Gf, UsdGeom import isaaclab.sim as sim_utils from isaaclab.sensors.camera import Camera, CameraCfg, TiledCamera, TiledCameraCfg -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.timer import Timer @pytest.fixture(scope="function") -def setup_camera(device) -> tuple[sim_utils.SimulationContext, TiledCameraCfg, float]: +def setup_camera(device) -> tuple[sim_utils.SimulationContext, CameraCfg, float]: """Fixture to set up and tear down the camera simulation environment.""" - camera_cfg = TiledCameraCfg( + camera_cfg = CameraCfg( height=128, width=256, - offset=TiledCameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 0.0, 1.0, 0.0), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 1.0, 0.0, 0.0), convention="ros"), prim_path="/World/Camera", update_period=0, data_types=["rgb", "distance_to_camera"], @@ -60,223 +65,79 @@ def setup_camera(device) -> tuple[sim_utils.SimulationContext, TiledCameraCfg, f yield sim, camera_cfg, dt # Teardown rep.vp_manager.destroy_hydra_textures("Replicator") - sim._timeline.stop() - sim.clear_all_callbacks() + sim.stop() sim.clear_instance() @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_single_camera_init(setup_camera, device): - """Test single camera initialization.""" +def test_tiled_camera_deprecation_warning(setup_camera, device): + """TiledCamera instantiation emits a DeprecationWarning.""" sim, camera_cfg, dt = setup_camera - # Create camera - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[0].GetPath().pathString == camera_cfg.prim_path - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (1, 3) - assert camera.data.quat_w_ros.shape == (1, 4) - assert camera.data.quat_w_world.shape == (1, 4) - assert camera.data.quat_w_opengl.shape == (1, 4) - assert camera.data.intrinsic_matrices.shape == (1, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for im_type, im_data in camera.data.output.items(): - if im_type == "rgb": - assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 3) - assert (im_data / 255.0).mean() > 0.0 - elif im_type == "distance_to_camera": - assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1) - assert im_data.mean() > 0.0 - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_depth_clipping_max(setup_camera, device): - """Test depth max clipping.""" - sim, _, dt = setup_camera - # get camera cfgs - camera_cfg = TiledCameraCfg( - prim_path="/World/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"), - spawn=sim_utils.PinholeCameraCfg().from_intrinsic_matrix( - focal_length=38.0, - intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], - height=540, - width=960, - clipping_range=(4.9, 5.0), - ), - height=540, - width=960, - data_types=["depth"], - depth_clipping_behavior="max", - ) - camera = TiledCamera(camera_cfg) - - # Play sim - sim.reset() - - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - camera.update(dt) - - assert len(camera.data.output["depth"][torch.isinf(camera.data.output["depth"])]) == 0 - assert camera.data.output["depth"].min() >= camera_cfg.spawn.clipping_range[0] - assert camera.data.output["depth"].max() <= camera_cfg.spawn.clipping_range[1] - + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + camera = TiledCamera(camera_cfg) + deprecation_warnings = [x for x in w if issubclass(x.category, DeprecationWarning)] + assert len(deprecation_warnings) >= 1 + assert "TiledCamera is deprecated" in str(deprecation_warnings[0].message) del camera @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_clipping_none(setup_camera, device): - """Test depth none clipping.""" - sim, _, dt = setup_camera - # get camera cfgs - camera_cfg = TiledCameraCfg( - prim_path="/World/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"), - spawn=sim_utils.PinholeCameraCfg().from_intrinsic_matrix( - focal_length=38.0, - intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], - height=540, - width=960, - clipping_range=(4.9, 5.0), - ), - height=540, - width=960, - data_types=["depth"], - depth_clipping_behavior="none", - ) - camera = TiledCamera(camera_cfg) - - # Play sim - sim.reset() - - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - camera.update(dt) - - assert len(camera.data.output["depth"][torch.isinf(camera.data.output["depth"])]) > 0 - assert camera.data.output["depth"].min() >= camera_cfg.spawn.clipping_range[0] - if len(camera.data.output["depth"][~torch.isinf(camera.data.output["depth"])]) > 0: - assert ( - camera.data.output["depth"][~torch.isinf(camera.data.output["depth"])].max() - <= camera_cfg.spawn.clipping_range[1] +def test_tiled_camera_cfg_deprecation_warning(setup_camera, device): + """TiledCameraCfg instantiation emits a DeprecationWarning.""" + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _cfg = TiledCameraCfg( + height=128, + width=256, + prim_path="/World/Camera", + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) + ), ) - - del camera + deprecation_warnings = [x for x in w if issubclass(x.category, DeprecationWarning)] + assert len(deprecation_warnings) >= 1 + assert "TiledCameraCfg is deprecated" in str(deprecation_warnings[0].message) +@pytest.mark.filterwarnings("ignore::DeprecationWarning") @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_depth_clipping_zero(setup_camera, device): - """Test depth zero clipping.""" - sim, _, dt = setup_camera - # get camera cfgs - camera_cfg = TiledCameraCfg( - prim_path="/World/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(2.5, 2.5, 6.0), rot=(-0.125, 0.362, 0.873, -0.302), convention="ros"), - spawn=sim_utils.PinholeCameraCfg().from_intrinsic_matrix( - focal_length=38.0, - intrinsic_matrix=[380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0], - height=540, - width=960, - clipping_range=(4.9, 5.0), - ), - height=540, - width=960, - data_types=["depth"], - depth_clipping_behavior="zero", - ) +def test_tiled_camera_is_camera_subclass(setup_camera, device): + """TiledCamera is a subclass of Camera, so isinstance checks work.""" + sim, camera_cfg, dt = setup_camera camera = TiledCamera(camera_cfg) - - # Play sim - sim.reset() - - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - camera.update(dt) - - assert len(camera.data.output["depth"][torch.isinf(camera.data.output["depth"])]) == 0 - assert camera.data.output["depth"].min() == 0.0 - assert camera.data.output["depth"].max() <= camera_cfg.spawn.clipping_range[1] - + assert isinstance(camera, Camera) + assert isinstance(camera, TiledCamera) del camera +@pytest.mark.filterwarnings("ignore::DeprecationWarning") @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci -def test_multi_camera_init(setup_camera, device): - """Test multi-camera initialization.""" +def test_tiled_camera_basic_functionality(setup_camera, device): + """TiledCamera produces correct output (proving it delegates to Camera).""" sim, camera_cfg, dt = setup_camera - - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() # Play sim sim.reset() # Check if camera is initialized assert camera.is_initialized # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" + assert camera._sensor_prims[0].GetPath().pathString == camera_cfg.prim_path assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) + assert camera.data.pos_w.shape == (1, 3) + assert camera.data.intrinsic_matrices.shape == (1, 3, 3) assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) # Simulate physics - for _ in range(10): + for _ in range(5): # perform rendering sim.step() # update camera @@ -284,1448 +145,19 @@ def test_multi_camera_init(setup_camera, device): # check image data for im_type, im_data in camera.data.output.items(): if im_type == "rgb": - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - for i in range(4): - assert (im_data[i] / 255.0).mean() > 0.0 + assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 3) + assert (im_data / 255.0).mean() > 0.0 elif im_type == "distance_to_camera": - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(4): - assert im_data[i].mean() > 0.0 - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_rgb_only_camera(setup_camera, device): - """Test initialization with only RGB data type.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["rgb"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["rgba", "rgb"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - im_data = camera.data.output["rgb"] - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - for i in range(4): - assert (im_data[i] / 255.0).mean() > 0.0 - assert camera.data.output["rgb"].dtype == torch.uint8 - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_data_types(setup_camera, device): - """Test different data types for camera initialization.""" - sim, camera_cfg, dt = setup_camera - # Create camera - camera_cfg_distance = copy.deepcopy(camera_cfg) - camera_cfg_distance.data_types = ["distance_to_camera"] - camera_cfg_distance.prim_path = "/World/CameraDistance" - camera_distance = TiledCamera(camera_cfg_distance) - camera_cfg_depth = copy.deepcopy(camera_cfg) - camera_cfg_depth.data_types = ["depth"] - camera_cfg_depth.prim_path = "/World/CameraDepth" - camera_depth = TiledCamera(camera_cfg_depth) - camera_cfg_both = copy.deepcopy(camera_cfg) - camera_cfg_both.data_types = ["distance_to_camera", "depth"] - camera_cfg_both.prim_path = "/World/CameraBoth" - camera_both = TiledCamera(camera_cfg_both) - - # Play sim - sim.reset() - - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check if cameras are initialized - assert camera_distance.is_initialized - assert camera_depth.is_initialized - assert camera_both.is_initialized - - # Check if camera prims are set correctly and that they are camera prims - assert camera_distance._sensor_prims[0].GetPath().pathString == "/World/CameraDistance" - assert isinstance(camera_distance._sensor_prims[0], UsdGeom.Camera) - assert camera_depth._sensor_prims[0].GetPath().pathString == "/World/CameraDepth" - assert isinstance(camera_depth._sensor_prims[0], UsdGeom.Camera) - assert camera_both._sensor_prims[0].GetPath().pathString == "/World/CameraBoth" - assert isinstance(camera_both._sensor_prims[0], UsdGeom.Camera) - assert list(camera_distance.data.output.keys()) == ["distance_to_camera"] - assert list(camera_depth.data.output.keys()) == ["depth"] - assert list(camera_both.data.output.keys()) == ["depth", "distance_to_camera"] - - del camera_distance - del camera_depth - del camera_both - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_depth_only_camera(setup_camera, device): - """Test initialization with only depth.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["distance_to_camera"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["distance_to_camera"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - im_data = camera.data.output["distance_to_camera"] - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(4): - assert im_data[i].mean() > 0.0 - assert camera.data.output["distance_to_camera"].dtype == torch.float - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_rgba_only_camera(setup_camera, device): - """Test initialization with only RGBA.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["rgba"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["rgba"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(4): - assert (im_data[i] / 255.0).mean() > 0.0 - assert camera.data.output["rgba"].dtype == torch.uint8 - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_distance_to_camera_only_camera(setup_camera, device): - """Test initialization with only distance_to_camera.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["distance_to_camera"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["distance_to_camera"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(4): - assert im_data[i].mean() > 0.0 - assert camera.data.output["distance_to_camera"].dtype == torch.float - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_distance_to_image_plane_only_camera(setup_camera, device): - """Test initialization with only distance_to_image_plane.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["distance_to_image_plane"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["distance_to_image_plane"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(4): - assert im_data[i].mean() > 0.0 - assert camera.data.output["distance_to_image_plane"].dtype == torch.float + assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1) + assert im_data.mean() > 0.0 del camera -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_normals_only_camera(setup_camera, device): - """Test initialization with only normals.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["normals"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["normals"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - for i in range(4): - assert im_data[i].mean() > 0.0 - # check normal norm is approximately 1 - norms = torch.norm(im_data, dim=-1) - assert torch.allclose(norms, torch.ones_like(norms), atol=1e-9) - assert camera.data.output["normals"].dtype == torch.float - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_motion_vectors_only_camera(setup_camera, device): - """Test initialization with only motion_vectors.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["motion_vectors"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["motion_vectors"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) - for i in range(4): - assert im_data[i].mean() != 0.0 - assert camera.data.output["motion_vectors"].dtype == torch.float - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_semantic_segmentation_colorize_only_camera(setup_camera, device): - """Test initialization with only semantic_segmentation.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["semantic_segmentation"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["semantic_segmentation"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(4): - assert (im_data[i] / 255.0).mean() > 0.0 - assert camera.data.output["semantic_segmentation"].dtype == torch.uint8 - assert isinstance(camera.data.info["semantic_segmentation"], dict) - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_instance_segmentation_fast_colorize_only_camera(setup_camera, device): - """Test initialization with only instance_segmentation_fast.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["instance_segmentation_fast"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["instance_segmentation_fast"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.0 - assert camera.data.output["instance_segmentation_fast"].dtype == torch.uint8 - assert isinstance(camera.data.info["instance_segmentation_fast"], dict) - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_instance_id_segmentation_fast_colorize_only_camera(setup_camera, device): - """Test initialization with only instance_id_segmentation_fast.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["instance_id_segmentation_fast"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["instance_id_segmentation_fast"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.0 - assert camera.data.output["instance_id_segmentation_fast"].dtype == torch.uint8 - assert isinstance(camera.data.info["instance_id_segmentation_fast"], dict) - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_semantic_segmentation_non_colorize_only_camera(setup_camera, device): - """Test initialization with only semantic_segmentation.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["semantic_segmentation"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera_cfg.colorize_semantic_segmentation = False - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["semantic_segmentation"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].to(dtype=float).mean() > 0.0 - assert camera.data.output["semantic_segmentation"].dtype == torch.int32 - assert isinstance(camera.data.info["semantic_segmentation"], dict) - - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_instance_segmentation_fast_non_colorize_only_camera(setup_camera, device): - """Test initialization with only instance_segmentation_fast.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["instance_segmentation_fast"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera_cfg.colorize_instance_segmentation = False - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["instance_segmentation_fast"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].to(dtype=float).mean() > 0.0 - assert camera.data.output["instance_segmentation_fast"].dtype == torch.int32 - assert isinstance(camera.data.info["instance_segmentation_fast"], dict) - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -def test_instance_id_segmentation_fast_non_colorize_only_camera(setup_camera, device): - """Test initialization with only instance_id_segmentation_fast.""" - sim, camera_cfg, dt = setup_camera - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = ["instance_id_segmentation_fast"] - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera_cfg.colorize_instance_id_segmentation = False - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert list(camera.data.output.keys()) == ["instance_id_segmentation_fast"] - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for _, im_data in camera.data.output.items(): - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].to(dtype=float).mean() > 0.0 - assert camera.data.output["instance_id_segmentation_fast"].dtype == torch.int32 - assert isinstance(camera.data.info["instance_id_segmentation_fast"], dict) - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_all_annotators_camera(setup_camera, device): - """Test initialization with all supported annotators.""" - sim, camera_cfg, dt = setup_camera - all_annotator_types = [ - "rgb", - "rgba", - "depth", - "distance_to_camera", - "distance_to_image_plane", - "normals", - "motion_vectors", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ] - - num_cameras = 9 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = all_annotator_types - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for data_type, im_data in camera.data.output.items(): - if data_type in ["rgb", "normals"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - elif data_type in [ - "rgba", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.0 - elif data_type in ["motion_vectors"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) - for i in range(num_cameras): - assert im_data[i].mean() != 0.0 - elif data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].mean() > 0.0 - - # access image data and compare dtype - output = camera.data.output - info = camera.data.info - assert output["rgb"].dtype == torch.uint8 - assert output["rgba"].dtype == torch.uint8 - assert output["depth"].dtype == torch.float - assert output["distance_to_camera"].dtype == torch.float - assert output["distance_to_image_plane"].dtype == torch.float - assert output["normals"].dtype == torch.float - assert output["motion_vectors"].dtype == torch.float - assert output["semantic_segmentation"].dtype == torch.uint8 - assert output["instance_segmentation_fast"].dtype == torch.uint8 - assert output["instance_id_segmentation_fast"].dtype == torch.uint8 - assert isinstance(info["semantic_segmentation"], dict) - assert isinstance(info["instance_segmentation_fast"], dict) - assert isinstance(info["instance_id_segmentation_fast"], dict) - - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_all_annotators_low_resolution_camera(setup_camera, device): - """Test initialization with all supported annotators.""" - sim, camera_cfg, dt = setup_camera - all_annotator_types = [ - "rgb", - "rgba", - "depth", - "distance_to_camera", - "distance_to_image_plane", - "normals", - "motion_vectors", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ] - - num_cameras = 2 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.height = 40 - camera_cfg.width = 40 - camera_cfg.data_types = all_annotator_types - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for data_type, im_data in camera.data.output.items(): - if data_type in ["rgb", "normals"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - elif data_type in [ - "rgba", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.0 - elif data_type in ["motion_vectors"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) - for i in range(num_cameras): - assert im_data[i].mean() != 0.0 - elif data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].mean() > 0.0 - - # access image data and compare dtype - output = camera.data.output - info = camera.data.info - assert output["rgb"].dtype == torch.uint8 - assert output["rgba"].dtype == torch.uint8 - assert output["depth"].dtype == torch.float - assert output["distance_to_camera"].dtype == torch.float - assert output["distance_to_image_plane"].dtype == torch.float - assert output["normals"].dtype == torch.float - assert output["motion_vectors"].dtype == torch.float - assert output["semantic_segmentation"].dtype == torch.uint8 - assert output["instance_segmentation_fast"].dtype == torch.uint8 - assert output["instance_id_segmentation_fast"].dtype == torch.uint8 - assert isinstance(info["semantic_segmentation"], dict) - assert isinstance(info["instance_segmentation_fast"], dict) - assert isinstance(info["instance_id_segmentation_fast"], dict) - - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_all_annotators_non_perfect_square_number_camera(setup_camera, device): - """Test initialization with all supported annotators.""" - sim, camera_cfg, dt = setup_camera - all_annotator_types = [ - "rgb", - "rgba", - "depth", - "distance_to_camera", - "distance_to_image_plane", - "normals", - "motion_vectors", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ] - - num_cameras = 11 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.data_types = all_annotator_types - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate physics - for _ in range(10): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for data_type, im_data in camera.data.output.items(): - if data_type in ["rgb", "normals"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - elif data_type in [ - "rgba", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.0 - elif data_type in ["motion_vectors"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) - for i in range(num_cameras): - assert im_data[i].mean() != 0.0 - elif data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].mean() > 0.0 - - # access image data and compare dtype - output = camera.data.output - info = camera.data.info - assert output["rgb"].dtype == torch.uint8 - assert output["rgba"].dtype == torch.uint8 - assert output["depth"].dtype == torch.float - assert output["distance_to_camera"].dtype == torch.float - assert output["distance_to_image_plane"].dtype == torch.float - assert output["normals"].dtype == torch.float - assert output["motion_vectors"].dtype == torch.float - assert output["semantic_segmentation"].dtype == torch.uint8 - assert output["instance_segmentation_fast"].dtype == torch.uint8 - assert output["instance_id_segmentation_fast"].dtype == torch.uint8 - assert isinstance(info["semantic_segmentation"], dict) - assert isinstance(info["instance_segmentation_fast"], dict) - assert isinstance(info["instance_id_segmentation_fast"], dict) - - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_all_annotators_instanceable(setup_camera, device): - """Test initialization with all supported annotators on instanceable assets.""" - sim, camera_cfg, dt = setup_camera - all_annotator_types = [ - "rgb", - "rgba", - "depth", - "distance_to_camera", - "distance_to_image_plane", - "normals", - "motion_vectors", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ] - - num_cameras = 10 - for i in range(num_cameras): - sim_utils.create_prim(f"/World/Origin_{i}", "Xform", translation=(0.0, i, 0.0)) - - # Create a stage with 10 instanceable cubes, where each camera points to one cube - stage = sim_utils.get_current_stage() - for i in range(10): - # Remove objects added to stage by default - stage.RemovePrim(f"/World/Objects/Obj_{i:02d}") - # Add instanceable cubes - sim_utils.create_prim( - f"/World/Cube_{i}", - "Xform", - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", - translation=(0.0, i, 5.0), - orientation=(1.0, 0.0, 0.0, 0.0), - scale=(5.0, 5.0, 5.0), - ) - prim = stage.GetPrimAtPath(f"/World/Cube_{i}") - sim_utils.add_labels(prim, labels=["cube"], instance_name="class") - - # Create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.height = 120 - camera_cfg.width = 80 - camera_cfg.data_types = all_annotator_types - camera_cfg.prim_path = "/World/Origin_.*/CameraSensor" - camera_cfg.offset.pos = (0.0, 0.0, 5.5) - camera = TiledCamera(camera_cfg) - # Check simulation parameter is set correctly - assert sim.has_rtx_sensors() - # Play sim - sim.reset() - # Check if camera is initialized - assert camera.is_initialized - # Check if camera prim is set correctly and that it is a camera prim - assert camera._sensor_prims[1].GetPath().pathString == "/World/Origin_1/CameraSensor" - assert isinstance(camera._sensor_prims[0], UsdGeom.Camera) - assert sorted(camera.data.output.keys()) == sorted(all_annotator_types) - - # Check buffers that exists and have correct shapes - assert camera.data.pos_w.shape == (num_cameras, 3) - assert camera.data.quat_w_ros.shape == (num_cameras, 4) - assert camera.data.quat_w_world.shape == (num_cameras, 4) - assert camera.data.quat_w_opengl.shape == (num_cameras, 4) - assert camera.data.intrinsic_matrices.shape == (num_cameras, 3, 3) - assert camera.data.image_shape == (camera_cfg.height, camera_cfg.width) - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Simulate physics - for _ in range(2): - # perform rendering - sim.step() - # update camera - camera.update(dt) - # check image data - for data_type, im_data in camera.data.output.items(): - if data_type in ["rgb", "normals"]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 3) - elif data_type in [ - "rgba", - "semantic_segmentation", - "instance_segmentation_fast", - "instance_id_segmentation_fast", - ]: - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 4) - # semantic_segmentation has mean 0.43 - # rgba has mean 0.38 - # instance_segmentation_fast has mean 0.42 - # instance_id_segmentation_fast has mean 0.55-0.62 - for i in range(num_cameras): - assert (im_data[i] / 255.0).mean() > 0.2 - elif data_type in ["motion_vectors"]: - # motion vectors have mean 0.2 - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 2) - for i in range(num_cameras): - assert (im_data[i].abs().mean()) > 0.15 - elif data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: - # depth has mean 2.7 - # distance_to_image_plane has mean 3.1 - assert im_data.shape == (num_cameras, camera_cfg.height, camera_cfg.width, 1) - for i in range(num_cameras): - assert im_data[i].mean() > 2.5 - - # access image data and compare dtype - output = camera.data.output - info = camera.data.info - assert output["rgb"].dtype == torch.uint8 - assert output["rgba"].dtype == torch.uint8 - assert output["depth"].dtype == torch.float - assert output["distance_to_camera"].dtype == torch.float - assert output["distance_to_image_plane"].dtype == torch.float - assert output["normals"].dtype == torch.float - assert output["motion_vectors"].dtype == torch.float - assert output["semantic_segmentation"].dtype == torch.uint8 - assert output["instance_segmentation_fast"].dtype == torch.uint8 - assert output["instance_id_segmentation_fast"].dtype == torch.uint8 - assert isinstance(info["semantic_segmentation"], dict) - assert isinstance(info["instance_segmentation_fast"], dict) - assert isinstance(info["instance_id_segmentation_fast"], dict) - - del camera - - -@pytest.mark.parametrize("device", ["cuda:0"]) -@pytest.mark.isaacsim_ci -def test_throughput(setup_camera, device): - """Test tiled camera throughput.""" - sim, camera_cfg, dt = setup_camera - # create camera - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.height = 480 - camera_cfg.width = 640 - camera = TiledCamera(camera_cfg) - - # Play simulator - sim.reset() - - # Simulate for a few steps - # note: This is a workaround to ensure that the textures are loaded. - # Check "Known Issues" section in the documentation for more details. - for _ in range(5): - sim.step() - - # Simulate physics - for _ in range(5): - # perform rendering - sim.step() - # update camera - with Timer(f"Time taken for updating camera with shape {camera.image_shape}"): - camera.update(dt) - # Check image data - for im_type, im_data in camera.data.output.items(): - if im_type == "rgb": - assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 3) - assert (im_data / 255.0).mean() > 0.0 - elif im_type == "distance_to_camera": - assert im_data.shape == (1, camera_cfg.height, camera_cfg.width, 1) - assert im_data.mean() > 0.0 - del camera - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_output_equal_to_usd_camera_intrinsics(setup_camera, device): - """ - Test that the output of the ray caster camera and the usd camera are the same when both are - initialized with the same intrinsic matrix. - """ - sim, _, dt = setup_camera - # create cameras - offset_rot = (-0.1251, 0.3617, 0.8731, -0.3020) - offset_pos = (2.5, 2.5, 4.0) - intrinsics = [380.08, 0.0, 467.79, 0.0, 380.08, 262.05, 0.0, 0.0, 1.0] - # get camera cfgs - # TODO: add clipping range back, once correctly supported by tiled camera - camera_tiled_cfg = TiledCameraCfg( - prim_path="/World/Camera_tiled", - offset=TiledCameraCfg.OffsetCfg(pos=offset_pos, rot=offset_rot, convention="ros"), - spawn=sim_utils.PinholeCameraCfg.from_intrinsic_matrix( - intrinsic_matrix=intrinsics, - height=540, - width=960, - ), - height=540, - width=960, - data_types=["depth"], - ) - camera_usd_cfg = CameraCfg( - prim_path="/World/Camera_usd", - offset=CameraCfg.OffsetCfg(pos=offset_pos, rot=offset_rot, convention="ros"), - spawn=sim_utils.PinholeCameraCfg.from_intrinsic_matrix( - intrinsic_matrix=intrinsics, - height=540, - width=960, - ), - height=540, - width=960, - data_types=["distance_to_image_plane"], - ) - - # set aperture offsets to 0, as currently not supported for usd/ tiled camera - camera_tiled_cfg.spawn.horizontal_aperture_offset = 0 - camera_tiled_cfg.spawn.vertical_aperture_offset = 0 - camera_usd_cfg.spawn.horizontal_aperture_offset = 0 - camera_usd_cfg.spawn.vertical_aperture_offset = 0 - # init cameras - camera_tiled = TiledCamera(camera_tiled_cfg) - camera_usd = Camera(camera_usd_cfg) - - # play sim - sim.reset() - sim.play() - - # perform steps - for _ in range(5): - sim.step() - - # update camera - camera_usd.update(dt) - camera_tiled.update(dt) - - # filter nan and inf from output - cam_tiled_output = camera_tiled.data.output["depth"].clone() - cam_usd_output = camera_usd.data.output["distance_to_image_plane"].clone() - cam_tiled_output[torch.isnan(cam_tiled_output)] = 0 - cam_tiled_output[torch.isinf(cam_tiled_output)] = 0 - cam_usd_output[torch.isnan(cam_usd_output)] = 0 - cam_usd_output[torch.isinf(cam_usd_output)] = 0 - - # check that both have the same intrinsic matrices - torch.testing.assert_close(camera_tiled.data.intrinsic_matrices[0], camera_usd.data.intrinsic_matrices[0]) - - # check the apertures - torch.testing.assert_close( - camera_usd._sensor_prims[0].GetHorizontalApertureAttr().Get(), - camera_tiled._sensor_prims[0].GetHorizontalApertureAttr().Get(), - ) - torch.testing.assert_close( - camera_usd._sensor_prims[0].GetVerticalApertureAttr().Get(), - camera_tiled._sensor_prims[0].GetVerticalApertureAttr().Get(), - ) - - # check image data - torch.testing.assert_close( - cam_tiled_output[..., 0], - cam_usd_output[..., 0], - atol=5e-5, - rtol=5e-6, - ) - - del camera_tiled - del camera_usd - - -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_sensor_print(setup_camera, device): - """Test sensor print is working correctly.""" - sim, camera_cfg, _ = setup_camera - # Create sensor - sensor = TiledCamera(cfg=camera_cfg) - # Play sim - sim.reset() - # print info - print(sensor) - - -@pytest.mark.parametrize("device", ["cuda:0"]) -@pytest.mark.isaacsim_ci -def test_frame_offset_small_resolution(setup_camera, device): - """Test frame offset issue with small resolution camera.""" - sim, camera_cfg, dt = setup_camera - # Create sensor - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.height = 80 - camera_cfg.width = 80 - camera_cfg.offset.pos = (0.0, 0.0, 0.5) - tiled_camera = TiledCamera(camera_cfg) - # play sim - sim.reset() - # simulate some steps first to make sure objects are settled - stage = sim_utils.get_current_stage() - for i in range(10): - prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") - UsdGeom.Gprim(prim).GetOrderedXformOps()[2].Set(Gf.Vec3d(1.0, 1.0, 1.0)) - for i in range(100): - # step simulation - sim.step() - # update camera - tiled_camera.update(dt) - # collect image data - image_before = tiled_camera.data.output["rgb"].clone() / 255.0 - - # update scene - for i in range(10): - prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") - color = Gf.Vec3f(0, 0, 0) - UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([color]) - - # update rendering - sim.step() - # update camera - tiled_camera.update(dt) - - # make sure the image is different - image_after = tiled_camera.data.output["rgb"].clone() / 255.0 - - # check difference is above threshold - assert torch.abs(image_after - image_before).mean() > 0.1 # images of same color should be below 0.01 - - -@pytest.mark.parametrize("device", ["cuda:0"]) -@pytest.mark.isaacsim_ci -def test_frame_offset_large_resolution(setup_camera, device): - """Test frame offset issue with large resolution camera.""" - sim, camera_cfg, dt = setup_camera - # Create sensor - camera_cfg = copy.deepcopy(camera_cfg) - camera_cfg.height = 480 - camera_cfg.width = 480 - tiled_camera = TiledCamera(camera_cfg) - - # modify scene to be less stochastic - stage = sim_utils.get_current_stage() - for i in range(10): - prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") - color = Gf.Vec3f(1, 1, 1) - UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([color]) - - # play sim - sim.reset() - # simulate some steps first to make sure objects are settled - for i in range(100): - # step simulation - sim.step() - # update camera - tiled_camera.update(dt) - # collect image data - image_before = tiled_camera.data.output["rgb"].clone() / 255.0 - - # update scene - for i in range(10): - prim = stage.GetPrimAtPath(f"/World/Objects/Obj_{i:02d}") - color = Gf.Vec3f(0, 0, 0) - UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([color]) - - # update rendering - sim.step() - # update camera - tiled_camera.update(dt) - - # make sure the image is different - image_after = tiled_camera.data.output["rgb"].clone() / 255.0 - - # check difference is above threshold - assert torch.abs(image_after - image_before).mean() > 0.01 # images of same color should be below 0.001 - - """ Helper functions. """ -@staticmethod def _populate_scene(): """Add prims to the scene.""" # Ground-plane @@ -1758,6 +190,8 @@ def _populate_scene(): color = Gf.Vec3f(random.random(), random.random(), random.random()) geom_prim.CreateDisplayColorAttr() geom_prim.GetDisplayColorAttr().Set([color]) - # add rigid properties - SingleGeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True) - SingleRigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0) + # add rigid body and collision properties using Isaac Lab schemas + prim_path = f"/World/Objects/Obj_{i:02d}" + sim_utils.define_rigid_body_properties(prim_path, sim_utils.RigidBodyPropertiesCfg()) + sim_utils.define_mass_properties(prim_path, sim_utils.MassPropertiesCfg(mass=5.0)) + sim_utils.define_collision_properties(prim_path, sim_utils.CollisionPropertiesCfg()) diff --git a/source/isaaclab/test/sensors/test_tiled_camera_env.py b/source/isaaclab/test/sensors/test_tiled_camera_env.py index 5c4d33f6a58e..c2d2982d336f 100644 --- a/source/isaaclab/test/sensors/test_tiled_camera_env.py +++ b/source/isaaclab/test/sensors/test_tiled_camera_env.py @@ -34,14 +34,15 @@ import gymnasium as gym import pytest -import omni.usd - +import isaaclab.sim as sim_utils from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg, ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.sensors import save_images_to_file import isaaclab_tasks # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg +pytestmark = pytest.mark.isaacsim_ci + @pytest.mark.skip(reason="Currently takes too long to run") def test_tiled_resolutions_tiny(): @@ -109,7 +110,7 @@ def _launch_tests(tile_widths: range, tile_heights: range, num_envs: int): for width in tile_widths: for height in tile_heights: # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # parse configuration env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) env_cfg.tiled_camera.width = width diff --git a/source/isaaclab/test/sensors/test_update_ray_caster_kernel.py b/source/isaaclab/test/sensors/test_update_ray_caster_kernel.py new file mode 100644 index 000000000000..65402518b7a0 --- /dev/null +++ b/source/isaaclab/test/sensors/test_update_ray_caster_kernel.py @@ -0,0 +1,510 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for :func:`update_ray_caster_kernel`. + +These tests exercise the kernel directly with hand-crafted warp arrays and +analytically computed expected outputs. No simulation, no stage, no AppLauncher +— just warp on CPU (or CUDA when available). +""" + +from __future__ import annotations + +import importlib.util +import math +import os + +import numpy as np +import pytest +import torch +import warp as wp + +# Import the kernel module directly to avoid pulling in the full isaaclab package +# (which requires Isaac Sim / Omniverse dependencies). The kernel file itself only +# depends on warp. +_KERNEL_PATH = os.path.join( + os.path.dirname(__file__), + os.pardir, + os.pardir, + "isaaclab", + "sensors", + "ray_caster", + "kernels.py", +) +_spec = importlib.util.spec_from_file_location("ray_caster_kernels", os.path.normpath(_KERNEL_PATH)) +_mod = importlib.util.module_from_spec(_spec) +_spec.loader.exec_module(_mod) + +update_ray_caster_kernel = _mod.update_ray_caster_kernel +ALIGNMENT_WORLD = _mod.ALIGNMENT_WORLD +ALIGNMENT_YAW = _mod.ALIGNMENT_YAW +ALIGNMENT_BASE = _mod.ALIGNMENT_BASE + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +wp.init() +DEVICE = "cuda:0" if wp.is_cuda_available() else "cpu" +TORCH_DEVICE = torch.device(DEVICE) +ATOL = 1e-5 + + +def _make_transform(pos: tuple[float, float, float], quat_xyzw: tuple[float, float, float, float]) -> wp.array: + """Create a warp transformf array (1,) from position and xyzw quaternion.""" + t = torch.tensor([[pos[0], pos[1], pos[2], quat_xyzw[0], quat_xyzw[1], quat_xyzw[2], quat_xyzw[3]]], device=DEVICE) + return wp.from_torch(t.contiguous()).view(wp.transformf) + + +def _identity_quat() -> tuple[float, float, float, float]: + """Return identity quaternion in xyzw.""" + return (0.0, 0.0, 0.0, 1.0) + + +def _yaw_quat(yaw_rad: float) -> tuple[float, float, float, float]: + """Pure yaw quaternion in xyzw.""" + return (0.0, 0.0, math.sin(yaw_rad / 2), math.cos(yaw_rad / 2)) + + +def _euler_to_quat_xyzw(roll: float, pitch: float, yaw: float) -> tuple[float, float, float, float]: + """Euler angles (intrinsic XYZ) to quaternion in xyzw convention.""" + q = torch.zeros(1, 4) + cr, sr = math.cos(roll / 2), math.sin(roll / 2) + cp, sp = math.cos(pitch / 2), math.sin(pitch / 2) + cy, sy = math.cos(yaw / 2), math.sin(yaw / 2) + # xyzw + q[0, 0] = sr * cp * cy - cr * sp * sy + q[0, 1] = cr * sp * cy + sr * cp * sy + q[0, 2] = cr * cp * sy - sr * sp * cy + q[0, 3] = cr * cp * cy + sr * sp * sy + return tuple(q[0].tolist()) + + +def _quat_rotate(q_xyzw: tuple, v: tuple) -> np.ndarray: + """Rotate vector v by quaternion q (xyzw) using numpy.""" + qx, qy, qz, qw = q_xyzw + # quaternion rotation: v' = q * v * q^-1 + # Using the formula: v' = v + 2*w*(w×v) + 2*(q_vec × (q_vec × v + w*v)) + # Simpler: v' = v + 2w(q×v) + 2(q×(q×v)) + q_vec = np.array([qx, qy, qz]) + v = np.array(v) + t = 2.0 * np.cross(q_vec, v) + return v + qw * t + np.cross(q_vec, t) + + +def _launch_kernel( + transforms: wp.array, + env_mask: wp.array, + offset_pos: wp.array, + offset_quat: wp.array, + drift: wp.array, + ray_cast_drift: wp.array, + ray_starts_local: wp.array, + ray_directions_local: wp.array, + alignment_mode: int, + num_envs: int, + num_rays: int, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """Launch the kernel and return (pos_w, quat_w, ray_starts_w, ray_directions_w) as numpy arrays.""" + pos_w = wp.zeros(num_envs, dtype=wp.vec3f, device=DEVICE) + quat_w = wp.zeros(num_envs, dtype=wp.quatf, device=DEVICE) + ray_starts_w = wp.zeros((num_envs, num_rays), dtype=wp.vec3f, device=DEVICE) + ray_directions_w = wp.zeros((num_envs, num_rays), dtype=wp.vec3f, device=DEVICE) + + wp.launch( + update_ray_caster_kernel, + dim=(num_envs, num_rays), + inputs=[ + transforms, + env_mask, + offset_pos, + offset_quat, + drift, + ray_cast_drift, + ray_starts_local, + ray_directions_local, + alignment_mode, + ], + outputs=[pos_w, quat_w, ray_starts_w, ray_directions_w], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + + return ( + wp.to_torch(pos_w).cpu().numpy(), + wp.to_torch(quat_w).cpu().numpy(), + wp.to_torch(ray_starts_w).cpu().numpy(), + wp.to_torch(ray_directions_w).cpu().numpy(), + ) + + +def _make_inputs( + view_pos=(0.0, 0.0, 0.0), + view_quat=None, + offset_pos=(0.0, 0.0, 0.0), + offset_quat=None, + drift=(0.0, 0.0, 0.0), + ray_cast_drift=(0.0, 0.0, 0.0), + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + num_envs=1, +): + """Build all kernel input arrays for a single-ray, single (or multi)-env scenario.""" + if view_quat is None: + view_quat = _identity_quat() + if offset_quat is None: + offset_quat = _identity_quat() + + transforms = _make_transform(view_pos, view_quat) + if num_envs > 1: + # Replicate the same transform for all envs + t_torch = wp.to_torch(transforms).repeat(num_envs, 1) + transforms = wp.from_torch(t_torch.contiguous()).view(wp.transformf) + + mask_t = torch.ones(num_envs, dtype=torch.bool, device=TORCH_DEVICE) + env_mask = wp.from_torch(mask_t) + + op = torch.tensor( + [[offset_pos[0], offset_pos[1], offset_pos[2]]] * num_envs, dtype=torch.float32, device=TORCH_DEVICE + ) + offset_pos_wp = wp.from_torch(op.contiguous(), dtype=wp.vec3f) + + oq = torch.tensor( + [[offset_quat[0], offset_quat[1], offset_quat[2], offset_quat[3]]] * num_envs, + dtype=torch.float32, + device=TORCH_DEVICE, + ) + offset_quat_wp = wp.from_torch(oq.contiguous(), dtype=wp.quatf) + + d = torch.tensor([[drift[0], drift[1], drift[2]]] * num_envs, dtype=torch.float32, device=TORCH_DEVICE) + drift_wp = wp.from_torch(d.contiguous(), dtype=wp.vec3f) + + rcd = torch.tensor( + [[ray_cast_drift[0], ray_cast_drift[1], ray_cast_drift[2]]] * num_envs, dtype=torch.float32, device=TORCH_DEVICE + ) + rcd_wp = wp.from_torch(rcd.contiguous(), dtype=wp.vec3f) + + rs = torch.tensor( + [[[ray_start[0], ray_start[1], ray_start[2]]]] * num_envs, dtype=torch.float32, device=TORCH_DEVICE + ) + rs_wp = wp.from_torch(rs.contiguous(), dtype=wp.vec3f) + + rd = torch.tensor([[[ray_dir[0], ray_dir[1], ray_dir[2]]]] * num_envs, dtype=torch.float32, device=TORCH_DEVICE) + rd_wp = wp.from_torch(rd.contiguous(), dtype=wp.vec3f) + + return transforms, env_mask, offset_pos_wp, offset_quat_wp, drift_wp, rcd_wp, rs_wp, rd_wp + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestUpdateRayCasterKernel: + """Unit tests for update_ray_caster_kernel launched directly with warp arrays.""" + + def test_identity_passthrough(self): + """All identity/zero inputs → pos_w = origin, quat_w = identity, rays unchanged.""" + inputs = _make_inputs(ray_start=(1.0, 2.0, 3.0), ray_dir=(0.0, 0.0, -1.0)) + pos_w, quat_w, starts_w, dirs_w = _launch_kernel(*inputs, alignment_mode=0, num_envs=1, num_rays=1) + + np.testing.assert_allclose(pos_w[0], [0, 0, 0], atol=ATOL) + np.testing.assert_allclose(quat_w[0], [0, 0, 0, 1], atol=ATOL) + # World mode, identity: ray_start_w = local_start + pos (= local_start + origin) + np.testing.assert_allclose(starts_w[0, 0], [1, 2, 3], atol=ATOL) + np.testing.assert_allclose(dirs_w[0, 0], [0, 0, -1], atol=ATOL) + + # Same for yaw and base — all should agree at identity + for mode in [1, 2]: + inputs = _make_inputs(ray_start=(1.0, 2.0, 3.0), ray_dir=(0.0, 0.0, -1.0)) + _, _, starts_w2, dirs_w2 = _launch_kernel(*inputs, alignment_mode=mode, num_envs=1, num_rays=1) + np.testing.assert_allclose(starts_w2[0, 0], [1, 2, 3], atol=ATOL) + np.testing.assert_allclose(dirs_w2[0, 0], [0, 0, -1], atol=ATOL) + + def test_offset_composition(self): + """View at (1,0,2) yawed 90° + offset (0,1,0) → combined_pos = (1,-1,2). + + 90° yaw: quat = (0, 0, sin(45°), cos(45°)) = (0, 0, 0.7071, 0.7071) + quat_rotate(90°yaw, (0,1,0)) = (-1, 0, 0) [Y axis maps to -X] + combined_pos = (1,0,2) + (-1,0,0) = (0,0,2) + combined_quat = yaw90 * identity = yaw90 + """ + yaw90 = _yaw_quat(math.pi / 2) + inputs = _make_inputs( + view_pos=(1.0, 0.0, 2.0), + view_quat=yaw90, + offset_pos=(0.0, 1.0, 0.0), + ) + pos_w, quat_w, _, _ = _launch_kernel(*inputs, alignment_mode=2, num_envs=1, num_rays=1) + + expected_offset_rotated = _quat_rotate(yaw90, (0, 1, 0)) # (-1, 0, 0) + expected_pos = np.array([1, 0, 2]) + expected_offset_rotated + np.testing.assert_allclose(pos_w[0], expected_pos, atol=ATOL) + np.testing.assert_allclose(quat_w[0], list(yaw90), atol=ATOL) + + def test_world_alignment_ignores_rotation(self): + """World mode: ray starts = local_start + combined_pos, directions unchanged. + + Sensor at (0,0,5), pitched 45° around Y. Local ray at (+1,0,0), direction (0,0,-1). + World mode should NOT rotate the ray start or direction. + """ + pitch45 = _euler_to_quat_xyzw(0, math.pi / 4, 0) + inputs = _make_inputs( + view_pos=(0.0, 0.0, 5.0), + view_quat=pitch45, + ray_start=(1.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + pos_w, _, starts_w, dirs_w = _launch_kernel(*inputs, alignment_mode=0, num_envs=1, num_rays=1) + + # ray_start_w = local_start + combined_pos = (1,0,0) + (0,0,5) = (1,0,5) + np.testing.assert_allclose(starts_w[0, 0], [1, 0, 5], atol=ATOL) + # direction unchanged + np.testing.assert_allclose(dirs_w[0, 0], [0, 0, -1], atol=ATOL) + + def test_yaw_alignment_rotates_starts_only(self): + """Yaw mode: ray starts rotated by yaw-only quaternion, directions unchanged. + + Sensor yawed 90° + pitched 30°. Local ray start at (+1, 0, 0). + Yaw-only extracts 90° yaw → rotates (+1,0,0) to (0,+1,0). + Direction (0,0,-1) is NOT rotated in yaw mode. + """ + q = _euler_to_quat_xyzw(0, math.pi / 6, math.pi / 2) # pitch 30°, yaw 90° + inputs = _make_inputs( + view_pos=(0.0, 0.0, 3.0), + view_quat=q, + ray_start=(1.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + pos_w, _, starts_w, dirs_w = _launch_kernel(*inputs, alignment_mode=1, num_envs=1, num_rays=1) + + # yaw-only of 90° yaw + 30° pitch → pure 90° yaw + yaw_only = _yaw_quat(math.pi / 2) + rotated_start = _quat_rotate(yaw_only, (1, 0, 0)) # (0, 1, 0) + expected_start = rotated_start + np.array([0, 0, 3]) # + combined_pos + np.testing.assert_allclose(starts_w[0, 0], expected_start, atol=ATOL) + + # direction unchanged in yaw mode + np.testing.assert_allclose(dirs_w[0, 0], [0, 0, -1], atol=ATOL) + + def test_base_alignment_rotates_starts_and_directions(self): + """Base mode: both ray starts and directions rotated by full combined quaternion. + + Sensor yawed 90°. Local ray at (+1, 0, 0), direction (0, 0, -1). + 90° yaw rotates: + (+1,0,0) → (0,+1,0) + (0,0,-1) → (0,0,-1) [yaw doesn't affect Z-down] + """ + yaw90 = _yaw_quat(math.pi / 2) + inputs = _make_inputs( + view_pos=(0.0, 0.0, 4.0), + view_quat=yaw90, + ray_start=(1.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + _, _, starts_w, dirs_w = _launch_kernel(*inputs, alignment_mode=2, num_envs=1, num_rays=1) + + rotated_start = _quat_rotate(yaw90, (1, 0, 0)) # (0, 1, 0) + expected_start = rotated_start + np.array([0, 0, 4]) + np.testing.assert_allclose(starts_w[0, 0], expected_start, atol=ATOL) + + rotated_dir = _quat_rotate(yaw90, (0, 0, -1)) # (0, 0, -1) — Z unaffected by yaw + np.testing.assert_allclose(dirs_w[0, 0], rotated_dir, atol=ATOL) + + def test_base_alignment_with_pitch_rotates_direction(self): + """Base mode with pitch: direction is rotated by the full orientation. + + Sensor pitched 90° around Y (looking forward instead of down). + Direction (0,0,-1) rotated by 90° pitch around Y → (-1,0,0). + """ + pitch90 = _euler_to_quat_xyzw(0, math.pi / 2, 0) + inputs = _make_inputs( + view_pos=(0.0, 0.0, 2.0), + view_quat=pitch90, + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + _, _, _, dirs_w = _launch_kernel(*inputs, alignment_mode=2, num_envs=1, num_rays=1) + + rotated_dir = _quat_rotate(pitch90, (0, 0, -1)) # (-1, 0, 0) + np.testing.assert_allclose(dirs_w[0, 0], rotated_dir, atol=ATOL) + + def test_ray_cast_drift_world_mode(self): + """World mode: ray_cast_drift XY is added raw to position, Z is NOT applied. + + drift = (0.5, 0.3, 0.7). In world mode: + pos_drifted = (combined_pos.x + 0.5, combined_pos.y + 0.3, combined_pos.z) + Note: Z component of ray_cast_drift is NOT added to position in any mode. + """ + inputs = _make_inputs( + view_pos=(1.0, 2.0, 3.0), + ray_cast_drift=(0.5, 0.3, 0.7), + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + _, _, starts_w, dirs_w = _launch_kernel(*inputs, alignment_mode=0, num_envs=1, num_rays=1) + + # World mode: pos_drifted = (1+0.5, 2+0.3, 3) = (1.5, 2.3, 3) + # ray_start_w = local_start + pos_drifted = (0,0,0) + (1.5, 2.3, 3) + np.testing.assert_allclose(starts_w[0, 0], [1.5, 2.3, 3.0], atol=ATOL) + np.testing.assert_allclose(dirs_w[0, 0], [0, 0, -1], atol=ATOL) + + def test_ray_cast_drift_yaw_mode(self): + """Yaw mode: ray_cast_drift XY is rotated by yaw-only quat, Z is NOT applied. + + Sensor yawed 90°, drift = (1.0, 0.0, 0.5). + yaw-rotated drift = quat_rotate(yaw90, (1,0,0.5)) — but only XY of the result + is used for pos_drifted. Actually looking at the kernel: + rot_drift = quat_rotate(yaw_q, rcd) # full rotation of the drift vector + pos_drifted = (combined_pos.x + rot_drift.x, combined_pos.y + rot_drift.y, combined_pos.z) + So the drift vector is fully rotated, but only XY of the result is added. + """ + yaw90 = _yaw_quat(math.pi / 2) + inputs = _make_inputs( + view_pos=(0.0, 0.0, 5.0), + view_quat=yaw90, + ray_cast_drift=(1.0, 0.0, 0.5), + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + _, _, starts_w, _ = _launch_kernel(*inputs, alignment_mode=1, num_envs=1, num_rays=1) + + # yaw90 rotates (1, 0, 0.5) → (0, 1, 0.5) [X→Y under 90° yaw, Z unchanged] + rot_drift = _quat_rotate(yaw90, (1, 0, 0.5)) + # pos_drifted = (0 + rot_drift.x, 0 + rot_drift.y, 5) — Z from combined_pos + expected_start = np.array([rot_drift[0], rot_drift[1], 5.0]) + # local_start = (0,0,0), rotated by yaw_q → still (0,0,0) + np.testing.assert_allclose(starts_w[0, 0], expected_start, atol=ATOL) + + def test_ray_cast_drift_base_mode(self): + """Base mode: ray_cast_drift XY is rotated by full combined_quat, Z is NOT applied. + + Sensor pitched 90° around Y, drift = (1.0, 0.0, 0.0). + Full rotation of (1,0,0) by 90° pitch around Y → (0, 0, -1). + pos_drifted = (combined_pos.x + 0, combined_pos.y + 0, combined_pos.z) — both XY of + rotated drift happen to be 0 in this case. + """ + pitch90 = _euler_to_quat_xyzw(0, math.pi / 2, 0) + inputs = _make_inputs( + view_pos=(0.0, 0.0, 5.0), + view_quat=pitch90, + ray_cast_drift=(1.0, 0.0, 0.0), + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + _, _, starts_w, _ = _launch_kernel(*inputs, alignment_mode=2, num_envs=1, num_rays=1) + + rot_drift = _quat_rotate(pitch90, (1, 0, 0)) # (0, 0, -1) + # pos_drifted = (0 + rot_drift.x, 0 + rot_drift.y, 5) = (0, 0, 5) + # local_start (0,0,0) rotated by pitch90 → still (0,0,0) + expected_start = np.array([rot_drift[0], rot_drift[1], 5.0]) + np.testing.assert_allclose(starts_w[0, 0], expected_start, atol=ATOL) + + def test_env_mask_skips_masked_envs(self): + """Masked-out environments retain sentinel values in output buffers. + + 2 envs, env 0 masked out (False), env 1 active (True). + Output buffers are pre-filled with sentinel (999). After kernel launch, + env 0 should still have 999, env 1 should have computed values. + """ + yaw90 = _yaw_quat(math.pi / 2) + + # Build transforms for 2 envs: both at (0,0,2) with yaw90 + t_single = torch.tensor( + [[0, 0, 2, yaw90[0], yaw90[1], yaw90[2], yaw90[3]]], + dtype=torch.float32, + device=DEVICE, + ) + t_both = t_single.repeat(2, 1).contiguous() + transforms = wp.from_torch(t_both).view(wp.transformf) + + # Mask: env 0 = False, env 1 = True + mask_t = torch.tensor([False, True], dtype=torch.bool, device=TORCH_DEVICE) + env_mask = wp.from_torch(mask_t) + + # Zero offsets and drifts for both envs + zero3 = torch.zeros(2, 3, dtype=torch.float32, device=TORCH_DEVICE) + offset_pos_wp = wp.from_torch(zero3.clone().contiguous(), dtype=wp.vec3f) + iq = torch.tensor([[0, 0, 0, 1]] * 2, dtype=torch.float32, device=TORCH_DEVICE) + offset_quat_wp = wp.from_torch(iq.contiguous(), dtype=wp.quatf) + drift_wp = wp.from_torch(zero3.clone().contiguous(), dtype=wp.vec3f) + rcd_wp = wp.from_torch(zero3.clone().contiguous(), dtype=wp.vec3f) + + # Single ray per env + rs = torch.tensor([[[1, 0, 0]]] * 2, dtype=torch.float32, device=TORCH_DEVICE) + rs_wp = wp.from_torch(rs.contiguous(), dtype=wp.vec3f) + rd = torch.tensor([[[0, 0, -1]]] * 2, dtype=torch.float32, device=TORCH_DEVICE) + rd_wp = wp.from_torch(rd.contiguous(), dtype=wp.vec3f) + + # Pre-fill outputs with sentinel + sentinel = 999.0 + pos_w_t = torch.full((2, 3), sentinel, dtype=torch.float32, device=TORCH_DEVICE) + pos_w = wp.from_torch(pos_w_t.contiguous(), dtype=wp.vec3f) + quat_w_t = torch.full((2, 4), sentinel, dtype=torch.float32, device=TORCH_DEVICE) + quat_w = wp.from_torch(quat_w_t.contiguous(), dtype=wp.quatf) + starts_w_t = torch.full((2, 1, 3), sentinel, dtype=torch.float32, device=TORCH_DEVICE) + starts_w = wp.from_torch(starts_w_t.contiguous(), dtype=wp.vec3f) + dirs_w_t = torch.full((2, 1, 3), sentinel, dtype=torch.float32, device=TORCH_DEVICE) + dirs_w = wp.from_torch(dirs_w_t.contiguous(), dtype=wp.vec3f) + + wp.launch( + update_ray_caster_kernel, + dim=(2, 1), + inputs=[transforms, env_mask, offset_pos_wp, offset_quat_wp, drift_wp, rcd_wp, rs_wp, rd_wp, 2], + outputs=[pos_w, quat_w, starts_w, dirs_w], + device=DEVICE, + ) + wp.synchronize_device(DEVICE) + + pos_np = wp.to_torch(pos_w).cpu().numpy() + quat_np = wp.to_torch(quat_w).cpu().numpy() + starts_np = wp.to_torch(starts_w).cpu().numpy() + dirs_np = wp.to_torch(dirs_w).cpu().numpy() + + # Env 0 (masked): all outputs should still be sentinel + np.testing.assert_allclose(pos_np[0], [sentinel] * 3, atol=ATOL) + np.testing.assert_allclose(quat_np[0], [sentinel] * 4, atol=ATOL) + np.testing.assert_allclose(starts_np[0, 0], [sentinel] * 3, atol=ATOL) + np.testing.assert_allclose(dirs_np[0, 0], [sentinel] * 3, atol=ATOL) + + # Env 1 (active): should have computed values + np.testing.assert_allclose(pos_np[1], [0, 0, 2], atol=ATOL) + np.testing.assert_allclose(quat_np[1], list(yaw90), atol=ATOL) + # Base mode: (1,0,0) rotated by yaw90 = (0,1,0), + pos (0,0,2) + expected_start = _quat_rotate(yaw90, (1, 0, 0)) + np.array([0, 0, 2]) + np.testing.assert_allclose(starts_np[1, 0], expected_start, atol=ATOL) + expected_dir = _quat_rotate(yaw90, (0, 0, -1)) # (0, 0, -1) unaffected by yaw + np.testing.assert_allclose(dirs_np[1, 0], expected_dir, atol=ATOL) + + def test_positional_drift_added_before_alignment(self): + """The `drift` parameter is added to combined_pos before ray transformation. + + Verify that drift shifts the sensor position (and therefore ray starts) + equally across all alignment modes. + """ + drift_val = (0.0, 0.0, 1.5) # shift up 1.5m + results = {} + for mode_name, mode_int in [("world", 0), ("yaw", 1), ("base", 2)]: + inputs = _make_inputs( + view_pos=(0.0, 0.0, 3.0), + drift=drift_val, + ray_start=(0.0, 0.0, 0.0), + ray_dir=(0.0, 0.0, -1.0), + ) + pos_w, _, starts_w, _ = _launch_kernel(*inputs, alignment_mode=mode_int, num_envs=1, num_rays=1) + results[mode_name] = (pos_w, starts_w) + + # All modes: pos_w should be (0, 0, 4.5) = view_pos + drift + for mode_name in ["world", "yaw", "base"]: + np.testing.assert_allclose( + results[mode_name][0][0], + [0, 0, 4.5], + atol=ATOL, + err_msg=f"{mode_name} mode: pos_w should include drift", + ) + # ray_start_w Z should also reflect the drifted position + assert results[mode_name][1][0, 0, 2] == pytest.approx(4.5, abs=ATOL), ( + f"{mode_name} mode: ray start Z should be 4.5" + ) diff --git a/source/isaaclab/test/sim/frame_view_contract_utils.py b/source/isaaclab/test/sim/frame_view_contract_utils.py new file mode 100644 index 000000000000..4336f94bd3d7 --- /dev/null +++ b/source/isaaclab/test/sim/frame_view_contract_utils.py @@ -0,0 +1,404 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared FrameView contract tests. + +This module defines the invariants that **every** FrameView backend +(USD, Fabric, Newton) must satisfy. Backend test files import these tests +via ``from frame_view_contract_utils import *`` and provide a +``view_factory`` pytest fixture that builds the backend-specific scene. + +The factory signature is:: + + def view_factory() -> Callable[[int, str], ViewBundle]: ... + +Where ``ViewBundle`` is a :class:`NamedTuple`:: + + class ViewBundle(NamedTuple): + view: BaseFrameView + get_parent_pos: Callable[[int, str], torch.Tensor] + set_parent_pos: Callable[[torch.Tensor, int], None] + teardown: Callable[[], None] + +- ``view``: The FrameView under test. Must track child prims at + :data:`CHILD_OFFSET` under parent prims/bodies. +- ``get_parent_pos(n, device)``: Read the parent prim/body positions. +- ``set_parent_pos(positions, n)``: Write the parent prim/body positions. +- ``teardown()``: Cleanup (close context, clear stage, etc.). + +Tolerance policy: + - Indexed reads (exact copy): ``atol=0`` + - Composition / decomposition through float32 transforms: ``atol=ATOL`` + - Parent position identity checks (should be untouched): ``atol=0`` +""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import NamedTuple + +import pytest +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +CHILD_OFFSET = (0.1, 0.0, 0.05) +"""Local offset of the child prim from its parent, shared by all backend fixtures.""" + +ATOL = 1e-5 +"""Default absolute tolerance for float32 transform composition.""" + + +class ViewBundle(NamedTuple): + """Return type of the ``view_factory`` fixture.""" + + view: object + get_parent_pos: Callable + set_parent_pos: Callable + teardown: Callable + + +def _t(a): + """Convert a wp.array or ProxyArray return to a torch.Tensor (pass-through otherwise).""" + if isinstance(a, ProxyArray): + return a.torch + return wp.to_torch(a) if isinstance(a, wp.array) else a + + +def _wp_vec3f(data, device="cpu"): + return wp.array([wp.vec3f(*row) for row in data], dtype=wp.vec3f, device=device) + + +def _wp_vec4f(data, device="cpu"): + return wp.array([wp.vec4f(*row) for row in data], dtype=wp.vec4f, device=device) + + +# ================================================================== +# Contract: Getters +# ================================================================== + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_world_pose_equals_parent_plus_offset(device, view_factory): + """world_pose == parent_pos + local offset (identity parent orientation).""" + bundle = view_factory(num_envs=4, device=device) + try: + child_pos = _t(bundle.view.get_world_poses()[0]) + parent_pos = bundle.get_parent_pos(4, device) + offset = torch.tensor(CHILD_OFFSET, device=device) + + torch.testing.assert_close(child_pos, parent_pos + offset.unsqueeze(0), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_local_pose_equals_structural_offset(device, view_factory): + """local_pose == the authored offset (0.1, 0, 0.05) for every prim.""" + bundle = view_factory(num_envs=4, device=device) + try: + local_pos, local_quat = bundle.view.get_local_poses() + expected_pos = torch.tensor(CHILD_OFFSET, device=device).expand(4, -1) + expected_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).expand(4, -1) + + torch.testing.assert_close(_t(local_pos), expected_pos, atol=ATOL, rtol=0) + torch.testing.assert_close(_t(local_quat), expected_quat, atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_local_differs_from_world(device, view_factory): + """local != world when parent is not at the origin. + + Asserts |world - local| > 0.5 to catch any implementation that returns + world as local. The parent is offset from the origin so the z-component + alone provides > 0.5 difference. + """ + bundle = view_factory(num_envs=2, device=device) + try: + world_pos = _t(bundle.view.get_world_poses()[0]) + local_pos = _t(bundle.view.get_local_poses()[0]) + + diff = (world_pos - local_pos).abs().max().item() + assert diff > 0.5, ( + f"Expected |world - local| > 0.5, got {diff:.4f}. world={world_pos.tolist()}, local={local_pos.tolist()}" + ) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_local_stable_after_parent_move(device, view_factory): + """Moving the parent changes world but NOT local.""" + bundle = view_factory(num_envs=2, device=device) + try: + local_before = _t(bundle.view.get_local_poses()[0]).clone() + bundle.set_parent_pos(torch.tensor([[99.0, 0.0, 0.0], [0.0, 99.0, 0.0]], device=device), 2) + local_after = _t(bundle.view.get_local_poses()[0]) + + torch.testing.assert_close(local_after, local_before, atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_world_tracks_parent_move(device, view_factory): + """Moving the parent shifts world poses by the same amount.""" + bundle = view_factory(num_envs=2, device=device) + try: + new_parent_pos = torch.tensor([[5.0, 0.0, 0.0], [0.0, 5.0, 0.0]], device=device) + bundle.set_parent_pos(new_parent_pos, 2) + + child_pos = _t(bundle.view.get_world_poses()[0]) + offset = torch.tensor(CHILD_OFFSET, device=device) + + torch.testing.assert_close(child_pos, new_parent_pos + offset.unsqueeze(0), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_indexed_get_returns_correct_subset(device, view_factory): + """Indexed get (out-of-order) returns exact copies for both world and local.""" + bundle = view_factory(num_envs=5, device=device) + try: + all_world = _t(bundle.view.get_world_poses()[0]) + all_local = _t(bundle.view.get_local_poses()[0]) + + indices_list = [4, 1, 3] + indices = wp.array(indices_list, dtype=wp.int32, device=device) + sub_world = _t(bundle.view.get_world_poses(indices)[0]) + sub_local = _t(bundle.view.get_local_poses(indices)[0]) + + for out_i, view_i in enumerate(indices_list): + torch.testing.assert_close(sub_world[out_i], all_world[view_i], atol=0, rtol=0) + torch.testing.assert_close(sub_local[out_i], all_local[view_i], atol=0, rtol=0) + finally: + bundle.teardown() + + +# ================================================================== +# Contract: Setters +# ================================================================== + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_roundtrip(device, view_factory): + """set_world_poses -> get_world_poses returns the same values.""" + bundle = view_factory(num_envs=2, device=device) + try: + new_pos = _wp_vec3f([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], device=device) + new_quat = _wp_vec4f([[0.0, 0.0, 0.7071068, 0.7071068], [0.0, 0.0, 0.0, 1.0]], device=device) + bundle.view.set_world_poses(new_pos, new_quat) + + ret_pos, ret_quat = bundle.view.get_world_poses() + torch.testing.assert_close(_t(ret_pos), _t(new_pos), atol=ATOL, rtol=0) + torch.testing.assert_close(_t(ret_quat), _t(new_quat), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_local_roundtrip(device, view_factory): + """set_local_poses -> get_local_poses returns the same values.""" + bundle = view_factory(num_envs=2, device=device) + try: + new_pos = _wp_vec3f([[0.5, 0.3, 0.1], [0.2, 0.7, 0.4]], device=device) + new_quat = _wp_vec4f([[0.0, 0.0, 0.0, 1.0]] * 2, device=device) + bundle.view.set_local_poses(new_pos, new_quat) + + ret_pos, ret_quat = bundle.view.get_local_poses() + torch.testing.assert_close(_t(ret_pos), _t(new_pos), atol=ATOL, rtol=0) + torch.testing.assert_close(_t(ret_quat), _t(new_quat), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_does_not_move_parent(device, view_factory): + """set_world_poses must not modify the parent prim/body position.""" + bundle = view_factory(num_envs=2, device=device) + try: + parent_before = bundle.get_parent_pos(2, device).clone() + bundle.view.set_world_poses( + _wp_vec3f([[99.0, 99.0, 99.0], [88.0, 88.0, 88.0]], device=device), + _wp_vec4f([[0.0, 0.0, 0.0, 1.0]] * 2, device=device), + ) + parent_after = bundle.get_parent_pos(2, device) + + torch.testing.assert_close(parent_after, parent_before, atol=0, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_local_does_not_move_parent(device, view_factory): + """set_local_poses must not modify the parent prim/body position.""" + bundle = view_factory(num_envs=2, device=device) + try: + parent_before = bundle.get_parent_pos(2, device).clone() + bundle.view.set_local_poses( + _wp_vec3f([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]], device=device), + _wp_vec4f([[0.0, 0.0, 0.0, 1.0]] * 2, device=device), + ) + parent_after = bundle.get_parent_pos(2, device) + + torch.testing.assert_close(parent_after, parent_before, atol=0, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_updates_local(device, view_factory): + """After set_world_poses, get_local_poses reflects the new offset. + + Uses non-axis-aligned offsets to catch coordinate swap bugs. + """ + bundle = view_factory(num_envs=2, device=device) + try: + parent_pos = bundle.get_parent_pos(2, device) + desired_offset = torch.tensor([[0.3, 0.7, 0.2], [0.8, 0.1, 0.6]], device=device) + new_world = parent_pos + desired_offset + + bundle.view.set_world_poses( + _wp_vec3f(new_world.tolist(), device=device), + _wp_vec4f([[0.0, 0.0, 0.0, 1.0]] * 2, device=device), + ) + + local_pos = _t(bundle.view.get_local_poses()[0]) + torch.testing.assert_close(local_pos, desired_offset, atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_local_updates_world(device, view_factory): + """After set_local_poses, get_world_poses == parent + new_local. + + Uses non-axis-aligned offsets to catch coordinate swap bugs. + """ + bundle = view_factory(num_envs=2, device=device) + try: + parent_pos = bundle.get_parent_pos(2, device) + new_offset = torch.tensor([[0.4, 0.9, 0.15], [0.6, 0.2, 0.85]], device=device) + bundle.view.set_local_poses( + _wp_vec3f(new_offset.tolist(), device=device), + _wp_vec4f([[0.0, 0.0, 0.0, 1.0]] * 2, device=device), + ) + + world_pos = _t(bundle.view.get_world_poses()[0]) + torch.testing.assert_close(world_pos, parent_pos + new_offset, atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_partial_position_only(device, view_factory): + """Setting only positions: new positions written, orientations preserved.""" + bundle = view_factory(num_envs=2, device=device) + try: + _, orig_quat = bundle.view.get_world_poses() + new_pos = _wp_vec3f([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], device=device) + bundle.view.set_world_poses(positions=new_pos) + + ret_pos, ret_quat = bundle.view.get_world_poses() + torch.testing.assert_close(_t(ret_pos), _t(new_pos), atol=ATOL, rtol=0) + torch.testing.assert_close(_t(ret_quat), _t(orig_quat), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_partial_orientation_only(device, view_factory): + """Setting only orientations: new orientations written, positions preserved.""" + bundle = view_factory(num_envs=2, device=device) + try: + orig_pos, _ = bundle.view.get_world_poses() + new_quat = _wp_vec4f([[0.0, 0.0, 0.7071068, 0.7071068], [0.7071068, 0.0, 0.0, 0.7071068]], device=device) + bundle.view.set_world_poses(orientations=new_quat) + + ret_pos, ret_quat = bundle.view.get_world_poses() + torch.testing.assert_close(_t(ret_pos), _t(orig_pos), atol=ATOL, rtol=0) + torch.testing.assert_close(_t(ret_quat), _t(new_quat), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_local_partial_position_only(device, view_factory): + """Setting only local translations: new translations written, orientations preserved.""" + bundle = view_factory(num_envs=2, device=device) + try: + _, orig_quat = bundle.view.get_local_poses() + new_pos = _wp_vec3f([[0.2, 0.3, 0.4], [0.5, 0.6, 0.7]], device=device) + bundle.view.set_local_poses(translations=new_pos) + + ret_pos, ret_quat = bundle.view.get_local_poses() + torch.testing.assert_close(_t(ret_pos), _t(new_pos), atol=ATOL, rtol=0) + torch.testing.assert_close(_t(ret_quat), _t(orig_quat), atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_set_world_indexed_only_affects_subset(device, view_factory): + """Indexed set_world_poses writes requested indices, leaves others untouched.""" + bundle = view_factory(num_envs=4, device=device) + try: + orig_pos = _t(bundle.view.get_world_poses()[0]).clone() + indices = wp.array([1, 3], dtype=wp.int32, device=device) + new_pos = _wp_vec3f([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], device=device) + bundle.view.set_world_poses(positions=new_pos, indices=indices) + + updated = _t(bundle.view.get_world_poses()[0]) + torch.testing.assert_close(updated[0], orig_pos[0], atol=0, rtol=0) + torch.testing.assert_close(updated[2], orig_pos[2], atol=0, rtol=0) + torch.testing.assert_close(updated[1], _t(new_pos)[0], atol=ATOL, rtol=0) + torch.testing.assert_close(updated[3], _t(new_pos)[1], atol=ATOL, rtol=0) + finally: + bundle.teardown() + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_return_types_are_torcharray(device, view_factory): + """Public API contract — every backend returns ProxyArray from the pose getters.""" + bundle = view_factory(num_envs=2, device=device) + try: + pos_full, quat_full = bundle.view.get_world_poses() + assert isinstance(pos_full, ProxyArray), ( + f"get_world_poses()[0] must be ProxyArray, got {type(pos_full).__name__}" + ) + assert isinstance(quat_full, ProxyArray), ( + f"get_world_poses()[1] must be ProxyArray, got {type(quat_full).__name__}" + ) + + indices = wp.array([0], dtype=wp.int32, device=bundle.view.device) + pos_idx, quat_idx = bundle.view.get_world_poses(indices) + assert isinstance(pos_idx, ProxyArray), ( + f"get_world_poses(indices)[0] must be ProxyArray, got {type(pos_idx).__name__}" + ) + assert isinstance(quat_idx, ProxyArray), ( + f"get_world_poses(indices)[1] must be ProxyArray, got {type(quat_idx).__name__}" + ) + + lpos_full, lquat_full = bundle.view.get_local_poses() + assert isinstance(lpos_full, ProxyArray), ( + f"get_local_poses()[0] must be ProxyArray, got {type(lpos_full).__name__}" + ) + assert isinstance(lquat_full, ProxyArray), ( + f"get_local_poses()[1] must be ProxyArray, got {type(lquat_full).__name__}" + ) + + lpos_idx, lquat_idx = bundle.view.get_local_poses(indices) + assert isinstance(lpos_idx, ProxyArray), ( + f"get_local_poses(indices)[0] must be ProxyArray, got {type(lpos_idx).__name__}" + ) + assert isinstance(lquat_idx, ProxyArray), ( + f"get_local_poses(indices)[1] must be ProxyArray, got {type(lquat_idx).__name__}" + ) + finally: + bundle.teardown() diff --git a/source/isaaclab/test/sim/test_build_simulation_context_headless.py b/source/isaaclab/test/sim/test_build_simulation_context_headless.py index ebe059bed666..8f13d79041e7 100644 --- a/source/isaaclab/test/sim/test_build_simulation_context_headless.py +++ b/source/isaaclab/test/sim/test_build_simulation_context_headless.py @@ -88,6 +88,7 @@ def test_build_simulation_context_cfg(): ) with build_simulation_context(sim_cfg=cfg, gravity_enabled=False, dt=0.01, device="cpu") as sim: + # Values from sim_cfg should not be overridden by build_simulation_context args assert sim.cfg.gravity == gravity assert sim.cfg.device == device assert sim.cfg.dt == dt diff --git a/source/isaaclab/test/sim/test_build_simulation_context_nonheadless.py b/source/isaaclab/test/sim/test_build_simulation_context_nonheadless.py index ae2203c43b70..8c053bc51cda 100644 --- a/source/isaaclab/test/sim/test_build_simulation_context_nonheadless.py +++ b/source/isaaclab/test/sim/test_build_simulation_context_nonheadless.py @@ -58,7 +58,10 @@ def test_build_simulation_context_ground_plane(add_ground_plane): def test_build_simulation_context_auto_add_lighting(add_lighting, auto_add_lighting): """Test that the simulation context is built with the correct lighting.""" with build_simulation_context(add_lighting=add_lighting, auto_add_lighting=auto_add_lighting) as sim: - if auto_add_lighting or add_lighting: + has_gui = sim.get_setting("/isaaclab/has_gui") + # Dome light is added if add_lighting=True OR (auto_add_lighting=True AND has_gui) + should_have_light = add_lighting or (auto_add_lighting and has_gui) + if should_have_light: # Ensure that dome light got added assert sim.stage.GetPrimAtPath("/World/defaultDomeLight").IsValid() else: @@ -68,6 +71,7 @@ def test_build_simulation_context_auto_add_lighting(add_lighting, auto_add_light def test_build_simulation_context_cfg(): """Test that the simulation context is built with the correct cfg and values don't get overridden.""" + dt = 0.001 # Non-standard gravity gravity = (0.0, 0.0, -1.81) @@ -80,6 +84,7 @@ def test_build_simulation_context_cfg(): ) with build_simulation_context(sim_cfg=cfg, gravity_enabled=False, dt=0.01, device="cpu") as sim: + # Values from sim_cfg should not be overridden by build_simulation_context args assert sim.cfg.gravity == gravity assert sim.cfg.device == device assert sim.cfg.dt == dt diff --git a/source/isaaclab/test/sim/test_cloner.py b/source/isaaclab/test/sim/test_cloner.py new file mode 100644 index 000000000000..1f8af90387b5 --- /dev/null +++ b/source/isaaclab/test/sim/test_cloner.py @@ -0,0 +1,276 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for USD cloner utilities (no PhysX dependency).""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import pytest +import torch + +from pxr import UsdGeom + +import isaaclab.sim as sim_utils +from isaaclab.cloner import ClonePlan, TemplateCloneCfg, clone_from_template, sequential, usd_replicate +from isaaclab.sim import build_simulation_context + +pytestmark = pytest.mark.isaacsim_ci + + +@pytest.fixture(params=["cpu", "cuda"]) +def sim(request): + """Provide a fresh simulation context for each test on CPU and CUDA.""" + with build_simulation_context(device=request.param, dt=0.01, add_lighting=False) as sim: + yield sim + + +def test_usd_replicate_with_positions_and_mask(sim): + """Replicate sources to selected envs and author translate ops from positions.""" + # Prepare sources under /World/template + sim_utils.create_prim("/World/template", "Xform") + sim_utils.create_prim("/World/template/A", "Xform") + sim_utils.create_prim("/World/template/B", "Xform") + + # Prepare destination env namespaces + num_envs = 3 + env_ids = torch.arange(num_envs, dtype=torch.long) + sim_utils.create_prim("/World/envs", "Xform") + for i in range(num_envs): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform") + + # Map A -> env 0 and 2; B -> env 1 only + mask = torch.zeros((2, num_envs), dtype=torch.bool) + mask[0, [0, 2]] = True + mask[1, [1]] = True + + usd_replicate( + sim_utils.get_current_stage(), + sources=["/World/template/A", "/World/template/B"], + destinations=["/World/envs/env_{}/Object/A", "/World/envs/env_{}/Object/B"], + env_ids=env_ids, + mask=mask, + ) + + # Validate replication and translate op + stage = sim_utils.get_current_stage() + assert stage.GetPrimAtPath("/World/envs/env_0/Object/A").IsValid() + assert not stage.GetPrimAtPath("/World/envs/env_0/Object/B").IsValid() + assert stage.GetPrimAtPath("/World/envs/env_1/Object/B").IsValid() + assert not stage.GetPrimAtPath("/World/envs/env_1/Object/A").IsValid() + assert stage.GetPrimAtPath("/World/envs/env_2/Object/A").IsValid() + + # Check xformOp:translate authored for env_2/A + prim = stage.GetPrimAtPath("/World/envs/env_2/Object/A") + xform = UsdGeom.Xformable(prim) + ops = xform.GetOrderedXformOps() + assert any(op.GetOpType() == UsdGeom.XformOp.TypeTranslate for op in ops) + + +def test_usd_replicate_depth_order_parent_child(sim): + """Replicate parent and child when provided out of order; parent should exist before child.""" + # Prepare sources + sim_utils.create_prim("/World/template", "Xform") + sim_utils.create_prim("/World/template/Parent", "Xform") + sim_utils.create_prim("/World/template/Parent/Child", "Xform") + + # Destinations (single env) + env_ids = torch.tensor([0, 1], dtype=torch.long) + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/envs/env_0", "Xform") + sim_utils.create_prim("/World/envs/env_1", "Xform") + + # Provide child first, then parent; depth sort should handle this + usd_replicate( + sim_utils.get_current_stage(), + sources=["/World/template/Parent/Child", "/World/template/Parent"], + destinations=["/World/envs/env_{}/Parent/Child", "/World/envs/env_{}/Parent"], + env_ids=env_ids, + ) + + stage = sim_utils.get_current_stage() + for i in range(2): + assert stage.GetPrimAtPath(f"/World/envs/env_{i}/Parent").IsValid() + assert stage.GetPrimAtPath(f"/World/envs/env_{i}/Parent/Child").IsValid() + + +def test_usd_replicate_self_copy_skips_copy_spec(sim): + """usd_replicate must not call Sdf.CopySpec when source and destination paths are identical. + + Sdf.CopySpec(src, src) is a no-op in the current USD version so it does not corrupt children, + but the call is still wasteful. The guard ensures it is skipped entirely. This test mocks + Sdf.CopySpec to verify it is called exactly once (for env_1) and never for the self case (env_0). + """ + from unittest.mock import patch + + import isaaclab.cloner.cloner_utils as _cloner_mod + + stage = sim_utils.get_current_stage() + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/envs/env_0", "Xform") + sim_utils.create_prim("/World/envs/env_0/Robot", "Xform") + sim_utils.create_prim("/World/envs/env_0/Robot/base_link", "Xform") + sim_utils.create_prim("/World/envs/env_1", "Xform") + + copy_calls: list[tuple[str, str]] = [] + real_copy_spec = _cloner_mod.Sdf.CopySpec + + def capturing_copy_spec(src_layer, src_path, dst_layer, dst_path): + copy_calls.append((str(src_path), str(dst_path))) + return real_copy_spec(src_layer, src_path, dst_layer, dst_path) + + with patch.object(_cloner_mod.Sdf, "CopySpec", capturing_copy_spec): + usd_replicate( + stage, + sources=["/World/envs/env_0"], + destinations=["/World/envs/env_{}"], + env_ids=torch.tensor([0, 1], dtype=torch.long), + mask=torch.ones((1, 2), dtype=torch.bool), + ) + + # CopySpec must be called for env_1 but never for env_0 (self-copy) + assert all(src != dst for src, dst in copy_calls), f"Self-copy detected in CopySpec calls: {copy_calls}" + assert any(dst == "/World/envs/env_1" for _, dst in copy_calls), "CopySpec was not called for env_1" + + +@pytest.mark.parametrize( + "parent_paths, spawn_pattern, expected_child_paths, bad_path, match_expr", + [ + ( + ["/World/rig_0_alpha", "/World/rig_0_beta", "/World/rig_0_gamma"], + "/World/rig_0_.*/Sensor", + ["/World/rig_0_alpha/Sensor", "/World/rig_0_beta/Sensor", "/World/rig_0_gamma/Sensor"], + "/World/rig_00/Sensor", + "/World/rig_0_.*", + ), + ( + [ + "/World/group_a/slot_0", + "/World/group_a/slot_1", + "/World/group_b/slot_0", + "/World/group_b/slot_1", + ], + "/World/group_.*/slot_.*/Sensor", + [ + "/World/group_a/slot_0/Sensor", + "/World/group_a/slot_1/Sensor", + "/World/group_b/slot_0/Sensor", + "/World/group_b/slot_1/Sensor", + ], + "/World/group_0/slot_0/Sensor", + "/World/group_.*/slot_.*", + ), + ( + ["/World/template/Object"], + "/World/template/Object/proto_.*", + ["/World/template/Object/proto_0"], + "/World/template/Object0/proto_0", + "/World/template/Object", + ), + ], +) +def test_clone_decorator_wildcard_patterns( + sim, parent_paths, spawn_pattern, expected_child_paths, bad_path, match_expr +): + """The @clone decorator handles two distinct wildcard patterns correctly. + + Case A – ``.*`` in root_path (parent is a regex): the child prim is spawned at + ``source_prim_paths[0]`` as a prototype and then copied to every other matching + parent via ``Sdf.CopySpec``, so **all** parents end up with the child. The old + ``prim_path.replace(".*", "0")`` approach created spurious intermediate prims + that inflated ``find_matching_prims`` counts and broke tiled-camera initialization. + + Case B – ``.*`` only in asset_path (leaf): no parent regex, so + ``source_prim_paths == [root_path]`` (one entry, no copy step). Replacing + ``".*"`` → ``"0"`` in the asset name gives the intended prototype name + (e.g. ``proto_asset_0``) under the single real parent. + """ + for path in parent_paths: + sim_utils.create_prim(path, "Xform") + + cfg = sim_utils.ConeCfg(radius=0.1, height=0.2) + cfg.func(spawn_pattern, cfg) + + stage = sim_utils.get_current_stage() + + # Every expected child path must exist + for child_path in expected_child_paths: + assert stage.GetPrimAtPath(child_path).IsValid(), ( + f"Prim was not spawned at '{child_path}'. The @clone decorator may have used the wrong spawn path." + ) + + # The spurious path from the old replace(".*", "0") must NOT exist + assert not stage.GetPrimAtPath(bad_path).IsValid(), ( + f"Spurious prim found at '{bad_path}'. " + "The @clone decorator incorrectly derived the spawn path by replacing '.*' with '0'." + ) + + # find_matching_prims must see exactly the original parents — no spurious extras + all_matching = sim_utils.find_matching_prims(match_expr) + assert len(all_matching) == len(parent_paths), ( + f"Expected {len(parent_paths)} matching prims, got {len(all_matching)}. " + "Spurious parent prims were likely created by the @clone decorator." + ) + + +def test_clone_from_template_returns_clone_plan(sim): + """clone_from_template exposes per-group ClonePlan dicts with prototype-to-env masks. + + Builds two USD prototypes under one group, clones across four envs with the deterministic + sequential strategy, and asserts the returned dict has one entry keyed by the group's + destination template, with a ``[2, 4]`` boolean mask whose columns sum to one. + """ + num_clones = 4 + cfg = TemplateCloneCfg(device=sim.cfg.device, clone_strategy=sequential, clone_physics=False) + + sim_utils.create_prim(cfg.template_root, "Xform") + sim_utils.create_prim(f"{cfg.template_root}/Object", "Xform") + sim_utils.create_prim(f"{cfg.template_root}/Object/proto_asset_0", "Xform") + sim_utils.create_prim(f"{cfg.template_root}/Object/proto_asset_1", "Xform") + sim_utils.create_prim("/World/envs", "Xform") + for i in range(num_clones): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform", translation=(0, 0, 0)) + + stage = sim_utils.get_current_stage() + plans = clone_from_template(stage, num_clones=num_clones, template_clone_cfg=cfg) + + assert isinstance(plans, dict) + assert list(plans.keys()) == ["/World/envs/env_{}/Object"] + plan = plans["/World/envs/env_{}/Object"] + assert isinstance(plan, ClonePlan) + assert plan.dest_template == "/World/envs/env_{}/Object" + assert sorted(plan.prototype_paths) == [ + "/World/template/Object/proto_asset_0", + "/World/template/Object/proto_asset_1", + ] + assert plan.clone_mask.shape == (2, num_clones) + assert plan.clone_mask.dtype == torch.bool + # Each env gets exactly one prototype (column-sum invariant) + assert torch.all(plan.clone_mask.sum(dim=0) == 1) + # Sequential strategy assigns env i → prototype (i % num_protos) + actual_proto_idx = plan.clone_mask.to(torch.int).argmax(dim=0).cpu() + assert torch.equal(actual_proto_idx, torch.tensor([0, 1, 0, 1])) + + +def test_clone_from_template_returns_empty_dict_when_no_prototypes(sim): + """clone_from_template returns an empty dict when no prototypes match the identifier.""" + num_clones = 2 + cfg = TemplateCloneCfg(device=sim.cfg.device, clone_strategy=sequential, clone_physics=False) + + sim_utils.create_prim(cfg.template_root, "Xform") + sim_utils.create_prim("/World/envs", "Xform") + for i in range(num_clones): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform", translation=(0, 0, 0)) + + stage = sim_utils.get_current_stage() + plans = clone_from_template(stage, num_clones=num_clones, template_clone_cfg=cfg) + + assert plans == {} diff --git a/source/isaaclab/test/sim/test_mesh_converter.py b/source/isaaclab/test/sim/test_mesh_converter.py index ea4529d293cd..1373f3753d08 100644 --- a/source/isaaclab/test/sim/test_mesh_converter.py +++ b/source/isaaclab/test/sim/test_mesh_converter.py @@ -19,7 +19,6 @@ import pytest -import omni from pxr import UsdGeom, UsdPhysics import isaaclab.sim as sim_utils @@ -30,13 +29,13 @@ def random_quaternion(): - # Generate four random numbers for the quaternion + # Generate four random numbers for the quaternion (x, y, z, w format) u1, u2, u3 = random.random(), random.random(), random.random() w = math.sqrt(1 - u1) * math.sin(2 * math.pi * u2) x = math.sqrt(1 - u1) * math.cos(2 * math.pi * u2) y = math.sqrt(u1) * math.sin(2 * math.pi * u3) z = math.sqrt(u1) * math.cos(2 * math.pi * u3) - return (w, x, y, z) + return (x, y, z, w) @pytest.fixture(scope="session") @@ -71,8 +70,6 @@ def sim(): # stop simulation sim.stop() # cleanup stage and context - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -92,7 +89,7 @@ def check_mesh_conversion(mesh_converter: MeshConverter): # Check prim can be properly spawned assert stage.GetPrimAtPath(prim_path).IsValid() - stage = omni.usd.get_context().get_stage() + stage = sim_utils.get_current_stage() # Check axis is z-up axis = UsdGeom.GetStageUpAxis(stage) assert axis == "Z" @@ -106,7 +103,7 @@ def check_mesh_conversion(mesh_converter: MeshConverter): pos = tuple(prim.GetAttribute("xformOp:translate").Get()) assert pos == mesh_converter.cfg.translation quat = prim.GetAttribute("xformOp:orient").Get() - quat = (quat.GetReal(), quat.GetImaginary()[0], quat.GetImaginary()[1], quat.GetImaginary()[2]) + quat = (quat.GetImaginary()[0], quat.GetImaginary()[1], quat.GetImaginary()[2], quat.GetReal()) assert quat == mesh_converter.cfg.rotation scale = tuple(prim.GetAttribute("xformOp:scale").Get()) assert scale == mesh_converter.cfg.scale diff --git a/source/isaaclab/test/sim/test_mjcf_converter.py b/source/isaaclab/test/sim/test_mjcf_converter.py index 8ce098b4a51b..2e68b66a2fc0 100644 --- a/source/isaaclab/test/sim/test_mjcf_converter.py +++ b/source/isaaclab/test/sim/test_mjcf_converter.py @@ -16,12 +16,14 @@ import pytest -from isaacsim.core.utils.extensions import enable_extension, get_extension_path_from_name +from isaacsim.core.experimental.utils.app import enable_extension, get_extension_path import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.sim.converters import MjcfConverter, MjcfConverterCfg +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture(autouse=True) def test_setup_teardown(): @@ -35,12 +37,10 @@ def test_setup_teardown(): # Setup: Create MJCF config enable_extension("isaacsim.asset.importer.mjcf") - extension_path = get_extension_path_from_name("isaacsim.asset.importer.mjcf") + extension_path = get_extension_path("isaacsim.asset.importer.mjcf") config = MjcfConverterCfg( asset_path=f"{extension_path}/data/mjcf/nv_ant.xml", - import_sites=True, - fix_base=False, - make_instanceable=True, + self_collision=False, ) # Yield the resources for the test @@ -48,12 +48,9 @@ def test_setup_teardown(): # Teardown: Cleanup simulation sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() -@pytest.mark.isaacsim_ci def test_no_change(test_setup_teardown): """Call conversion twice. This should not generate a new USD file.""" sim, mjcf_config = test_setup_teardown @@ -71,7 +68,6 @@ def test_no_change(test_setup_teardown): assert time_usd_file_created == new_time_usd_file_created -@pytest.mark.isaacsim_ci def test_config_change(test_setup_teardown): """Call conversion twice but change the config in the second call. This should generate a new USD file.""" sim, mjcf_config = test_setup_teardown @@ -81,7 +77,7 @@ def test_config_change(test_setup_teardown): # change the config new_config = mjcf_config - new_config.fix_base = not mjcf_config.fix_base + new_config.self_collision = not mjcf_config.self_collision # define the usd directory new_config.usd_dir = mjcf_converter.usd_dir # convert to usd but this time in the same directory as previous step @@ -91,14 +87,13 @@ def test_config_change(test_setup_teardown): assert time_usd_file_created != new_time_usd_file_created -@pytest.mark.isaacsim_ci def test_create_prim_from_usd(test_setup_teardown): """Call conversion and create a prim from it.""" sim, mjcf_config = test_setup_teardown - urdf_converter = MjcfConverter(mjcf_config) + mjcf_converter = MjcfConverter(mjcf_config) prim_path = "/World/Robot" - sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + sim_utils.create_prim(prim_path, usd_path=mjcf_converter.usd_path) assert sim.stage.GetPrimAtPath(prim_path).IsValid() diff --git a/source/isaaclab/test/sim/test_newton_model_utils.py b/source/isaaclab/test/sim/test_newton_model_utils.py new file mode 100644 index 000000000000..cc157074fd42 --- /dev/null +++ b/source/isaaclab/test/sim/test_newton_model_utils.py @@ -0,0 +1,516 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for :mod:`isaaclab.sim.utils.newton_model_utils` (no Kit required).""" + +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import warnings +from types import SimpleNamespace + +import numpy as np +import pytest +import torch +import warp as wp + +from pxr import Gf, Sdf, Usd, UsdGeom, UsdShade + +from isaaclab.sim.utils.newton_model_utils import ( + _OMNIPBR_DEFAULTS, + _UNBOUND_DEFAULT_FALLBACK_GRAY, + _get_omnipbr_albedo, + _resolve_shape_color, + _scatter_shape_color_rows_kernel, + replace_newton_shape_colors, +) + +_WARNING_MESSAGE = "Newton shape color replacement is enabled; this workaround will be deprecated in a future release." + + +def _replace_newton_shape_colors_wrapper(model: object, stage: Usd.Stage | None = None) -> int: + """Call :func:`replace_newton_shape_colors` with :class:`FutureWarning` suppressed in test reports.""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message=_WARNING_MESSAGE, category=FutureWarning) + return replace_newton_shape_colors(model, stage) + + +_OMNIPBR_ALBEDO_INPUT_CASES = [ + pytest.param((0.2, 0.4, 0.6), (0.5, 0.25, 2.0), id="both_authored"), + pytest.param((0.25, 0.5, 0.75), None, id="diffuse_only"), + pytest.param(None, (0.4, 0.5, 2.0), id="tint_only"), + pytest.param(None, None, id="defaults"), +] + + +def _expected_omnipbr_linear_albedo( + diffuse_color_constant: tuple[float, float, float] | None, + diffuse_tint: tuple[float, float, float] | None, +) -> tuple[float, float, float]: + cd = diffuse_color_constant or _OMNIPBR_DEFAULTS["diffuse_color_constant"] + td = diffuse_tint or _OMNIPBR_DEFAULTS["diffuse_tint"] + return (cd[0] * td[0], cd[1] * td[1], cd[2] * td[2]) + + +def _make_omnipbr_test_shader( + stage: Usd.Stage, + material_prim_path: str, +) -> UsdShade.Shader: + """Define a ``UsdShade.Material`` and minimal OmniPBR ``UsdShade.Shader`` (MDL asset only). + + Diffuse and tint inputs are unauthored until the caller sets them (MDL defaults apply in readers). + + Args: + stage: Stage to author on. + material_prim_path: Absolute prim path for ``UsdShade.Material.Define``. + + Returns: + The defined shader API. + """ + UsdShade.Material.Define(stage, material_prim_path) + shader = UsdShade.Shader.Define(stage, f"{material_prim_path}/OmniPBRShader") + assert shader.GetPrim().IsValid() + shader_prim = shader.GetPrim() + mdl_asset_attr = shader_prim.CreateAttribute("info:mdl:sourceAsset", Sdf.ValueTypeNames.Asset) + assert mdl_asset_attr.IsValid() + mdl_asset_attr.Set(Sdf.AssetPath("OmniPBR.mdl")) + return shader + + +def _define_mesh_and_bind_material(stage: Usd.Stage, mesh_path: str, material: UsdShade.Material) -> UsdGeom.Mesh: + """Define a mesh at ``mesh_path`` and bind ``material`` via :class:`UsdShade.MaterialBindingAPI`.""" + mesh = UsdGeom.Mesh.Define(stage, mesh_path) + assert mesh.GetPrim().IsValid() + mesh_prim = mesh.GetPrim() + UsdShade.MaterialBindingAPI.Apply(mesh_prim) + UsdShade.MaterialBindingAPI(mesh_prim).Bind(material) + return mesh + + +def _make_preview_surface_bound_mesh_stage() -> tuple[Usd.Stage, str]: + """In-memory stage with ``/World/Mesh`` bound to ``UsdPreviewSurface``.""" + stage = Usd.Stage.CreateInMemory() + + mat = UsdShade.Material.Define(stage, "/World/Mat") + shader = UsdShade.Shader.Define(stage, "/World/Mat/PreviewSurface") + assert mat.GetPrim().IsValid() and shader.GetPrim().IsValid() + shader.CreateIdAttr("UsdPreviewSurface") + shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(1.0, 0.0, 0.0)) + mat.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface") + _define_mesh_and_bind_material(stage, "/World/Mesh", mat) + return stage, "/World/Mesh" + + +def _make_mesh_bound_to_omnipbr_test_material( + diffuse_color_constant: tuple[float, float, float] | None, + diffuse_tint: tuple[float, float, float] | None, +) -> tuple[Usd.Stage, UsdShade.Shader, str]: + """Reuse :func:`_make_omnipbr_test_shader`, author inputs, add ``/World/Mesh`` bound to ``/World/Mat``.""" + stage = Usd.Stage.CreateInMemory() + + shader = _make_omnipbr_test_shader(stage, "/World/Mat") + if diffuse_color_constant is not None: + diffuse_inp = shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f) + diffuse_inp.Set(Gf.Vec3f(*diffuse_color_constant)) + if diffuse_tint is not None: + tint_inp = shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f) + tint_inp.Set(Gf.Vec3f(*diffuse_tint)) + + mat_prim = stage.GetPrimAtPath("/World/Mat") + assert mat_prim.IsValid() + mat = UsdShade.Material(mat_prim) + _define_mesh_and_bind_material(stage, "/World/Mesh", mat) + return stage, shader, "/World/Mesh" + + +def _reference_linear_to_srgb(rgb: tuple[float, float, float]) -> tuple[float, float, float]: + """Host sRGB OETF for linear ``rgb``. + + Args: + rgb: Linear RGB triple; channels may lie outside ``[0, 1]``. + + Returns: + Encoded RGB in ``[0, 1]`` as three floats. + """ + + def linear_to_srgb(c: float) -> float: + if c <= 0.0: + return 0.0 + if c >= 1.0: + return 1.0 + if c <= 0.0031308: + return 12.92 * c + return 1.055 * (c ** (1.0 / 2.4)) - 0.055 + + r, g, b = rgb + return (linear_to_srgb(r), linear_to_srgb(g), linear_to_srgb(b)) + + +def _run_scatter_shape_color_rows_kernel( + linear_colors: list[tuple[float, float, float]], + device: str, +) -> torch.Tensor: + """Launch :func:`_scatter_shape_color_rows_kernel` on ``device``.""" + num_colors = len(linear_colors) + row_indices = list(range(num_colors)) + + shape_colors = wp.zeros(num_colors, dtype=wp.vec3, device=device) + idx_wp = wp.array(row_indices, dtype=wp.int32, device=device) + colors_np = np.asarray(linear_colors, dtype=np.float32).reshape(num_colors, 3) + row_colors = wp.from_numpy(colors_np, dtype=wp.vec3, device=device) + wp.launch( + _scatter_shape_color_rows_kernel, + dim=num_colors, + inputs=[shape_colors, idx_wp, row_colors], + device=device, + ) + return wp.to_torch(shape_colors) + + +@pytest.mark.parametrize( + "linear_rgb", + [ + pytest.param((0.002, 0.0, 0.21404114048), id="low_linear_zero_pow"), + pytest.param((-0.25, 1.75, 0.5), id="oob_clamps_pow"), + ], +) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_scatter_shape_color_rows_kernel(device: str, linear_rgb: tuple[float, float, float]): + """Packed RGB per case; the two parametrized cases jointly cover every ``_linear_channel_to_srgb_warp`` branch.""" + after = _run_scatter_shape_color_rows_kernel([linear_rgb], device=device) + exp = _reference_linear_to_srgb(linear_rgb) + for ch in range(3): + assert after[0, ch].item() == pytest.approx(exp[ch], rel=1e-5) + + +@pytest.mark.parametrize(("diffuse_color_constant", "diffuse_tint"), _OMNIPBR_ALBEDO_INPUT_CASES) +def test_get_omnipbr_albedo( + diffuse_color_constant: tuple[float, float, float] | None, + diffuse_tint: tuple[float, float, float] | None, +): + """``_get_omnipbr_albedo`` is diffuse × tint per channel; ``None`` means that shader input is not authored. + + Unauthored inputs use ``_OMNIPBR_DEFAULTS`` in ``newton_model_utils``. + """ + stage = Usd.Stage.CreateInMemory() + + shader = _make_omnipbr_test_shader(stage, "/World/Mat") + if diffuse_color_constant is not None: + diffuse_inp = shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f) + diffuse_inp.Set(Gf.Vec3f(*diffuse_color_constant)) + if diffuse_tint is not None: + tint_inp = shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f) + tint_inp.Set(Gf.Vec3f(*diffuse_tint)) + + expected_albedo = _expected_omnipbr_linear_albedo(diffuse_color_constant, diffuse_tint) + + shader_prim = shader.GetPrim() + assert shader_prim.IsValid() + assert _get_omnipbr_albedo(shader_prim) == pytest.approx(expected_albedo, rel=1e-5) + + +def test_resolve_shape_color_invalid_prim(): + """Invalid prim path yields ``None`` (no replacement).""" + stage = Usd.Stage.CreateInMemory() + assert _resolve_shape_color(stage, "/World/Missing", {}) is None + + +def test_resolve_shape_color_guide_purpose(): + """Guide-purpose geometry is left on Newton's palette (no resolved replacement).""" + stage = Usd.Stage.CreateInMemory() + + mesh = UsdGeom.Mesh.Define(stage, "/World/GuideMesh") + assert mesh.GetPrim().IsValid() + purpose_attr = UsdGeom.Imageable(mesh).GetPurposeAttr() + assert purpose_attr.IsValid() + purpose_attr.Set(UsdGeom.Tokens.guide) + + assert _resolve_shape_color(stage, "/World/GuideMesh", {}) is None + + +def test_resolve_shape_color_no_material_binding(): + """Unbound mesh without ``displayColor``: neutral linear gray fallback.""" + stage = Usd.Stage.CreateInMemory() + + mesh = UsdGeom.Mesh.Define(stage, "/World/Mesh") + assert mesh.GetPrim().IsValid() + + # Default fallback gray should be returned when there is no material binding and no display color. + material_color_cache: dict[str, tuple[float, float, float] | None] = {} + out = _resolve_shape_color(stage, "/World/Mesh", material_color_cache) + assert out == pytest.approx(_UNBOUND_DEFAULT_FALLBACK_GRAY, rel=1e-5) + + # Add display color primvar + pv = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar( + "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.constant, 1 + ) + + # Set an arbitrary color. + display_color = (0.11, 0.55, 0.9) + pv.Set([Gf.Vec3f(*display_color)]) + + # The display color should be returned instead of the fallback gray. + out = _resolve_shape_color(stage, "/World/Mesh", material_color_cache) + assert out == pytest.approx(display_color, rel=1e-5) + + +@pytest.mark.parametrize(("diffuse_color_constant", "diffuse_tint"), _OMNIPBR_ALBEDO_INPUT_CASES) +def test_resolve_shape_color_omnipbr_binding( + diffuse_color_constant: tuple[float, float, float] | None, + diffuse_tint: tuple[float, float, float] | None, +): + """Bound OmniPBR mesh: :func:`_resolve_shape_color` matches diffuse × tint.""" + stage, _shader, mesh_path = _make_mesh_bound_to_omnipbr_test_material(diffuse_color_constant, diffuse_tint) + expected_albedo = _expected_omnipbr_linear_albedo(diffuse_color_constant, diffuse_tint) + + out = _resolve_shape_color(stage, mesh_path, {}) + assert out == pytest.approx(expected_albedo, rel=1e-5) + + +def test_resolve_shape_color_neutral_material_binding(): + """Bound ``UsdPreviewSurface`` material: not OmniPBR, so resolution is ``None`` (Newton row unchanged).""" + stage, mesh_path = _make_preview_surface_bound_mesh_stage() + assert _resolve_shape_color(stage, mesh_path, {}) is None + + +def test_replace_newton_shape_colors_warning(): + """A :exc:`FutureWarning` is expected by default.""" + model = SimpleNamespace(shape_label=None, shape_color=None) + + with pytest.warns(FutureWarning, match=_WARNING_MESSAGE): + replace_newton_shape_colors(model) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_env_var_switch(monkeypatch: pytest.MonkeyPatch, device: str): + """Setting ``ISAACLAB_REPLACE_NEWTON_SHAPE_COLORS`` to ``0`` disables the workaround.""" + monkeypatch.setenv("ISAACLAB_REPLACE_NEWTON_SHAPE_COLORS", "0") + + # Set up a stage that has a prim with display color primvar and no material binding. If the workaround was enabled, + # the prim's color in the model would be synced with the display color primvar. + stage = Usd.Stage.CreateInMemory() + mesh = UsdGeom.Mesh.Define(stage, "/World/A") + assert mesh.GetPrim().IsValid() + pv = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar( + "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.constant, 1 + ) + pv.Set([Gf.Vec3f(0.2, 0.4, 0.6)]) + + shape_color = wp.zeros(1, dtype=wp.vec3, device=device) + before = wp.to_torch(shape_color).clone() + model = SimpleNamespace(shape_label=["/World/A"], shape_color=shape_color) + + # The workaround is disabled, so the shape color is expected to be unchanged + with warnings.catch_warnings(record=True) as recorded: + warnings.simplefilter("always") + num_colors_updated = replace_newton_shape_colors(model, stage) + + assert num_colors_updated == 0 + assert torch.allclose(wp.to_torch(shape_color), before) + + # No FutureWarning is expected + assert not any(issubclass(w.category, FutureWarning) for w in recorded) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_invalid_prim(device: str): + """Invalid prim path leaves ``shape_color`` unchanged.""" + stage = Usd.Stage.CreateInMemory() + + shape_color = wp.array([(0.1, 0.2, 0.3)], dtype=wp.vec3, device=device) + before = wp.to_torch(shape_color).clone() + + model = SimpleNamespace(shape_label=["/World/Missing"], shape_color=shape_color) + + count = _replace_newton_shape_colors_wrapper(model, stage) + assert count == 0 + assert torch.allclose(wp.to_torch(shape_color), before) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_guide_purpose(device: str): + """Guide-purpose mesh leaves ``shape_color`` unchanged.""" + stage = Usd.Stage.CreateInMemory() + mesh = UsdGeom.Mesh.Define(stage, "/World/GuideMesh") + assert mesh.GetPrim().IsValid() + purpose_attr = UsdGeom.Imageable(mesh).GetPurposeAttr() + assert purpose_attr.IsValid() + purpose_attr.Set(UsdGeom.Tokens.guide) + + shape_color = wp.array([(0.1, 0.2, 0.3)], dtype=wp.vec3, device=device) + before = wp.to_torch(shape_color).clone() + model = SimpleNamespace(shape_label=["/World/GuideMesh"], shape_color=shape_color) + + count = _replace_newton_shape_colors_wrapper(model, stage) + assert count == 0 + assert torch.allclose(wp.to_torch(shape_color), before) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_no_material_binding(device: str): + """No material: ``displayColor`` or unbound gray as linear RGB, then sRGB OETF into ``shape_color``.""" + stage = Usd.Stage.CreateInMemory() + + # Mesh A has a display color primvar. + mesh_a = UsdGeom.Mesh.Define(stage, "/World/A") + pv = UsdGeom.PrimvarsAPI(mesh_a).CreatePrimvar( + "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.constant, 1 + ) + color_a = (0.1, 0.2, 0.3) + pv.Set([Gf.Vec3f(*color_a)]) + + # Mesh B has no material binding and no display color primvar. + UsdGeom.Mesh.Define(stage, "/World/B") + + shape_labels = ["/World/A", "/World/B"] + num_shapes = len(shape_labels) + shape_colors = wp.zeros(num_shapes, dtype=wp.vec3, device=device) + model = SimpleNamespace(shape_label=shape_labels, shape_color=shape_colors) + + num_colors_updated = _replace_newton_shape_colors_wrapper(model, stage) + assert num_colors_updated == 2 + + after = wp.to_torch(shape_colors) + exp_a = _reference_linear_to_srgb(color_a) + exp_b = _reference_linear_to_srgb(_UNBOUND_DEFAULT_FALLBACK_GRAY) + expected = torch.tensor([exp_a, exp_b], dtype=after.dtype, device=after.device) + torch.testing.assert_close(after, expected, rtol=1e-5, atol=1e-5) + + +@pytest.mark.parametrize(("diffuse_color_constant", "diffuse_tint"), _OMNIPBR_ALBEDO_INPUT_CASES) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_omnipbr_binding( + device: str, + diffuse_color_constant: tuple[float, float, float] | None, + diffuse_tint: tuple[float, float, float] | None, +): + """Bound OmniPBR: diffuse × tint then sRGB OETF.""" + stage, _shader, mesh_path = _make_mesh_bound_to_omnipbr_test_material(diffuse_color_constant, diffuse_tint) + + shape_color = wp.zeros(1, dtype=wp.vec3, device=device) + model = SimpleNamespace(shape_label=[mesh_path], shape_color=shape_color) + + assert _replace_newton_shape_colors_wrapper(model, stage) == 1 + + exp = _reference_linear_to_srgb(_expected_omnipbr_linear_albedo(diffuse_color_constant, diffuse_tint)) + after = wp.to_torch(shape_color)[0] + torch.testing.assert_close( + after, + torch.tensor(exp, dtype=after.dtype, device=after.device), + rtol=1e-5, + atol=1e-5, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_neutral_material(device: str): + """Bound ``UsdPreviewSurface`` material leaves ``shape_color`` unchanged.""" + stage, mesh_path = _make_preview_surface_bound_mesh_stage() + + shape_color = wp.array([(0.1, 0.2, 0.3)], dtype=wp.vec3, device=device) + before = wp.to_torch(shape_color).clone() + model = SimpleNamespace(shape_label=[mesh_path], shape_color=shape_color) + + assert _replace_newton_shape_colors_wrapper(model, stage) == 0 + assert torch.allclose(wp.to_torch(shape_color), before) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_respects_binding_strength(device: str): + """Parent stronger-than-descendants binding overrides direct child binding.""" + # Scene graph (``ComputeBoundMaterial`` on the mesh yields ParentMat / green, not ChildMat / red): + # + # /World + # +-- Parent ........................ [bind: GreenMat, strongerThanDescendants] + # | \-- Mesh .................... [bind: RedMat] + # +-- GreenMat ...................... OmniPBRShader .. diffuse (0, 1, 0) + # +-- RedMat ........................ OmniPBRShader .. diffuse (1, 0, 0) + # + stage = Usd.Stage.CreateInMemory() + + parent = UsdGeom.Xform.Define(stage, "/World/Parent") + mesh = UsdGeom.Mesh.Define(stage, "/World/Parent/Mesh") + assert parent.GetPrim().IsValid() and mesh.GetPrim().IsValid() + + # Parent material is green + green_shader = _make_omnipbr_test_shader(stage, "/World/GreenMat") + green_shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(0.0, 1.0, 0.0)) + + # Child material is red + red_shader = _make_omnipbr_test_shader(stage, "/World/RedMat") + red_shader.CreateInput("diffuse_color_constant", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(1.0, 0.0, 0.0)) + + # Bind GreenMat to Parent + parent_prim = parent.GetPrim() + parent_mat = UsdShade.Material(stage.GetPrimAtPath("/World/GreenMat")) + UsdShade.MaterialBindingAPI.Apply(parent_prim) + UsdShade.MaterialBindingAPI(parent_prim).Bind(parent_mat, bindingStrength=UsdShade.Tokens.strongerThanDescendants) + + # Bind RedMat to Mesh + mesh_prim = mesh.GetPrim() + child_mat = UsdShade.Material(stage.GetPrimAtPath("/World/RedMat")) + UsdShade.MaterialBindingAPI.Apply(mesh_prim) + UsdShade.MaterialBindingAPI(mesh_prim).Bind(child_mat) + + shape_color = wp.zeros(1, dtype=wp.vec3, device=device) + model = SimpleNamespace(shape_label=["/World/Parent/Mesh"], shape_color=shape_color) + assert _replace_newton_shape_colors_wrapper(model, stage) == 1 + + # Expected color is green which is inherited from the parent xform prim + exp = (0.0, 1.0, 0.0) + after = wp.to_torch(shape_color)[0] + torch.testing.assert_close( + after, + torch.tensor(exp, dtype=after.dtype, device=after.device), + rtol=1e-5, + atol=1e-5, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_replace_newton_shape_colors_instanced(device: str): + """Instance-proxy labels deduplicate via canonical prototype paths in the per-key cache.""" + stage = Usd.Stage.CreateInMemory() + + prototype = UsdGeom.Xform.Define(stage, "/World/Prototype") + assert prototype.GetPrim().IsValid() + mesh = UsdGeom.Mesh.Define(stage, "/World/Prototype/Mesh") + assert mesh.GetPrim().IsValid() + UsdGeom.PrimvarsAPI(mesh).CreatePrimvar( + "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.constant, 1 + ).Set([Gf.Vec3f(0.1, 0.2, 0.3)]) + + # Create an instance from the prototype + env_0 = stage.DefinePrim("/World/envs/env_0", "Xform") + env_0.GetReferences().AddInternalReference("/World/Prototype") + env_0.SetInstanceable(True) + + # Create another instance from the prototype + env_1 = stage.DefinePrim("/World/envs/env_1", "Xform") + env_1.GetReferences().AddInternalReference("/World/Prototype") + env_1.SetInstanceable(True) + + proxy_paths = [ + "/World/envs/env_0/Mesh", + "/World/envs/env_1/Mesh", + ] + for proxy_path in proxy_paths: + inst_proxy = stage.GetPrimAtPath(proxy_path) + assert inst_proxy.IsValid(), f"Proxy path {proxy_path} is not valid" + assert inst_proxy.IsInstanceProxy(), f"Proxy path {proxy_path} is not an instance proxy" + + # Create the dummy model + shape_color = wp.zeros(2, dtype=wp.vec3, device=device) + model = SimpleNamespace( + shape_label=proxy_paths, + shape_color=shape_color, + ) + assert _replace_newton_shape_colors_wrapper(model, stage) == 2 + + after = wp.to_torch(shape_color) + exp = _reference_linear_to_srgb((0.1, 0.2, 0.3)) + expected = torch.tensor([exp, exp], dtype=after.dtype, device=after.device) + torch.testing.assert_close(after, expected, rtol=1e-5, atol=1e-5) diff --git a/source/isaaclab/test/sim/test_physx_scene_data_provider_visualizer_contract.py b/source/isaaclab/test/sim/test_physx_scene_data_provider_visualizer_contract.py new file mode 100644 index 000000000000..979d66cc4a7e --- /dev/null +++ b/source/isaaclab/test/sim/test_physx_scene_data_provider_visualizer_contract.py @@ -0,0 +1,221 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for PhysxSceneDataProvider visualizer-facing contracts.""" + +from __future__ import annotations + +import sys +from types import SimpleNamespace + +import pytest +import torch +from isaaclab_physx.scene_data_providers import PhysxSceneDataProvider + +from isaaclab.cloner import ClonePlan + +PROVIDER_MOD = "isaaclab_physx.scene_data_providers.physx_scene_data_provider" + + +def _silent_stage() -> SimpleNamespace: + """Stage stub whose ``GetPrimAtPath`` returns an invalid prim — env xforms read as zero.""" + return SimpleNamespace(GetPrimAtPath=lambda path: SimpleNamespace(IsValid=lambda: False)) + + +@pytest.fixture +def stub_provider(): + """Bare :class:`PhysxSceneDataProvider` with all buffer attrs initialized to defaults. + + Tests assign ``_simulation_context`` and ``_stage`` themselves; everything else is the + pre-build state the build path expects. + """ + p = object.__new__(PhysxSceneDataProvider) + p._device = "cpu" + p._xform_views = {} + p._view_body_index_map = {} + p._view_order_tensors = {} + p._pose_buf_num_bodies = 0 + p._positions_buf = None + p._orientations_buf = None + p._covered_buf = None + p._xform_mask_buf = None + return p + + +@pytest.fixture +def newton_stub(monkeypatch): + """Stub the ``isaaclab_newton`` newton-prebuild module and the side-effect helpers. + + Returned :class:`SimpleNamespace` exposes: + + * ``calls`` — list of kwargs from each prebuild invocation, + * ``model`` / ``state_obj`` — what prebuild returns; tests can override before invoking. + """ + state = SimpleNamespace( + calls=[], + model=SimpleNamespace(body_label=[], articulation_label=[]), + state_obj=object(), + ) + + def _prebuild(**kwargs): + state.calls.append(dict(kwargs)) + return state.model, state.state_obj + + monkeypatch.setitem( + sys.modules, "isaaclab_newton.cloner.newton_replicate", SimpleNamespace(newton_visualizer_prebuild=_prebuild) + ) + monkeypatch.setattr(f"{PROVIDER_MOD}.UsdGeom.GetStageUpAxis", lambda stage: "Z") + monkeypatch.setattr(f"{PROVIDER_MOD}.replace_newton_shape_colors", lambda m, s: None) + return state + + +def test_get_newton_model_returns_model_when_sync_enabled(stub_provider): + """Callers receive the full Newton model from :meth:`get_newton_model`.""" + stub_provider._needs_newton_sync = True + stub_provider._newton_model = "full-model" + assert stub_provider.get_newton_model() == "full-model" + + +def test_build_from_clone_plans_populates_provider_state(stub_provider, newton_stub): + """Building from per-group clone plans sets model, state, and rigid-body paths. + + Asserts the provider derives its own (sources, destinations, mask) from the plans + without consulting any auxiliary spec object: representative source paths are recovered + from ``dest_template.format()``, masks are concatenated + along the prototype axis, and per-env positions are read from stage xforms. + """ + newton_stub.model = SimpleNamespace( + body_label=["/World/envs/env_0/Object/A"], + articulation_label=["/World/envs/env_0/Robot"], + ) + plans = { + "/World/envs/env_{}/Object": ClonePlan( + dest_template="/World/envs/env_{}/Object", + prototype_paths=["/World/template/Object/proto_0", "/World/template/Object/proto_1"], + # proto 0 → env 0, 2 ; proto 1 → env 1, 3 + clone_mask=torch.tensor([[True, False, True, False], [False, True, False, True]], dtype=torch.bool), + ), + "/World/envs/env_{}/Robot": ClonePlan( + dest_template="/World/envs/env_{}/Robot", + prototype_paths=["/World/template/Robot/proto_0"], + clone_mask=torch.ones((1, 4), dtype=torch.bool), + ), + } + stub_provider._simulation_context = SimpleNamespace(get_clone_plans=lambda: plans) + stub_provider._stage = _silent_stage() + + stub_provider._build_newton_model_from_clone_plans() + + assert stub_provider._newton_model is newton_stub.model + assert stub_provider._newton_state is newton_stub.state_obj + assert stub_provider._rigid_body_paths == newton_stub.model.body_label + assert stub_provider._rigid_body_view_paths == newton_stub.model.body_label + newton_stub.model.articulation_label + assert stub_provider._num_envs_at_last_newton_build == 4 + assert stub_provider._last_newton_model_build_source == "built" + + kw = newton_stub.calls[-1] + # Source recovery picks the first-env user per prototype. + assert kw["sources"] == [ + "/World/envs/env_0/Object", + "/World/envs/env_1/Object", + "/World/envs/env_0/Robot", + ] + assert kw["destinations"] == ["/World/envs/env_{}/Object", "/World/envs/env_{}/Object", "/World/envs/env_{}/Robot"] + assert kw["mapping"].shape == (3, 4) + assert kw["positions"].shape == (4, 3) + + +def test_build_from_clone_plans_missing_sets_error_state(stub_provider): + """When no clone plans are published, model/state stay unset.""" + stub_provider._simulation_context = SimpleNamespace(get_clone_plans=lambda: {}) + stub_provider._stage = object() + + stub_provider._build_newton_model_from_clone_plans() + + assert stub_provider._last_newton_model_build_source == "missing" + assert stub_provider._newton_model is None + assert stub_provider._newton_state is None + + +def test_build_from_clone_plans_skips_unused_prototype_rows(stub_provider, newton_stub): + """A prototype row with no assigned env (all-False mask row) is dropped, not raised on. + + When ``num_prototypes > num_envs`` under a sequential strategy (or any strategy that + leaves some prototypes unused), ``clone_mask[row].nonzero()[0]`` would otherwise raise + ``IndexError``. The provider must filter unused rows out of sources/destinations/mask. + """ + # 3 prototypes, 2 envs, sequential: env 0 → proto 0, env 1 → proto 1, proto 2 unused. + plans = { + "/World/envs/env_{}/Object": ClonePlan( + dest_template="/World/envs/env_{}/Object", + prototype_paths=[ + "/World/template/Object/proto_0", + "/World/template/Object/proto_1", + "/World/template/Object/proto_2", + ], + clone_mask=torch.tensor([[True, False], [False, True], [False, False]], dtype=torch.bool), + ) + } + stub_provider._simulation_context = SimpleNamespace(get_clone_plans=lambda: plans) + stub_provider._stage = _silent_stage() + + stub_provider._build_newton_model_from_clone_plans() + + assert stub_provider._last_newton_model_build_source == "built" + kw = newton_stub.calls[-1] + # Unused proto_2 row dropped; only the two assigned prototypes survive. + assert kw["sources"] == ["/World/envs/env_0/Object", "/World/envs/env_1/Object"] + assert kw["mapping"].shape == (2, 2) + + +def test_build_from_clone_plans_uses_dest_template_for_env_lookup(stub_provider, newton_stub): + """Env-origin lookup uses the per-plan ``dest_template`` prefix, not a hardcoded path. + + A scene with a non-default env path (``/Stage/scenes/env_``) should still have its + xform translates read correctly. Replaces the prior hardcoded ``/World/envs/env_``. + """ + visited: list[str] = [] + + def _get_prim(path): + visited.append(path) + return SimpleNamespace(IsValid=lambda: False) + + plans = { + "/Stage/scenes/env_{}/Object": ClonePlan( + dest_template="/Stage/scenes/env_{}/Object", + prototype_paths=["/Stage/template/Object/proto_0"], + clone_mask=torch.ones((1, 3), dtype=torch.bool), + ) + } + stub_provider._simulation_context = SimpleNamespace(get_clone_plans=lambda: plans) + stub_provider._stage = SimpleNamespace(GetPrimAtPath=_get_prim) + + stub_provider._build_newton_model_from_clone_plans() + + assert {f"/Stage/scenes/env_{i}" for i in range(3)} <= set(visited) + assert not any(p.startswith("/World/envs/") for p in visited) + + +def test_clone_plan_is_hashable_with_unhashable_fields(): + """``ClonePlan`` must hash despite carrying a tensor and a list. + + With ``field(hash=False)`` on the unhashable members, hashing operates on + ``dest_template`` only — the natural identity (it is the dict key in + :meth:`SimulationContext.get_clone_plans`). + """ + plan_a = ClonePlan( + dest_template="/World/envs/env_{}/Object", + prototype_paths=["/World/template/Object/proto_0"], + clone_mask=torch.ones((1, 4), dtype=torch.bool), + ) + plan_b = ClonePlan( + dest_template="/World/envs/env_{}/Object", + prototype_paths=["/World/template/Object/proto_99"], + clone_mask=torch.zeros((1, 4), dtype=torch.bool), + ) + assert isinstance(hash(plan_a), int) + # Equality folds in only dest_template, so two plans with the same destination compare + # equal regardless of prototype/mask differences. + assert plan_a == plan_b diff --git a/source/isaaclab/test/sim/test_schemas.py b/source/isaaclab/test/sim/test_schemas.py index 05710bd9228e..1acda3b149c6 100644 --- a/source/isaaclab/test/sim/test_schemas.py +++ b/source/isaaclab/test/sim/test_schemas.py @@ -76,8 +76,6 @@ def setup_simulation(): # Teardown sim._disable_app_control_on_stop_handle = True # prevent timeout sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -215,6 +213,64 @@ def test_defining_articulation_properties_on_prim(setup_simulation): sim.step() +@pytest.mark.isaacsim_ci +def test_multi_instance_schema_detection_on_tendon_joints(setup_simulation): + """Test that multi-instance PhysX tendon schemas are correctly detected via substring matching. + + Multi-instance schemas (e.g. PhysxTendonAxisAPI, PhysxTendonAxisRootAPI) appear in + GetAppliedSchemas() as 'SchemaName:instanceName' (e.g. 'PhysxTendonAxisAPI:inst0'). + An exact ``in list`` check fails because 'PhysxTendonAxisAPI' != 'PhysxTendonAxisAPI:inst0'. + This test ensures the substring-based detection used by modify_joint_drive_properties + and modify_fixed_tendon_properties handles multi-instance schemas correctly. + + We call the unwrapped functions directly (via ``__wrapped__``) to bypass the + ``@apply_nested`` decorator, which traverses children and does not return the + inner function's bool result. + """ + sim, _, _, _, _, joint_cfg = setup_simulation + stage = sim_utils.get_current_stage() + + # unwrap to get the raw functions that return bool + _modify_joint_drive = schemas.modify_joint_drive_properties.__wrapped__ + _modify_fixed_tendon = schemas.modify_fixed_tendon_properties.__wrapped__ + + # -- set up two body prims connected by a revolute joint + sim_utils.create_prim("/World/tendon_test", prim_type="Xform") + sim_utils.create_prim("/World/tendon_test/body0", prim_type="Cube") + sim_utils.create_prim("/World/tendon_test/body1", prim_type="Cube") + joint = UsdPhysics.RevoluteJoint.Define(stage, "/World/tendon_test/body1/joint0") + joint_prim = joint.GetPrim() + + # -- 1) Joint with only tendon child schema (no root) -> drive should be SKIPPED + joint_prim.AddAppliedSchema("PhysxTendonAxisAPI:inst0") + applied = joint_prim.GetAppliedSchemas() + assert any("PhysxTendonAxisAPI" in s for s in applied), "Multi-instance schema not found via substring" + assert "PhysxTendonAxisAPI" not in applied, "Exact match should NOT find multi-instance schema" + + result = _modify_joint_drive(joint_prim.GetPrimPath().pathString, joint_cfg) + assert result is False, "Tendon child joint should be skipped (return False)" + + # -- 2) Joint with both child AND root tendon schema -> drive should NOT be skipped + joint_prim.AddAppliedSchema("PhysxTendonAxisRootAPI:inst0") + applied = joint_prim.GetAppliedSchemas() + assert any("PhysxTendonAxisRootAPI" in s for s in applied) + assert "PhysxTendonAxisRootAPI" not in applied, "Exact match should NOT find multi-instance schema" + + result = _modify_joint_drive(joint_prim.GetPrimPath().pathString, joint_cfg) + assert result is True, "Tendon root joint should NOT be skipped" + + # -- 3) modify_fixed_tendon_properties should detect multi-instance root schema + tendon_cfg = schemas.FixedTendonPropertiesCfg(stiffness=10.0, damping=0.1) + result = _modify_fixed_tendon(joint_prim.GetPrimPath().pathString, tendon_cfg) + assert result is True, "Prim with PhysxTendonAxisRootAPI:inst0 should be detected" + + # -- 4) Prim WITHOUT any tendon root schema -> modify_fixed_tendon should return False + sim_utils.create_prim("/World/tendon_test/body2", prim_type="Cube") + no_tendon_joint = UsdPhysics.RevoluteJoint.Define(stage, "/World/tendon_test/body2/joint1") + result = _modify_fixed_tendon(no_tendon_joint.GetPrim().GetPrimPath().pathString, tendon_cfg) + assert result is False, "Prim without tendon root schema should return False" + + """ Helper functions. """ @@ -368,7 +424,7 @@ def _validate_joint_drive_properties_on_prim(prim_path: str, joint_cfg, verbose: # iterate over the joint properties for attr_name, attr_value in joint_cfg.__dict__.items(): # skip names we know are not present - if attr_name == "func": + if attr_name in ["func", "ensure_drives_exist"]: continue # resolve the drive (linear or angular) drive_model = "linear" if joint_prim.IsA(UsdPhysics.PrismaticJoint) else "angular" diff --git a/source/isaaclab/test/sim/test_simulation_context.py b/source/isaaclab/test/sim/test_simulation_context.py index 4244b36ff8e1..6ea578a85e30 100644 --- a/source/isaaclab/test/sim/test_simulation_context.py +++ b/source/isaaclab/test/sim/test_simulation_context.py @@ -12,12 +12,13 @@ """Rest everything follows.""" -from collections.abc import Generator +import weakref import numpy as np import pytest +from isaaclab_physx.physics import IsaacEvents, PhysxCfg, PhysxManager -import omni.physx +import omni.timeline import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext @@ -26,80 +27,99 @@ @pytest.fixture(autouse=True) def test_setup_teardown(): """Setup and teardown for each test.""" - # Setup: Clear any existing simulation context + # Setup: Clear any existing simulation context and create a fresh stage SimulationContext.clear_instance() + sim_utils.create_new_stage() # Yield for the test yield - # Teardown: Clear the simulation context after each test SimulationContext.clear_instance() -@pytest.fixture -def sim_with_stage_in_memory() -> Generator[SimulationContext, None, None]: - """Create a simulation context with stage in memory.""" - # create stage in memory - cfg = SimulationCfg(create_stage_in_memory=True) - sim = SimulationContext(cfg=cfg) - # update stage - sim_utils.update_stage() - # yield simulation context - yield sim - # stop simulation - omni.physx.get_physx_simulation_interface().detach_stage() - sim.stop() - # clear simulation context - sim.clear() - sim.clear_all_callbacks() - sim.clear_instance() +""" +Basic Configuration Tests +""" @pytest.mark.isaacsim_ci -def test_singleton(): - """Tests that the singleton is working.""" - sim1 = SimulationContext() - sim2 = SimulationContext() - assert sim1 is sim2 - - # try to delete the singleton - sim2.clear_instance() - assert sim1.instance() is None - # create new instance - sim4 = SimulationContext() - assert sim1 is not sim4 - assert sim1.instance() is sim4.instance() - +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_init(device): + """Test the simulation context initialization.""" + from isaaclab.sim.spawners.materials import RigidBodyMaterialCfg -@pytest.mark.isaacsim_ci -def test_initialization(): - """Test the simulation config.""" - cfg = SimulationCfg(physics_prim_path="/Physics/PhysX", render_interval=5, gravity=(0.0, -0.5, -0.5)) - sim = SimulationContext(cfg) + cfg = SimulationCfg( + device=device, + physics_prim_path="/Physics/PhysX", + gravity=(0.0, -0.5, -0.5), + physics_material=RigidBodyMaterialCfg(), + render_interval=5, + ) + # sim = SimulationContext(cfg) # TODO: Figure out why keyword argument doesn't work. # note: added a fix in Isaac Sim 2023.1 for this. - # sim = SimulationContext(cfg=cfg) + sim = SimulationContext(cfg=cfg) + + # verify stage is valid + assert sim.stage is not None + # verify device property + assert sim.device == device + # verify no RTX sensors are available + assert not sim.get_setting("/isaaclab/render/rtx_sensors") + + # obtain physics scene from USD (string-based schema: physxScene:*) + from pxr import UsdPhysics + + physics_scene_prim = sim.stage.GetPrimAtPath("/Physics/PhysX") + assert physics_scene_prim.IsValid() + physics_scene = UsdPhysics.Scene(physics_scene_prim) + physics_hz = physics_scene_prim.GetAttribute("physxScene:timeStepsPerSecond").Get() + physics_dt = 1.0 / physics_hz + assert physics_dt == cfg.dt - # check valid settings - assert sim.get_physics_dt() == cfg.dt - assert sim.get_rendering_dt() == cfg.dt * cfg.render_interval - assert not sim.has_rtx_sensors() # check valid paths assert sim.stage.GetPrimAtPath("/Physics/PhysX").IsValid() assert sim.stage.GetPrimAtPath("/Physics/PhysX/defaultMaterial").IsValid() # check valid gravity - gravity_dir, gravity_mag = sim.get_physics_context().get_gravity() + gravity_dir, gravity_mag = ( + physics_scene.GetGravityDirectionAttr().Get(), + physics_scene.GetGravityMagnitudeAttr().Get(), + ) gravity = np.array(gravity_dir) * gravity_mag np.testing.assert_almost_equal(gravity, cfg.gravity) @pytest.mark.isaacsim_ci -def test_sim_version(): - """Test obtaining the version.""" - sim = SimulationContext() - version = sim.get_version() - assert len(version) > 0 - assert version[0] >= 4 +def test_instance_before_creation(): + """Test accessing instance before creating returns None.""" + # clear any existing instance + SimulationContext.clear_instance() + + # accessing instance before creation should return None + assert SimulationContext.instance() is None + + +@pytest.mark.isaacsim_ci +def test_singleton(): + """Tests that the singleton is working.""" + sim1 = SimulationContext() + sim2 = SimulationContext() + assert sim1 is sim2 + + # try to delete the singleton + sim2.clear_instance() + assert sim1.instance() is None + # create new instance + sim3 = SimulationContext() + assert sim1 is not sim3 + assert sim1.instance() is sim3.instance() + # clear instance + sim3.clear_instance() + + +""" +Property Tests. +""" @pytest.mark.isaacsim_ci @@ -110,54 +130,992 @@ def test_carb_setting(): sim.set_setting("/physics/physxDispatcher", False) assert sim.get_setting("/physics/physxDispatcher") is False # unknown carb setting - sim.set_setting("/myExt/using_omniverse_version", sim.get_version()) - assert tuple(sim.get_setting("/myExt/using_omniverse_version")) == tuple(sim.get_version()) + sim.set_setting("/myExt/test_value", 42) + assert sim.get_setting("/myExt/test_value") == 42 @pytest.mark.isaacsim_ci def test_headless_mode(): """Test that render mode is headless since we are running in headless mode.""" sim = SimulationContext() - # check default render mode - assert sim.render_mode == sim.RenderMode.NO_GUI_OR_RENDERING + # check default render mode (no GUI and no offscreen rendering) + assert not sim.has_gui and not sim.has_offscreen_render -# def test_boundedness(): -# """Test that the boundedness of the simulation context remains constant. -# -# Note: This test fails right now because Isaac Sim does not handle boundedness correctly. On creation, -# it is registering itself to various callbacks and hence the boundedness is more than 1. This may not be -# critical for the simulation context since we usually call various clear functions before deleting the -# simulation context. -# """ -# sim = SimulationContext() -# # manually set the boundedness to 1? -- this is not possible because of Isaac Sim. -# sim.clear_all_callbacks() -# sim._stage_open_callback = None -# sim._physics_timer_callback = None -# sim._event_timer_callback = None -# -# # check that boundedness of simulation context is correct -# sim_ref_count = ctypes.c_long.from_address(id(sim)).value -# # reset the simulation -# sim.reset() -# assert ctypes.c_long.from_address(id(sim)).value == sim_ref_count -# # step the simulation -# for _ in range(10): -# sim.step() -# assert ctypes.c_long.from_address(id(sim)).value == sim_ref_count -# # clear the simulation -# sim.clear_instance() -# assert ctypes.c_long.from_address(id(sim)).value == sim_ref_count - 1 +""" +Timeline Operations Tests. +""" @pytest.mark.isaacsim_ci -def test_zero_gravity(): - """Test that gravity can be properly disabled.""" - cfg = SimulationCfg(gravity=(0.0, 0.0, 0.0)) +def test_timeline_play_stop(): + """Test timeline play and stop operations.""" + sim = SimulationContext() + + # initially simulation should be stopped + assert sim.is_stopped() + assert not sim.is_playing() + # start the simulation + sim.play() + assert sim.is_playing() + assert not sim.is_stopped() + + # disable callback to prevent app from continuing + sim._disable_app_control_on_stop_handle = True # type: ignore + # stop the simulation + sim.stop() + assert sim.is_stopped() + assert not sim.is_playing() + + +@pytest.mark.isaacsim_ci +def test_timeline_pause(): + """Test timeline pause operation.""" + sim = SimulationContext() + + # start the simulation + sim.play() + assert sim.is_playing() + + # pause the simulation + sim.pause() + assert not sim.is_playing() + assert not sim.is_stopped() # paused is different from stopped + + +""" +Reset and Step Tests +""" + + +@pytest.mark.isaacsim_ci +def test_reset(): + """Test simulation reset.""" + cfg = SimulationCfg(dt=0.01) sim = SimulationContext(cfg) - gravity_dir, gravity_mag = sim.get_physics_context().get_gravity() - gravity = np.array(gravity_dir) * gravity_mag - np.testing.assert_almost_equal(gravity, cfg.gravity) + # create a simple cube to test with + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # reset the simulation + sim.reset() + + # check that simulation is playing after reset + assert sim.is_playing() + + # check that physics sim view is created + assert sim.physics_sim_view is not None + + +@pytest.mark.isaacsim_ci +def test_reset_soft(): + """Test soft reset (without stopping simulation).""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create a simple cube + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # perform initial reset + sim.reset() + assert sim.is_playing() + + # perform soft reset + sim.reset(soft=True) + + # simulation should still be playing + assert sim.is_playing() + + +@pytest.mark.isaacsim_ci +def test_forward(): + """Test forward propagation for fabric updates.""" + cfg = SimulationCfg(dt=0.01, use_fabric=True) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + sim.reset() + + # call forward + sim.forward() + + # should not raise any errors + assert sim.is_playing() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("render", [True, False]) +def test_step(render): + """Test stepping simulation with and without rendering.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + sim.reset() + + # step with rendering + for _ in range(10): + sim.step(render=render) + + # simulation should still be playing + assert sim.is_playing() + + +@pytest.mark.isaacsim_ci +def test_render(): + """Test rendering simulation.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + sim.reset() + + # render + for _ in range(10): + sim.render() + + # simulation should still be playing + assert sim.is_playing() + + +@pytest.mark.isaacsim_ci +def test_render_pumps_app_update_without_visualizer(): + """Regression test for issue #5052: headless video must pump Kit when no visualizer does. + + Originally ``SimulationContext.render()`` called ``omni.kit.app.get_app().update()`` when + no visualizer had ``pumps_app_update()`` (see PR #5056). The same contract is now implemented + from :func:`~isaaclab.envs.utils.recording_hooks.run_recording_hooks_after_visualizers`, which calls + :func:`~isaaclab_physx.renderers.isaac_rtx_renderer_utils.pump_kit_app_for_headless_video_render_if_needed` + when ``/isaaclab/video/enabled`` is set (as with ``--video``), which in turn calls + ``ensure_isaac_rtx_render_update()`` (guarded by ``is_rendering`` and the no-pumping-visualizer check). + + Without this path, replicator render products used for ``rgb_array`` / RecordVideo stay stale (black frames). + """ + from unittest.mock import MagicMock, patch + + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + sim.reset() + + sim.set_setting("/isaaclab/video/enabled", True) + sim.set_setting("/isaaclab/render/rtx_sensors", True) + + mock_app = MagicMock() + mock_app.is_running.return_value = True + + with ( + patch("isaaclab.utils.version.has_kit", return_value=True), + patch( + "isaaclab_physx.renderers.isaac_rtx_renderer_utils._get_stage_streaming_busy", + return_value=False, + ), + patch("omni.kit.app.get_app", return_value=mock_app), + ): + sim.render() + + mock_app.update.assert_called_once() + + +@pytest.mark.isaacsim_ci +def test_render_skips_app_update_when_visualizer_pumps_it(): + """Regression test: do not pump Kit in the headless-video path when a visualizer already does. + + A visualizer with ``pumps_app_update() == True`` (e.g. KitVisualizer) calls ``app.update()`` in + its own ``step()``. The recording-hook pump must then skip + ``ensure_isaac_rtx_render_update`` so we do not double-pump the Kit loop. + """ + from unittest.mock import MagicMock, patch + + from isaaclab.visualizers.base_visualizer import BaseVisualizer + + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + sim.reset() + + sim.set_setting("/isaaclab/video/enabled", True) + sim.set_setting("/isaaclab/render/rtx_sensors", True) + + mock_viz = MagicMock(spec=BaseVisualizer) + mock_viz.pumps_app_update.return_value = True + mock_viz.is_closed = False + mock_viz.is_running.return_value = True + mock_viz.is_rendering_paused.return_value = False + mock_viz.is_training_paused.return_value = False + mock_viz.get_rendering_dt.return_value = None + sim._visualizers = [mock_viz] + + mock_app = MagicMock() + mock_app.is_running.return_value = True + + with ( + patch("isaaclab.utils.version.has_kit", return_value=True), + patch("omni.kit.app.get_app", return_value=mock_app), + ): + sim.render() + + mock_app.update.assert_not_called() + + sim._visualizers = [] + + +""" +Stage Operations Tests +""" + + +@pytest.mark.isaacsim_ci +def test_get_initial_stage(): + """Test getting the initial stage.""" + sim = SimulationContext() + + # get initial stage + stage = sim.stage + + # verify stage is valid + assert stage is not None + assert stage == sim.stage + + +@pytest.mark.isaacsim_ci +def test_clear_stage(): + """Test clearing the stage.""" + sim = SimulationContext() + + # create some objects + cube_cfg1 = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg1.func("/World/Cube1", cube_cfg1) + cube_cfg2 = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg2.func("/World/Cube2", cube_cfg2) + + # verify objects exist + assert sim.stage.GetPrimAtPath("/World/Cube1").IsValid() + assert sim.stage.GetPrimAtPath("/World/Cube2").IsValid() + + # clear the stage + sim.clear_stage() + + # verify objects are removed but World and Physics remain + assert not sim.stage.GetPrimAtPath("/World/Cube1").IsValid() + assert not sim.stage.GetPrimAtPath("/World/Cube2").IsValid() + assert sim.stage.GetPrimAtPath("/World").IsValid() + assert sim.stage.GetPrimAtPath(sim.cfg.physics_prim_path).IsValid() # type: ignore[union-attr] + + +""" +Physics Configuration Tests +""" + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("solver_type", [0, 1]) # 0=PGS, 1=TGS +def test_solver_type(solver_type): + """Test different solver types.""" + cfg = SimulationCfg(physics=PhysxCfg(solver_type=solver_type)) + sim = SimulationContext(cfg) + + # obtain physics scene from USD (string-based: physxScene:solverType) + physics_scene_prim = sim.stage.GetPrimAtPath(cfg.physics_prim_path) + solver_type_str = "PGS" if solver_type == 0 else "TGS" + assert physics_scene_prim.GetAttribute("physxScene:solverType").Get() == solver_type_str + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("use_fabric", [True, False]) +def test_fabric_setting(use_fabric): + """Test that fabric setting is properly set.""" + cfg = SimulationCfg(use_fabric=use_fabric) + sim = SimulationContext(cfg) + + # check fabric is enabled via physics setting + assert sim.get_setting("/isaaclab/fabric_enabled") == use_fabric + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("dt", [0.01, 0.02, 0.005]) +def test_physics_dt(dt): + """Test that physics time step is properly configured.""" + cfg = SimulationCfg(dt=dt) + sim = SimulationContext(cfg) + + # obtain physics scene from USD (string-based: physxScene:timeStepsPerSecond) + physics_scene_prim = sim.stage.GetPrimAtPath(cfg.physics_prim_path) + physics_hz = physics_scene_prim.GetAttribute("physxScene:timeStepsPerSecond").Get() + physics_dt = 1.0 / physics_hz + assert abs(physics_dt - dt) < 1e-6 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("gravity", [(0.0, 0.0, 0.0), (0.0, 0.0, -9.81), (0.5, 0.5, 0.5)]) +def test_custom_gravity(gravity): + """Test that gravity can be properly set.""" + from pxr import UsdPhysics + + cfg = SimulationCfg(gravity=gravity) + sim = SimulationContext(cfg) + + # obtain physics scene from USD + physics_scene_prim = sim.stage.GetPrimAtPath(cfg.physics_prim_path) + physics_scene = UsdPhysics.Scene(physics_scene_prim) + + gravity_dir, gravity_mag = ( + physics_scene.GetGravityDirectionAttr().Get(), + physics_scene.GetGravityMagnitudeAttr().Get(), + ) + actual_gravity = np.array(gravity_dir) * gravity_mag + np.testing.assert_almost_equal(actual_gravity, cfg.gravity, decimal=6) + + +""" +Callback Tests. +""" + + +@pytest.mark.isaacsim_ci +def test_timeline_callbacks_on_play(): + """Test that timeline callbacks are triggered on play event.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create a simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # create a flag to track callback execution + callback_state = {"play_called": False, "stop_called": False} + + # define callback functions + def on_play_callback(event): + callback_state["play_called"] = True + + def on_stop_callback(event): + callback_state["stop_called"] = True + + # register callbacks + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + play_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), + lambda event: on_play_callback(event), + order=20, + ) + stop_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.STOP), + lambda event: on_stop_callback(event), + order=20, + ) + + try: + # ensure callbacks haven't been called yet + assert not callback_state["play_called"] + assert not callback_state["stop_called"] + + # play the simulation - this should trigger play callback + sim.play() + assert callback_state["play_called"] + assert not callback_state["stop_called"] + + # reset flags + callback_state["play_called"] = False + + # disable app control to prevent hanging + sim._disable_app_control_on_stop_handle = True # type: ignore + + # stop the simulation - this should trigger stop callback + sim.stop() + assert callback_state["stop_called"] + + finally: + # cleanup callbacks + if play_handle is not None: + play_handle.unsubscribe() + if stop_handle is not None: + stop_handle.unsubscribe() + + +@pytest.mark.isaacsim_ci +def test_timeline_callbacks_with_weakref(): + """Test that timeline callbacks work correctly with weak references (similar to asset_base.py).""" + + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create a simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # create a test object that will be weakly referenced + class CallbackTracker: + def __init__(self): + self.play_count = 0 + self.stop_count = 0 + + def on_play(self, event): + self.play_count += 1 + + def on_stop(self, event): + self.stop_count += 1 + + # create an instance of the callback tracker + tracker = CallbackTracker() + + # define safe callback wrapper (similar to asset_base.py pattern) + def safe_callback(callback_name, event, obj_ref): + """Safely invoke a callback on a weakly-referenced object.""" + try: + obj = obj_ref() # Dereference the weakref + if obj is not None: + getattr(obj, callback_name)(event) + except ReferenceError: + # Object has been deleted; ignore + pass + + # register callbacks with weakref + obj_ref = weakref.ref(tracker) + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + + play_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), + lambda event, obj_ref=obj_ref: safe_callback("on_play", event, obj_ref), + order=20, + ) + stop_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.STOP), + lambda event, obj_ref=obj_ref: safe_callback("on_stop", event, obj_ref), + order=20, + ) + + try: + # verify callbacks haven't been called + assert tracker.play_count == 0 + assert tracker.stop_count == 0 + + # trigger play event + sim.play() + assert tracker.play_count == 1 + assert tracker.stop_count == 0 + + # disable app control to prevent hanging + sim._disable_app_control_on_stop_handle = True # type: ignore + + # trigger stop event + sim.stop() + assert tracker.play_count == 1 + assert tracker.stop_count == 1 + + # delete the tracker object + del tracker + + # trigger events again - callbacks should handle the deleted object gracefully + sim.play() + # disable app control again + sim._disable_app_control_on_stop_handle = True # type: ignore + sim.stop() + # should not raise any errors + + finally: + # cleanup callbacks + if play_handle is not None: + play_handle.unsubscribe() + if stop_handle is not None: + stop_handle.unsubscribe() + + +@pytest.mark.isaacsim_ci +def test_multiple_callbacks_on_same_event(): + """Test that multiple callbacks can be registered for the same event.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create tracking for multiple callbacks + callback_counts = {"callback1": 0, "callback2": 0, "callback3": 0} + + def callback1(event): + callback_counts["callback1"] += 1 + + def callback2(event): + callback_counts["callback2"] += 1 + + def callback3(event): + callback_counts["callback3"] += 1 + + # register multiple callbacks for play event + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + handle1 = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback1(event), order=20 + ) + handle2 = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback2(event), order=21 + ) + handle3 = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback3(event), order=22 + ) + + try: + # verify none have been called + assert all(count == 0 for count in callback_counts.values()) + + # trigger play event + sim.play() + + # all callbacks should have been called + assert callback_counts["callback1"] == 1 + assert callback_counts["callback2"] == 1 + assert callback_counts["callback3"] == 1 + + finally: + # cleanup all callbacks + if handle1 is not None: + handle1.unsubscribe() + if handle2 is not None: + handle2.unsubscribe() + if handle3 is not None: + handle3.unsubscribe() + + +@pytest.mark.isaacsim_ci +def test_callback_execution_order(): + """Test that callbacks are executed in the correct order based on priority.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # track execution order + execution_order = [] + + def callback_low_priority(event): + execution_order.append("low") + + def callback_medium_priority(event): + execution_order.append("medium") + + def callback_high_priority(event): + execution_order.append("high") + + # register callbacks with different priorities (lower order = higher priority) + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + handle_high = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback_high_priority(event), order=5 + ) + handle_medium = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback_medium_priority(event), order=10 + ) + handle_low = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: callback_low_priority(event), order=15 + ) + + try: + # trigger play event + sim.play() + + # verify callbacks were executed in correct order + assert len(execution_order) == 3 + assert execution_order[0] == "high" + assert execution_order[1] == "medium" + assert execution_order[2] == "low" + + finally: + # cleanup callbacks + if handle_high is not None: + handle_high.unsubscribe() + if handle_medium is not None: + handle_medium.unsubscribe() + if handle_low is not None: + handle_low.unsubscribe() + + +@pytest.mark.isaacsim_ci +def test_callback_unsubscribe(): + """Test that unsubscribing callbacks works correctly.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create callback counter + callback_count = {"count": 0} + + def on_play_callback(event): + callback_count["count"] += 1 + + # register callback + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + play_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), lambda event: on_play_callback(event), order=20 + ) + + try: + # trigger play event + sim.play() + assert callback_count["count"] == 1 + + # stop simulation + sim._disable_app_control_on_stop_handle = True # type: ignore + sim.stop() + + # unsubscribe the callback + play_handle.unsubscribe() + play_handle = None + + # trigger play event again + sim.play() + + # callback should not have been called again (still 1) + assert callback_count["count"] == 1 + + finally: + # cleanup if needed + if play_handle is not None: + play_handle.unsubscribe() + + +@pytest.mark.isaacsim_ci +def test_pause_event_callback(): + """Test that pause event callbacks are triggered correctly.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create callback tracker + callback_state = {"pause_called": False} + + def on_pause_callback(event): + callback_state["pause_called"] = True + + # register pause callback + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + pause_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PAUSE), lambda event: on_pause_callback(event), order=20 + ) + + try: + # play the simulation first + sim.play() + assert not callback_state["pause_called"] + + # pause the simulation + sim.pause() + + # callback should have been triggered + assert callback_state["pause_called"] + + finally: + # cleanup + if pause_handle is not None: + pause_handle.unsubscribe() + + +""" +Isaac Events Callback Tests. +""" + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize( + "event_type", + [IsaacEvents.PHYSICS_WARMUP, IsaacEvents.SIMULATION_VIEW_CREATED, IsaacEvents.PHYSICS_READY], +) +def test_isaac_event_triggered_on_reset(event_type): + """Test that Isaac events are triggered during reset.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # create callback tracker + callback_state = {"called": False} + + def on_event(event): + callback_state["called"] = True + + # register callback for the event + callback_id = PhysxManager.register_callback(lambda event: on_event(event), event=event_type) + + try: + # verify callback hasn't been called yet + assert not callback_state["called"] + + # reset the simulation - should trigger the event + sim.reset() + + # verify callback was triggered + assert callback_state["called"] + + finally: + # cleanup callback + if callback_id is not None: + PhysxManager.deregister_callback(callback_id) + + +@pytest.mark.isaacsim_ci +def test_isaac_event_prim_deletion(): + """Test that PRIM_DELETION Isaac event is triggered when a prim is deleted.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + sim.reset() + + # create callback tracker + callback_state = {"prim_deleted": False, "deleted_path": None} + + def on_prim_deletion(event): + callback_state["prim_deleted"] = True + # event payload should contain the deleted prim path + if hasattr(event, "payload") and event.payload: + callback_state["deleted_path"] = event.payload.get("prim_path") + + # register callback for PRIM_DELETION event + callback_id = PhysxManager.register_callback(lambda event: on_prim_deletion(event), event=IsaacEvents.PRIM_DELETION) + + try: + # verify callback hasn't been called yet + assert not callback_state["prim_deleted"] + + # delete the cube prim + sim_utils.delete_prim("/World/Cube") + + # trigger the event by dispatching it manually (since deletion might be handled differently) + PhysxManager._message_bus.dispatch_event(IsaacEvents.PRIM_DELETION.value, payload={"prim_path": "/World/Cube"}) # type: ignore + + # verify callback was triggered + assert callback_state["prim_deleted"] + + finally: + # cleanup callback + if callback_id is not None: + PhysxManager.deregister_callback(callback_id) + + +@pytest.mark.isaacsim_ci +def test_isaac_event_timeline_stop(): + """Test that TIMELINE_STOP Isaac event can be registered and triggered.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create callback tracker + callback_state = {"timeline_stop_called": False} + + def on_timeline_stop(event): + callback_state["timeline_stop_called"] = True + + # register callback for TIMELINE_STOP event + callback_id = PhysxManager.register_callback(lambda event: on_timeline_stop(event), event=IsaacEvents.TIMELINE_STOP) + + try: + # verify callback hasn't been called yet + assert not callback_state["timeline_stop_called"] + + # play and stop the simulation + sim.play() + + # disable app control to prevent hanging + sim._disable_app_control_on_stop_handle = True # type: ignore + + # stop the simulation + sim.stop() + + # dispatch the event manually + PhysxManager._message_bus.dispatch_event(IsaacEvents.TIMELINE_STOP.value, payload={}) # type: ignore + + # verify callback was triggered + assert callback_state["timeline_stop_called"] + + finally: + # cleanup callback + if callback_id is not None: + PhysxManager.deregister_callback(callback_id) + + +@pytest.mark.isaacsim_ci +def test_isaac_event_callbacks_with_weakref(): + """Test Isaac event callbacks with weak references (similar to asset_base.py pattern).""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # create a test object that will be weakly referenced + class PhysicsTracker: + def __init__(self): + self.warmup_count = 0 + self.ready_count = 0 + + def on_warmup(self, event): + self.warmup_count += 1 + + def on_ready(self, event): + self.ready_count += 1 + + tracker = PhysicsTracker() + + # define safe callback wrapper (same pattern as asset_base.py) + def safe_callback(callback_name, event, obj_ref): + """Safely invoke a callback on a weakly-referenced object.""" + try: + obj = obj_ref() + if obj is not None: + getattr(obj, callback_name)(event) + except ReferenceError: + # Object has been deleted; ignore + pass + + # register callbacks with weakref + obj_ref = weakref.ref(tracker) + + warmup_id = PhysxManager.register_callback( + lambda event, obj_ref=obj_ref: safe_callback("on_warmup", event, obj_ref), + event=IsaacEvents.PHYSICS_WARMUP, + ) + ready_id = PhysxManager.register_callback( + lambda event, obj_ref=obj_ref: safe_callback("on_ready", event, obj_ref), event=IsaacEvents.PHYSICS_READY + ) + + try: + # verify callbacks haven't been called + assert tracker.warmup_count == 0 + assert tracker.ready_count == 0 + + # reset simulation - triggers WARMUP and READY events + sim.reset() + + # verify callbacks were triggered (may be called multiple times during warmup sequence) + assert tracker.warmup_count >= 1 + assert tracker.ready_count >= 1 + + # delete the tracker object + del tracker + + # reset again - callbacks should handle the deleted object gracefully + sim.reset(soft=True) + + # should not raise any errors even though tracker is deleted + + finally: + # cleanup callbacks + if warmup_id is not None: + PhysxManager.deregister_callback(warmup_id) + if ready_id is not None: + PhysxManager.deregister_callback(ready_id) + + +@pytest.mark.isaacsim_ci +def test_multiple_isaac_event_callbacks(): + """Test that multiple callbacks can be registered for the same Isaac event.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # create tracking for multiple callbacks + callback_counts = {"callback1": 0, "callback2": 0, "callback3": 0} + + def callback1(event): + callback_counts["callback1"] += 1 + + def callback2(event): + callback_counts["callback2"] += 1 + + def callback3(event): + callback_counts["callback3"] += 1 + + # register multiple callbacks for PHYSICS_READY event + id1 = PhysxManager.register_callback(lambda event: callback1(event), event=IsaacEvents.PHYSICS_READY) + id2 = PhysxManager.register_callback(lambda event: callback2(event), event=IsaacEvents.PHYSICS_READY) + id3 = PhysxManager.register_callback(lambda event: callback3(event), event=IsaacEvents.PHYSICS_READY) + + try: + # verify none have been called + assert all(count == 0 for count in callback_counts.values()) + + # reset simulation - triggers PHYSICS_READY event + sim.reset() + + # all callbacks should have been called (may be called multiple times during warmup sequence) + assert callback_counts["callback1"] >= 1 + assert callback_counts["callback2"] >= 1 + assert callback_counts["callback3"] >= 1 + + finally: + # cleanup all callbacks + if id1 is not None: + PhysxManager.deregister_callback(id1) + if id2 is not None: + PhysxManager.deregister_callback(id2) + if id3 is not None: + PhysxManager.deregister_callback(id3) + + +""" +Exception Handling in Callbacks Tests. +""" + + +@pytest.mark.isaacsim_ci +def test_exception_in_callback_on_reset(): + """Test that exceptions stored during reset are raised.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + test_error_message = "Test exception on reset" + + def failing_callback(event): + PhysxManager.store_callback_exception(RuntimeError(test_error_message)) + + handle = PhysxManager.register_callback(failing_callback, event=IsaacEvents.PHYSICS_READY) + + try: + with pytest.raises(RuntimeError, match=test_error_message): + sim.reset() + finally: + if handle is not None: + PhysxManager.deregister_callback(handle) + SimulationContext.clear_instance() + + +@pytest.mark.isaacsim_ci +def test_exception_in_callback_on_step(): + """Test that exceptions stored during step are raised.""" + cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(cfg) + + # create simple scene + cube_cfg = sim_utils.CuboidCfg(size=(0.1, 0.1, 0.1)) + cube_cfg.func("/World/Cube", cube_cfg) + + # reset first to initialize + sim.reset() + + test_error_message = "Test exception on step" + + def failing_callback(event): + PhysxManager.store_callback_exception(RuntimeError(test_error_message)) + + handle = PhysxManager.register_callback(failing_callback, event=IsaacEvents.POST_PHYSICS_STEP) + + try: + with pytest.raises(RuntimeError, match=test_error_message): + sim.step() + finally: + if handle is not None: + PhysxManager.deregister_callback(handle) + SimulationContext.clear_instance() diff --git a/source/isaaclab/test/sim/test_simulation_context_visualizers.py b/source/isaaclab/test/sim/test_simulation_context_visualizers.py new file mode 100644 index 000000000000..3b7ca93dcfa3 --- /dev/null +++ b/source/isaaclab/test/sim/test_simulation_context_visualizers.py @@ -0,0 +1,952 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for SimulationContext visualizer orchestration.""" + +from __future__ import annotations + +import sys +from typing import Any, cast + +import isaaclab_visualizers.kit.kit_visualizer as kit_visualizer +import isaaclab_visualizers.newton.newton_visualization_markers as newton_markers +import isaaclab_visualizers.rerun.rerun_visualizer as rerun_visualizer +import isaaclab_visualizers.viser.viser_visualizer as viser_visualizer +import numpy as np +import pytest +import torch +from isaaclab_visualizers.kit.kit_visualizer_cfg import KitVisualizerCfg +from isaaclab_visualizers.rerun.rerun_visualizer_cfg import RerunVisualizerCfg +from isaaclab_visualizers.viser.viser_visualizer_cfg import ViserVisualizerCfg + +from isaaclab.markers.vis_marker_registry import VisMarkerRegistry +from isaaclab.sim.simulation_context import SimulationContext + + +class _FakePhysicsManager: + def __init__(self): + self.forward_calls = 0 + + def forward(self): + self.forward_calls += 1 + + +class _FakeProvider: + def __init__(self): + self.update_calls = [] + + def update(self): + self.update_calls.append(True) + + +class _FakeVisualizer: + """Minimal visualizer for orchestration tests.""" + + def __init__( + self, + *, + env_ids=None, + running=True, + closed=False, + rendering_paused=False, + training_paused_steps=0, + raises_on_step=False, + requires_forward=False, + pumps_app_update=False, + ): + self._env_ids = env_ids + self._running = running + self._closed = closed + self._rendering_paused = rendering_paused + self._training_paused_steps = training_paused_steps + self._raises_on_step = raises_on_step + self._requires_forward = requires_forward + self._pumps_app_update = pumps_app_update + self.step_calls = [] + self.close_calls = 0 + + @property + def is_closed(self): + return self._closed + + def is_running(self): + return self._running + + def is_rendering_paused(self): + return self._rendering_paused + + def is_training_paused(self): + if self._training_paused_steps > 0: + self._training_paused_steps -= 1 + return True + return False + + def step(self, dt): + self.step_calls.append(dt) + if self._raises_on_step: + raise RuntimeError("step failed") + + def close(self): + self.close_calls += 1 + self._closed = True + + def get_visualized_env_ids(self): + return self._env_ids + + def requires_forward_before_step(self): + return self._requires_forward + + def pumps_app_update(self): + return self._pumps_app_update + + def supports_markers(self): + return False + + +def _make_context(visualizers, provider=None): + ctx = object.__new__(SimulationContext) + ctx._visualizers = list(visualizers) + ctx._scene_data_provider = provider + ctx.physics_manager = _FakePhysicsManager() + ctx._visualizer_step_counter = 0 + return ctx + + +def test_update_scene_data_provider_forwards_and_updates_provider(): + provider = _FakeProvider() + viz_a = _FakeVisualizer(env_ids=[0, 2], requires_forward=True) + viz_b = _FakeVisualizer(env_ids=[2, 3]) + viz_c = _FakeVisualizer(env_ids=None) + ctx = _make_context([viz_a, viz_b, viz_c], provider=provider) + + ctx.update_scene_data_provider() + + assert ctx.physics_manager.forward_calls == 1 + assert provider.update_calls == [True] + assert ctx._visualizer_step_counter == 1 + + +def test_update_scene_data_provider_force_forward_with_no_visualizers(): + provider = _FakeProvider() + ctx = _make_context([], provider=provider) + ctx.update_scene_data_provider(force_require_forward=True) + assert ctx.physics_manager.forward_calls == 1 + assert provider.update_calls == [True] + + +def test_update_visualizers_removes_closed_nonrunning_and_failed(caplog): + provider = _FakeProvider() + closed_viz = _FakeVisualizer(closed=True) + stopped_viz = _FakeVisualizer(running=False) + failing_viz = _FakeVisualizer(raises_on_step=True) + paused_viz = _FakeVisualizer(rendering_paused=True) + healthy_viz = _FakeVisualizer(env_ids=[1]) + ctx = _make_context([closed_viz, stopped_viz, failing_viz, paused_viz, healthy_viz], provider=provider) + + with caplog.at_level("ERROR"): + ctx.update_visualizers(0.1) + + assert ctx._visualizers == [paused_viz, healthy_viz] + assert closed_viz.close_calls == 1 + assert stopped_viz.close_calls == 1 + assert failing_viz.close_calls == 1 + assert paused_viz.close_calls == 0 + assert paused_viz.step_calls == [0.0] + assert healthy_viz.step_calls == [0.1] + assert any("Error stepping visualizer" in r.message for r in caplog.records) + + +def test_update_visualizers_skips_zero_dt_for_paused_app_pumping_visualizer(): + provider = _FakeProvider() + paused_app_pumping_viz = _FakeVisualizer(rendering_paused=True, pumps_app_update=True) + ctx = _make_context([paused_app_pumping_viz], provider=provider) + + ctx.update_visualizers(0.3) + + assert paused_app_pumping_viz.step_calls == [] + + +def test_update_visualizers_handles_training_pause_loop(): + provider = _FakeProvider() + viz = _FakeVisualizer(training_paused_steps=1) + ctx = _make_context([viz], provider=provider) + + ctx.update_visualizers(0.2) + + assert viz.step_calls == [0.0, 0.2] + + +def test_vis_marker_registry_dispatch_allows_callback_mutation(): + registry = VisMarkerRegistry() + calls = [] + + def _remove_other_callback(event): + calls.append(("remove_other", event)) + registry.remove_callback("other") + + def _other_callback(event): + calls.append(("other", event)) + + registry.add_callback("remove_other", _remove_other_callback) + registry.add_callback("other", _other_callback) + + registry.dispatch_callbacks("tick") + + assert calls == [("remove_other", "tick"), ("other", "tick")] + assert "other" not in registry._callbacks + + +class _DummyViserSceneDataProvider: + def __init__(self): + self._metadata = {"num_envs": 4} + self.state_calls: list[list[int] | None] = [] + + def get_metadata(self) -> dict: + return self._metadata + + def get_newton_model(self): + return "dummy-model" + + def get_newton_state(self): + self.state_calls.append(None) + return {"state_call": len(self.state_calls)} + + def get_camera_transforms(self): + return {} + + +class _DummyViserViewer: + def __init__(self): + self.calls = [] + + def begin_frame(self, sim_time: float) -> None: + self.calls.append(("begin_frame", sim_time)) + + def log_state(self, state) -> None: + self.calls.append(("log_state", state)) + + def end_frame(self) -> None: + self.calls.append(("end_frame",)) + + def is_running(self) -> bool: + return True + + +def test_viser_visualizer_initialize_and_step_uses_provider_state(monkeypatch: pytest.MonkeyPatch): + provider = _DummyViserSceneDataProvider() + viewer = _DummyViserViewer() + + def _fake_create_viewer(self, record_to_viser: str | None, metadata: dict | None = None): + assert record_to_viser is None + assert metadata == provider.get_metadata() + self._viewer = viewer + + monkeypatch.setattr(viser_visualizer.ViserVisualizer, "_create_viewer", _fake_create_viewer) + + visualizer = viser_visualizer.ViserVisualizer(ViserVisualizerCfg()) + visualizer.initialize(cast(Any, provider)) + visualizer.step(0.25) + + assert visualizer.is_initialized + assert provider.state_calls == [None, None] + assert visualizer._sim_time == pytest.approx(0.25) + assert viewer.calls[0][0] == "begin_frame" + assert viewer.calls[0][1] == pytest.approx(0.25) + # log_state passes through get_newton_state() as-is; no env_ids (or other) keys are merged in. + assert viewer.calls[1] == ("log_state", {"state_call": 2}) + assert viewer.calls[2] == ("end_frame",) + + +def test_viser_visualizer_marker_render_failure_does_not_interrupt_state_updates( + monkeypatch: pytest.MonkeyPatch, caplog: pytest.LogCaptureFixture +): + provider = _DummyViserSceneDataProvider() + viewer = _DummyViserViewer() + marker_calls = [] + + def _fake_create_viewer(self, record_to_viser: str | None, metadata: dict | None = None): + self._viewer = viewer + + def _raise_marker_render(*args, **kwargs): + marker_calls.append((args, kwargs)) + raise RuntimeError("marker overlay failed") + + monkeypatch.setattr(viser_visualizer.ViserVisualizer, "_create_viewer", _fake_create_viewer) + monkeypatch.setattr(viser_visualizer, "render_newton_visualization_markers", _raise_marker_render) + + visualizer = viser_visualizer.ViserVisualizer(ViserVisualizerCfg()) + visualizer.initialize(cast(Any, provider)) + + with caplog.at_level("WARNING"): + visualizer.step(0.25) + + assert marker_calls + assert viewer.calls[0][0] == "begin_frame" + assert viewer.calls[1] == ("log_state", {"state_call": 2}) + assert viewer.calls[2] == ("end_frame",) + assert "Marker rendering failed; continuing body updates" in caplog.text + + +def test_newton_marker_mesh_registration_is_per_viewer(monkeypatch: pytest.MonkeyPatch): + marker = object.__new__(newton_markers.NewtonVisualizationMarkers) + marker._registered_meshes = set() + + class _FakeMesh: + vertices = np.zeros((1, 3), dtype=np.float32) + indices = np.zeros((3,), dtype=np.int32) + normals = np.zeros((0, 3), dtype=np.float32) + uvs = np.zeros((0, 2), dtype=np.float32) + + class _FakeViewer: + def __init__(self): + self.meshes = [] + + def log_mesh(self, name, vertices, indices, **kwargs): + self.meshes.append((name, vertices, indices, kwargs)) + + monkeypatch.setattr(newton_markers, "_create_mesh", lambda cfg: _FakeMesh()) + monkeypatch.setattr(newton_markers.wp, "array", lambda value, dtype=None: value) + + spec = newton_markers._NewtonMarkerSpec(renderer="mesh", mesh_type="box", mesh_params={"size": (1.0, 1.0, 1.0)}) + viewer_a = _FakeViewer() + viewer_b = _FakeViewer() + + marker._ensure_mesh_registered(viewer_a, "/Visuals/marker/meshes/arrow", spec) + marker._ensure_mesh_registered(viewer_a, "/Visuals/marker/meshes/arrow", spec) + marker._ensure_mesh_registered(viewer_b, "/Visuals/marker/meshes/arrow", spec) + + assert len(viewer_a.meshes) == 1 + assert len(viewer_b.meshes) == 1 + + +class _FakeNewtonMarkerMesh: + vertices = np.zeros((1, 3), dtype=np.float32) + indices = np.zeros((3,), dtype=np.int32) + normals = np.zeros((0, 3), dtype=np.float32) + uvs = np.zeros((0, 2), dtype=np.float32) + + +class _FakeNewtonMarkerViewer: + def __init__(self): + self.meshes = [] + self.instances = [] + self.lines = [] + + def log_mesh(self, name, vertices, indices, **kwargs): + self.meshes.append((name, vertices, indices, kwargs)) + + def log_instances(self, batch_name, mesh_name, xforms, scales, colors, materials, hidden=False): + self.instances.append( + { + "batch_name": batch_name, + "mesh_name": mesh_name, + "xforms": xforms, + "scales": scales, + "colors": colors, + "materials": materials, + "hidden": hidden, + } + ) + + def log_lines(self, batch_name, starts, ends, colors, width=None, hidden=False): + self.lines.append( + { + "batch_name": batch_name, + "starts": starts, + "ends": ends, + "colors": colors, + "width": width, + "hidden": hidden, + } + ) + + +def _make_newton_marker_for_render( + *, + marker_names: list[str], + translations: torch.Tensor, + marker_indices: torch.Tensor | None = None, + visible: bool = True, +): + marker = object.__new__(newton_markers.NewtonVisualizationMarkers) + marker_cfg_type = type("MarkerCfg", (), {"visual_material": None}) + marker.cfg = type("Cfg", (), {"markers": {name: marker_cfg_type() for name in marker_names}})() + marker.group_id = "/Visuals/marker::test" + marker.visible = visible + marker.translations = translations + marker.orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]], dtype=torch.float32).repeat(translations.shape[0], 1) + marker.scales = torch.ones((translations.shape[0], 3), dtype=torch.float32) + marker.marker_indices = marker_indices + marker.count = translations.shape[0] + marker._registered_meshes = set() + marker._warned_unsupported = set() + return marker + + +def _patch_newton_marker_render_deps(monkeypatch: pytest.MonkeyPatch) -> None: + specs = { + "arrow": newton_markers._NewtonMarkerSpec( + renderer="mesh", + mesh_type="box", + mesh_params={"size": (1.0, 1.0, 1.0)}, + color=(1.0, 1.0, 1.0), + texture=np.zeros((2, 2, 3), dtype=np.uint8), + ), + "sphere": newton_markers._NewtonMarkerSpec(renderer="mesh", mesh_type="sphere", mesh_params={"radius": 1.0}), + "frame": newton_markers._NewtonMarkerSpec(renderer="frame"), + } + + monkeypatch.setattr(newton_markers, "_create_mesh", lambda cfg: _FakeNewtonMarkerMesh()) + monkeypatch.setattr(newton_markers.wp, "array", lambda value, dtype=None: value) + monkeypatch.setattr(newton_markers, "_resolve_newton_marker_cfg", lambda name, marker_cfg, cfg: specs[name]) + + +def test_newton_marker_render_filters_visible_envs(monkeypatch: pytest.MonkeyPatch): + _patch_newton_marker_render_deps(monkeypatch) + translations = torch.arange(8, dtype=torch.float32).unsqueeze(1).repeat(1, 3) + marker = _make_newton_marker_for_render( + marker_names=["arrow"], + translations=translations, + marker_indices=torch.zeros(8, dtype=torch.int32), + ) + viewer = _FakeNewtonMarkerViewer() + + marker.render(viewer, visible_env_ids=[1, 3], num_envs=4) + + assert len(viewer.instances) == 1 + assert viewer.instances[0]["hidden"] is False + assert viewer.instances[0]["xforms"][:, 0].tolist() == [1.0, 3.0, 5.0, 7.0] + + +def test_newton_marker_render_routes_instances_by_prototype(monkeypatch: pytest.MonkeyPatch): + _patch_newton_marker_render_deps(monkeypatch) + translations = torch.arange(4, dtype=torch.float32).unsqueeze(1).repeat(1, 3) + marker = _make_newton_marker_for_render( + marker_names=["arrow", "sphere"], + translations=translations, + marker_indices=torch.tensor([0, 1, 0, 1], dtype=torch.int32), + ) + viewer = _FakeNewtonMarkerViewer() + + marker.render(viewer, visible_env_ids=None, num_envs=4) + + visible_instances = [call for call in viewer.instances if not call["hidden"]] + assert [call["batch_name"] for call in visible_instances] == [ + "/Visuals/marker::test/arrow", + "/Visuals/marker::test/sphere", + ] + assert [call["xforms"].shape[0] for call in visible_instances] == [2, 2] + assert visible_instances[0]["materials"][:, 3].tolist() == [1.0, 1.0] + assert visible_instances[1]["materials"][:, 3].tolist() == [0.0, 0.0] + + +def test_newton_marker_render_hides_unselected_prototypes(monkeypatch: pytest.MonkeyPatch): + _patch_newton_marker_render_deps(monkeypatch) + marker = _make_newton_marker_for_render( + marker_names=["arrow", "sphere", "frame"], + translations=torch.zeros((3, 3), dtype=torch.float32), + marker_indices=torch.zeros(3, dtype=torch.int32), + ) + viewer = _FakeNewtonMarkerViewer() + + marker.render(viewer, visible_env_ids=None, num_envs=3) + + hidden_instances = [call for call in viewer.instances if call["hidden"]] + assert [call["batch_name"] for call in hidden_instances] == ["/Visuals/marker::test/sphere"] + assert viewer.lines == [ + { + "batch_name": "/Visuals/marker::test/frame", + "starts": None, + "ends": None, + "colors": None, + "width": None, + "hidden": True, + } + ] + + +@pytest.mark.parametrize( + ("cfg_max_visible_envs", "expected_visible"), + [ + (None, None), + (0, []), + (3, [0, 1, 2]), + ], +) +def test_viser_visualizer_create_viewer_applies_visible_worlds( + monkeypatch: pytest.MonkeyPatch, + cfg_max_visible_envs: int | None, + expected_visible: list[int] | None, +): + captured = {} + + class _FakeNewtonViewerViser: + def __init__( + self, + *, + port: int, + label: str | None, + verbose: bool, + share: bool, + record_to_viser: str | None, + metadata: dict | None = None, + ): + captured["init"] = { + "port": port, + "label": label, + "verbose": verbose, + "share": share, + "record_to_viser": record_to_viser, + "metadata": metadata, + } + + def set_model(self, model: Any) -> None: + captured["set_model"] = model + + def set_visible_worlds(self, worlds) -> None: + captured["visible_worlds"] = worlds + + def set_world_offsets(self, spacing) -> None: + captured["set_world_offsets"] = tuple(spacing) + + monkeypatch.setattr(viser_visualizer, "NewtonViewerViser", _FakeNewtonViewerViser) + monkeypatch.setattr( + viser_visualizer.ViserVisualizer, + "_resolve_initial_camera_pose", + lambda self: ((1.0, 2.0, 3.0), (0.0, 0.0, 0.0)), + ) + monkeypatch.setattr(viser_visualizer.ViserVisualizer, "_set_viser_camera_view", lambda self, pose: None) + + cfg = ViserVisualizerCfg( + max_visible_envs=cfg_max_visible_envs, + open_browser=False, + randomly_sample_visible_envs=False, + ) + visualizer = viser_visualizer.ViserVisualizer(cfg) + visualizer._model = "dummy-model" + visualizer._env_ids = None # normally set by initialize() -> _compute_visualized_env_ids() + visualizer._create_viewer(record_to_viser="record.viser", metadata={"num_envs": 8}) + + assert captured["set_model"] == "dummy-model" + assert captured["visible_worlds"] == expected_visible + assert captured["set_world_offsets"] == (0.0, 0.0, 0.0) + + +@pytest.mark.parametrize( + ("cfg_max_visible_envs", "expected_visible"), + [ + (None, None), + (0, []), + (3, [0, 1, 2]), + ], +) +def test_rerun_visualizer_initialize_applies_visible_worlds_and_world_offsets( + monkeypatch: pytest.MonkeyPatch, + cfg_max_visible_envs: int | None, + expected_visible: list[int] | None, +): + captured = {} + + class _FakeNewtonViewerRerun: + def __init__( + self, + *, + app_id: str, + address: str | None, + serve_web_viewer: bool, + web_port: int, + grpc_port: int, + keep_historical_data: bool, + keep_scalar_history: bool, + record_to_rrd: str | None, + ): + captured["init"] = { + "app_id": app_id, + "address": address, + "serve_web_viewer": serve_web_viewer, + "web_port": web_port, + "grpc_port": grpc_port, + "keep_historical_data": keep_historical_data, + "keep_scalar_history": keep_scalar_history, + "record_to_rrd": record_to_rrd, + } + + def set_model(self, model: Any) -> None: + captured["set_model"] = model + + def set_visible_worlds(self, worlds) -> None: + captured["visible_worlds"] = worlds + + def set_world_offsets(self, spacing) -> None: + captured["set_world_offsets"] = tuple(spacing) + + def close(self) -> None: + captured["closed"] = True + + class _DummyRerunSceneDataProvider: + def get_metadata(self) -> dict: + return {"num_envs": 4} + + def get_newton_model(self): + return "dummy-model" + + def get_newton_state(self): + return {"ok": True} + + def get_camera_transforms(self): + return {} + + monkeypatch.setattr(rerun_visualizer, "NewtonViewerRerun", _FakeNewtonViewerRerun) + monkeypatch.setattr( + rerun_visualizer, "_ensure_rerun_server", lambda **kwargs: ("rerun+http://127.0.0.1:9876/proxy", False) + ) + monkeypatch.setattr(rerun_visualizer, "_open_rerun_web_viewer", lambda *args, **kwargs: None) + monkeypatch.setattr( + rerun_visualizer.RerunVisualizer, + "_resolve_initial_camera_pose", + lambda self: ((1.0, 2.0, 3.0), (0.0, 0.0, 0.0)), + ) + monkeypatch.setattr(rerun_visualizer.RerunVisualizer, "_apply_camera_pose", lambda self, pose: None) + + cfg = RerunVisualizerCfg( + open_browser=False, + max_visible_envs=cfg_max_visible_envs, + randomly_sample_visible_envs=False, + ) + visualizer = rerun_visualizer.RerunVisualizer(cfg) + visualizer.initialize(cast(Any, _DummyRerunSceneDataProvider())) + + assert captured["set_model"] == "dummy-model" + assert captured["visible_worlds"] == expected_visible + assert captured["set_world_offsets"] == (0.0, 0.0, 0.0) + + +def test_rerun_visualizer_marker_failure_still_ends_frame(monkeypatch: pytest.MonkeyPatch): + class _FakeRerunViewer: + def __init__(self): + self.calls = [] + + def is_paused(self): + return False + + def begin_frame(self, sim_time): + self.calls.append(("begin_frame", sim_time)) + + def log_state(self, state): + self.calls.append(("log_state", state)) + + def end_frame(self): + self.calls.append(("end_frame",)) + + class _DummyRerunSceneDataProvider: + def get_metadata(self) -> dict: + return {"num_envs": 4} + + def get_newton_state(self): + return {"ok": True} + + def get_camera_transforms(self): + return {} + + def _raise_marker_render(*args, **kwargs): + raise RuntimeError("marker render failed") + + monkeypatch.setattr(rerun_visualizer, "render_newton_visualization_markers", _raise_marker_render) + + visualizer = rerun_visualizer.RerunVisualizer(RerunVisualizerCfg()) + viewer = _FakeRerunViewer() + visualizer._is_initialized = True + visualizer._is_closed = False + visualizer._viewer = viewer + visualizer._scene_data_provider = _DummyRerunSceneDataProvider() + visualizer._resolved_visible_env_ids = None + + with pytest.raises(RuntimeError, match="marker render failed"): + visualizer.step(0.25) + + assert [call[0] for call in viewer.calls] == ["begin_frame", "log_state", "end_frame"] + + +def test_kit_visualizer_default_camera_source_does_not_require_camera_prim(monkeypatch: pytest.MonkeyPatch): + """Default ``--viz kit`` should work for envs without a camera prim.""" + + class _FakeViewportApi: + def __init__(self): + self.set_active_camera_calls = [] + + def get_active_camera(self): + return "/OmniverseKit_Persp" + + def set_active_camera(self, camera_path): + self.set_active_camera_calls.append(camera_path) + + class _FakeViewportWindow: + def __init__(self): + self.viewport_api = _FakeViewportApi() + + class _FakeStage: + def GetPrimAtPath(self, path): + raise AssertionError(f"default Kit visualizer should not look up camera prims: {path}") + + class _FakeProvider: + def get_usd_stage(self): + return _FakeStage() + + viewport_window = _FakeViewportWindow() + viewport_utility = type( + "ViewportUtility", + (), + { + "create_viewport_window": staticmethod(lambda **kwargs: viewport_window), + "get_active_viewport_window": staticmethod(lambda: viewport_window), + }, + ) + monkeypatch.setitem(sys.modules, "omni", type(sys)("omni")) + monkeypatch.setitem(sys.modules, "omni.kit", type(sys)("omni.kit")) + monkeypatch.setitem(sys.modules, "omni.kit.viewport", type(sys)("omni.kit.viewport")) + monkeypatch.setitem(sys.modules, "omni.kit.viewport.utility", viewport_utility) + monkeypatch.setitem(sys.modules, "omni.ui", type("OmniUi", (), {"DockPosition": object})()) + + applied_camera_poses = [] + monkeypatch.setattr( + kit_visualizer.KitVisualizer, + "_set_viewport_camera", + lambda self, eye, target: applied_camera_poses.append((tuple(eye), tuple(target))), + ) + + cfg = KitVisualizerCfg() + visualizer = kit_visualizer.KitVisualizer(cfg) + visualizer._scene_data_provider = _FakeProvider() + visualizer._runtime_headless = False + + visualizer._setup_viewport() + + assert cfg.cam_source == "cfg" + assert applied_camera_poses == [(cfg.eye, cfg.lookat)] + assert viewport_window.viewport_api.set_active_camera_calls == [] + assert visualizer._controlled_camera_path == "/OmniverseKit_Persp" + + +def test_get_cli_visualizer_types_handles_non_string_setting_without_crashing(): + ctx = object.__new__(SimulationContext) + ctx.get_setting = lambda name: {"types": "newton,kit"} if name == "/isaaclab/visualizer/types" else None + + assert ctx._get_cli_visualizer_types() == [] + + +# --------------------------------------------------------------------------- +# Shared helpers for config-resolution and initialize_visualizers tests +# --------------------------------------------------------------------------- + + +class _FakeVisualizerCfg: + """Minimal visualizer config for testing initialize_visualizers.""" + + def __init__(self, visualizer_type: str, *, fail_create: bool = False, fail_init: bool = False): + self.visualizer_type = visualizer_type + self._fail_create = fail_create + self._fail_init = fail_init + + def create_visualizer(self): + if self._fail_create: + raise RuntimeError("create failed") + return _FakeVisualizer() if not self._fail_init else _FailingInitVisualizer() + + +class _FailingInitVisualizer(_FakeVisualizer): + def initialize(self, provider): + raise RuntimeError("init failed") + + +def _make_context_with_settings( + settings: dict, + visualizer_cfgs=None, + *, + has_gui: bool = False, + has_offscreen_render: bool = False, +): + """Build a minimal SimulationContext suitable for testing is_rendering, _resolve_visualizer_cfgs, + and initialize_visualizers. + + Centralises the ``object.__new__`` construction so new internal attributes only need to be added + in one place when the production code changes. + """ + cfg = type( + "Cfg", + (), + { + "visualizer_cfgs": visualizer_cfgs, + "physics": type("PhysicsCfg", (), {"dt": 0.01})(), + "dt": 0.01, + "render_interval": 1, + }, + )() + ctx = object.__new__(SimulationContext) + ctx.cfg = cfg + ctx._has_gui = has_gui + ctx._has_offscreen_render = has_offscreen_render + ctx._xr_enabled = False + ctx._pending_camera_view = None + ctx._render_generation = 0 + ctx._visualizers = [] + ctx._scene_data_provider = _FakeProvider() + ctx._scene_data_requirements = None + ctx._clone_plans = {} + ctx._visualizer_step_counter = 0 + ctx._viz_dt = 0.01 + ctx.get_setting = lambda name: settings.get(name) + return ctx + + +def test_is_rendering_true_when_only_cfg_visualizer_is_set(): + cfg_visualizer = type("CfgVisualizer", (), {"visualizer_type": "newton"})() + settings = { + "/isaaclab/render/rtx_sensors": False, + "/isaaclab/visualizer/types": "", + "/isaaclab/visualizer/explicit": False, + "/isaaclab/visualizer/disable_all": False, + } + ctx = _make_context_with_settings(settings, visualizer_cfgs=[cfg_visualizer]) + assert ctx.is_rendering is True + + +def test_is_rendering_false_when_cli_disable_all_even_with_cfg_visualizer(): + cfg_visualizer = type("CfgVisualizer", (), {"visualizer_type": "newton"})() + settings = { + "/isaaclab/render/rtx_sensors": False, + "/isaaclab/visualizer/types": "", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": True, + } + ctx = _make_context_with_settings(settings, visualizer_cfgs=[cfg_visualizer]) + assert ctx.is_rendering is False + + +def test_explicit_unknown_visualizer_type_raises(): + """Requesting an unknown visualizer type via CLI raises RuntimeError.""" + settings = { + "/isaaclab/visualizer/types": "bogus_viz", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings) + + with pytest.raises(RuntimeError, match="bogus_viz"): + ctx.initialize_visualizers() + + +def test_explicit_missing_package_raises(monkeypatch: pytest.MonkeyPatch): + """Requesting a valid type whose package is not installed raises RuntimeError.""" + settings = { + "/isaaclab/visualizer/types": "rerun", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings) + + # Force import to fail for the rerun visualizer module + import builtins + + real_import = builtins.__import__ + + def _failing_import(name, *args, **kwargs): + if "isaaclab_visualizers.rerun" in name: + raise ImportError("No module named 'isaaclab_visualizers.rerun'") + return real_import(name, *args, **kwargs) + + monkeypatch.setattr("builtins.__import__", _failing_import) + + with pytest.raises(RuntimeError, match="rerun"): + ctx.initialize_visualizers() + + +def test_explicit_visualizer_create_failure_raises(monkeypatch: pytest.MonkeyPatch): + """When cli_explicit, a failure in create_visualizer raises RuntimeError.""" + failing_cfg = _FakeVisualizerCfg("newton", fail_create=True) + settings = { + "/isaaclab/visualizer/types": "newton", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings, visualizer_cfgs=[failing_cfg]) + + import isaaclab.sim.simulation_context as sc_mod + + monkeypatch.setattr(sc_mod, "resolve_scene_data_requirements", lambda **kwargs: type("R", (), {})()) + + with pytest.raises(RuntimeError, match="failed to create or initialize"): + ctx.initialize_visualizers() + + +def test_explicit_visualizer_init_failure_raises(monkeypatch: pytest.MonkeyPatch): + """When cli_explicit, a failure in visualizer.initialize raises RuntimeError.""" + failing_cfg = _FakeVisualizerCfg("newton", fail_init=True) + settings = { + "/isaaclab/visualizer/types": "newton", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings, visualizer_cfgs=[failing_cfg]) + + import isaaclab.sim.simulation_context as sc_mod + + monkeypatch.setattr(sc_mod, "resolve_scene_data_requirements", lambda **kwargs: type("R", (), {})()) + + with pytest.raises(RuntimeError, match="failed to create or initialize"): + ctx.initialize_visualizers() + + +def test_explicit_partial_valid_types_raises_for_invalid(): + """Requesting 'newton,bogus_viz' via CLI raises for the unknown type even though newton is valid.""" + settings = { + "/isaaclab/visualizer/types": "newton,bogus_viz", + "/isaaclab/visualizer/explicit": True, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings) + + with pytest.raises(RuntimeError, match="bogus_viz"): + ctx.initialize_visualizers() + + +def test_non_explicit_unknown_type_silently_skipped(caplog): + """Without --visualizer flag, unknown types are silently skipped (no error).""" + settings = { + "/isaaclab/visualizer/types": "bogus_viz", + "/isaaclab/visualizer/explicit": False, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings) + + # Non-explicit: should not raise + ctx.initialize_visualizers() + assert ctx._visualizers == [] + + +def test_non_explicit_create_failure_silently_logged(monkeypatch: pytest.MonkeyPatch, caplog): + """Without --visualizer flag, create_visualizer failures are logged, not raised.""" + failing_cfg = _FakeVisualizerCfg("newton", fail_create=True) + settings = { + "/isaaclab/visualizer/types": "", + "/isaaclab/visualizer/explicit": False, + "/isaaclab/visualizer/disable_all": False, + "/isaaclab/visualizer/max_visible_envs": None, + } + ctx = _make_context_with_settings(settings, visualizer_cfgs=[failing_cfg]) + + import isaaclab.sim.simulation_context as sc_mod + + monkeypatch.setattr(sc_mod, "resolve_scene_data_requirements", lambda **kwargs: type("R", (), {})()) + + with caplog.at_level("ERROR"): + ctx.initialize_visualizers() + assert ctx._visualizers == [] + assert any("Failed to initialize visualizer" in r.message for r in caplog.records) diff --git a/source/isaaclab/test/sim/test_simulation_render_config.py b/source/isaaclab/test/sim/test_simulation_render_config.py index 1bab84d11d7e..e5ad4274d184 100644 --- a/source/isaaclab/test/sim/test_simulation_render_config.py +++ b/source/isaaclab/test/sim/test_simulation_render_config.py @@ -20,8 +20,7 @@ import pytest import toml -import carb - +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.sim.simulation_cfg import RenderCfg, SimulationCfg from isaaclab.sim.simulation_context import SimulationContext from isaaclab.utils.version import get_isaac_sim_version @@ -76,21 +75,17 @@ def test_render_cfg(): assert sim.cfg.render.enable_shadows == enable_shadows assert sim.cfg.render.enable_ambient_occlusion == enable_ambient_occlusion - carb_settings_iface = carb.settings.get_settings() - assert carb_settings_iface.get("/rtx/translucency/enabled") == sim.cfg.render.enable_translucency - assert carb_settings_iface.get("/rtx/reflections/enabled") == sim.cfg.render.enable_reflections - assert carb_settings_iface.get("/rtx/indirectDiffuse/enabled") == sim.cfg.render.enable_global_illumination - assert carb_settings_iface.get("/rtx-transient/dlssg/enabled") == sim.cfg.render.enable_dlssg - assert carb_settings_iface.get("/rtx-transient/dldenoiser/enabled") == sim.cfg.render.enable_dl_denoiser - assert carb_settings_iface.get("/rtx/post/dlss/execMode") == sim.cfg.render.dlss_mode - assert carb_settings_iface.get("/rtx/directLighting/enabled") == sim.cfg.render.enable_direct_lighting - assert ( - carb_settings_iface.get("/rtx/directLighting/sampledLighting/samplesPerPixel") - == sim.cfg.render.samples_per_pixel - ) - assert carb_settings_iface.get("/rtx/shadows/enabled") == sim.cfg.render.enable_shadows - assert carb_settings_iface.get("/rtx/ambientOcclusion/enabled") == sim.cfg.render.enable_ambient_occlusion - assert carb_settings_iface.get("/rtx/post/aa/op") == 4 # dlss = 3, dlaa=4 + assert sim.get_setting("/rtx/translucency/enabled") == sim.cfg.render.enable_translucency + assert sim.get_setting("/rtx/reflections/enabled") == sim.cfg.render.enable_reflections + assert sim.get_setting("/rtx/indirectDiffuse/enabled") == sim.cfg.render.enable_global_illumination + assert sim.get_setting("/rtx-transient/dlssg/enabled") == sim.cfg.render.enable_dlssg + assert sim.get_setting("/rtx-transient/dldenoiser/enabled") == sim.cfg.render.enable_dl_denoiser + assert sim.get_setting("/rtx/post/dlss/execMode") == sim.cfg.render.dlss_mode + assert sim.get_setting("/rtx/directLighting/enabled") == sim.cfg.render.enable_direct_lighting + assert sim.get_setting("/rtx/directLighting/sampledLighting/samplesPerPixel") == sim.cfg.render.samples_per_pixel + assert sim.get_setting("/rtx/shadows/enabled") == sim.cfg.render.enable_shadows + assert sim.get_setting("/rtx/ambientOcclusion/enabled") == sim.cfg.render.enable_ambient_occlusion + assert sim.get_setting("/rtx/post/aa/op") == 4 # dlss = 3, dlaa=4 @pytest.mark.isaacsim_ci @@ -105,11 +100,14 @@ def test_render_cfg_presets(): rendering_modes = ["performance", "balanced", "quality"] for rendering_mode in rendering_modes: + # Clear any existing simulation context before creating a new one + SimulationContext.clear_instance() + # grab isaac lab apps path isaaclab_app_exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), *[".."] * 4, "apps") - # for Isaac Sim 4.5 compatibility, we use the 4.5 rendering mode app files in a different folder - if get_isaac_sim_version().major < 5: - isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_4_5") + # for Isaac Sim 5 compatibility, we use the 5 rendering mode app files in a different folder + if get_isaac_sim_version().major < 6: + isaaclab_app_exp_path = os.path.join(isaaclab_app_exp_path, "isaacsim_5") # grab preset settings preset_filename = os.path.join(isaaclab_app_exp_path, f"rendering_modes/{rendering_mode}.kit") @@ -127,23 +125,26 @@ def test_render_cfg_presets(): SimulationContext(cfg) - carb_settings_iface = carb.settings.get_settings() + settings = get_settings_manager() for key, val in preset_dict.items(): - setting_name = "/" + key.replace(".", "/") # convert to carb setting format + setting_name = "/" + key.replace(".", "/") # convert to setting path format if setting_name in carb_settings: - # grab groundtruth from carb setting dictionary overrides setting_gt = carb_settings[setting_name] elif setting_name == dlss_mode[0]: - # grab groundtruth from user-friendly setting overrides setting_gt = dlss_mode[1] else: - # grab groundtruth from preset setting_gt = val - setting_val = carb_settings_iface.get(setting_name) + setting_val = settings.get(setting_name) - assert setting_gt == setting_val + assert setting_gt == setting_val, ( + f"Mismatch for '{setting_name}' in mode '{rendering_mode}': " + f"expected {setting_gt!r}, got {setting_val!r}" + ) + + # Clean up after the test + SimulationContext.clear_instance() @pytest.mark.skip(reason="Timeline not stopped") @@ -192,18 +193,14 @@ def test_render_cfg_defaults(): assert sim.cfg.render.enable_shadows == enable_shadows assert sim.cfg.render.enable_ambient_occlusion == enable_ambient_occlusion - carb_settings_iface = carb.settings.get_settings() - assert carb_settings_iface.get("/rtx/translucency/enabled") == sim.cfg.render.enable_translucency - assert carb_settings_iface.get("/rtx/reflections/enabled") == sim.cfg.render.enable_reflections - assert carb_settings_iface.get("/rtx/indirectDiffuse/enabled") == sim.cfg.render.enable_global_illumination - assert carb_settings_iface.get("/rtx-transient/dlssg/enabled") == sim.cfg.render.enable_dlssg - assert carb_settings_iface.get("/rtx-transient/dldenoiser/enabled") == sim.cfg.render.enable_dl_denoiser - assert carb_settings_iface.get("/rtx/post/dlss/execMode") == sim.cfg.render.dlss_mode - assert carb_settings_iface.get("/rtx/directLighting/enabled") == sim.cfg.render.enable_direct_lighting - assert ( - carb_settings_iface.get("/rtx/directLighting/sampledLighting/samplesPerPixel") - == sim.cfg.render.samples_per_pixel - ) - assert carb_settings_iface.get("/rtx/shadows/enabled") == sim.cfg.render.enable_shadows - assert carb_settings_iface.get("/rtx/ambientOcclusion/enabled") == sim.cfg.render.enable_ambient_occlusion - assert carb_settings_iface.get("/rtx/post/aa/op") == 3 # dlss = 3, dlaa=4 + assert sim.get_setting("/rtx/translucency/enabled") == sim.cfg.render.enable_translucency + assert sim.get_setting("/rtx/reflections/enabled") == sim.cfg.render.enable_reflections + assert sim.get_setting("/rtx/indirectDiffuse/enabled") == sim.cfg.render.enable_global_illumination + assert sim.get_setting("/rtx-transient/dlssg/enabled") == sim.cfg.render.enable_dlssg + assert sim.get_setting("/rtx-transient/dldenoiser/enabled") == sim.cfg.render.enable_dl_denoiser + assert sim.get_setting("/rtx/post/dlss/execMode") == sim.cfg.render.dlss_mode + assert sim.get_setting("/rtx/directLighting/enabled") == sim.cfg.render.enable_direct_lighting + assert sim.get_setting("/rtx/directLighting/sampledLighting/samplesPerPixel") == sim.cfg.render.samples_per_pixel + assert sim.get_setting("/rtx/shadows/enabled") == sim.cfg.render.enable_shadows + assert sim.get_setting("/rtx/ambientOcclusion/enabled") == sim.cfg.render.enable_ambient_occlusion + assert sim.get_setting("/rtx/post/aa/op") == 3 # dlss = 3, dlaa=4 diff --git a/source/isaaclab/test/sim/test_simulation_stage_in_memory.py b/source/isaaclab/test/sim/test_simulation_stage_in_memory.py index 68d9d86c666e..1add70c70989 100644 --- a/source/isaaclab/test/sim/test_simulation_stage_in_memory.py +++ b/source/isaaclab/test/sim/test_simulation_stage_in_memory.py @@ -17,14 +17,14 @@ import pytest +import torch -import omni import omni.physx import omni.usd import usdrt -from isaacsim.core.cloner import GridCloner import isaaclab.sim as sim_utils +from isaaclab import cloner from isaaclab.sim.simulation_context import SimulationCfg, SimulationContext from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR from isaaclab.utils.version import get_isaac_sim_version @@ -39,8 +39,6 @@ def sim(): yield sim omni.physx.get_physx_simulation_interface().detach_stage() sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -56,15 +54,12 @@ def test_stage_in_memory_with_shapes(sim): if get_isaac_sim_version().major < 5: pytest.skip("Stage in memory is not supported in this version of Isaac Sim") - # define parameters - num_clones = 10 - # grab stage in memory and set as current stage via the with statement - stage_in_memory = sim.get_initial_stage() + stage_in_memory = sim.stage with sim_utils.use_stage(stage_in_memory): - # create cloned cone stage - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + # create parent prim for shape prototypes + sim_utils.create_prim("/World/Cone", "Xform") + num_shape_prototypes = 3 cfg = sim_utils.MultiAssetSpawnerCfg( assets_cfg=[ @@ -109,31 +104,19 @@ def test_stage_in_memory_with_shapes(sim): mass_props=sim_utils.MassPropertiesCfg(mass=1.0), collision_props=sim_utils.CollisionPropertiesCfg(), ) - prim_path_regex = "/World/env_.*/Cone" + prim_path_regex = "/World/Cone/asset_.*" cfg.func(prim_path_regex, cfg) - # verify stage is in memory - assert sim_utils.is_current_stage_in_memory() - - # verify prims exist in stage in memory + # verify prims exist in stage prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) == num_clones - - # verify prims do not exist in context stage - context_stage = omni.usd.get_context().get_stage() - with sim_utils.use_stage(context_stage): - prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) != num_clones - - # attach stage to context - sim_utils.attach_stage_to_usd_context() + assert len(prims) == num_shape_prototypes # verify stage is no longer in memory assert not sim_utils.is_current_stage_in_memory() # verify prims now exist in context stage prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) == num_clones + assert len(prims) == num_shape_prototypes def test_stage_in_memory_with_usds(sim): @@ -144,18 +127,20 @@ def test_stage_in_memory_with_usds(sim): pytest.skip("Stage in memory is not supported in this version of Isaac Sim") # define parameters - num_clones = 10 + num_robot_prototypes = 2 usd_paths = [ f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd", f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd", ] - # grab stage in memory and set as current stage via the with statement - stage_in_memory = sim.get_initial_stage() + # verify stage is attached to USD context (happens automatically now with create_stage_in_memory) + assert not sim_utils.is_current_stage_in_memory() + + # grab stage and set as current stage via the with statement + stage_in_memory = sim.stage with sim_utils.use_stage(stage_in_memory): - # create cloned robot stage - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + # create parent prim for robot prototypes + sim_utils.create_prim("/World/Robot", "Xform") cfg = sim_utils.MultiUsdFileCfg( usd_path=usd_paths, @@ -174,31 +159,19 @@ def test_stage_in_memory_with_usds(sim): ), activate_contact_sensors=True, ) - prim_path_regex = "/World/env_.*/Robot" + prim_path_regex = "/World/Robot/asset_.*" cfg.func(prim_path_regex, cfg) - # verify stage is in memory - assert sim_utils.is_current_stage_in_memory() - - # verify prims exist in stage in memory + # verify prims exist in stage prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) == num_clones - - # verify prims do not exist in context stage - context_stage = omni.usd.get_context().get_stage() - with sim_utils.use_stage(context_stage): - prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) != num_clones - - # attach stage to context - sim_utils.attach_stage_to_usd_context() + assert len(prims) == num_robot_prototypes # verify stage is no longer in memory assert not sim_utils.is_current_stage_in_memory() # verify prims now exist in context stage prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) == num_clones + assert len(prims) == num_robot_prototypes def test_stage_in_memory_with_clone_in_fabric(sim): @@ -212,46 +185,31 @@ def test_stage_in_memory_with_clone_in_fabric(sim): usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd" num_clones = 100 - # grab stage in memory and set as current stage via the with statement - stage_in_memory = sim.get_initial_stage() + # verify stage is attached to USD context (happens automatically now with create_stage_in_memory) + assert not sim_utils.is_current_stage_in_memory() + + # grab stage and set as current stage via the with statement + stage_in_memory = sim.stage with sim_utils.use_stage(stage_in_memory): # set up paths base_env_path = "/World/envs" source_prim_path = f"{base_env_path}/env_0" - # create cloner - cloner = GridCloner(spacing=3, stage=stage_in_memory) - cloner.define_base_env(base_env_path) + # create environment clones using Isaac Lab's cloner utilities + env_ids = torch.arange(num_clones, dtype=torch.long, device="cpu") + env_origins, _ = cloner.grid_transforms(num_clones, spacing=3.0, device="cpu") # create source prim + stage_in_memory.DefinePrim(source_prim_path, "Xform") sim_utils.create_prim(f"{source_prim_path}/Robot", "Xform", usd_path=usd_path) # generate target paths - target_paths = cloner.generate_paths("/World/envs/env", num_clones) + env_fmt = "/World/envs/env_{}" # clone robots at target paths - cloner.clone( - source_prim_path=source_prim_path, - base_env_path=base_env_path, - prim_paths=target_paths, - replicate_physics=True, - clone_in_fabric=True, - ) - prim_path_regex = "/World/envs/env_.*" - - # verify prims do not exist in context stage - context_stage = omni.usd.get_context().get_stage() - with sim_utils.use_stage(context_stage): - prims = sim_utils.find_matching_prim_paths(prim_path_regex) - assert len(prims) != num_clones - - # attach stage to context - sim_utils.attach_stage_to_usd_context() - - # verify stage is no longer in memory - assert not sim_utils.is_current_stage_in_memory() + cloner.usd_replicate(stage_in_memory, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) - # verify prims now exist in fabric stage using usdrt apis + # verify prims exist in fabric stage using usdrt apis stage_id = sim_utils.get_current_stage_id() usdrt_stage = usdrt.Usd.Stage.Attach(stage_id) for i in range(num_clones): diff --git a/source/isaaclab/test/sim/test_spawn_from_files.py b/source/isaaclab/test/sim/test_spawn_from_files.py index 9edf535ac91e..009949de4c9c 100644 --- a/source/isaaclab/test/sim/test_spawn_from_files.py +++ b/source/isaaclab/test/sim/test_spawn_from_files.py @@ -13,14 +13,12 @@ """Rest everything follows.""" import pytest -from packaging.version import Version import omni.kit.app import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR -from isaaclab.utils.version import get_isaac_sim_version @pytest.fixture @@ -39,8 +37,6 @@ def sim(): # cleanup after test sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -69,15 +65,12 @@ def test_spawn_usd_fails(sim): @pytest.mark.isaacsim_ci def test_spawn_urdf(sim): """Test loading prim from URDF file.""" - # pin the urdf importer extension to the older version + # enable the URDF importer extension manager = omni.kit.app.get_app().get_extension_manager() - if get_isaac_sim_version() >= Version("5.1"): - pinned_urdf_extension_name = "isaacsim.asset.importer.urdf-2.4.31" - manager.set_extension_enabled_immediate(pinned_urdf_extension_name, True) - else: - pinned_urdf_extension_name = "isaacsim.asset.importer.urdf" + if not manager.is_extension_enabled("isaacsim.asset.importer.urdf"): + manager.set_extension_enabled_immediate("isaacsim.asset.importer.urdf", True) # retrieve path to urdf importer extension - extension_id = manager.get_enabled_extension_id(pinned_urdf_extension_name) + extension_id = manager.get_enabled_extension_id("isaacsim.asset.importer.urdf") extension_path = manager.get_extension_path(extension_id) # Spawn franka from URDF cfg = sim_utils.UrdfFileCfg( diff --git a/source/isaaclab/test/sim/test_spawn_lights.py b/source/isaaclab/test/sim/test_spawn_lights.py index 9dbbd98cf7a2..3a019c2f90b2 100644 --- a/source/isaaclab/test/sim/test_spawn_lights.py +++ b/source/isaaclab/test/sim/test_spawn_lights.py @@ -21,6 +21,8 @@ from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.utils.string import to_camel_case +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture(autouse=True) def sim(): @@ -39,8 +41,6 @@ def sim(): # Teardown: Stop simulation sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() diff --git a/source/isaaclab/test/sim/test_spawn_materials.py b/source/isaaclab/test/sim/test_spawn_materials.py index e5c3b14f50d7..7f517f2bc1f6 100644 --- a/source/isaaclab/test/sim/test_spawn_materials.py +++ b/source/isaaclab/test/sim/test_spawn_materials.py @@ -21,6 +21,8 @@ from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.utils.assets import NVIDIA_NUCLEUS_DIR +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture def sim(): @@ -31,14 +33,12 @@ def sim(): sim_utils.update_stage() yield sim sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() def test_spawn_preview_surface(sim): """Test spawning preview surface.""" - cfg = sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)) + cfg = sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/PreviewSurface", cfg) # Check validity assert prim.IsValid() @@ -50,7 +50,7 @@ def test_spawn_preview_surface(sim): def test_spawn_mdl_material(sim): """Test spawning mdl material.""" - cfg = sim_utils.materials.MdlFileCfg( + cfg = sim_utils.MdlFileCfg( mdl_path=f"{NVIDIA_NUCLEUS_DIR}/Materials/Base/Metals/Aluminum_Anodized.mdl", project_uvw=True, albedo_brightness=0.5, @@ -67,7 +67,7 @@ def test_spawn_mdl_material(sim): def test_spawn_glass_mdl_material(sim): """Test spawning a glass mdl material.""" - cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) + cfg = sim_utils.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/GlassMaterial", cfg) # Check validity assert prim.IsValid() @@ -81,7 +81,7 @@ def test_spawn_glass_mdl_material(sim): def test_spawn_rigid_body_material(sim): """Test spawning a rigid body material.""" - cfg = sim_utils.materials.RigidBodyMaterialCfg( + cfg = sim_utils.RigidBodyMaterialCfg( dynamic_friction=1.5, restitution=1.5, static_friction=0.5, @@ -100,36 +100,11 @@ def test_spawn_rigid_body_material(sim): assert prim.GetAttribute("physxMaterial:frictionCombineMode").Get() == cfg.friction_combine_mode -def test_spawn_deformable_body_material(sim): - """Test spawning a deformable body material.""" - cfg = sim_utils.materials.DeformableBodyMaterialCfg( - density=1.0, - dynamic_friction=0.25, - youngs_modulus=50000000.0, - poissons_ratio=0.5, - elasticity_damping=0.005, - damping_scale=1.0, - ) - prim = cfg.func("/Looks/DeformableBodyMaterial", cfg) - # Check validity - assert prim.IsValid() - assert sim.stage.GetPrimAtPath("/Looks/DeformableBodyMaterial").IsValid() - # Check properties - assert prim.GetAttribute("physxDeformableBodyMaterial:density").Get() == cfg.density - assert prim.GetAttribute("physxDeformableBodyMaterial:dynamicFriction").Get() == cfg.dynamic_friction - assert prim.GetAttribute("physxDeformableBodyMaterial:youngsModulus").Get() == cfg.youngs_modulus - assert prim.GetAttribute("physxDeformableBodyMaterial:poissonsRatio").Get() == cfg.poissons_ratio - assert prim.GetAttribute("physxDeformableBodyMaterial:elasticityDamping").Get() == pytest.approx( - cfg.elasticity_damping - ) - assert prim.GetAttribute("physxDeformableBodyMaterial:dampingScale").Get() == cfg.damping_scale - - def test_apply_rigid_body_material_on_visual_material(sim): """Test applying a rigid body material on a visual material.""" - cfg = sim_utils.materials.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) + cfg = sim_utils.GlassMdlCfg(thin_walled=False, glass_ior=1.0, glass_color=(0.0, 1.0, 0.0)) prim = cfg.func("/Looks/Material", cfg) - cfg = sim_utils.materials.RigidBodyMaterialCfg( + cfg = sim_utils.RigidBodyMaterialCfg( dynamic_friction=1.5, restitution=1.5, static_friction=0.5, diff --git a/source/isaaclab/test/sim/test_spawn_meshes.py b/source/isaaclab/test/sim/test_spawn_meshes.py index 43fbc7852c2a..7b4c4678ca54 100644 --- a/source/isaaclab/test/sim/test_spawn_meshes.py +++ b/source/isaaclab/test/sim/test_spawn_meshes.py @@ -18,6 +18,8 @@ import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture def sim(): @@ -34,8 +36,6 @@ def sim(): # Cleanup sim._disable_app_control_on_stop_handle = True # prevent timeout sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -119,110 +119,25 @@ def test_spawn_sphere(sim): assert prim.GetPrimTypeInfo().GetTypeName() == "Mesh" -""" -Physics properties. -""" - - -def test_spawn_cone_with_deformable_props(sim): - """Test spawning of UsdGeomMesh prim for a cone with deformable body API.""" - # Spawn cone - cfg = sim_utils.MeshConeCfg( - radius=1.0, - height=2.0, - deformable_props=sim_utils.DeformableBodyPropertiesCfg(deformable_enabled=True), - ) - prim = cfg.func("/World/Cone", cfg) - - # Check validity - assert prim.IsValid() - assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() - - # Check properties - # Unlike rigid body, deformable body properties are on the mesh prim - prim = sim.stage.GetPrimAtPath("/World/Cone/geometry/mesh") - assert prim.GetAttribute("physxDeformable:deformableEnabled").Get() == cfg.deformable_props.deformable_enabled - - -def test_spawn_cone_with_deformable_and_mass_props(sim): - """Test spawning of UsdGeomMesh prim for a cone with deformable body and mass API.""" - # Spawn cone - cfg = sim_utils.MeshConeCfg( - radius=1.0, - height=2.0, - deformable_props=sim_utils.DeformableBodyPropertiesCfg(deformable_enabled=True), - mass_props=sim_utils.MassPropertiesCfg(mass=1.0), - ) - prim = cfg.func("/World/Cone", cfg) - - # Check validity - assert prim.IsValid() - assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() - # Check properties - prim = sim.stage.GetPrimAtPath("/World/Cone/geometry/mesh") - assert prim.GetAttribute("physics:mass").Get() == cfg.mass_props.mass - - # check sim playing - sim.play() - for _ in range(10): - sim.step() - - -def test_spawn_cone_with_deformable_and_density_props(sim): - """Test spawning of UsdGeomMesh prim for a cone with deformable body and mass API. - - Note: - In this case, we specify the density instead of the mass. In that case, physics need to know - the collision shape to compute the mass. Thus, we have to set the collider properties. In - order to not have a collision shape, we disable the collision. - """ - # Spawn cone - cfg = sim_utils.MeshConeCfg( - radius=1.0, - height=2.0, - deformable_props=sim_utils.DeformableBodyPropertiesCfg(deformable_enabled=True), - mass_props=sim_utils.MassPropertiesCfg(density=10.0), - ) - prim = cfg.func("/World/Cone", cfg) +@pytest.mark.parametrize("resolution", [(1, 1), (3, 2)]) +def test_spawn_square(sim, resolution): + """Test spawning of UsdGeomMesh as a square prim.""" + # Spawn square + cfg = sim_utils.MeshSquareCfg(size=1.0, resolution=resolution) + prim = cfg.func("/World/Square", cfg) # Check validity assert prim.IsValid() - assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() + assert sim.stage.GetPrimAtPath("/World/Square").IsValid() + assert prim.GetPrimTypeInfo().GetTypeName() == "Xform" # Check properties - prim = sim.stage.GetPrimAtPath("/World/Cone/geometry/mesh") - assert prim.GetAttribute("physics:density").Get() == cfg.mass_props.density - # check sim playing - sim.play() - for _ in range(10): - sim.step() - - -def test_spawn_cone_with_all_deformable_props(sim): - """Test spawning of UsdGeomMesh prim for a cone with all deformable properties.""" - # Spawn cone - cfg = sim_utils.MeshConeCfg( - radius=1.0, - height=2.0, - mass_props=sim_utils.MassPropertiesCfg(mass=5.0), - deformable_props=sim_utils.DeformableBodyPropertiesCfg(), - visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), - physics_material=sim_utils.materials.DeformableBodyMaterialCfg(), - ) - prim = cfg.func("/World/Cone", cfg) + prim = sim.stage.GetPrimAtPath("/World/Square/geometry/mesh") + assert prim.GetPrimTypeInfo().GetTypeName() == "Mesh" - # Check validity - assert prim.IsValid() - assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() - assert sim.stage.GetPrimAtPath("/World/Cone/geometry/material").IsValid() - # Check properties - # -- deformable body - prim = sim.stage.GetPrimAtPath("/World/Cone/geometry/mesh") - assert prim.GetAttribute("physxDeformable:deformableEnabled").Get() is True - # check sim playing - sim.play() - for _ in range(10): - sim.step() +""" +Physics properties. +""" def test_spawn_cone_with_all_rigid_props(sim): @@ -236,8 +151,8 @@ def test_spawn_cone_with_all_rigid_props(sim): rigid_body_enabled=True, solver_position_iteration_count=8, sleep_threshold=0.1 ), collision_props=sim_utils.CollisionPropertiesCfg(), - visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), - physics_material=sim_utils.materials.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), + physics_material=sim_utils.RigidBodyMaterialCfg(), ) prim = cfg.func("/World/Cone", cfg) diff --git a/source/isaaclab/test/sim/test_spawn_sensors.py b/source/isaaclab/test/sim/test_spawn_sensors.py index 63f29af7830c..f8604b99e54b 100644 --- a/source/isaaclab/test/sim/test_spawn_sensors.py +++ b/source/isaaclab/test/sim/test_spawn_sensors.py @@ -22,6 +22,8 @@ from isaaclab.sim.spawners.sensors.sensors import CUSTOM_FISHEYE_CAMERA_ATTRIBUTES, CUSTOM_PINHOLE_CAMERA_ATTRIBUTES from isaaclab.utils.string import to_camel_case +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture def sim(): @@ -32,8 +34,6 @@ def sim(): sim_utils.update_stage() yield sim sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() diff --git a/source/isaaclab/test/sim/test_spawn_shapes.py b/source/isaaclab/test/sim/test_spawn_shapes.py index 4c18753d52ea..532b29c22849 100644 --- a/source/isaaclab/test/sim/test_spawn_shapes.py +++ b/source/isaaclab/test/sim/test_spawn_shapes.py @@ -17,6 +17,8 @@ import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture def sim(): @@ -28,8 +30,6 @@ def sim(): yield sim sim._disable_app_control_on_stop_handle = True # prevent timeout sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -217,8 +217,8 @@ def test_spawn_cone_with_all_props(sim): mass_props=sim_utils.MassPropertiesCfg(mass=5.0), rigid_props=sim_utils.RigidBodyPropertiesCfg(), collision_props=sim_utils.CollisionPropertiesCfg(), - visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), - physics_material=sim_utils.materials.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), + physics_material=sim_utils.RigidBodyMaterialCfg(), ) prim = cfg.func("/World/Cone", cfg) @@ -258,26 +258,18 @@ def test_spawn_cone_clones_invalid_paths(sim): def test_spawn_cone_clones(sim): """Test spawning of cone clones.""" - num_clones = 10 - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + sim_utils.create_prim("/World/env_0", "Xform", translation=(0, 0, 0)) # Spawn cone on valid cloning path cfg = sim_utils.ConeCfg(radius=1.0, height=2.0, copy_from_source=True) prim = cfg.func("/World/env_.*/Cone", cfg) - # Check validity assert prim.IsValid() assert str(prim.GetPath()) == "/World/env_0/Cone" - # find matching prims - prims = sim_utils.find_matching_prim_paths("/World/env_.*/Cone") - assert len(prims) == num_clones def test_spawn_cone_clone_with_all_props_global_material(sim): """Test spawning of cone clones with global material reference.""" - num_clones = 10 - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + sim_utils.create_prim("/World/env_0", "Xform", translation=(0, 0, 0)) # Spawn cone on valid cloning path cfg = sim_utils.ConeCfg( radius=1.0, @@ -285,8 +277,8 @@ def test_spawn_cone_clone_with_all_props_global_material(sim): mass_props=sim_utils.MassPropertiesCfg(mass=5.0), rigid_props=sim_utils.RigidBodyPropertiesCfg(), collision_props=sim_utils.CollisionPropertiesCfg(), - visual_material=sim_utils.materials.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), - physics_material=sim_utils.materials.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), + physics_material=sim_utils.RigidBodyMaterialCfg(), visual_material_path="/Looks/visualMaterial", physics_material_path="/Looks/physicsMaterial", ) @@ -295,9 +287,6 @@ def test_spawn_cone_clone_with_all_props_global_material(sim): # Check validity assert prim.IsValid() assert str(prim.GetPath()) == "/World/env_0/Cone" - # find matching prims - prims = sim_utils.find_matching_prim_paths("/World/env_.*/Cone") - assert len(prims) == num_clones # find matching material prims prims = sim_utils.find_matching_prim_paths("/Looks/visualMaterial.*") assert len(prims) == 1 diff --git a/source/isaaclab/test/sim/test_spawn_wrappers.py b/source/isaaclab/test/sim/test_spawn_wrappers.py index 1571bb62bdc4..c053a6362f4d 100644 --- a/source/isaaclab/test/sim/test_spawn_wrappers.py +++ b/source/isaaclab/test/sim/test_spawn_wrappers.py @@ -19,6 +19,8 @@ from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR +pytestmark = pytest.mark.isaacsim_ci + @pytest.fixture def sim(): @@ -29,16 +31,58 @@ def sim(): sim_utils.update_stage() yield sim sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() +def test_spawn_multiple_shapes_with_regex_prefix(sim): + """Ensure assets are spawned and cloned when using regex prefix paths.""" + num_envs = 3 + num_assets = 3 + for env_idx in range(num_envs): + env_path = f"/World/env_{env_idx}" + sim_utils.create_prim(env_path, "Xform", translation=(0, 0, 0)) + sim_utils.create_prim(f"{env_path}/Cone", "Xform") + + cfg = sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), # this one should get overridden + ), + sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2), + ), + sim_utils.SphereCfg( + radius=0.3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), + ), + ], + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + + prim = cfg.func("/World/env_.*/Cone/asset_.*", cfg) + assert str(prim.GetPath()) == "/World/env_0/Cone/asset_0" + + prim_paths = sim_utils.find_matching_prim_paths("/World/env_.*/Cone/asset_.*") + assert len(prim_paths) == num_assets * num_envs + + for env_idx in range(num_envs): + for asset_idx in range(num_assets): + path = f"/World/env_{env_idx}/Cone/asset_{asset_idx}" + assert path in prim_paths + assert sim.stage.GetPrimAtPath(path).GetAttribute("physics:mass").Get() == cfg.mass_props.mass + + def test_spawn_multiple_shapes_with_global_settings(sim): """Test spawning of shapes randomly with global rigid body settings.""" - num_clones = 10 - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + sim_utils.create_prim("/World/template", "Xform", translation=(0, 0, 0)) cfg = sim_utils.MultiAssetSpawnerCfg( assets_cfg=[ @@ -57,19 +101,18 @@ def test_spawn_multiple_shapes_with_global_settings(sim): visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), ), ], - random_choice=True, rigid_props=sim_utils.RigidBodyPropertiesCfg( solver_position_iteration_count=4, solver_velocity_iteration_count=0 ), mass_props=sim_utils.MassPropertiesCfg(mass=1.0), collision_props=sim_utils.CollisionPropertiesCfg(), ) - prim = cfg.func("/World/env_.*/Cone", cfg) + prim = cfg.func("/World/template/Cone/asset_.*", cfg) assert prim.IsValid() - assert str(prim.GetPath()) == "/World/env_0/Cone" - prim_paths = sim_utils.find_matching_prim_paths("/World/env_.*/Cone") - assert len(prim_paths) == num_clones + assert str(prim.GetPath()) == "/World/template/Cone/asset_0" + prim_paths = sim_utils.find_matching_prim_paths("/World/template/Cone/asset_.*") + assert len(prim_paths) == 3 for prim_path in prim_paths: prim = sim.stage.GetPrimAtPath(prim_path) @@ -78,9 +121,7 @@ def test_spawn_multiple_shapes_with_global_settings(sim): def test_spawn_multiple_shapes_with_individual_settings(sim): """Test spawning of shapes randomly with individual rigid object settings.""" - num_clones = 10 - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + sim_utils.create_prim("/World/template", "Xform", translation=(0, 0, 0)) mass_variations = [2.0, 3.0, 4.0] cfg = sim_utils.MultiAssetSpawnerCfg( @@ -108,14 +149,13 @@ def test_spawn_multiple_shapes_with_individual_settings(sim): collision_props=sim_utils.CollisionPropertiesCfg(), ), ], - random_choice=True, ) - prim = cfg.func("/World/env_.*/Cone", cfg) + prim = cfg.func("/World/template/Cone/asset_.*", cfg) assert prim.IsValid() - assert str(prim.GetPath()) == "/World/env_0/Cone" - prim_paths = sim_utils.find_matching_prim_paths("/World/env_.*/Cone") - assert len(prim_paths) == num_clones + assert str(prim.GetPath()) == "/World/template/Cone/asset_0" + prim_paths = sim_utils.find_matching_prim_paths("/World/template/Cone/asset_.*") + assert len(prim_paths) == 3 for prim_path in prim_paths: prim = sim.stage.GetPrimAtPath(prim_path) @@ -129,16 +169,13 @@ def test_spawn_multiple_shapes_with_individual_settings(sim): def test_spawn_multiple_files_with_global_settings(sim): """Test spawning of files randomly with global articulation settings.""" - num_clones = 10 - for i in range(num_clones): - sim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + sim_utils.create_prim("/World/template", "Xform", translation=(0, 0, 0)) cfg = sim_utils.MultiUsdFileCfg( usd_path=[ f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd", f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd", ], - random_choice=True, rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=False, retain_accelerations=False, @@ -153,9 +190,9 @@ def test_spawn_multiple_files_with_global_settings(sim): ), activate_contact_sensors=True, ) - prim = cfg.func("/World/env_.*/Robot", cfg) + prim = cfg.func("/World/template/Robot/asset_.*", cfg) assert prim.IsValid() - assert str(prim.GetPath()) == "/World/env_0/Robot" - prim_paths = sim_utils.find_matching_prim_paths("/World/env_.*/Robot") - assert len(prim_paths) == num_clones + assert str(prim.GetPath()) == "/World/template/Robot/asset_0" + prim_paths = sim_utils.find_matching_prim_paths("/World/template/Robot/asset_.*") + assert len(prim_paths) == 2 diff --git a/source/isaaclab/test/sim/test_urdf_converter.py b/source/isaaclab/test/sim/test_urdf_converter.py index f91c58f015f9..65c697029f97 100644 --- a/source/isaaclab/test/sim/test_urdf_converter.py +++ b/source/isaaclab/test/sim/test_urdf_converter.py @@ -12,19 +12,20 @@ """Rest everything follows.""" +import math import os +import tempfile +import warnings +import xml.etree.ElementTree as ET -import numpy as np import pytest -from packaging.version import Version import omni.kit.app -from isaacsim.core.prims import Articulation import isaaclab.sim as sim_utils from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.sim.converters import UrdfConverter, UrdfConverterCfg -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.sim.converters.urdf_utils import merge_fixed_joints # Create a fixture for setup and teardown @@ -32,22 +33,19 @@ def sim_config(): # Create a new stage sim_utils.create_new_stage() - # pin the urdf importer extension to the older version + # enable the URDF importer extension manager = omni.kit.app.get_app().get_extension_manager() - if get_isaac_sim_version() >= Version("5.1"): - pinned_urdf_extension_name = "isaacsim.asset.importer.urdf-2.4.31" - manager.set_extension_enabled_immediate(pinned_urdf_extension_name, True) - else: - pinned_urdf_extension_name = "isaacsim.asset.importer.urdf" + if not manager.is_extension_enabled("isaacsim.asset.importer.urdf"): + manager.set_extension_enabled_immediate("isaacsim.asset.importer.urdf", True) # obtain the extension path - extension_id = manager.get_enabled_extension_id(pinned_urdf_extension_name) + extension_id = manager.get_enabled_extension_id("isaacsim.asset.importer.urdf") extension_path = manager.get_extension_path(extension_id) # default configuration config = UrdfConverterCfg( asset_path=f"{extension_path}/data/urdf/robots/franka_description/robots/panda_arm_hand.urdf", fix_base=True, joint_drive=UrdfConverterCfg.JointDriveCfg( - gains=UrdfConverterCfg.JointDriveCfg.PDGainsCfg(stiffness=400.0, damping=40.0) + gains=UrdfConverterCfg.JointDriveCfg.PDGainsCfg(stiffness=None, damping=None) ), ) # Simulation time-step @@ -58,8 +56,6 @@ def sim_config(): # Teardown sim._disable_app_control_on_stop_handle = True # prevent timeout sim.stop() - sim.clear() - sim.clear_all_callbacks() sim.clear_instance() @@ -113,40 +109,601 @@ def test_create_prim_from_usd(sim_config): @pytest.mark.isaacsim_ci def test_config_drive_type(sim_config): - """Change the drive mechanism of the robot to be position.""" + """Verify that ``target_type='position'`` plus uniform PD gains are written into every joint's DriveAPI. + + Reads the converter's USD output directly via :class:`pxr.UsdPhysics.DriveAPI` so the assertion does + not depend on a running PhysX simulation. Revolute joints are checked in N·m/deg (the USD storage + convention) and prismatic joints in N/m. + """ sim, config = sim_config - # Create directory to dump results test_dir = os.path.dirname(os.path.abspath(__file__)) output_dir = os.path.join(test_dir, "output", "urdf_converter") - if not os.path.exists(output_dir): - os.makedirs(output_dir, exist_ok=True) + os.makedirs(output_dir, exist_ok=True) + + stiffness = 42.0 + damping = 4.2 - # change the config config.force_usd_conversion = True config.joint_drive.target_type = "position" - config.joint_drive.gains.stiffness = 42.0 - config.joint_drive.gains.damping = 4.2 + config.joint_drive.gains.stiffness = stiffness + config.joint_drive.gains.damping = damping + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + revolute_count = 0 + prismatic_count = 0 + for prim in stage.Traverse(): + is_revolute = prim.IsA(UsdPhysics.RevoluteJoint) + is_prismatic = prim.IsA(UsdPhysics.PrismaticJoint) + if not (is_revolute or is_prismatic): + continue + instance_name = "angular" if is_revolute else "linear" + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + actual_stiffness = drive.GetStiffnessAttr().Get() + actual_damping = drive.GetDampingAttr().Get() + + if is_revolute: + expected_stiffness = stiffness * math.pi / 180.0 + expected_damping = damping * math.pi / 180.0 + revolute_count += 1 + else: + expected_stiffness = stiffness + expected_damping = damping + prismatic_count += 1 + + assert abs(actual_stiffness - expected_stiffness) < 1e-4, ( + f"Joint {prim.GetName()}: expected stiffness {expected_stiffness}, got {actual_stiffness}" + ) + assert abs(actual_damping - expected_damping) < 1e-4, ( + f"Joint {prim.GetName()}: expected damping {expected_damping}, got {actual_damping}" + ) + + # Franka Panda has 7 revolute arm joints and 2 prismatic finger joints. + assert revolute_count == 7, f"Expected 7 revolute joints, got {revolute_count}" + assert prismatic_count == 2, f"Expected 2 prismatic joints, got {prismatic_count}" + + +@pytest.mark.isaacsim_ci +def test_merge_fixed_joints_xml(): + """Test that the merge_fixed_joints utility correctly modifies URDF XML. + + Uses ``test_merge_joints.urdf`` which has: + - 7 links (root_link, base_link, link_1, link_2, palm_link, finger_link_1, finger_link_2) + - 6 joints (3 fixed, 1 continuous, 2 prismatic) + + After merging: + - 4 links (root_link, link_1, finger_link_1, finger_link_2) + - 3 joints (0 fixed, 1 continuous, 2 prismatic) + """ + manager = omni.kit.app.get_app().get_extension_manager() + if not manager.is_extension_enabled("isaacsim.asset.importer.urdf"): + manager.set_extension_enabled_immediate("isaacsim.asset.importer.urdf", True) + extension_id = manager.get_enabled_extension_id("isaacsim.asset.importer.urdf") + extension_path = manager.get_extension_path(extension_id) + + urdf_path = os.path.join(extension_path, "data", "urdf", "tests", "test_merge_joints.urdf") + + with tempfile.TemporaryDirectory(prefix="isaaclab_test_merge_") as tmpdir: + output_path = os.path.join(tmpdir, "merged.urdf") + merge_fixed_joints(urdf_path, output_path) + + # parse the output URDF + tree = ET.parse(output_path) + root = tree.getroot() + + links = root.findall("link") + joints = root.findall("joint") + link_names = sorted(link.get("name") for link in links) + joint_types = [j.get("type") for j in joints] + + # verify link count and names + assert len(links) == 4, f"Expected 4 links, got {len(links)}: {link_names}" + assert sorted(link_names) == sorted(["root_link", "link_1", "finger_link_1", "finger_link_2"]) + + # verify no fixed joints remain + assert "fixed" not in joint_types, f"Fixed joints should be removed, got types: {joint_types}" + + # verify joint count and types + assert len(joints) == 3, f"Expected 3 joints, got {len(joints)}" + + # verify that visuals from merged links were transferred + # root_link should have base_link's visual (1 visual from base_link) + root_link = next(link for link in links if link.get("name") == "root_link") + root_visuals = root_link.findall("visual") + assert len(root_visuals) >= 1, "root_link should have at least 1 visual from merged base_link" + + # link_1 should have visuals from link_1, link_2, and palm_link (3 total) + link_1 = next(link for link in links if link.get("name") == "link_1") + link_1_visuals = link_1.findall("visual") + assert len(link_1_visuals) == 3, ( + f"link_1 should have 3 visuals (own + link_2 + palm_link), got {len(link_1_visuals)}" + ) + + # verify that re-parented joints (finger joints) now reference link_1 + for joint in joints: + parent_name = joint.find("parent").get("link") + child_name = joint.find("child").get("link") + # finger joints were parented to palm_link, should now be parented to link_1 + if child_name in ("finger_link_1", "finger_link_2"): + assert parent_name == "link_1", f"Expected finger joint parent to be 'link_1', got '{parent_name}'" + + +@pytest.mark.isaacsim_ci +def test_merge_fixed_joints_converter(sim_config): + """Test the full URDF converter pipeline with merge_fixed_joints enabled.""" + sim, config = sim_config + # Create directory to dump results + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_converter_merge") + os.makedirs(output_dir, exist_ok=True) + + # use a URDF that has fixed joints + manager = omni.kit.app.get_app().get_extension_manager() + extension_id = manager.get_enabled_extension_id("isaacsim.asset.importer.urdf") + extension_path = manager.get_extension_path(extension_id) + + config.asset_path = os.path.join(extension_path, "data", "urdf", "tests", "test_merge_joints.urdf") + config.merge_fixed_joints = True + config.force_usd_conversion = True + config.usd_dir = output_dir + + urdf_converter = UrdfConverter(config) + + # check the USD file was created + assert os.path.exists(urdf_converter.usd_path), f"USD file not found at: {urdf_converter.usd_path}" + + # create a prim from it and verify it's valid + prim_path = "/World/MergedRobot" + sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() + + +@pytest.mark.isaacsim_ci +def test_fix_base_creates_fixed_joint(sim_config): + """Verify that fix_base=True creates a FixedJoint in the output USD.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_fix_base") + os.makedirs(output_dir, exist_ok=True) + + config.fix_base = True + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + # search for a FixedJoint in the output + fixed_joints = [p for p in stage.Traverse() if p.IsA(UsdPhysics.FixedJoint)] + assert len(fixed_joints) > 0, "Expected at least one FixedJoint from fix_base=True" + + # the first FixedJoint should target a rigid body link via body1 + fj = UsdPhysics.FixedJoint(fixed_joints[0]) + body1_targets = fj.GetBody1Rel().GetTargets() + assert len(body1_targets) > 0, "FixedJoint should target a rigid body link via body1" + + +@pytest.mark.isaacsim_ci +def test_no_fix_base(sim_config): + """Verify that fix_base=False does not create a fix_base_joint.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_no_fix_base") + os.makedirs(output_dir, exist_ok=True) + + config.fix_base = False + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + # there should be no prim named "fix_base_joint" + fix_base_prims = [p for p in stage.Traverse() if p.GetName() == "fix_base_joint"] + assert len(fix_base_prims) == 0, "Expected no fix_base_joint when fix_base=False" + + +@pytest.mark.isaacsim_ci +def test_collision_from_visuals(sim_config): + """Verify that collision_from_visuals runs without error and produces valid output. + + Note: CollisionAPI is applied on the intermediate stage before the asset transformer + restructures the USD. The transformer may not preserve CollisionAPI in the final + output, so this test verifies the pipeline executes successfully rather than + inspecting the final USD for CollisionAPI schemas. + """ + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_collision_visuals") + os.makedirs(output_dir, exist_ok=True) + + config.collision_from_visuals = True + config.force_usd_conversion = True config.usd_dir = output_dir urdf_converter = UrdfConverter(config) - # check the drive type of the robot + + assert os.path.exists(urdf_converter.usd_path), "USD file should exist after conversion" + prim_path = "/World/Robot" sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() - # access the robot - robot = Articulation(prim_path, reset_xform_properties=False) - # play the simulator and initialize the robot - sim.reset() - robot.initialize() - # check drive values for the robot (read from physx) - drive_stiffness, drive_damping = robot.get_gains() - np.testing.assert_array_equal(drive_stiffness.cpu().numpy(), config.joint_drive.gains.stiffness) - np.testing.assert_array_equal(drive_damping.cpu().numpy(), config.joint_drive.gains.damping) +@pytest.mark.isaacsim_ci +def test_no_collision_from_visuals(sim_config): + """Verify that conversion succeeds when collision_from_visuals is disabled.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_no_collision_visuals") + os.makedirs(output_dir, exist_ok=True) - # check drive values for the robot (read from usd) - # Note: Disable the app control callback to prevent hanging during sim.stop() - sim._disable_app_control_on_stop_handle = True - sim.stop() - drive_stiffness, drive_damping = robot.get_gains() - np.testing.assert_array_equal(drive_stiffness.cpu().numpy(), config.joint_drive.gains.stiffness) - np.testing.assert_array_equal(drive_damping.cpu().numpy(), config.joint_drive.gains.damping) + config.collision_from_visuals = False + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + assert os.path.exists(urdf_converter.usd_path), "USD file should exist after conversion" + + prim_path = "/World/Robot" + sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() + + +@pytest.mark.isaacsim_ci +def test_self_collision(sim_config): + """Verify that self_collision=True enables self-collision on the articulation.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_self_collision") + os.makedirs(output_dir, exist_ok=True) + + config.self_collision = True + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import PhysxSchema, Usd + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + # find prim with PhysxArticulationAPI and check self-collision flag + found_self_collision = False + for prim in stage.Traverse(): + if prim.HasAPI(PhysxSchema.PhysxArticulationAPI): + physx_api = PhysxSchema.PhysxArticulationAPI(prim) + sc_attr = physx_api.GetEnabledSelfCollisionsAttr() + if sc_attr and sc_attr.HasValue() and sc_attr.Get(): + found_self_collision = True + break + + assert found_self_collision, "Expected self-collision to be enabled on the articulation" + + +@pytest.mark.isaacsim_ci +def test_drive_type_acceleration(sim_config): + """Verify that drive_type='acceleration' is applied to all joints.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_drive_accel") + os.makedirs(output_dir, exist_ok=True) + + config.force_usd_conversion = True + config.joint_drive.drive_type = "acceleration" + config.joint_drive.gains.stiffness = 100.0 + config.joint_drive.gains.damping = 10.0 + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + joint_count = 0 + for prim in stage.Traverse(): + if prim.IsA(UsdPhysics.RevoluteJoint) or prim.IsA(UsdPhysics.PrismaticJoint): + instance_name = "angular" if prim.IsA(UsdPhysics.RevoluteJoint) else "linear" + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + type_attr = drive.GetTypeAttr() + assert type_attr and type_attr.HasValue(), f"Joint {prim.GetName()} missing drive type" + assert type_attr.Get() == "acceleration", ( + f"Expected drive type 'acceleration' on {prim.GetName()}, got '{type_attr.Get()}'" + ) + joint_count += 1 + + assert joint_count > 0, "No joints found in the output USD" + + +@pytest.mark.isaacsim_ci +def test_target_type_none_zeros_gains(sim_config): + """Verify that ``target_type='none'`` zeros the DriveAPI stiffness and damping on every joint.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_target_none") + os.makedirs(output_dir, exist_ok=True) + + config.force_usd_conversion = True + config.joint_drive.target_type = "none" + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + joint_count = 0 + for prim in stage.Traverse(): + is_revolute = prim.IsA(UsdPhysics.RevoluteJoint) + is_prismatic = prim.IsA(UsdPhysics.PrismaticJoint) + if not (is_revolute or is_prismatic): + continue + instance_name = "angular" if is_revolute else "linear" + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + assert abs(drive.GetStiffnessAttr().Get()) < 1e-6, ( + f"Joint {prim.GetName()}: expected zero stiffness, got {drive.GetStiffnessAttr().Get()}" + ) + assert abs(drive.GetDampingAttr().Get()) < 1e-6, ( + f"Joint {prim.GetName()}: expected zero damping, got {drive.GetDampingAttr().Get()}" + ) + joint_count += 1 + + assert joint_count > 0, "No joints found in the output USD" + + +@pytest.mark.isaacsim_ci +def test_per_joint_dict_gains(sim_config): + """Verify that per-joint dict-based gains are applied correctly.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_dict_gains") + os.makedirs(output_dir, exist_ok=True) + + arm_stiffness = 100.0 + finger_stiffness = 200.0 + arm_damping = 10.0 + finger_damping = 20.0 + + config.force_usd_conversion = True + config.joint_drive.target_type = "position" + config.joint_drive.gains = UrdfConverterCfg.JointDriveCfg.PDGainsCfg( + stiffness={ + "panda_joint[1-7]": arm_stiffness, + "panda_finger": finger_stiffness, + }, + damping={ + "panda_joint[1-7]": arm_damping, + "panda_finger": finger_damping, + }, + ) + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + # inspect the USD directly rather than going through PhysX to verify per-joint values + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + arm_joint_count = 0 + finger_joint_count = 0 + for prim in stage.Traverse(): + if not (prim.IsA(UsdPhysics.RevoluteJoint) or prim.IsA(UsdPhysics.PrismaticJoint)): + continue + name = prim.GetName() + is_revolute = prim.IsA(UsdPhysics.RevoluteJoint) + instance_name = "angular" if is_revolute else "linear" + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + stiffness_attr = drive.GetStiffnessAttr() + damping_attr = drive.GetDampingAttr() + + if "panda_joint" in name and "finger" not in name: + # arm joint (revolute) — USD stores in Nm/deg, so expected = value * pi/180 + expected_s = arm_stiffness * math.pi / 180.0 + expected_d = arm_damping * math.pi / 180.0 + assert abs(stiffness_attr.Get() - expected_s) < 0.01, ( + f"Arm joint {name}: expected stiffness ~{expected_s}, got {stiffness_attr.Get()}" + ) + assert abs(damping_attr.Get() - expected_d) < 0.01, ( + f"Arm joint {name}: expected damping ~{expected_d}, got {damping_attr.Get()}" + ) + arm_joint_count += 1 + elif "finger" in name: + # finger joint (prismatic) — USD stores directly in N/m + assert abs(stiffness_attr.Get() - finger_stiffness) < 0.01, ( + f"Finger joint {name}: expected stiffness {finger_stiffness}, got {stiffness_attr.Get()}" + ) + assert abs(damping_attr.Get() - finger_damping) < 0.01, ( + f"Finger joint {name}: expected damping {finger_damping}, got {damping_attr.Get()}" + ) + finger_joint_count += 1 + + assert arm_joint_count == 7, f"Expected 7 arm joints, got {arm_joint_count}" + assert finger_joint_count == 2, f"Expected 2 finger joints, got {finger_joint_count}" + + +@pytest.mark.isaacsim_ci +def test_per_joint_dict_drive_type(sim_config): + """Verify that per-joint dict-based drive type is applied correctly.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_dict_drive_type") + os.makedirs(output_dir, exist_ok=True) + + config.force_usd_conversion = True + config.joint_drive.drive_type = { + "panda_joint[1-7]": "acceleration", + "panda_finger": "force", + } + config.joint_drive.gains.stiffness = 50.0 + config.joint_drive.gains.damping = 5.0 + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + for prim in stage.Traverse(): + if not (prim.IsA(UsdPhysics.RevoluteJoint) or prim.IsA(UsdPhysics.PrismaticJoint)): + continue + name = prim.GetName() + instance_name = "angular" if prim.IsA(UsdPhysics.RevoluteJoint) else "linear" + drive = UsdPhysics.DriveAPI.Get(prim, instance_name) + type_attr = drive.GetTypeAttr() + + if "panda_joint" in name and "finger" not in name: + assert type_attr.Get() == "acceleration", ( + f"Arm joint {name}: expected 'acceleration', got '{type_attr.Get()}'" + ) + elif "finger" in name: + assert type_attr.Get() == "force", f"Finger joint {name}: expected 'force', got '{type_attr.Get()}'" + + +@pytest.mark.isaacsim_ci +def test_natural_frequency_gains_deprecation(sim_config): + """Verify that NaturalFrequencyGainsCfg emits a DeprecationWarning and conversion still succeeds.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_nat_freq") + os.makedirs(output_dir, exist_ok=True) + + config.force_usd_conversion = True + config.joint_drive.gains = UrdfConverterCfg.JointDriveCfg.NaturalFrequencyGainsCfg( + natural_frequency=10.0, + ) + config.usd_dir = output_dir + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + urdf_converter = UrdfConverter(config) + dep_warnings = [x for x in w if issubclass(x.category, DeprecationWarning)] + assert len(dep_warnings) >= 1, "Expected DeprecationWarning for NaturalFrequencyGainsCfg" + assert "NaturalFrequencyGainsCfg" in str(dep_warnings[0].message) + + # conversion should still succeed + assert os.path.exists(urdf_converter.usd_path), "USD file should be created despite deprecation" + + # verify we can spawn from the output + prim_path = "/World/Robot" + sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() + + +@pytest.mark.isaacsim_ci +def test_usd_structure_has_joints_and_links(sim_config): + """Validate that the output USD contains the expected joint and link prims for Franka Panda.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_structure") + os.makedirs(output_dir, exist_ok=True) + + config.merge_fixed_joints = False + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + # count revolute and prismatic joints + revolute_joints = [p for p in stage.Traverse() if p.IsA(UsdPhysics.RevoluteJoint)] + prismatic_joints = [p for p in stage.Traverse() if p.IsA(UsdPhysics.PrismaticJoint)] + rigid_bodies = [p for p in stage.Traverse() if p.HasAPI(UsdPhysics.RigidBodyAPI)] + + # Franka Panda: 7 revolute arm joints, 2 prismatic finger joints + assert len(revolute_joints) >= 7, f"Expected at least 7 revolute joints, got {len(revolute_joints)}" + assert len(prismatic_joints) >= 2, f"Expected at least 2 prismatic joints, got {len(prismatic_joints)}" + assert len(rigid_bodies) >= 1, "Expected at least one rigid body link" + + # all joints should have DriveAPI applied + for joint_prim in revolute_joints + prismatic_joints: + instance_name = "angular" if joint_prim.IsA(UsdPhysics.RevoluteJoint) else "linear" + drive = UsdPhysics.DriveAPI.Get(joint_prim, instance_name) + stiffness_attr = drive.GetStiffnessAttr() + assert stiffness_attr and stiffness_attr.HasValue(), ( + f"Joint {joint_prim.GetName()} missing stiffness attribute in DriveAPI" + ) + + +@pytest.mark.isaacsim_ci +def test_link_density(sim_config): + """Verify that link_density applies density to rigid body links. + + Note: The Franka Panda URDF has explicit mass on all links, so ``_apply_link_density`` + only sets density on links without explicit mass (mass == 0). This test verifies the + pipeline runs without errors when link_density is set. + """ + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_link_density") + os.makedirs(output_dir, exist_ok=True) + + config.link_density = 500.0 + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + from pxr import Usd, UsdPhysics + + stage = Usd.Stage.Open(urdf_converter.usd_path) + + # verify conversion succeeds and prims with MassAPI exist + mass_prims = [p for p in stage.Traverse() if p.HasAPI(UsdPhysics.MassAPI)] + assert len(mass_prims) > 0, "Expected prims with MassAPI" + + # verify we can spawn from the output + prim_path = "/World/Robot" + sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() + + +@pytest.mark.isaacsim_ci +def test_collider_type_convex_decomposition(sim_config): + """Verify that collider_type='convex_decomposition' runs without error and produces valid output. + + Note: MeshCollisionAPI is applied on the intermediate stage before the asset transformer. + The transformer may not preserve these schemas in the final output, so this test + verifies the pipeline executes successfully and produces a spawnable USD. + """ + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_convex_decomp") + os.makedirs(output_dir, exist_ok=True) + + config.collision_from_visuals = True + config.collider_type = "convex_decomposition" + config.force_usd_conversion = True + config.usd_dir = output_dir + urdf_converter = UrdfConverter(config) + + assert os.path.exists(urdf_converter.usd_path), "USD file should exist after conversion" + + prim_path = "/World/Robot" + sim_utils.create_prim(prim_path, usd_path=urdf_converter.usd_path) + assert sim.stage.GetPrimAtPath(prim_path).IsValid() + + +@pytest.mark.isaacsim_ci +def test_unsupported_features_warn(sim_config): + """Verify that deprecated config options emit warnings without failing.""" + sim, config = sim_config + test_dir = os.path.dirname(os.path.abspath(__file__)) + output_dir = os.path.join(test_dir, "output", "urdf_deprecated_warn") + os.makedirs(output_dir, exist_ok=True) + + config.convert_mimic_joints_to_normal_joints = True + config.replace_cylinders_with_capsules = True + config.root_link_name = "some_link" + config.force_usd_conversion = True + config.usd_dir = output_dir + + # conversion should succeed despite deprecated options + urdf_converter = UrdfConverter(config) + assert os.path.exists(urdf_converter.usd_path), "USD file should be created despite deprecated options" diff --git a/source/isaaclab/test/sim/test_utils_prims.py b/source/isaaclab/test/sim/test_utils_prims.py index 16584d113ed7..0d11afe22f45 100644 --- a/source/isaaclab/test/sim/test_utils_prims.py +++ b/source/isaaclab/test/sim/test_utils_prims.py @@ -23,7 +23,9 @@ import isaaclab.sim as sim_utils from isaaclab.sim.utils.prims import _to_tuple # type: ignore[reportPrivateUsage] -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +pytestmark = pytest.mark.isaacsim_ci @pytest.fixture(autouse=True) @@ -40,8 +42,14 @@ def test_setup_teardown(): sim_utils.clear_stage() -def assert_quat_close(q1: Gf.Quatf | Gf.Quatd, q2: Gf.Quatf | Gf.Quatd, eps: float = 1e-6): +def assert_quat_close( + q1: Gf.Quatf | Gf.Quatd | tuple | list, q2: Gf.Quatf | Gf.Quatd | tuple | list, eps: float = 1e-6 +): """Assert two quaternions are close.""" + if isinstance(q1, (tuple, list)): + q1 = Gf.Quatd(q1[3], q1[0], q1[1], q1[2]) + if isinstance(q2, (tuple, list)): + q2 = Gf.Quatd(q2[3], q2[0], q2[1], q2[2]) assert math.isclose(q1.GetReal(), q2.GetReal(), abs_tol=eps) for i in range(3): assert math.isclose(q1.GetImaginary()[i], q2.GetImaginary()[i], abs_tol=eps) @@ -87,7 +95,8 @@ def test_create_prim(): for prim_spec in prim.GetPrimStack(): references.extend(prim_spec.referenceList.prependedItems) assert len(references) == 1 - assert str(references[0].assetPath) == franka_usd + expected_path = retrieve_file_path(franka_usd) + assert str(references[0].assetPath) == expected_path # check adding semantic label prim = sim_utils.create_prim( @@ -102,7 +111,7 @@ def test_create_prim(): # check setting transform pos = (1.0, 2.0, 3.0) - quat = (0.0, 0.0, 0.0, 1.0) + quat = (0.0, 0.0, 1.0, 0.0) scale = (1.0, 0.5, 0.5) prim = sim_utils.create_prim( "/World/Test/Xform", "Xform", stage=stage, translation=pos, orientation=quat, scale=scale @@ -112,7 +121,7 @@ def test_create_prim(): assert prim.GetPrimPath() == "/World/Test/Xform" assert prim.GetTypeName() == "Xform" assert prim.GetAttribute("xformOp:translate").Get() == Gf.Vec3d(pos) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd(*quat)) + assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), quat) assert prim.GetAttribute("xformOp:scale").Get() == Gf.Vec3d(scale) # check xform operation order op_names = [op.GetOpName() for op in UsdGeom.Xformable(prim).GetOrderedXformOps()] @@ -131,7 +140,7 @@ def test_create_prim_with_different_input_types(input_type: str): # Define test values translation_vals = [1.0, 2.0, 3.0] - orientation_vals = [1.0, 0.0, 0.0, 0.0] # w, x, y, z + orientation_vals = [0.0, 0.0, 0.0, 1.0] # x, y, z, w scale_vals = [2.0, 3.0, 4.0] # Convert to the specified input type @@ -174,7 +183,7 @@ def test_create_prim_with_different_input_types(input_type: str): # Verify transform values assert prim.GetAttribute("xformOp:translate").Get() == Gf.Vec3d(*translation_vals) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd(*orientation_vals)) + assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), orientation_vals) assert prim.GetAttribute("xformOp:scale").Get() == Gf.Vec3d(*scale_vals) # Verify xform operation order @@ -198,12 +207,12 @@ def test_create_prim_with_world_position_different_types(input_type: str): "Xform", stage=stage, translation=(5.0, 10.0, 15.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), ) # Define world position and orientation values world_pos_vals = [10.0, 20.0, 30.0] - world_orient_vals = [0.7071068, 0.0, 0.7071068, 0.0] # 90 deg around Y + world_orient_vals = [0.0, 0.7071068, 0.0, 0.7071068] # 90 deg around Y # Convert to the specified input type if input_type == "list": @@ -266,7 +275,7 @@ def test_create_prim_non_xformable(): "Material", stage=stage, translation=(1.0, 2.0, 3.0), # These should be ignored - orientation=(1.0, 0.0, 0.0, 0.0), # These should be ignored + orientation=(0.0, 0.0, 0.0, 1.0), # These should be ignored scale=(2.0, 2.0, 2.0), # These should be ignored ) @@ -336,45 +345,6 @@ def test_delete_prim(): assert not prim2.IsValid() -def test_move_prim(): - """Test move_prim() function.""" - # obtain stage handle - stage = sim_utils.get_current_stage() - # create scene - sim_utils.create_prim("/World/Test", "Xform", stage=stage) - prim = sim_utils.create_prim( - "/World/Test/Xform", - "Xform", - usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd", - translation=(1.0, 2.0, 3.0), - orientation=(0.0, 0.0, 0.0, 1.0), - stage=stage, - ) - - # move prim - sim_utils.create_prim("/World/TestMove", "Xform", stage=stage, translation=(1.0, 1.0, 1.0)) - sim_utils.move_prim("/World/Test/Xform", "/World/TestMove/Xform", stage=stage) - # check prim moved - prim = stage.GetPrimAtPath("/World/TestMove/Xform") - assert prim.IsValid() - assert prim.GetPrimPath() == "/World/TestMove/Xform" - assert prim.GetAttribute("xformOp:translate").Get() == Gf.Vec3d((0.0, 1.0, 2.0)) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd(0.0, 0.0, 0.0, 1.0)) - - # check moving prim with keep_world_transform=False - # it should preserve the local transform from last move - sim_utils.create_prim( - "/World/TestMove2", "Xform", stage=stage, translation=(2.0, 2.0, 2.0), orientation=(0.0, 0.7071, 0.0, 0.7071) - ) - sim_utils.move_prim("/World/TestMove/Xform", "/World/TestMove2/Xform", keep_world_transform=False, stage=stage) - # check prim moved - prim = stage.GetPrimAtPath("/World/TestMove2/Xform") - assert prim.IsValid() - assert prim.GetPrimPath() == "/World/TestMove2/Xform" - assert prim.GetAttribute("xformOp:translate").Get() == Gf.Vec3d((0.0, 1.0, 2.0)) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd(0.0, 0.0, 0.0, 1.0)) - - """ USD references and variants. """ @@ -394,10 +364,11 @@ def test_get_usd_references(): # Create a prim with a USD reference franka_usd = f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" sim_utils.create_prim("/World/WithReference", usd_path=franka_usd, stage=stage) - # Check that it has the expected reference + # Check that it has the expected reference (remote URLs are resolved to local paths) refs = sim_utils.get_usd_references("/World/WithReference", stage=stage) assert len(refs) == 1 - assert refs == [franka_usd] + expected_path = retrieve_file_path(franka_usd) + assert refs == [expected_path] # Test with invalid prim path with pytest.raises(ValueError, match="not valid"): diff --git a/source/isaaclab/test/sim/test_utils_stage.py b/source/isaaclab/test/sim/test_utils_stage.py index 033a461e1a1f..f6d73d518662 100644 --- a/source/isaaclab/test/sim/test_utils_stage.py +++ b/source/isaaclab/test/sim/test_utils_stage.py @@ -23,6 +23,8 @@ import isaaclab.sim as sim_utils +pytestmark = pytest.mark.isaacsim_ci + def test_create_new_stage(): """Test creating a new stage attached to USD context.""" @@ -57,7 +59,7 @@ def test_create_multiple_stages(): def test_create_new_stage_in_memory(): """Test creating a new stage in memory (Isaac Sim 5.0+).""" - stage = sim_utils.create_new_stage_in_memory() + stage = sim_utils.create_new_stage() # Should return a valid stage assert stage is not None @@ -70,16 +72,17 @@ def test_create_new_stage_in_memory(): def test_is_current_stage_in_memory(): """Test checking if current stage is in memory.""" - # Create a regular stage (attached to context) + # Create a stage - in kitless mode, this creates an in-memory stage sim_utils.create_new_stage() is_in_memory = sim_utils.is_current_stage_in_memory() # Should return a boolean assert isinstance(is_in_memory, bool) - assert is_in_memory is False + # With kitless mode support, create_new_stage() creates an in-memory stage + assert is_in_memory is True - # Create a stage in memory - stage = sim_utils.create_new_stage_in_memory() + # Create a stage in memory explicitly + stage = sim_utils.create_new_stage() with sim_utils.use_stage(stage): is_in_memory = sim_utils.is_current_stage_in_memory() assert isinstance(is_in_memory, bool) @@ -103,11 +106,10 @@ def test_save_and_open_stage(): assert save_path.exists() # Open the saved stage - open_result = sim_utils.open_stage(str(save_path)) - assert open_result is True + opened_stage = sim_utils.open_stage(str(save_path)) + assert isinstance(opened_stage, Usd.Stage) # Verify content was preserved - opened_stage = sim_utils.get_current_stage() test_cube = opened_stage.GetPrimAtPath("/World/TestCube") assert test_cube.IsValid() assert test_cube.GetTypeName() == "Cube" @@ -214,25 +216,6 @@ def test_close_stage(): assert isinstance(result, bool) -def test_close_stage_with_callback(): - """Test closing stage with a callback function.""" - # Create a stage - sim_utils.create_new_stage() - - # Track callback invocations - callback_called = [] - - def callback(success: bool, error_msg: str): - callback_called.append((success, error_msg)) - - # Close with callback - result = sim_utils.close_stage(callback_fn=callback) - - # Callback might be called or not depending on implementation - # Just verify no exceptions were raised - assert isinstance(result, bool) - - def test_clear_stage(): """Test clearing the stage.""" # Create a new stage @@ -250,21 +233,6 @@ def test_clear_stage(): assert stage is not None -def test_is_stage_loading(): - """Test checking if stage is loading.""" - # Create a new stage - sim_utils.create_new_stage() - - # Check loading status - is_loading = sim_utils.is_stage_loading() - - # Should return a boolean - assert isinstance(is_loading, bool) - - # After creation, should not be loading - assert is_loading is False - - def test_get_current_stage(): """Test getting the current stage.""" # Create a new stage @@ -287,3 +255,108 @@ def test_get_current_stage_id(): # Should be a valid integer ID assert isinstance(stage_id, int) assert stage_id >= 0 + + +def test_resolve_paths(): + """Test resolve_paths helper for asset path resolution.""" + from isaaclab.sim.utils.stage import resolve_paths + + with tempfile.TemporaryDirectory() as temp_dir: + # Create a source stage with a sublayer reference + source_path = Path(temp_dir) / "source" / "source_stage.usd" + source_path.parent.mkdir(parents=True, exist_ok=True) + + # Create source stage with some content + source_stage = Usd.Stage.CreateNew(str(source_path)) + source_stage.DefinePrim("/World", "Xform") + source_stage.DefinePrim("/World/Cube", "Cube") + source_stage.GetRootLayer().Save() + + # Copy to a different location using layer transfer + dest_path = Path(temp_dir) / "dest" / "dest_stage.usd" + dest_path.parent.mkdir(parents=True, exist_ok=True) + + from pxr import Sdf + + dest_layer = Sdf.Layer.CreateNew(str(dest_path)) + dest_layer.TransferContent(source_stage.GetRootLayer()) + + # Resolve paths (should not raise any errors) + resolve_paths(str(source_path), str(dest_path)) + dest_layer.Save() + + # Open destination stage and verify content was preserved + dest_stage = Usd.Stage.Open(str(dest_path)) + cube_prim = dest_stage.GetPrimAtPath("/World/Cube") + assert cube_prim.IsValid() + assert cube_prim.GetTypeName() == "Cube" + + +def test_stage_context_tracking(): + """Test that stage context is properly tracked across operations.""" + # Create initial stage + stage1 = sim_utils.create_new_stage() + stage1.DefinePrim("/Stage1Marker", "Xform") + + # Verify it's the current stage + current = sim_utils.get_current_stage() + assert current.GetPrimAtPath("/Stage1Marker").IsValid() + + # Create another stage - should become current + stage2 = sim_utils.create_new_stage() + stage2.DefinePrim("/Stage2Marker", "Xform") + + current = sim_utils.get_current_stage() + assert current.GetPrimAtPath("/Stage2Marker").IsValid() + assert not current.GetPrimAtPath("/Stage1Marker").IsValid() + + # Use stage context manager to temporarily switch + with sim_utils.use_stage(stage1): + current = sim_utils.get_current_stage() + assert current.GetPrimAtPath("/Stage1Marker").IsValid() + + # After context manager, should be back to stage2 + current = sim_utils.get_current_stage() + assert current.GetPrimAtPath("/Stage2Marker").IsValid() + + +def test_is_prim_deletable(): + """Test _is_prim_deletable with various prim types.""" + from isaaclab.sim.utils.stage import _is_prim_deletable + + stage = sim_utils.create_new_stage() + + # Create a locally authored prim - should be deletable + local_prim = stage.DefinePrim("/World/LocalPrim", "Xform") + assert _is_prim_deletable(local_prim) is True + + # Create another deletable prim + another_prim = stage.DefinePrim("/World/AnotherPrim", "Cube") + assert _is_prim_deletable(another_prim) is True + + # Root prim should not be deletable + root_prim = stage.GetPseudoRoot() + assert _is_prim_deletable(root_prim) is False + + +def test_open_stage_sets_current(): + """Test that open_stage sets the opened stage as current.""" + with tempfile.TemporaryDirectory() as temp_dir: + # Create and save a stage + stage = sim_utils.create_new_stage() + stage.DefinePrim("/TestPrim", "Xform") + + save_path = Path(temp_dir) / "test.usd" + sim_utils.save_stage(str(save_path), save_and_reload_in_place=False) + + # Create a different stage + sim_utils.create_new_stage() + sim_utils.get_current_stage().DefinePrim("/DifferentPrim", "Xform") + + # Open the saved stage + opened = sim_utils.open_stage(str(save_path)) + + # Opened stage should now be current + current = sim_utils.get_current_stage() + assert current == opened + assert current.GetPrimAtPath("/TestPrim").IsValid() diff --git a/source/isaaclab/test/sim/test_utils_transforms.py b/source/isaaclab/test/sim/test_utils_transforms.py index 040cfe333aa7..9cd43800b528 100644 --- a/source/isaaclab/test/sim/test_utils_transforms.py +++ b/source/isaaclab/test/sim/test_utils_transforms.py @@ -46,10 +46,16 @@ def assert_vec3_close(v1: Gf.Vec3d | Gf.Vec3f, v2: tuple | Gf.Vec3d | Gf.Vec3f, assert math.isclose(v1[i], v2[i], abs_tol=eps), f"Vector mismatch at index {i}: {v1[i]} != {v2[i]}" -def assert_quat_close(q1: Gf.Quatf | Gf.Quatd, q2: Gf.Quatf | Gf.Quatd | tuple, eps: float = 1e-6): +def assert_quat_close( + q1: Gf.Quatf | Gf.Quatd | tuple | list, q2: Gf.Quatf | Gf.Quatd | tuple | list, eps: float = 1e-6 +): """Assert two quaternions are close, accounting for double-cover (q and -q represent same rotation).""" - if isinstance(q2, tuple): - q2 = Gf.Quatd(*q2) + if isinstance(q1, (tuple, list)): + # q1 is (x, y, z, w) + q1 = Gf.Quatd(q1[3], q1[0], q1[1], q1[2]) + if isinstance(q2, (tuple, list)): + # q2 is (x, y, z, w) + q2 = Gf.Quatd(q2[3], q2[0], q2[1], q2[2]) # Check if quaternions are close (either q1 ≈ q2 or q1 ≈ -q2) real_match = math.isclose(q1.GetReal(), q2.GetReal(), abs_tol=eps) imag_match = all(math.isclose(q1.GetImaginary()[i], q2.GetImaginary()[i], abs_tol=eps) for i in range(3)) @@ -83,7 +89,7 @@ def test_standardize_xform_ops_basic(): "/World/TestXform", "Xform", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), # w, x, y, z + orientation=(0.0, 0.0, 0.0, 1.0), # x, y, z, w (identity) scale=(1.0, 1.0, 1.0), stage=stage, ) @@ -105,7 +111,7 @@ def test_standardize_xform_ops_basic(): # Verify the transform values are preserved (approximately) assert_vec3_close(prim.GetAttribute("xformOp:translate").Get(), (1.0, 2.0, 3.0)) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), (1.0, 0.0, 0.0, 0.0)) + assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), (0.0, 0.0, 0.0, 1.0)) assert_vec3_close(prim.GetAttribute("xformOp:scale").Get(), (1.0, 1.0, 1.0)) @@ -139,7 +145,7 @@ def test_standardize_xform_ops_with_rotation_xyz(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify world pose is preserved (may have small numeric differences due to rotation conversion) assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-4) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-4) + assert_quat_close(quat_before, quat_after, eps=1e-4) # Verify the deprecated operation is removed assert "xformOp:rotateXYZ" not in prim.GetPropertyNames() @@ -182,7 +188,7 @@ def test_standardize_xform_ops_with_transform_matrix(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify world pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) # Verify the transform operation is removed assert "xformOp:transform" not in prim.GetPropertyNames() @@ -200,7 +206,7 @@ def test_standardize_xform_ops_preserves_world_pose(): # Create a prim with specific world pose translation = (10.0, 20.0, 30.0) # Rotation of 90 degrees around Z axis - orientation = (0.7071068, 0.0, 0.0, 0.7071068) # w, x, y, z + orientation = (0.0, 0.0, 0.7071068, 0.7071068) # x, y, z, w scale = (2.0, 3.0, 4.0) prim = sim_utils.create_prim( @@ -223,7 +229,7 @@ def test_standardize_xform_ops_preserves_world_pose(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify the world pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) def test_standardize_xform_ops_with_units_resolve(): @@ -259,7 +265,7 @@ def test_standardize_xform_ops_with_units_resolve(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) # Verify unitsResolve is removed assert "xformOp:scale:unitsResolve" not in prim.GetPropertyNames() @@ -279,7 +285,7 @@ def test_standardize_xform_ops_with_hierarchy(): "/World/Parent", "Xform", translation=(5.0, 0.0, 0.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(2.0, 2.0, 2.0), stage=stage, ) @@ -289,7 +295,7 @@ def test_standardize_xform_ops_with_hierarchy(): "/World/Parent/Child", "Xform", translation=(0.0, 3.0, 0.0), - orientation=(0.7071068, 0.0, 0.7071068, 0.0), # 90 deg around Y + orientation=(0.0, 0.7071068, 0.0, 0.7071068), # 90 deg around Y scale=(0.5, 0.5, 0.5), stage=stage, ) @@ -308,9 +314,9 @@ def test_standardize_xform_ops_with_hierarchy(): # Verify world poses are preserved assert_vec3_close(Gf.Vec3d(*parent_pos_before), parent_pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*parent_quat_before), parent_quat_after, eps=1e-5) + assert_quat_close(parent_quat_before, parent_quat_after, eps=1e-5) assert_vec3_close(Gf.Vec3d(*child_pos_before), child_pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*child_quat_before), child_quat_after, eps=1e-5) + assert_quat_close(child_quat_before, child_quat_after, eps=1e-5) def test_standardize_xform_ops_multiple_deprecated_ops(): @@ -346,7 +352,7 @@ def test_standardize_xform_ops_multiple_deprecated_ops(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify world pose is preserved assert_vec3_close(Gf.Vec3d(*pos), Gf.Vec3d(*pos_after), eps=1e-5) - assert_quat_close(Gf.Quatd(*quat), Gf.Quatd(*quat_after), eps=1e-5) + assert_quat_close(quat, quat_after, eps=1e-5) # Verify all deprecated operations are removed assert "xformOp:rotateX" not in prim.GetPropertyNames() @@ -367,7 +373,7 @@ def test_standardize_xform_ops_with_existing_standard_ops(): "/World/TestExistingStandard", "Xform", translation=(7.0, 8.0, 9.0), - orientation=(0.9238795, 0.3826834, 0.0, 0.0), # rotation around X + orientation=(0.3826834, 0.0, 0.0, 0.9238795), # rotation around X scale=(1.5, 2.5, 3.5), stage=stage, ) @@ -388,7 +394,7 @@ def test_standardize_xform_ops_with_existing_standard_ops(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify world pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) # Verify operations still exist and are in correct order xform_ops = get_xform_ops(prim) @@ -430,7 +436,7 @@ def test_standardize_xform_ops_on_geometry_prim(): "/World/TestCube", "Cube", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(2.0, 2.0, 2.0), attributes={"size": 1.0}, stage=stage, @@ -446,7 +452,7 @@ def test_standardize_xform_ops_on_geometry_prim(): pos_after, quat_after = sim_utils.resolve_prim_pose(cube_prim) # Verify world pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) # Verify standard operations exist xform_ops = get_xform_ops(cube_prim) @@ -463,7 +469,7 @@ def test_standardize_xform_ops_with_non_uniform_scale(): "/World/TestNonUniformScale", "Xform", translation=(5.0, 10.0, 15.0), - orientation=(0.7071068, 0.0, 0.7071068, 0.0), # 90 deg around Y + orientation=(0.0, 0.7071068, 0.0, 0.7071068), # 90 deg around Y scale=(1.0, 2.0, 3.0), # Non-uniform scale stage=stage, ) @@ -482,7 +488,7 @@ def test_standardize_xform_ops_with_non_uniform_scale(): pos_after, quat_after = sim_utils.resolve_prim_pose(prim) # Verify world pose is preserved assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) # Verify scale is preserved final_scale = prim.GetAttribute("xformOp:scale").Get() assert_vec3_close(initial_scale, final_scale, eps=1e-5) @@ -498,7 +504,7 @@ def test_standardize_xform_ops_identity_transform(): "/World/TestIdentity", "Xform", translation=(0.0, 0.0, 0.0), - orientation=(1.0, 0.0, 0.0, 0.0), # Identity quaternion + orientation=(0.0, 0.0, 0.0, 1.0), # Identity quaternion scale=(1.0, 1.0, 1.0), stage=stage, ) @@ -512,7 +518,7 @@ def test_standardize_xform_ops_identity_transform(): # Verify identity values assert_vec3_close(prim.GetAttribute("xformOp:translate").Get(), (0.0, 0.0, 0.0)) - assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), (1.0, 0.0, 0.0, 0.0)) + assert_quat_close(prim.GetAttribute("xformOp:orient").Get(), (0.0, 0.0, 0.0, 1.0)) assert_vec3_close(prim.GetAttribute("xformOp:scale").Get(), (1.0, 1.0, 1.0)) @@ -526,14 +532,14 @@ def test_standardize_xform_ops_with_explicit_values(): "/World/TestExplicitValues", "Xform", translation=(10.0, 10.0, 10.0), - orientation=(0.7071068, 0.7071068, 0.0, 0.0), + orientation=(0.7071068, 0.0, 0.0, 0.7071068), scale=(5.0, 5.0, 5.0), stage=stage, ) # Apply standardize_xform_ops with new explicit values new_translation = (1.0, 2.0, 3.0) - new_orientation = (1.0, 0.0, 0.0, 0.0) + new_orientation = (0.0, 0.0, 0.0, 1.0) # xyzw identity new_scale = (2.0, 2.0, 2.0) result = sim_utils.standardize_xform_ops( @@ -549,7 +555,7 @@ def test_standardize_xform_ops_with_explicit_values(): # Verify the prim is at the expected world location pos_after, quat_after = sim_utils.resolve_prim_pose(prim) assert_vec3_close(Gf.Vec3d(*pos_after), new_translation, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_after), new_orientation, eps=1e-5) + assert_quat_close(quat_after, new_orientation, eps=1e-5) # Verify standard operation order xform_ops = get_xform_ops(prim) @@ -566,7 +572,7 @@ def test_standardize_xform_ops_with_partial_values(): "/World/TestPartialValues", "Xform", translation=(1.0, 2.0, 3.0), - orientation=(0.9238795, 0.3826834, 0.0, 0.0), # rotation around X + orientation=(0.3826834, 0.0, 0.0, 0.9238795), # rotation around X scale=(2.0, 2.0, 2.0), stage=stage, ) @@ -586,12 +592,12 @@ def test_standardize_xform_ops_with_partial_values(): # Verify orientation and scale are preserved quat_after = prim.GetAttribute("xformOp:orient").Get() scale_after = prim.GetAttribute("xformOp:scale").Get() - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) assert_vec3_close(scale_before, scale_after, eps=1e-5) # Verify the prim's world orientation hasn't changed (only translation changed) _, quat_after_world = sim_utils.resolve_prim_pose(prim) - assert_quat_close(Gf.Quatd(*quat_before), quat_after_world, eps=1e-5) + assert_quat_close(quat_before, quat_after_world, eps=1e-5) def test_standardize_xform_ops_non_xformable_prim(caplog): @@ -677,7 +683,7 @@ def test_standardize_xform_ops_with_complex_hierarchy(): pos_before, quat_before = poses_before[name] pos_after, quat_after = poses_after[name] assert_vec3_close(Gf.Vec3d(*pos_before), pos_after, eps=1e-5) - assert_quat_close(Gf.Quatd(*quat_before), quat_after, eps=1e-5) + assert_quat_close(quat_before, quat_after, eps=1e-5) def test_standardize_xform_ops_preserves_float_precision(): @@ -695,7 +701,7 @@ def test_standardize_xform_ops_preserves_float_precision(): translate_op.Set(Gf.Vec3f(1.0, 2.0, 3.0)) orient_op = xformable.AddOrientOp(UsdGeom.XformOp.PrecisionFloat) - orient_op.Set(Gf.Quatf(1.0, 0.0, 0.0, 0.0)) + orient_op.Set(Gf.Quatf(0.0, 0.0, 0.0, 1.0)) scale_op = xformable.AddScaleOp(UsdGeom.XformOp.PrecisionFloat) scale_op.Set(Gf.Vec3f(1.0, 1.0, 1.0)) @@ -707,7 +713,7 @@ def test_standardize_xform_ops_preserves_float_precision(): # Now apply standardize_xform_ops with new values (provided as double precision Python floats) new_translation = (5.0, 10.0, 15.0) - new_orientation = (0.7071068, 0.7071068, 0.0, 0.0) # 90 deg around X + new_orientation = (0.7071068, 0.0, 0.0, 0.7071068) # 90 deg around X new_scale = (2.0, 3.0, 4.0) result = sim_utils.standardize_xform_ops( @@ -740,7 +746,7 @@ def test_standardize_xform_ops_preserves_float_precision(): # Verify the world pose matches what we set pos_after, quat_after = sim_utils.resolve_prim_pose(prim) assert_vec3_close(Gf.Vec3d(*pos_after), new_translation, eps=1e-4) - assert_quat_close(Gf.Quatd(*quat_after), new_orientation, eps=1e-4) + assert_quat_close(quat_after, new_orientation, eps=1e-4) """ @@ -758,7 +764,7 @@ def test_validate_standard_xform_ops_valid(): "/World/TestValid", "Xform", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(1.0, 1.0, 1.0), stage=stage, ) @@ -788,7 +794,7 @@ def test_validate_standard_xform_ops_invalid_order(): translate_op.Set(Gf.Vec3d(1.0, 2.0, 3.0)) orient_op = xformable.AddOrientOp(UsdGeom.XformOp.PrecisionDouble) - orient_op.Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0)) + orient_op.Set(Gf.Quatd(0.0, 0.0, 0.0, 1.0)) # Validate it - should return False assert sim_utils.validate_standard_xform_ops(prim) is False @@ -884,7 +890,7 @@ def test_validate_standard_xform_ops_extra_operations(): "/World/TestExtra", "Xform", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(1.0, 1.0, 1.0), stage=stage, ) @@ -937,7 +943,7 @@ def test_validate_standard_xform_ops_on_geometry(): "/World/TestCube", "Cube", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(2.0, 2.0, 2.0), stage=stage, ) @@ -1045,7 +1051,7 @@ def test_resolve_prim_pose(): pos, quat = sim_utils.resolve_prim_pose(dummy_prim, ref_prim=xform_prim) pos, quat = np.array(pos), np.array(quat) np.testing.assert_allclose(pos, np.zeros(3), atol=1e-3) - np.testing.assert_allclose(quat, np.array([1, 0, 0, 0]), atol=1e-3) + np.testing.assert_allclose(quat, np.array([0.0, 0.0, 0.0, 1.0]), atol=1e-3) # xform prim w.r.t. cube prim pos, quat = sim_utils.resolve_prim_pose(xform_prim, ref_prim=cube_prim) pos, quat = np.array(pos), np.array(quat) @@ -1149,14 +1155,14 @@ def test_convert_world_pose_to_local_basic(): "/World/Parent", "Xform", translation=(5.0, 0.0, 0.0), - orientation=(1.0, 0.0, 0.0, 0.0), # identity rotation + orientation=(0.0, 0.0, 0.0, 1.0), # identity rotation scale=(1.0, 1.0, 1.0), stage=stage, ) # World pose we want to achieve for a child world_position = (10.0, 3.0, 0.0) - world_orientation = (1.0, 0.0, 0.0, 0.0) # identity rotation + world_orientation = (0.0, 0.0, 0.0, 1.0) # identity rotation # Convert to local space local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1167,7 +1173,7 @@ def test_convert_world_pose_to_local_basic(): # The expected local translation is world_position - parent_position = (10-5, 3-0, 0-0) = (5, 3, 0) assert_vec3_close(Gf.Vec3d(*local_translation), (5.0, 3.0, 0.0), eps=1e-5) - assert_quat_close(Gf.Quatd(*local_orientation), (1.0, 0.0, 0.0, 0.0), eps=1e-5) + assert_quat_close(local_orientation, (0.0, 0.0, 0.0, 1.0), eps=1e-5) def test_convert_world_pose_to_local_with_rotation(): @@ -1180,14 +1186,14 @@ def test_convert_world_pose_to_local_with_rotation(): "/World/RotatedParent", "Xform", translation=(0.0, 0.0, 0.0), - orientation=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg around Z + orientation=(0.0, 0.0, 0.7071068, 0.7071068), # 90 deg around Z scale=(1.0, 1.0, 1.0), stage=stage, ) # World pose: position at (1, 0, 0) with identity rotation world_position = (1.0, 0.0, 0.0) - world_orientation = (1.0, 0.0, 0.0, 0.0) + world_orientation = (0.0, 0.0, 0.0, 1.0) # Convert to local space local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1208,7 +1214,7 @@ def test_convert_world_pose_to_local_with_rotation(): # Verify it matches the desired world pose assert_vec3_close(Gf.Vec3d(*child_world_pos), world_position, eps=1e-5) - assert_quat_close(Gf.Quatd(*child_world_quat), world_orientation, eps=1e-5) + assert_quat_close(child_world_quat, world_orientation, eps=1e-5) def test_convert_world_pose_to_local_with_scale(): @@ -1221,14 +1227,14 @@ def test_convert_world_pose_to_local_with_scale(): "/World/ScaledParent", "Xform", translation=(1.0, 2.0, 3.0), - orientation=(1.0, 0.0, 0.0, 0.0), + orientation=(0.0, 0.0, 0.0, 1.0), scale=(2.0, 2.0, 2.0), stage=stage, ) # World pose we want world_position = (5.0, 6.0, 7.0) - world_orientation = (0.7071068, 0.7071068, 0.0, 0.0) # 90 deg around X + world_orientation = (0.7071068, 0.0, 0.0, 0.7071068) # 90 deg around X # Convert to local space local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1249,7 +1255,7 @@ def test_convert_world_pose_to_local_with_scale(): # Verify (may have some tolerance due to scale effects on rotation) assert_vec3_close(Gf.Vec3d(*child_world_pos), world_position, eps=1e-4) - assert_quat_close(Gf.Quatd(*child_world_quat), world_orientation, eps=1e-4) + assert_quat_close(child_world_quat, world_orientation, eps=1e-4) def test_convert_world_pose_to_local_invalid_parent(): @@ -1262,7 +1268,7 @@ def test_convert_world_pose_to_local_invalid_parent(): assert not invalid_prim.IsValid() world_position = (10.0, 20.0, 30.0) - world_orientation = (0.7071068, 0.0, 0.7071068, 0.0) + world_orientation = (0.0, 0.7071068, 0.0, 0.7071068) # Convert with invalid reference prim with pytest.raises(ValueError): @@ -1278,7 +1284,7 @@ def test_convert_world_pose_to_local_root_parent(): root_prim = stage.GetPrimAtPath("/") world_position = (15.0, 25.0, 35.0) - world_orientation = (0.9238795, 0.3826834, 0.0, 0.0) + world_orientation = (0.3826834, 0.0, 0.0, 0.9238795) # Convert with root parent local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1289,7 +1295,7 @@ def test_convert_world_pose_to_local_root_parent(): # Should return unchanged assert_vec3_close(Gf.Vec3d(*local_translation), world_position, eps=1e-10) - assert_quat_close(Gf.Quatd(*local_orientation), world_orientation, eps=1e-10) + assert_quat_close(local_orientation, world_orientation, eps=1e-10) def test_convert_world_pose_to_local_none_orientation(): @@ -1302,7 +1308,7 @@ def test_convert_world_pose_to_local_none_orientation(): "/World/ParentNoOrient", "Xform", translation=(3.0, 4.0, 5.0), - orientation=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg around Z + orientation=(0.0, 0.0, 0.7071068, 0.7071068), # 90 deg around Z stage=stage, ) @@ -1327,7 +1333,7 @@ def test_convert_world_pose_to_local_complex_hierarchy(): "/World/Grandparent", "Xform", translation=(10.0, 0.0, 0.0), - orientation=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg around Z + orientation=(0.0, 0.0, 0.7071068, 0.7071068), # 90 deg around Z scale=(2.0, 2.0, 2.0), stage=stage, ) @@ -1336,14 +1342,14 @@ def test_convert_world_pose_to_local_complex_hierarchy(): "/World/Grandparent/Parent", "Xform", translation=(5.0, 0.0, 0.0), # local to grandparent - orientation=(0.7071068, 0.7071068, 0.0, 0.0), # 90 deg around X + orientation=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg around X scale=(0.5, 0.5, 0.5), stage=stage, ) # World pose we want to achieve world_position = (20.0, 15.0, 10.0) - world_orientation = (1.0, 0.0, 0.0, 0.0) + world_orientation = (0.0, 0.0, 0.0, 1.0) # Convert to local space relative to parent local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1364,7 +1370,7 @@ def test_convert_world_pose_to_local_complex_hierarchy(): # Should match the desired world pose (with some tolerance for complex transforms) assert_vec3_close(Gf.Vec3d(*child_world_pos), world_position, eps=1e-4) - assert_quat_close(Gf.Quatd(*child_world_quat), world_orientation, eps=1e-4) + assert_quat_close(child_world_quat, world_orientation, eps=1e-4) def test_convert_world_pose_to_local_with_mixed_prim_types(): @@ -1378,7 +1384,7 @@ def test_convert_world_pose_to_local_with_mixed_prim_types(): "/World/Grandparent", "Xform", translation=(5.0, 3.0, 2.0), - orientation=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg around Z + orientation=(0.0, 0.0, 0.7071068, 0.7071068), # 90 deg around Z scale=(2.0, 2.0, 2.0), stage=stage, ) @@ -1389,14 +1395,14 @@ def test_convert_world_pose_to_local_with_mixed_prim_types(): # Obtain parent prim pose (should be grandparent's transform) parent_pos, parent_quat = sim_utils.resolve_prim_pose(parent) assert_vec3_close(Gf.Vec3d(*parent_pos), (5.0, 3.0, 2.0), eps=1e-5) - assert_quat_close(Gf.Quatd(*parent_quat), (0.7071068, 0.0, 0.0, 0.7071068), eps=1e-5) + assert_quat_close(parent_quat, (0.0, 0.0, 0.7071068, 0.7071068), eps=1e-5) # Child: Mesh prim (geometry) child = sim_utils.create_prim("/World/Grandparent/Parent/Child", "Mesh", stage=stage) # World pose we want to achieve for the child world_position = (10.0, 5.0, 3.0) - world_orientation = (1.0, 0.0, 0.0, 0.0) # identity rotation + world_orientation = (0.0, 0.0, 0.0, 1.0) # identity rotation # Convert to local space relative to parent (Scope) local_translation, local_orientation = sim_utils.convert_world_pose_to_local( @@ -1411,7 +1417,7 @@ def test_convert_world_pose_to_local_with_mixed_prim_types(): translate_op = xformable.GetTranslateOp() translate_op.Set(Gf.Vec3d(*local_translation)) orient_op = xformable.GetOrientOp() - orient_op.Set(Gf.Quatd(*local_orientation)) + orient_op.Set(Gf.Quatd(local_orientation[3], local_orientation[0], local_orientation[1], local_orientation[2])) # Verify world pose of child child_world_pos, child_world_quat = sim_utils.resolve_prim_pose(child) @@ -1420,4 +1426,4 @@ def test_convert_world_pose_to_local_with_mixed_prim_types(): # Note: Scope prims typically have no transform, so the child's world pose should account # for the grandparent's transform assert_vec3_close(Gf.Vec3d(*child_world_pos), world_position, eps=1e-10) - assert_quat_close(Gf.Quatd(*child_world_quat), world_orientation, eps=1e-10) + assert_quat_close(child_world_quat, world_orientation, eps=1e-10) diff --git a/source/isaaclab/test/sim/test_views_xform_prim.py b/source/isaaclab/test/sim/test_views_xform_prim.py index 94b49a560bc3..64cd86a7466f 100644 --- a/source/isaaclab/test/sim/test_views_xform_prim.py +++ b/source/isaaclab/test/sim/test_views_xform_prim.py @@ -3,1498 +3,295 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Launch Isaac Sim Simulator first.""" +"""USD backend tests for FrameView. + +Imports the shared contract tests and provides the USD-specific +``view_factory`` fixture. Also includes USD-only tests for visibility, +prim ordering, xformOp standardization, and Isaac Sim comparison. +""" from isaaclab.app import AppLauncher -# launch omniverse app simulation_app = AppLauncher(headless=True).app -"""Rest everything follows.""" - import pytest # noqa: E402 import torch # noqa: E402 +import warp as wp # noqa: E402 + +from pxr import Gf, UsdGeom # noqa: E402 try: - from isaacsim.core.prims import XFormPrim as _IsaacSimXformPrimView + from isaacsim.core.experimental.prims import XformPrim as _IsaacSimXformPrimView except (ModuleNotFoundError, ImportError): _IsaacSimXformPrimView = None +from frame_view_contract_utils import * # noqa: F401, F403, E402 +from frame_view_contract_utils import CHILD_OFFSET, ViewBundle # noqa: E402 + import isaaclab.sim as sim_utils # noqa: E402 -from isaaclab.sim.views import XformPrimView as XformPrimView # noqa: E402 +from isaaclab.sim.views import UsdFrameView as FrameView # noqa: E402 from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR # noqa: E402 +pytestmark = pytest.mark.isaacsim_ci +PARENT_POS = (0.0, 0.0, 1.0) + @pytest.fixture(autouse=True) def test_setup_teardown(): - """Create a blank new stage for each test.""" - # Setup: Create a new stage sim_utils.create_new_stage() sim_utils.update_stage() - - # Yield for the test yield - - # Teardown: Clear stage after each test sim_utils.clear_stage() sim_utils.SimulationContext.clear_instance() -""" -Helper functions. -""" - - -def _prepare_indices(index_type, target_indices, num_prims, device): - """Helper function to prepare indices based on type.""" - if index_type == "list": - return target_indices, target_indices - elif index_type == "torch_tensor": - return torch.tensor(target_indices, dtype=torch.int64, device=device), target_indices - elif index_type == "slice_none": - return slice(None), list(range(num_prims)) - else: - raise ValueError(f"Unknown index type: {index_type}") - - -def _skip_if_backend_unavailable(backend: str, device: str): - """Skip tests when the requested backend is unavailable.""" - if device.startswith("cuda") and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - if backend == "fabric" and device == "cpu": - pytest.skip("Warp fabricarray operations on CPU have known issues") - - -def _prim_type_for_backend(backend: str) -> str: - """Return a prim type that is compatible with the backend.""" - return "Camera" if backend == "fabric" else "Xform" - - -def _create_view(pattern: str, device: str, backend: str) -> XformPrimView: - """Create an XformPrimView for the requested backend.""" - if backend == "fabric": - sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, device=device, use_fabric=True)) - return XformPrimView(pattern, device=device) - - -""" -Tests - Initialization. -""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_xform_prim_view_initialization_single_prim(device): - """Test XformPrimView initialization with a single prim.""" - # check if CUDA is available - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - # Create a single xform prim - stage = sim_utils.get_current_stage() - sim_utils.create_prim("/World/Object", "Xform", translation=(1.0, 2.0, 3.0), stage=stage) - - # Create view - view = XformPrimView("/World/Object", device=device) - - # Verify properties - assert view.count == 1 - assert view.prim_paths == ["/World/Object"] - assert view.device == device - assert len(view.prims) == 1 - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_xform_prim_view_initialization_multiple_prims(device): - """Test XformPrimView initialization with multiple prims using pattern matching.""" - # check if CUDA is available - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - # Create multiple prims - num_prims = 10 - stage = sim_utils.get_current_stage() - for i in range(num_prims): - sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=(i * 2.0, 0.0, 1.0), stage=stage) - - # Create view with pattern - view = XformPrimView("/World/Env_.*/Object", device=device) - - # Verify properties - assert view.count == num_prims - assert view.device == device - assert len(view.prims) == num_prims - assert view.prim_paths == [f"/World/Env_{i}/Object" for i in range(num_prims)] - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_xform_prim_view_initialization_multiple_prims_order(device): - """Test XformPrimView initialization with multiple prims using pattern matching with multiple objects per prim. - - This test validates that XformPrimView respects USD stage traversal order, which is based on - creation order (depth-first search), NOT alphabetical/lexical sorting. This is an important - edge case that ensures deterministic prim ordering that matches USD's internal representation. - - The test creates prims in a deliberately non-alphabetical order (1, 0, A, a, 2) and verifies - that they are retrieved in creation order, not sorted order (0, 1, 2, A, a). - """ - # check if CUDA is available - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - # Create multiple prims - num_prims = 10 - stage = sim_utils.get_current_stage() - - # NOTE: Prims are created in a specific order to test that XformPrimView respects - # USD stage traversal order (DFS based on creation order), NOT alphabetical/lexical order. - # This is an important edge case: children under the same parent are returned in the - # order they were created, not sorted by name. - - # First batch: Create Object_1, Object_0, Object_A for each environment - # (intentionally non-alphabetical: 1, 0, A instead of 0, 1, A) - for i in range(num_prims): - sim_utils.create_prim(f"/World/Env_{i}/Object_1", "Xform", translation=(i * 2.0, -2.0, 1.0), stage=stage) - sim_utils.create_prim(f"/World/Env_{i}/Object_0", "Xform", translation=(i * 2.0, 2.0, 1.0), stage=stage) - sim_utils.create_prim(f"/World/Env_{i}/Object_A", "Xform", translation=(i * 2.0, 0.0, -1.0), stage=stage) - - # Second batch: Create Object_a, Object_2 for each environment - # (created after the first batch to verify traversal is depth-first per environment) - for i in range(num_prims): - sim_utils.create_prim(f"/World/Env_{i}/Object_a", "Xform", translation=(i * 2.0, 2.0, -1.0), stage=stage) - sim_utils.create_prim(f"/World/Env_{i}/Object_2", "Xform", translation=(i * 2.0, 2.0, 1.0), stage=stage) - - # Create view with pattern - view = XformPrimView("/World/Env_.*/Object_.*", device=device) - - # Expected ordering: DFS traversal by environment, with children in creation order - # For each Env_i, we expect: Object_1, Object_0, Object_A, Object_a, Object_2 - # (matches creation order, NOT alphabetical: would be 0, 1, 2, A, a if sorted) - expected_prim_paths_ordering = [] - for i in range(num_prims): - expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_1") - expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_0") - expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_A") - expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_a") - expected_prim_paths_ordering.append(f"/World/Env_{i}/Object_2") - - # Verify properties - assert view.count == num_prims * 5 - assert view.device == device - assert len(view.prims) == num_prims * 5 - assert view.prim_paths == expected_prim_paths_ordering - - # Additional validation: Verify ordering is NOT alphabetical - # If it were alphabetical, Object_0 would come before Object_1 - alphabetical_order = [] - for i in range(num_prims): - alphabetical_order.append(f"/World/Env_{i}/Object_0") - alphabetical_order.append(f"/World/Env_{i}/Object_1") - alphabetical_order.append(f"/World/Env_{i}/Object_2") - alphabetical_order.append(f"/World/Env_{i}/Object_A") - alphabetical_order.append(f"/World/Env_{i}/Object_a") - - assert view.prim_paths != alphabetical_order, ( - "Prim paths should follow creation order, not alphabetical order. " - "This test validates that USD stage traversal respects creation order." - ) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_xform_prim_view_initialization_invalid_prim(device): - """Test XformPrimView initialization fails for non-xformable prims.""" - # check if CUDA is available - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - stage = sim_utils.get_current_stage() - - # Create a prim with non-standard xform operations - stage.DefinePrim("/World/InvalidPrim", "Xform") - - # XformPrimView should raise ValueError because prim doesn't have standard operations - with pytest.raises(ValueError, match="not a xformable prim"): - XformPrimView("/World/InvalidPrim", device=device) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_xform_prim_view_initialization_empty_pattern(device): - """Test XformPrimView initialization with pattern that matches no prims.""" - # check if CUDA is available - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - sim_utils.create_new_stage() - - # Create view with pattern that matches nothing - view = XformPrimView("/World/NonExistent_.*", device=device) - - # Should have zero count - assert view.count == 0 - assert len(view.prims) == 0 - - -""" -Tests - Getters. -""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_get_world_poses(device, backend): - """Test getting world poses from XformPrimView.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims with known world poses - expected_positions = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)] - expected_orientations = [(1.0, 0.0, 0.0, 0.0), (0.7071068, 0.0, 0.0, 0.7071068), (0.7071068, 0.7071068, 0.0, 0.0)] - - for i, (pos, quat) in enumerate(zip(expected_positions, expected_orientations)): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, translation=pos, orientation=quat, stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Convert expected values to tensors - expected_positions_tensor = torch.tensor(expected_positions, dtype=torch.float32, device=device) - expected_orientations_tensor = torch.tensor(expected_orientations, dtype=torch.float32, device=device) - - # Get world poses - positions, orientations = view.get_world_poses() - - # Verify shapes - assert positions.shape == (3, 3) - assert orientations.shape == (3, 4) - - # Verify positions - torch.testing.assert_close(positions, expected_positions_tensor, atol=1e-5, rtol=0) - - # Verify orientations (allow for quaternion sign ambiguity) - try: - torch.testing.assert_close(orientations, expected_orientations_tensor, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(orientations, -expected_orientations_tensor, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_get_local_poses(device, backend): - """Test getting local poses from XformPrimView.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create parent and child prims - sim_utils.create_prim("/World/Parent", "Xform", translation=(10.0, 0.0, 0.0), stage=stage) - - # Children with different local poses - expected_local_positions = [(1.0, 0.0, 0.0), (0.0, 2.0, 0.0), (0.0, 0.0, 3.0)] - expected_local_orientations = [ - (1.0, 0.0, 0.0, 0.0), - (0.7071068, 0.0, 0.0, 0.7071068), - (0.7071068, 0.7071068, 0.0, 0.0), - ] - - for i, (pos, quat) in enumerate(zip(expected_local_positions, expected_local_orientations)): - sim_utils.create_prim(f"/World/Parent/Child_{i}", prim_type, translation=pos, orientation=quat, stage=stage) - - # Create view - view = _create_view("/World/Parent/Child_.*", device=device, backend=backend) +# ------------------------------------------------------------------ +# Contract fixture +# ------------------------------------------------------------------ - # Get local poses - translations, orientations = view.get_local_poses() - - # Verify shapes - assert translations.shape == (3, 3) - assert orientations.shape == (3, 4) - - # Convert expected values to tensors - expected_translations_tensor = torch.tensor(expected_local_positions, dtype=torch.float32, device=device) - expected_orientations_tensor = torch.tensor(expected_local_orientations, dtype=torch.float32, device=device) - - # Verify translations - torch.testing.assert_close(translations, expected_translations_tensor, atol=1e-5, rtol=0) - - # Verify orientations (allow for quaternion sign ambiguity) - try: - torch.testing.assert_close(orientations, expected_orientations_tensor, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(orientations, -expected_orientations_tensor, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_get_scales(device, backend): - """Test getting scales from XformPrimView.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims with different scales - expected_scales = [(1.0, 1.0, 1.0), (2.0, 2.0, 2.0), (1.0, 2.0, 3.0)] - - for i, scale in enumerate(expected_scales): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, scale=scale, stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - expected_scales_tensor = torch.tensor(expected_scales, dtype=torch.float32, device=device) - - # Get scales - scales = view.get_scales() - - # Verify shape and values - assert scales.shape == (3, 3) - torch.testing.assert_close(scales, expected_scales_tensor, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_get_visibility(device): - """Test getting visibility when all prims are visible.""" - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - stage = sim_utils.get_current_stage() - - # Create prims (default is visible) - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage) - - # Create view - view = XformPrimView("/World/Object_.*", device=device) - - # Get visibility - visibility = view.get_visibility() - - # Verify shape and values - assert visibility.shape == (num_prims,) - assert visibility.dtype == torch.bool - assert torch.all(visibility), "All prims should be visible by default" - - -""" -Tests - Setters. -""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_world_poses(device, backend): - """Test setting world poses in XformPrimView.""" - _skip_if_backend_unavailable(backend, device) +def _get_parent_positions(num_envs, device="cpu"): + """Read parent Xform positions from USD.""" stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, translation=(0.0, 0.0, 0.0), stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Set new world poses - new_positions = torch.tensor( - [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0], [13.0, 14.0, 15.0]], device=device - ) - new_orientations = torch.tensor( - [ - [1.0, 0.0, 0.0, 0.0], - [0.7071068, 0.0, 0.0, 0.7071068], - [0.7071068, 0.7071068, 0.0, 0.0], - [0.9238795, 0.3826834, 0.0, 0.0], - [0.7071068, 0.0, 0.7071068, 0.0], - ], - device=device, - ) + xform_cache = UsdGeom.XformCache() + positions = [] + for i in range(num_envs): + prim = stage.GetPrimAtPath(f"/World/Parent_{i}") + tf = xform_cache.GetLocalToWorldTransform(prim) + t = tf.ExtractTranslation() + positions.append([float(t[0]), float(t[1]), float(t[2])]) + return torch.tensor(positions, dtype=torch.float32, device=device) - view.set_world_poses(new_positions, new_orientations) - # Get the poses back - retrieved_positions, retrieved_orientations = view.get_world_poses() - - # Verify they match - torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) - # Check quaternions (allow sign flip) - try: - torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_world_poses_only_positions(device, backend): - """Test setting only positions, leaving orientations unchanged.""" - _skip_if_backend_unavailable(backend, device) +def _set_parent_positions(positions, num_envs): + """Write parent Xform positions to USD.""" + from pxr import Sdf # noqa: PLC0415 stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims with specific orientations - initial_quat = (0.7071068, 0.0, 0.0, 0.7071068) # 90 deg around Z - for i in range(3): - sim_utils.create_prim( - f"/World/Object_{i}", prim_type, translation=(0.0, 0.0, 0.0), orientation=initial_quat, stage=stage + with Sdf.ChangeBlock(): + for i in range(num_envs): + prim = stage.GetPrimAtPath(f"/World/Parent_{i}") + pos = positions[i] + prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(float(pos[0]), float(pos[1]), float(pos[2]))) + + +@pytest.fixture +def view_factory(): + """USD factory: parent Xform at PARENT_POS + child Xform at CHILD_OFFSET.""" + + def factory(num_envs: int, device: str) -> ViewBundle: + stage = sim_utils.get_current_stage() + for i in range(num_envs): + sim_utils.create_prim(f"/World/Parent_{i}", "Xform", translation=PARENT_POS, stage=stage) + sim_utils.create_prim(f"/World/Parent_{i}/Child", "Xform", translation=CHILD_OFFSET, stage=stage) + + view = FrameView("/World/Parent_.*/Child", device=device) + return ViewBundle( + view=view, + get_parent_pos=_get_parent_positions, + set_parent_pos=_set_parent_positions, + teardown=lambda: None, ) - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Get initial orientations - _, initial_orientations = view.get_world_poses() - - # Set only positions - new_positions = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device) - view.set_world_poses(positions=new_positions, orientations=None) - - # Get poses back - retrieved_positions, retrieved_orientations = view.get_world_poses() - - # Positions should be updated - torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) - - # Orientations should be unchanged - try: - torch.testing.assert_close(retrieved_orientations, initial_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -initial_orientations, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_world_poses_only_orientations(device, backend): - """Test setting only orientations, leaving positions unchanged.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims with specific positions - for i in range(3): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, translation=(float(i), 0.0, 0.0), stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Get initial positions - initial_positions, _ = view.get_world_poses() - - # Set only orientations - new_orientations = torch.tensor( - [[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.7071068, 0.0, 0.0], [0.9238795, 0.3826834, 0.0, 0.0]], - device=device, - ) - view.set_world_poses(positions=None, orientations=new_orientations) - - # Get poses back - retrieved_positions, retrieved_orientations = view.get_world_poses() - - # Positions should be unchanged - torch.testing.assert_close(retrieved_positions, initial_positions, atol=1e-5, rtol=0) - - # Orientations should be updated - try: - torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_world_poses_with_hierarchy(device, backend): - """Test setting world poses correctly handles parent transformations.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - child_prim_type = _prim_type_for_backend(backend) - - # Create parent prims - for i in range(3): - parent_pos = (i * 10.0, 0.0, 0.0) - parent_quat = (0.7071068, 0.0, 0.0, 0.7071068) # 90 deg around Z - sim_utils.create_prim( - f"/World/Parent_{i}", "Xform", translation=parent_pos, orientation=parent_quat, stage=stage - ) - # Create child prims - sim_utils.create_prim(f"/World/Parent_{i}/Child", child_prim_type, translation=(0.0, 0.0, 0.0), stage=stage) - - # Create view for children - view = _create_view("/World/Parent_.*/Child", device=device, backend=backend) - - # Set world poses for children - desired_world_positions = torch.tensor([[5.0, 5.0, 0.0], [15.0, 5.0, 0.0], [25.0, 5.0, 0.0]], device=device) - desired_world_orientations = torch.tensor( - [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device - ) - - view.set_world_poses(desired_world_positions, desired_world_orientations) - - # Get world poses back - retrieved_positions, retrieved_orientations = view.get_world_poses() - - # Should match desired world poses - torch.testing.assert_close(retrieved_positions, desired_world_positions, atol=1e-4, rtol=0) - try: - torch.testing.assert_close(retrieved_orientations, desired_world_orientations, atol=1e-4, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -desired_world_orientations, atol=1e-4, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_local_poses(device, backend): - """Test setting local poses in XformPrimView.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create parent - sim_utils.create_prim("/World/Parent", "Xform", translation=(5.0, 5.0, 5.0), stage=stage) - - # Create children - num_prims = 4 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Parent/Child_{i}", prim_type, translation=(0.0, 0.0, 0.0), stage=stage) - - # Create view - view = _create_view("/World/Parent/Child_.*", device=device, backend=backend) - - # Set new local poses - new_translations = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0], [4.0, 4.0, 4.0]], device=device) - new_orientations = torch.tensor( - [ - [1.0, 0.0, 0.0, 0.0], - [0.7071068, 0.0, 0.0, 0.7071068], - [0.7071068, 0.7071068, 0.0, 0.0], - [0.9238795, 0.3826834, 0.0, 0.0], - ], - device=device, - ) - - view.set_local_poses(new_translations, new_orientations) - - # Get local poses back - retrieved_translations, retrieved_orientations = view.get_local_poses() - - # Verify they match - torch.testing.assert_close(retrieved_translations, new_translations, atol=1e-5, rtol=0) - try: - torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_local_poses_only_translations(device, backend): - """Test setting only local translations.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create parent and children with specific orientations - sim_utils.create_prim("/World/Parent", "Xform", translation=(0.0, 0.0, 0.0), stage=stage) - initial_quat = (0.7071068, 0.0, 0.0, 0.7071068) - - for i in range(3): - sim_utils.create_prim( - f"/World/Parent/Child_{i}", - prim_type, - translation=(0.0, 0.0, 0.0), - orientation=initial_quat, - stage=stage, - ) - - # Create view - view = _create_view("/World/Parent/Child_.*", device=device, backend=backend) - - # Get initial orientations - _, initial_orientations = view.get_local_poses() - - # Set only translations - new_translations = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device) - view.set_local_poses(translations=new_translations, orientations=None) - - # Get poses back - retrieved_translations, retrieved_orientations = view.get_local_poses() + return factory - # Translations should be updated - torch.testing.assert_close(retrieved_translations, new_translations, atol=1e-5, rtol=0) - # Orientations should be unchanged - try: - torch.testing.assert_close(retrieved_orientations, initial_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -initial_orientations, atol=1e-5, rtol=0) +# ================================================================== +# USD-only: Visibility +# ================================================================== @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_set_scales(device, backend): - """Test setting scales in XformPrimView.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, scale=(1.0, 1.0, 1.0), stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Set new scales - new_scales = torch.tensor( - [[2.0, 2.0, 2.0], [1.0, 2.0, 3.0], [0.5, 0.5, 0.5], [3.0, 1.0, 2.0], [1.5, 1.5, 1.5]], device=device - ) - - view.set_scales(new_scales) - - # Get scales back - retrieved_scales = view.get_scales() - - # Verify they match - torch.testing.assert_close(retrieved_scales, new_scales, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_set_visibility(device): +def test_visibility_toggle(device): """Test toggling visibility multiple times.""" if device == "cuda" and not torch.cuda.is_available(): pytest.skip("CUDA not available") stage = sim_utils.get_current_stage() - - # Create prims num_prims = 3 for i in range(num_prims): sim_utils.create_prim(f"/World/Object_{i}", "Xform", stage=stage) - # Create view - view = XformPrimView("/World/Object_.*", device=device) + view = FrameView("/World/Object_.*", device=device) - # Initial state: all visible - visibility = view.get_visibility() - assert torch.all(visibility), "All should be visible initially" + assert torch.all(view.get_visibility()) - # Make all invisible view.set_visibility(torch.zeros(num_prims, dtype=torch.bool, device=device)) - visibility = view.get_visibility() - assert not torch.any(visibility), "All should be invisible" + assert not torch.any(view.get_visibility()) - # Make all visible again view.set_visibility(torch.ones(num_prims, dtype=torch.bool, device=device)) - visibility = view.get_visibility() - assert torch.all(visibility), "All should be visible again" - - # Toggle individual prims - view.set_visibility(torch.tensor([False], dtype=torch.bool, device=device), indices=[1]) - visibility = view.get_visibility() - assert visibility[0] and not visibility[1] and visibility[2], "Only middle prim should be invisible" - - -""" -Tests - Index Handling. -""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("index_type", ["list", "torch_tensor", "slice_none"]) -@pytest.mark.parametrize("method", ["world_poses", "local_poses", "scales", "visibility"]) -def test_index_types_get_methods(device, index_type, method): - """Test that getter methods work with different index types.""" - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - stage = sim_utils.get_current_stage() - - # Create prims based on method type - num_prims = 10 - if method == "local_poses": - # Create parent and children for local poses - sim_utils.create_prim("/World/Parent", "Xform", translation=(10.0, 0.0, 0.0), stage=stage) - for i in range(num_prims): - sim_utils.create_prim( - f"/World/Parent/Child_{i}", "Xform", translation=(float(i), float(i) * 0.5, 0.0), stage=stage - ) - view = XformPrimView("/World/Parent/Child_.*", device=device) - elif method == "scales": - # Create prims with different scales - for i in range(num_prims): - scale = (1.0 + i * 0.5, 1.0 + i * 0.3, 1.0 + i * 0.2) - sim_utils.create_prim(f"/World/Object_{i}", "Xform", scale=scale, stage=stage) - view = XformPrimView("/World/Object_.*", device=device) - else: # world_poses - # Create prims with different positions - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage) - view = XformPrimView("/World/Object_.*", device=device) - - # Get all data as reference - if method == "world_poses": - all_data1, all_data2 = view.get_world_poses() - elif method == "local_poses": - all_data1, all_data2 = view.get_local_poses() - elif method == "scales": - all_data1 = view.get_scales() - all_data2 = None - else: # visibility - all_data1 = view.get_visibility() - all_data2 = None - - # Prepare indices - target_indices_base = [2, 5, 7] - indices, target_indices = _prepare_indices(index_type, target_indices_base, num_prims, device) - - # Get subset - if method == "world_poses": - subset_data1, subset_data2 = view.get_world_poses(indices=indices) # type: ignore[arg-type] - elif method == "local_poses": - subset_data1, subset_data2 = view.get_local_poses(indices=indices) # type: ignore[arg-type] - elif method == "scales": - subset_data1 = view.get_scales(indices=indices) # type: ignore[arg-type] - subset_data2 = None - else: # visibility - subset_data1 = view.get_visibility(indices=indices) # type: ignore[arg-type] - subset_data2 = None - - # Verify shapes - expected_count = len(target_indices) - if method == "visibility": - assert subset_data1.shape == (expected_count,) - else: - assert subset_data1.shape == (expected_count, 3) - if subset_data2 is not None: - assert subset_data2.shape == (expected_count, 4) - - # Verify values - target_indices_tensor = torch.tensor(target_indices, dtype=torch.int64, device=device) - torch.testing.assert_close(subset_data1, all_data1[target_indices_tensor], atol=1e-5, rtol=0) - if subset_data2 is not None and all_data2 is not None: - torch.testing.assert_close(subset_data2, all_data2[target_indices_tensor], atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("index_type", ["list", "torch_tensor", "slice_none"]) -@pytest.mark.parametrize("method", ["world_poses", "local_poses", "scales", "visibility"]) -def test_index_types_set_methods(device, index_type, method): - """Test that setter methods work with different index types.""" - if device == "cuda" and not torch.cuda.is_available(): - pytest.skip("CUDA not available") - - stage = sim_utils.get_current_stage() - - # Create prims based on method type - num_prims = 10 - if method == "local_poses": - # Create parent and children for local poses - sim_utils.create_prim("/World/Parent", "Xform", translation=(5.0, 5.0, 0.0), stage=stage) - for i in range(num_prims): - sim_utils.create_prim(f"/World/Parent/Child_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage) - view = XformPrimView("/World/Parent/Child_.*", device=device) - else: # world_poses or scales - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(0.0, 0.0, 0.0), stage=stage) - view = XformPrimView("/World/Object_.*", device=device) - - # Get initial data - if method == "world_poses": - initial_data1, initial_data2 = view.get_world_poses() - elif method == "local_poses": - initial_data1, initial_data2 = view.get_local_poses() - elif method == "scales": - initial_data1 = view.get_scales() - initial_data2 = None - else: # visibility - initial_data1 = view.get_visibility() - initial_data2 = None - - # Prepare indices - target_indices_base = [2, 5, 7] - indices, target_indices = _prepare_indices(index_type, target_indices_base, num_prims, device) - - # Prepare new data - num_to_set = len(target_indices) - if method in ["world_poses", "local_poses"]: - new_data1 = torch.randn(num_to_set, 3, device=device) * 10.0 - new_data2 = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_to_set, dtype=torch.float32, device=device) - elif method == "scales": - new_data1 = torch.rand(num_to_set, 3, device=device) * 2.0 + 0.5 - new_data2 = None - else: # visibility - # Set to False to test change (default is True) - new_data1 = torch.zeros(num_to_set, dtype=torch.bool, device=device) - new_data2 = None - - # Set data - if method == "world_poses": - view.set_world_poses(positions=new_data1, orientations=new_data2, indices=indices) # type: ignore[arg-type] - elif method == "local_poses": - view.set_local_poses(translations=new_data1, orientations=new_data2, indices=indices) # type: ignore[arg-type] - elif method == "scales": - view.set_scales(scales=new_data1, indices=indices) # type: ignore[arg-type] - else: # visibility - view.set_visibility(visibility=new_data1, indices=indices) # type: ignore[arg-type] - - # Get all data after update - if method == "world_poses": - updated_data1, updated_data2 = view.get_world_poses() - elif method == "local_poses": - updated_data1, updated_data2 = view.get_local_poses() - elif method == "scales": - updated_data1 = view.get_scales() - updated_data2 = None - else: # visibility - updated_data1 = view.get_visibility() - updated_data2 = None - - # Verify that specified indices were updated - for i, target_idx in enumerate(target_indices): - torch.testing.assert_close(updated_data1[target_idx], new_data1[i], atol=1e-5, rtol=0) - if new_data2 is not None and updated_data2 is not None: - try: - torch.testing.assert_close(updated_data2[target_idx], new_data2[i], atol=1e-5, rtol=0) - except AssertionError: - # Account for quaternion sign ambiguity - torch.testing.assert_close(updated_data2[target_idx], -new_data2[i], atol=1e-5, rtol=0) - - # Verify that other indices were NOT updated (only for non-slice(None) cases) - if index_type != "slice_none": - for i in range(num_prims): - if i not in target_indices: - torch.testing.assert_close(updated_data1[i], initial_data1[i], atol=1e-5, rtol=0) - if initial_data2 is not None and updated_data2 is not None: - try: - torch.testing.assert_close(updated_data2[i], initial_data2[i], atol=1e-5, rtol=0) - except AssertionError: - # Account for quaternion sign ambiguity - torch.testing.assert_close(updated_data2[i], -initial_data2[i], atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_indices_single_element(device, backend): - """Test with a single index.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, translation=(float(i), 0.0, 0.0), stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Test with single index - indices = [3] - positions, orientations = view.get_world_poses(indices=indices) - - # Verify shapes - assert positions.shape == (1, 3) - assert orientations.shape == (1, 4) - - # Set pose for single index - new_position = torch.tensor([[100.0, 200.0, 300.0]], device=device) - view.set_world_poses(positions=new_position, indices=indices) - - # Verify it was set - retrieved_positions, _ = view.get_world_poses(indices=indices) - torch.testing.assert_close(retrieved_positions, new_position, atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_indices_out_of_order(device, backend): - """Test with indices provided in non-sequential order.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) + assert torch.all(view.get_visibility()) - # Create prims - num_prims = 10 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", prim_type, translation=(0.0, 0.0, 0.0), stage=stage) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Use out-of-order indices - indices = [7, 2, 9, 0, 5] - new_positions = torch.tensor( - [[7.0, 0.0, 0.0], [2.0, 0.0, 0.0], [9.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 0.0, 0.0]], device=device + view.set_visibility( + torch.tensor([False], dtype=torch.bool, device=device), indices=wp.array([1], dtype=wp.int32, device=device) ) - - # Set poses with out-of-order indices - view.set_world_poses(positions=new_positions, indices=indices) - - # Get all poses - all_positions, _ = view.get_world_poses() - - # Verify each index got the correct value - expected_x_values = [0.0, 0.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0, 0.0, 9.0] - for i in range(num_prims): - assert abs(all_positions[i, 0].item() - expected_x_values[i]) < 1e-5 + vis = view.get_visibility() + assert vis[0] and not vis[1] and vis[2] @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) -def test_indices_with_only_positions_or_orientations(device, backend): - """Test indices work correctly when setting only positions or only orientations.""" - _skip_if_backend_unavailable(backend, device) - - stage = sim_utils.get_current_stage() - prim_type = _prim_type_for_backend(backend) - - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim( - f"/World/Object_{i}", - prim_type, - translation=(0.0, 0.0, 0.0), - orientation=(1.0, 0.0, 0.0, 0.0), - stage=stage, - ) - - # Create view - view = _create_view("/World/Object_.*", device=device, backend=backend) - - # Get initial poses - initial_positions, initial_orientations = view.get_world_poses() - - # Set only positions for specific indices - indices = [1, 3] - new_positions = torch.tensor([[10.0, 0.0, 0.0], [30.0, 0.0, 0.0]], device=device) - view.set_world_poses(positions=new_positions, orientations=None, indices=indices) - - # Get updated poses - updated_positions, updated_orientations = view.get_world_poses() - - # Verify positions updated for indices 1 and 3, others unchanged - torch.testing.assert_close(updated_positions[1], new_positions[0], atol=1e-5, rtol=0) - torch.testing.assert_close(updated_positions[3], new_positions[1], atol=1e-5, rtol=0) - torch.testing.assert_close(updated_positions[0], initial_positions[0], atol=1e-5, rtol=0) - - # Verify all orientations unchanged - try: - torch.testing.assert_close(updated_orientations, initial_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(updated_orientations, -initial_orientations, atol=1e-5, rtol=0) - - # Now set only orientations for different indices - indices2 = [0, 4] - new_orientations = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.7071068, 0.0, 0.0]], device=device) - view.set_world_poses(positions=None, orientations=new_orientations, indices=indices2) - - # Get final poses - final_positions, final_orientations = view.get_world_poses() - - # Verify positions unchanged from previous step - torch.testing.assert_close(final_positions, updated_positions, atol=1e-5, rtol=0) - - # Verify orientations updated for indices 0 and 4 - try: - torch.testing.assert_close(final_orientations[0], new_orientations[0], atol=1e-5, rtol=0) - torch.testing.assert_close(final_orientations[4], new_orientations[1], atol=1e-5, rtol=0) - except AssertionError: - # Account for quaternion sign ambiguity - torch.testing.assert_close(final_orientations[0], -new_orientations[0], atol=1e-5, rtol=0) - torch.testing.assert_close(final_orientations[4], -new_orientations[1], atol=1e-5, rtol=0) - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_index_type_none_equivalent_to_all(device): - """Test that indices=None is equivalent to getting/setting all prims.""" +def test_visibility_parent_inheritance(device): + """Making a parent invisible hides all children.""" if device == "cuda" and not torch.cuda.is_available(): pytest.skip("CUDA not available") stage = sim_utils.get_current_stage() + sim_utils.create_prim("/World/Parent", "Xform", stage=stage) + for i in range(4): + sim_utils.create_prim(f"/World/Parent/Child_{i}", "Xform", stage=stage) - # Create prims - num_prims = 6 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Object_{i}", "Xform", translation=(float(i), 0.0, 0.0), stage=stage) - - # Create view - view = XformPrimView("/World/Object_.*", device=device) - - # Get poses with indices=None - pos_none, quat_none = view.get_world_poses(indices=None) - - # Get poses with no argument (default) - pos_default, quat_default = view.get_world_poses() - - # Get poses with slice(None) - pos_slice, quat_slice = view.get_world_poses(indices=slice(None)) # type: ignore[arg-type] - - # All should be equivalent - torch.testing.assert_close(pos_none, pos_default, atol=1e-10, rtol=0) - torch.testing.assert_close(quat_none, quat_default, atol=1e-10, rtol=0) - torch.testing.assert_close(pos_none, pos_slice, atol=1e-10, rtol=0) - torch.testing.assert_close(quat_none, quat_slice, atol=1e-10, rtol=0) - - # Test the same for set operations - new_positions = torch.randn(num_prims, 3, device=device) * 10.0 - new_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_prims, dtype=torch.float32, device=device) - - # Set with indices=None - view.set_world_poses(positions=new_positions, orientations=new_orientations, indices=None) - pos_after_none, quat_after_none = view.get_world_poses() - - # Reset - view.set_world_poses(positions=torch.zeros(num_prims, 3, device=device), indices=None) + parent_view = FrameView("/World/Parent", device=device) + children_view = FrameView("/World/Parent/Child_.*", device=device) - # Set with slice(None) - view.set_world_poses( - positions=new_positions, - orientations=new_orientations, - indices=slice(None), # type: ignore[arg-type] - ) - pos_after_slice, quat_after_slice = view.get_world_poses() + parent_view.set_visibility(torch.tensor([False], dtype=torch.bool, device=device)) + assert not torch.any(children_view.get_visibility()) - # Should be equivalent - torch.testing.assert_close(pos_after_none, pos_after_slice, atol=1e-5, rtol=0) - torch.testing.assert_close(quat_after_none, quat_after_slice, atol=1e-5, rtol=0) + parent_view.set_visibility(torch.tensor([True], dtype=torch.bool, device=device)) + assert torch.all(children_view.get_visibility()) -""" -Tests - Integration. -""" +# ================================================================== +# USD-only: Prim ordering +# ================================================================== @pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_with_franka_robots(device): - """Test XformPrimView with real Franka robot USD assets.""" +def test_prim_ordering_follows_creation_order(device): + """Prims are returned in USD creation order (DFS), not alphabetical.""" if device == "cuda" and not torch.cuda.is_available(): pytest.skip("CUDA not available") stage = sim_utils.get_current_stage() + num_envs = 3 + for i in range(num_envs): + sim_utils.create_prim(f"/World/Env_{i}/Object_1", "Xform", stage=stage) + sim_utils.create_prim(f"/World/Env_{i}/Object_0", "Xform", stage=stage) + sim_utils.create_prim(f"/World/Env_{i}/Object_A", "Xform", stage=stage) - # Load Franka robot assets - franka_usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/FrankaRobotics/FrankaPanda/franka.usd" - - # Add two Franka robots to the stage - sim_utils.create_prim("/World/Franka_1", "Xform", usd_path=franka_usd_path, stage=stage) - sim_utils.create_prim("/World/Franka_2", "Xform", usd_path=franka_usd_path, stage=stage) - - # Create view for both Frankas - frankas_view = XformPrimView("/World/Franka_.*", device=device) - - # Verify count - assert frankas_view.count == 2 - - # Get initial world poses (should be at origin) - initial_positions, initial_orientations = frankas_view.get_world_poses() - - # Verify initial positions are at origin - expected_initial_positions = torch.zeros(2, 3, device=device) - torch.testing.assert_close(initial_positions, expected_initial_positions, atol=1e-5, rtol=0) - - # Verify initial orientations are identity - expected_initial_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device) - try: - torch.testing.assert_close(initial_orientations, expected_initial_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(initial_orientations, -expected_initial_orientations, atol=1e-5, rtol=0) - - # Set new world poses - new_positions = torch.tensor([[10.0, 10.0, 0.0], [-40.0, -40.0, 0.0]], device=device) - # 90° rotation around Z axis for first, -90° for second - new_orientations = torch.tensor( - [[0.7071068, 0.0, 0.0, 0.7071068], [0.7071068, 0.0, 0.0, -0.7071068]], device=device - ) + view = FrameView("/World/Env_.*/Object_.*", device=device) + expected = [] + for i in range(num_envs): + expected += [f"/World/Env_{i}/Object_1", f"/World/Env_{i}/Object_0", f"/World/Env_{i}/Object_A"] - frankas_view.set_world_poses(positions=new_positions, orientations=new_orientations) + assert view.prim_paths == expected - # Get poses back and verify - retrieved_positions, retrieved_orientations = frankas_view.get_world_poses() - torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) - try: - torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) +# ================================================================== +# USD-only: xformOp standardization +# ================================================================== @pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_with_nested_targets(device): - """Test with nested frame/target structure similar to Isaac Sim tests.""" +def test_standardize_transform_op(device): + """FrameView standardizes a prim with xformOp:transform to translate/orient/scale.""" if device == "cuda" and not torch.cuda.is_available(): pytest.skip("CUDA not available") - stage = sim_utils.get_current_stage() - - # Create frames and targets - for i in range(1, 4): - sim_utils.create_prim(f"/World/Frame_{i}", "Xform", stage=stage) - sim_utils.create_prim(f"/World/Frame_{i}/Target", "Xform", stage=stage) - - # Create views - frames_view = XformPrimView("/World/Frame_.*", device=device) - targets_view = XformPrimView("/World/Frame_.*/Target", device=device) + expected_pos = (3.0, -1.0, 0.5) + matrix = Gf.Matrix4d(1.0) + matrix.SetTranslateOnly(Gf.Vec3d(*expected_pos)) - assert frames_view.count == 3 - assert targets_view.count == 3 - - # Set local poses for frames - frame_translations = torch.tensor([[0.0, 0.0, 0.0], [0.0, 10.0, 5.0], [0.0, 3.0, 5.0]], device=device) - frames_view.set_local_poses(translations=frame_translations) + stage = sim_utils.get_current_stage() + prim = stage.DefinePrim("/World/TransformPrim", "Xform") + UsdGeom.Xformable(prim).AddTransformOp().Set(matrix) - # Set local poses for targets - target_translations = torch.tensor([[0.0, 20.0, 10.0], [0.0, 30.0, 20.0], [0.0, 50.0, 10.0]], device=device) - targets_view.set_local_poses(translations=target_translations) + view = FrameView("/World/TransformPrim", device=device) + assert sim_utils.validate_standard_xform_ops(view.prims[0]) - # Get world poses of targets - world_positions, _ = targets_view.get_world_poses() + ordered_ops = UsdGeom.Xformable(view.prims[0]).GetOrderedXformOps() + op_names = [op.GetOpName() for op in ordered_ops] + assert op_names == ["xformOp:translate", "xformOp:orient", "xformOp:scale"] + assert ordered_ops[0].Get() == Gf.Vec3d(*expected_pos) - # Expected world positions are frame_translation + target_translation - expected_positions = torch.tensor([[0.0, 20.0, 10.0], [0.0, 40.0, 25.0], [0.0, 53.0, 15.0]], device=device) - torch.testing.assert_close(world_positions, expected_positions, atol=1e-5, rtol=0) +# ================================================================== +# USD-only: Nested hierarchy (frame + target) +# ================================================================== @pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_visibility_with_hierarchy(device): - """Test visibility with parent-child hierarchy and inheritance.""" +def test_nested_hierarchy_world_poses(device): + """World pose of nested child == sum of parent + child translations.""" if device == "cuda" and not torch.cuda.is_available(): pytest.skip("CUDA not available") stage = sim_utils.get_current_stage() + frame_positions = [(0.0, 0.0, 0.0), (0.0, 10.0, 5.0), (0.0, 3.0, 5.0)] + target_positions = [(0.0, 20.0, 10.0), (0.0, 30.0, 20.0), (0.0, 50.0, 10.0)] - # Create parent and children - sim_utils.create_prim("/World/Parent", "Xform", stage=stage) - - num_children = 4 - for i in range(num_children): - sim_utils.create_prim(f"/World/Parent/Child_{i}", "Xform", stage=stage) - - # Create views for both parent and children - parent_view = XformPrimView("/World/Parent", device=device) - children_view = XformPrimView("/World/Parent/Child_.*", device=device) - - # Verify parent and all children are visible initially - parent_visibility = parent_view.get_visibility() - children_visibility = children_view.get_visibility() - assert parent_visibility[0], "Parent should be visible initially" - assert torch.all(children_visibility), "All children should be visible initially" - - # Make some children invisible directly - new_visibility = torch.tensor([True, False, True, False], dtype=torch.bool, device=device) - children_view.set_visibility(new_visibility) - - # Verify the visibility changes - retrieved_visibility = children_view.get_visibility() - torch.testing.assert_close(retrieved_visibility, new_visibility) - - # Make all children visible again - children_view.set_visibility(torch.ones(num_children, dtype=torch.bool, device=device)) - all_visible = children_view.get_visibility() - assert torch.all(all_visible), "All children should be visible again" - - # Now test parent visibility inheritance: - # Make parent invisible - parent_view.set_visibility(torch.tensor([False], dtype=torch.bool, device=device)) - - # Verify parent is invisible - parent_visibility = parent_view.get_visibility() - assert not parent_visibility[0], "Parent should be invisible" - - # Verify children are also invisible (due to parent being invisible) - children_visibility = children_view.get_visibility() - assert not torch.any(children_visibility), "All children should be invisible when parent is invisible" + for i in range(3): + sim_utils.create_prim(f"/World/Frame_{i}", "Xform", translation=frame_positions[i], stage=stage) + sim_utils.create_prim(f"/World/Frame_{i}/Target", "Xform", translation=target_positions[i], stage=stage) - # Make parent visible again - parent_view.set_visibility(torch.tensor([True], dtype=torch.bool, device=device)) + frames_view = FrameView("/World/Frame_.*", device=device) + targets_view = FrameView("/World/Frame_.*/Target", device=device) - # Verify parent is visible - parent_visibility = parent_view.get_visibility() - assert parent_visibility[0], "Parent should be visible again" + frames_view.set_local_poses(translations=torch.tensor(frame_positions, device=device)) + targets_view.set_local_poses(translations=torch.tensor(target_positions, device=device)) - # Verify children are also visible again - children_visibility = children_view.get_visibility() - assert torch.all(children_visibility), "All children should be visible again when parent is visible" + world_pos = targets_view.get_world_poses()[0].torch + expected = torch.tensor( + [[f[j] + t[j] for j in range(3)] for f, t in zip(frame_positions, target_positions)], + device=device, + ) + torch.testing.assert_close(world_pos, expected, atol=1e-5, rtol=0) -""" -Tests - Comparison with Isaac Sim Implementation. -""" +# ================================================================== +# USD-only: Comparison with Isaac Sim +# ================================================================== def test_compare_get_world_poses_with_isaacsim(): """Compare get_world_poses with Isaac Sim's implementation.""" - stage = sim_utils.get_current_stage() - - # Check if Isaac Sim is available if _IsaacSimXformPrimView is None: pytest.skip("Isaac Sim is not available") - # Create prims with various poses + stage = sim_utils.get_current_stage() num_prims = 10 for i in range(num_prims): pos = (i * 2.0, i * 0.5, i * 1.5) - # Vary orientations - if i % 3 == 0: - quat = (1.0, 0.0, 0.0, 0.0) # Identity - elif i % 3 == 1: - quat = (0.7071068, 0.0, 0.0, 0.7071068) # 90 deg around Z - else: - quat = (0.7071068, 0.7071068, 0.0, 0.0) # 90 deg around X + quat = (0.0, 0.0, 0.0, 1.0) if i % 2 == 0 else (0.0, 0.0, 0.7071068, 0.7071068) sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=pos, orientation=quat, stage=stage) pattern = "/World/Env_.*/Object" - - # Create both views - isaaclab_view = XformPrimView(pattern, device="cpu") + isaaclab_view = FrameView(pattern, device="cpu") isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False) - # Get world poses from both - isaaclab_pos, isaaclab_quat = isaaclab_view.get_world_poses() + isaaclab_pos = isaaclab_view.get_world_poses()[0].torch isaacsim_pos, isaacsim_quat = isaacsim_view.get_world_poses() - - # Convert Isaac Sim results to torch tensors if needed if not isinstance(isaacsim_pos, torch.Tensor): isaacsim_pos = torch.tensor(isaacsim_pos, dtype=torch.float32) - if not isinstance(isaacsim_quat, torch.Tensor): - isaacsim_quat = torch.tensor(isaacsim_quat, dtype=torch.float32) - # Compare results torch.testing.assert_close(isaaclab_pos, isaacsim_pos, atol=1e-5, rtol=0) - # Compare quaternions (account for sign ambiguity) - try: - torch.testing.assert_close(isaaclab_quat, isaacsim_quat, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(isaaclab_quat, -isaacsim_quat, atol=1e-5, rtol=0) - - -def test_compare_set_world_poses_with_isaacsim(): - """Compare set_world_poses with Isaac Sim's implementation.""" - stage = sim_utils.get_current_stage() - - # Check if Isaac Sim is available - if _IsaacSimXformPrimView is None: - pytest.skip("Isaac Sim is not available") - - # Create prims - num_prims = 8 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=(0.0, 0.0, 0.0), stage=stage) - - pattern = "/World/Env_.*/Object" - - # Create both views - isaaclab_view = XformPrimView(pattern, device="cpu") - isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False) - # Generate new poses - new_positions = torch.randn(num_prims, 3) * 10.0 - new_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_prims, dtype=torch.float32) - - # Set poses using both implementations - isaaclab_view.set_world_poses(new_positions.clone(), new_orientations.clone()) - isaacsim_view.set_world_poses(new_positions.clone(), new_orientations.clone()) - - # Get poses back from both - isaaclab_pos, isaaclab_quat = isaaclab_view.get_world_poses() - isaacsim_pos, isaacsim_quat = isaacsim_view.get_world_poses() - - # Convert Isaac Sim results to torch tensors if needed - if not isinstance(isaacsim_pos, torch.Tensor): - isaacsim_pos = torch.tensor(isaacsim_pos, dtype=torch.float32) - if not isinstance(isaacsim_quat, torch.Tensor): - isaacsim_quat = torch.tensor(isaacsim_quat, dtype=torch.float32) - - # Compare results - both implementations should produce the same world poses - torch.testing.assert_close(isaaclab_pos, isaacsim_pos, atol=1e-4, rtol=0) - try: - torch.testing.assert_close(isaaclab_quat, isaacsim_quat, atol=1e-4, rtol=0) - except AssertionError: - torch.testing.assert_close(isaaclab_quat, -isaacsim_quat, atol=1e-4, rtol=0) - - -def test_compare_get_local_poses_with_isaacsim(): - """Compare get_local_poses with Isaac Sim's implementation.""" - stage = sim_utils.get_current_stage() - - # Check if Isaac Sim is available - if _IsaacSimXformPrimView is None: - pytest.skip("Isaac Sim is not available") - - # Create hierarchical prims - num_prims = 5 - for i in range(num_prims): - # Create parent - sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=(i * 5.0, 0.0, 0.0), stage=stage) - # Create child with local pose - local_pos = (1.0, float(i), 0.0) - local_quat = (1.0, 0.0, 0.0, 0.0) if i % 2 == 0 else (0.7071068, 0.0, 0.0, 0.7071068) - sim_utils.create_prim( - f"/World/Env_{i}/Object", "Xform", translation=local_pos, orientation=local_quat, stage=stage - ) - - pattern = "/World/Env_.*/Object" - - # Create both views - isaaclab_view = XformPrimView(pattern, device="cpu") - isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False) - - # Get local poses from both - isaaclab_trans, isaaclab_quat = isaaclab_view.get_local_poses() - isaacsim_trans, isaacsim_quat = isaacsim_view.get_local_poses() - - # Convert Isaac Sim results to torch tensors if needed - if not isinstance(isaacsim_trans, torch.Tensor): - isaacsim_trans = torch.tensor(isaacsim_trans, dtype=torch.float32) - if not isinstance(isaacsim_quat, torch.Tensor): - isaacsim_quat = torch.tensor(isaacsim_quat, dtype=torch.float32) - - # Compare results - torch.testing.assert_close(isaaclab_trans, isaacsim_trans, atol=1e-5, rtol=0) - try: - torch.testing.assert_close(isaaclab_quat, isaacsim_quat, atol=1e-5, rtol=0) - except AssertionError: - torch.testing.assert_close(isaaclab_quat, -isaacsim_quat, atol=1e-5, rtol=0) - - -def test_compare_set_local_poses_with_isaacsim(): - """Compare set_local_poses with Isaac Sim's implementation.""" - stage = sim_utils.get_current_stage() - - # Check if Isaac Sim is available - if _IsaacSimXformPrimView is None: - pytest.skip("Isaac Sim is not available") - - # Create hierarchical prims - num_prims = 6 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=(i * 3.0, 0.0, 0.0), stage=stage) - sim_utils.create_prim(f"/World/Env_{i}/Object", "Xform", translation=(0.0, 0.0, 0.0), stage=stage) - - pattern = "/World/Env_.*/Object" - - # Create both views - isaaclab_view = XformPrimView(pattern, device="cpu") - isaacsim_view = _IsaacSimXformPrimView(pattern, reset_xform_properties=False) - - # Generate new local poses - new_translations = torch.randn(num_prims, 3) * 5.0 - new_orientations = torch.tensor( - [[1.0, 0.0, 0.0, 0.0], [0.7071068, 0.0, 0.0, 0.7071068]] * (num_prims // 2), dtype=torch.float32 - ) - - # Set local poses using both implementations - isaaclab_view.set_local_poses(new_translations.clone(), new_orientations.clone()) - isaacsim_view.set_local_poses(new_translations.clone(), new_orientations.clone()) - - # Get local poses back from both - isaaclab_trans, isaaclab_quat = isaaclab_view.get_local_poses() - isaacsim_trans, isaacsim_quat = isaacsim_view.get_local_poses() - - # Convert Isaac Sim results to torch tensors if needed - if not isinstance(isaacsim_trans, torch.Tensor): - isaacsim_trans = torch.tensor(isaacsim_trans, dtype=torch.float32) - if not isinstance(isaacsim_quat, torch.Tensor): - isaacsim_quat = torch.tensor(isaacsim_quat, dtype=torch.float32) - - # Compare results - torch.testing.assert_close(isaaclab_trans, isaacsim_trans, atol=1e-4, rtol=0) - try: - torch.testing.assert_close(isaaclab_quat, isaacsim_quat, atol=1e-4, rtol=0) - except AssertionError: - torch.testing.assert_close(isaaclab_quat, -isaacsim_quat, atol=1e-4, rtol=0) - - -""" -Tests - Fabric Operations. -""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_fabric_initialization(device): - """Test XformPrimView initialization with Fabric enabled.""" - _skip_if_backend_unavailable("fabric", device) - - stage = sim_utils.get_current_stage() - - # Create camera prims (Boundable prims that support Fabric) - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Cam_{i}", "Camera", translation=(i * 1.0, 0.0, 1.0), stage=stage) - - # Create view with Fabric enabled - view = _create_view("/World/Cam_.*", device=device, backend="fabric") - - # Verify properties - assert view.count == num_prims - assert view.device == device - assert len(view.prims) == num_prims +# ================================================================== +# USD-only: Franka integration +# ================================================================== @pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_fabric_usd_consistency(device): - """Test that Fabric round-trip (write→read) is consistent, matching Isaac Sim's design. - - Note: This does NOT test Fabric vs USD reads on initialization, as Fabric is designed - for write-first workflows. Instead, it tests that: - 1. Fabric write→read round-trip works correctly - 2. This matches Isaac Sim's Fabric behavior - """ - _skip_if_backend_unavailable("fabric", device) +def test_with_franka_robots(device): + """Verify FrameView works with real Franka robot USD assets.""" + if device == "cuda" and not torch.cuda.is_available(): + pytest.skip("CUDA not available") stage = sim_utils.get_current_stage() + franka_usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/FrankaRobotics/FrankaPanda/franka.usd" - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim( - f"/World/Cam_{i}", - "Camera", - translation=(i * 1.0, 2.0, 3.0), - orientation=(0.7071068, 0.0, 0.0, 0.7071068), - stage=stage, - ) - - # Create Fabric view - view_fabric = _create_view("/World/Cam_.*", device=device, backend="fabric") - - # Test Fabric write→read round-trip (Isaac Sim's intended workflow) - # Initialize Fabric state by WRITING first - init_positions = torch.zeros((num_prims, 3), dtype=torch.float32, device=device) - init_positions[:, 0] = torch.arange(num_prims, dtype=torch.float32, device=device) - init_positions[:, 1] = 2.0 - init_positions[:, 2] = 3.0 - init_orientations = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]] * num_prims, dtype=torch.float32, device=device) - - view_fabric.set_world_poses(init_positions, init_orientations) + sim_utils.create_prim("/World/Franka_1", "Xform", usd_path=franka_usd_path, stage=stage) + sim_utils.create_prim("/World/Franka_2", "Xform", usd_path=franka_usd_path, stage=stage) - # Read back from Fabric (should match what we wrote) - pos_fabric, quat_fabric = view_fabric.get_world_poses() - torch.testing.assert_close(pos_fabric, init_positions, atol=1e-4, rtol=0) - torch.testing.assert_close(quat_fabric, init_orientations, atol=1e-4, rtol=0) + view = FrameView("/World/Franka_.*", device=device) + assert view.count == 2 - # Test another round-trip with different values - new_positions = torch.rand((num_prims, 3), dtype=torch.float32, device=device) * 10.0 - new_orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * num_prims, dtype=torch.float32, device=device) + positions = view.get_world_poses()[0].torch + torch.testing.assert_close(positions, torch.zeros(2, 3, device=device), atol=1e-5, rtol=0) - view_fabric.set_world_poses(new_positions, new_orientations) + new_pos = torch.tensor([[10.0, 10.0, 0.0], [-40.0, -40.0, 0.0]], device=device) + new_quat = torch.tensor([[0.0, 0.0, 0.7071068, 0.7071068], [0.0, 0.0, -0.7071068, 0.7071068]], device=device) + view.set_world_poses(positions=new_pos, orientations=new_quat) - # Read back from Fabric (should match) - pos_fabric_after, quat_fabric_after = view_fabric.get_world_poses() - torch.testing.assert_close(pos_fabric_after, new_positions, atol=1e-4, rtol=0) - torch.testing.assert_close(quat_fabric_after, new_orientations, atol=1e-4, rtol=0) + ret_pos = view.get_world_poses()[0].torch + torch.testing.assert_close(ret_pos, new_pos, atol=1e-5, rtol=0) diff --git a/source/isaaclab/test/terrains/check_terrain_importer.py b/source/isaaclab/test/terrains/check_terrain_importer.py index d88ec65c86d0..a8229023f90c 100644 --- a/source/isaaclab/test/terrains/check_terrain_importer.py +++ b/source/isaaclab/test/terrains/check_terrain_importer.py @@ -62,26 +62,16 @@ """Rest everything follows.""" -import numpy as np - -import omni.kit -import omni.kit.commands -from isaacsim.core.api.materials import PhysicsMaterial -from isaacsim.core.api.materials.preview_surface import PreviewSurface -from isaacsim.core.api.objects import DynamicSphere -from isaacsim.core.cloner import GridCloner -from isaacsim.core.prims import RigidPrim, SingleGeometryPrim, SingleRigidPrim -from isaacsim.core.utils.extensions import enable_extension +import torch import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen +from isaaclab import cloner as lab_cloner from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG from isaaclab.terrains.terrain_importer import TerrainImporter from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -enable_extension("omni.kit.primitive.mesh") - def main(): """Generates a terrain from isaaclab.""" @@ -95,8 +85,10 @@ def main(): num_balls = 2048 # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") + # Create environment clones using Lab's cloner utilities + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_balls, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_balls, spacing=2.0, device=sim.device) # Everything under the namespace "/World/envs/env_0" will be cloned sim_utils.define_prim("/World/envs/env_0") @@ -116,59 +108,78 @@ def main(): # -- Light cfg = sim_utils.DistantLightCfg(intensity=1000.0) cfg.func("/World/Light", cfg) - # -- Ball + + # -- Ball with physics properties using Isaac Lab spawners + ball_prim_path = "/World/envs/env_0/ball" + + # Create physics material + physics_material_cfg = sim_utils.RigidBodyMaterialCfg( + static_friction=0.2, + dynamic_friction=1.0, + restitution=0.0, + ) + + # Create visual material + visual_material_cfg = sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)) + if args_cli.geom_sphere: - # -- Ball physics - _ = DynamicSphere( - prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25 + # Spawn a geom sphere with rigid body properties + sphere_cfg = sim_utils.SphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=visual_material_cfg, + physics_material=physics_material_cfg, ) + sphere_cfg.func(ball_prim_path, sphere_cfg, translation=(0.0, 0.0, 5.0)) else: - # -- Ball geometry - cube_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1] - sim_utils.move_prim(cube_prim_path, "/World/envs/env_0/ball") - # -- Ball physics - SingleRigidPrim( - prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5) + # Spawn a mesh sphere with rigid body properties + mesh_sphere_cfg = sim_utils.MeshSphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + visual_material=visual_material_cfg, + physics_material=physics_material_cfg, ) - SingleGeometryPrim(prim_path="/World/envs/env_0/ball", collision=True) - # -- Ball material - sphere_geom = SingleGeometryPrim(prim_path="/World/envs/env_0/ball", collision=True) - visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0])) - physics_material = PhysicsMaterial( - prim_path="/World/Looks/ballPhysicsMaterial", - dynamic_friction=1.0, - static_friction=0.2, - restitution=0.0, - ) - sphere_geom.set_collision_approximation("convexHull") - sphere_geom.apply_visual_material(visual_material) - sphere_geom.apply_physics_material(physics_material) + mesh_sphere_cfg.func(ball_prim_path, mesh_sphere_cfg, translation=(0.0, 0.0, 0.5)) # Clone the scene - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls) - cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) - physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_balls)] + lab_cloner.usd_replicate(sim.stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) + physics_scene_path = sim.cfg.physics.physics_prim_path + lab_cloner.filter_collisions( + sim.stage, physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] ) - # Set ball positions over terrain origins - # Create a view over all the balls - ball_view = RigidPrim("/World/envs/env_.*/ball", reset_xform_properties=False) + # Set ball positions over terrain origins using FrameView (before simulation starts) + xform_view = sim_utils.FrameView("/World/envs/env_.*/ball") # cache initial state of the balls - ball_initial_positions = terrain_importer.env_origins + ball_initial_positions = terrain_importer.env_origins.clone() ball_initial_positions[:, 2] += 5.0 - # set initial poses - # note: setting here writes to USD :) - ball_view.set_world_poses(positions=ball_initial_positions) + # set initial poses (writes to USD before simulation) + xform_view.set_world_poses(positions=ball_initial_positions) # Play simulator sim.reset() - # Initialize the ball views for physics simulation - ball_view.initialize() + + # Create a PhysX rigid body view for physics simulation + physics_sim_view = sim.physics_manager.get_physics_sim_view() + ball_view = physics_sim_view.create_rigid_body_view("/World/envs/env_*/ball") + + # Cache initial velocities (all zeros) ball_initial_velocities = ball_view.get_velocities() + # Build initial transforms tensor for reset: (N, 7) = [pos(3), quat_xyzw(4)] + num_balls_actual = ball_initial_positions.shape[0] + ball_initial_transforms = torch.zeros(num_balls_actual, 7, device=ball_initial_positions.device) + ball_initial_transforms[:, :3] = ball_initial_positions + ball_initial_transforms[:, 6] = 1.0 # w=1 for identity quaternion (xyzw format) + + # Create indices for all balls (required by PhysX view API) + all_indices = torch.arange(num_balls_actual, dtype=torch.int32, device=ball_initial_positions.device) + # Create a counter for resetting the scene step_count = 0 # Simulate physics @@ -182,9 +193,9 @@ def main(): continue # Reset the scene if step_count % 500 == 0: - # reset the balls - ball_view.set_world_poses(positions=ball_initial_positions) - ball_view.set_velocities(ball_initial_velocities) + # reset the balls using PhysX tensor API + ball_view.set_transforms(ball_initial_transforms, all_indices) + ball_view.set_velocities(ball_initial_velocities, all_indices) # reset the counter step_count = 0 # Step simulation diff --git a/source/isaaclab/test/terrains/test_terrain_importer.py b/source/isaaclab/test/terrains/test_terrain_importer.py index 05ed76e0811e..5234df4cae51 100644 --- a/source/isaaclab/test/terrains/test_terrain_importer.py +++ b/source/isaaclab/test/terrains/test_terrain_importer.py @@ -18,18 +18,13 @@ import pytest import torch import trimesh +import warp as wp -import omni.kit -import omni.kit.commands -from isaacsim.core.api.materials import PhysicsMaterial, PreviewSurface -from isaacsim.core.api.objects import DynamicSphere -from isaacsim.core.cloner import GridCloner -from isaacsim.core.prims import RigidPrim, SingleGeometryPrim, SingleRigidPrim -from isaacsim.core.utils.extensions import enable_extension -from pxr import Usd, UsdGeom +from pxr import UsdGeom import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen +from isaaclab import cloner as lab_cloner from isaaclab.sim import PreviewSurfaceCfg, SimulationContext, build_simulation_context, get_first_matching_child_prim from isaaclab.terrains import TerrainImporter, TerrainImporterCfg from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG @@ -39,8 +34,8 @@ @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.parametrize("env_spacing", [1.0, 4.325, 8.0]) @pytest.mark.parametrize("num_envs", [1, 4, 125, 379, 1024]) -def test_grid_clone_env_origins(device, env_spacing, num_envs): - """Tests that env origins are consistent when computed using the TerrainImporter and IsaacSim GridCloner.""" +def test_terrain_importer_env_origins(device, env_spacing, num_envs): + """Tests that env origins are consistent when computed using the TerrainImporter and Lab's grid_transforms.""" with build_simulation_context(device=device, auto_add_lighting=True) as sim: sim._app_control_on_stop_handle = None # create terrain importer @@ -55,11 +50,11 @@ def test_grid_clone_env_origins(device, env_spacing, num_envs): # obtain env origins using terrain importer terrain_importer_origins = terrain_importer.env_origins - # obtain env origins using grid cloner - grid_cloner_origins = _obtain_grid_cloner_env_origins(num_envs, env_spacing, stage=sim.stage, device=sim.device) + # obtain env origins using Lab's grid_transforms + lab_grid_origins, _ = lab_cloner.grid_transforms(num_envs, spacing=env_spacing, device=sim.device) # check if the env origins are the same - torch.testing.assert_close(terrain_importer_origins, grid_cloner_origins, rtol=1e-5, atol=1e-5) + torch.testing.assert_close(terrain_importer_origins, lab_grid_origins, rtol=1e-5, atol=1e-5) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @@ -162,6 +157,7 @@ def test_usd(device): assert actualSize[1] == pytest.approx(expectedSizeY) +@pytest.mark.skip(reason="It seems like IsaacSim is not setting the initial positions correctly for the balls.") @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) def test_ball_drop(device): """Generates assorted terrains and spheres created as meshes. @@ -174,13 +170,12 @@ def test_ball_drop(device): # Create a scene with rough terrain and balls _populate_scene(geom_sphere=False, sim=sim) - # Create a view over all the balls - ball_view = RigidPrim("/World/envs/env_.*/ball", reset_xform_properties=False) - # Play simulator sim.reset() - # Initialize the ball views for physics simulation - ball_view.initialize() + + # Create a view over all the balls using PhysX view + physics_sim_view = sim.physics_manager.get_physics_sim_view() + ball_view = physics_sim_view.create_rigid_body_view("/World/envs/env_*/ball") # Run simulator for _ in range(500): @@ -188,10 +183,12 @@ def test_ball_drop(device): # Ball may have some small non-zero velocity if the roll on terrain <~.2 # If balls fall through terrain velocity is much higher ~82.0 - max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2])) + view_velocities = ball_view.get_linear_velocities().contiguous() + max_velocity_z = torch.max(torch.abs(wp.to_torch(view_velocities)[:, 2])) assert max_velocity_z.item() <= 0.5 +@pytest.mark.skip(reason="It seems like IsaacSim is not setting the initial positions correctly for the balls.") @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) def test_ball_drop_geom_sphere(device): """Generates assorted terrains and geom spheres. @@ -207,13 +204,12 @@ def test_ball_drop_geom_sphere(device): # the issue is fixed. _populate_scene(geom_sphere=False, sim=sim) - # Create a view over all the balls - ball_view = RigidPrim("/World/envs/env_.*/ball", reset_xform_properties=False) - # Play simulator sim.reset() - # Initialize the ball views for physics simulation - ball_view.initialize() + + # Create a view over all the balls using PhysX view + physics_sim_view = sim.physics_manager.get_physics_sim_view() + ball_view = physics_sim_view.create_rigid_body_view("/World/envs/env_*/ball") # Run simulator for _ in range(500): @@ -221,7 +217,8 @@ def test_ball_drop_geom_sphere(device): # Ball may have some small non-zero velocity if the roll on terrain <~.2 # If balls fall through terrain velocity is much higher ~82.0 - max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2])) + view_velocities = ball_view.get_linear_velocities().contiguous() + max_velocity_z = torch.max(torch.abs(wp.to_torch(view_velocities)[:, 2])) assert max_velocity_z.item() <= 0.5 @@ -243,20 +240,6 @@ def _obtain_collision_mesh(mesh_prim_path: str, mesh_type: Literal["Mesh", "Plan return None -def _obtain_grid_cloner_env_origins(num_envs: int, env_spacing: float, stage: Usd.Stage, device: str) -> torch.Tensor: - """Obtain the env origins generated by IsaacSim GridCloner (grid_cloner.py).""" - # create grid cloner - cloner = GridCloner(spacing=env_spacing) - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) - # create source prim - stage.DefinePrim("/World/envs/env_0", "Xform") - # clone envs using grid cloner - env_origins = cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) - # return as tensor - return torch.tensor(env_origins, dtype=torch.float32, device=device) - - def _populate_scene(sim: SimulationContext, num_balls: int = 2048, geom_sphere: bool = False): """Create a scene with terrain and randomly spawned balls. @@ -273,62 +256,64 @@ def _populate_scene(sim: SimulationContext, num_balls: int = 2048, geom_sphere: ) terrain_importer = TerrainImporter(terrain_importer_cfg) - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") + # Create environment clones using Lab's cloner utilities + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_balls, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_balls, spacing=2.0, device=sim.device) # Everything under the namespace "/World/envs/env_0" will be cloned sim.stage.DefinePrim("/World/envs/env_0", "Xform") # Define the scene - # -- Ball - if geom_sphere: - # -- Ball physics - _ = DynamicSphere( - prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25 - ) - else: - # -- Ball geometry - enable_extension("omni.kit.primitive.mesh") - cube_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1] - sim_utils.move_prim(cube_prim_path, "/World/envs/env_0/ball") - # -- Ball physics - SingleRigidPrim( - prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5) - ) - SingleGeometryPrim(prim_path="/World/envs/env_0/ball", collision=True) + # -- Ball with physics properties using Isaac Lab spawners + ball_prim_path = "/World/envs/env_0/ball" - # -- Ball material - sphere_geom = SingleGeometryPrim(prim_path="/World/envs/env_0/ball", collision=True) - visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0])) - physics_material = PhysicsMaterial( - prim_path="/World/Looks/ballPhysicsMaterial", - dynamic_friction=1.0, + # Create physics material + physics_material_cfg = sim_utils.RigidBodyMaterialCfg( static_friction=0.2, + dynamic_friction=1.0, restitution=0.0, ) - sphere_geom.set_collision_approximation("convexHull") - sphere_geom.apply_visual_material(visual_material) - sphere_geom.apply_physics_material(physics_material) + + # Create visual material + visual_material_cfg = sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)) + + if geom_sphere: + # Spawn a geom sphere with rigid body properties + sphere_cfg = sim_utils.SphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=visual_material_cfg, + physics_material=physics_material_cfg, + ) + sphere_cfg.func(ball_prim_path, sphere_cfg, translation=(0.0, 0.0, 5.0)) + else: + # Spawn a mesh sphere with rigid body properties + mesh_sphere_cfg = sim_utils.MeshSphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + visual_material=visual_material_cfg, + physics_material=physics_material_cfg, + ) + mesh_sphere_cfg.func(ball_prim_path, mesh_sphere_cfg, translation=(0.0, 0.0, 0.5)) # Clone the scene - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls) - cloner.clone( - source_prim_path="/World/envs/env_0", - prim_paths=envs_prim_paths, - replicate_physics=True, - ) - physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_balls)] + lab_cloner.usd_replicate(sim.stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) + physics_scene_path = sim.cfg.physics_prim_path + lab_cloner.filter_collisions( + sim.stage, physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"] ) # Set ball positions over terrain origins - # Create a view over all the balls - ball_view = RigidPrim("/World/envs/env_.*/ball", reset_xform_properties=False) + # Create a view over all the balls using Isaac Lab's FrameView + ball_view = sim_utils.FrameView("/World/envs/env_.*/ball") # cache initial state of the balls - ball_initial_positions = terrain_importer.env_origins + ball_initial_positions = terrain_importer.env_origins.clone() ball_initial_positions[:, 2] += 5.0 # set initial poses # note: setting here writes to USD :) - ball_view.set_world_poses(positions=ball_initial_positions) + ball_view.set_world_poses(positions=wp.from_torch(ball_initial_positions)) diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py new file mode 100644 index 000000000000..a10fe5150ff6 --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py @@ -0,0 +1,352 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for mock asset interfaces.""" + +import pytest +import torch + +from isaaclab.test.mock_interfaces.assets import ( + MockArticulation, + MockRigidObject, + MockRigidObjectCollection, + create_mock_articulation, + create_mock_humanoid, + create_mock_quadruped, + create_mock_rigid_object, + create_mock_rigid_object_collection, +) +from isaaclab.test.mock_interfaces.utils import MockArticulationBuilder + +# ============================================================================== +# MockArticulation Tests +# ============================================================================== + + +class TestMockArticulation: + """Tests for MockArticulation and MockArticulationData.""" + + @pytest.fixture + def robot(self): + """Create a mock articulation fixture.""" + return MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + device="cpu", + ) + + def test_initialization(self, robot): + """Test that MockArticulation initializes correctly.""" + assert robot.num_instances == 4 + assert robot.num_joints == 12 + assert robot.num_bodies == 13 + assert robot.device == "cpu" + assert robot.root_view is None + assert robot.data is not None + + def test_joint_state_shapes(self, robot): + """Test joint state tensor shapes.""" + assert robot.data.joint_pos.shape == (4, 12) + assert robot.data.joint_vel.shape == (4, 12) + assert robot.data.joint_acc.shape == (4, 12) + + def test_root_state_shapes(self, robot): + """Test root state tensor shapes.""" + # Link frame - pose is wp.transformf so shape is (4,) but converts to (4, 7) + assert robot.data.root_link_pose_w.torch.shape == (4, 7) + # vel is wp.spatial_vectorf so shape is (4,) but converts to (4, 6) + assert robot.data.root_link_vel_w.torch.shape == (4, 6) + assert robot.data.root_link_state_w.torch.shape == (4, 13) + + # Sliced properties + assert robot.data.root_link_pos_w.torch.shape == (4, 3) + assert robot.data.root_link_quat_w.torch.shape == (4, 4) + assert robot.data.root_link_lin_vel_w.torch.shape == (4, 3) + assert robot.data.root_link_ang_vel_w.torch.shape == (4, 3) + + def test_body_state_shapes(self, robot): + """Test body state tensor shapes.""" + # body_link_pose_w is wp.transformf so shape is (4, 13) but converts to (4, 13, 7) + assert robot.data.body_link_pose_w.torch.shape == (4, 13, 7) + # body_link_vel_w is wp.spatial_vectorf so shape is (4, 13) but converts to (4, 13, 6) + assert robot.data.body_link_vel_w.torch.shape == (4, 13, 6) + assert robot.data.body_link_state_w.torch.shape == (4, 13, 13) + + def test_default_state_shapes(self, robot): + """Test default state tensor shapes.""" + # default_root_pose is wp.transformf so shape is (4,) but converts to (4, 7) + assert robot.data.default_root_pose.torch.shape == (4, 7) + # default_root_vel is wp.spatial_vectorf so shape is (4,) but converts to (4, 6) + assert robot.data.default_root_vel.torch.shape == (4, 6) + assert robot.data.default_root_state.torch.shape == (4, 13) + assert robot.data.default_joint_pos.shape == (4, 12) + assert robot.data.default_joint_vel.shape == (4, 12) + + def test_identity_quaternion_default(self, robot): + """Test that default quaternions are identity quaternions.""" + quat = robot.data.root_link_quat_w.torch + # XYZW format: x=y=z=0, w=1 + expected = torch.zeros_like(quat) + expected[:, 3] = 1.0 # Set w=1 + assert torch.allclose(quat, expected, atol=1e-5) + + def test_set_joint_pos(self, robot): + """Test setting joint positions.""" + joint_pos = torch.randn(4, 12) + robot.data.set_joint_pos(joint_pos) + assert torch.allclose(robot.data.joint_pos.torch, joint_pos) + + def test_set_mock_data_bulk(self, robot): + """Test bulk data setter.""" + joint_pos = torch.randn(4, 12) + joint_vel = torch.randn(4, 12) + + robot.data.set_mock_data(joint_pos=joint_pos, joint_vel=joint_vel) + + assert torch.allclose(robot.data.joint_pos.torch, joint_pos) + assert torch.allclose(robot.data.joint_vel.torch, joint_vel) + + def test_find_joints(self): + """Test joint finding by regex.""" + joint_names = ["FL_hip", "FL_thigh", "FL_calf", "FR_hip", "FR_thigh", "FR_calf"] + robot = MockArticulation( + num_instances=1, + num_joints=6, + num_bodies=7, + joint_names=joint_names, + device="cpu", + ) + + # Find all hip joints + indices, names = robot.find_joints(".*_hip") + assert len(indices) == 2 + assert "FL_hip" in names + assert "FR_hip" in names + + # Find FL leg joints + indices, names = robot.find_joints("FL_.*") + assert len(indices) == 3 + + def test_find_bodies(self): + """Test body finding by regex.""" + body_names = ["base", "FL_hip", "FL_thigh", "FL_calf", "FR_hip", "FR_thigh", "FR_calf"] + robot = MockArticulation( + num_instances=1, + num_joints=6, + num_bodies=7, + body_names=body_names, + device="cpu", + ) + + # Find base + indices, names = robot.find_bodies("base") + assert indices == [0] + + # Find all thigh bodies + indices, names = robot.find_bodies(".*_thigh") + assert len(indices) == 2 + + def test_set_joint_position_target(self, robot): + """Test setting joint position targets.""" + target = torch.randn(4, 12) + robot.set_joint_position_target(target) + assert torch.allclose(robot.data.joint_pos_target.torch, target) + + def test_joint_limits(self, robot): + """Test joint limits.""" + limits = robot.data.joint_pos_limits.torch + assert limits.shape == (4, 12, 2) + # Default limits should be -inf to inf + assert torch.all(limits[..., 0] == float("-inf")) + assert torch.all(limits[..., 1] == float("inf")) + + +# ============================================================================== +# MockRigidObject Tests +# ============================================================================== + + +class TestMockRigidObject: + """Tests for MockRigidObject and MockRigidObjectData.""" + + @pytest.fixture + def obj(self): + """Create a mock rigid object fixture.""" + return MockRigidObject(num_instances=4, device="cpu") + + def test_initialization(self, obj): + """Test that MockRigidObject initializes correctly.""" + assert obj.num_instances == 4 + assert obj.num_bodies == 1 # Always 1 for rigid object + assert obj.root_view is None + + def test_root_state_shapes(self, obj): + """Test root state tensor shapes.""" + # root_link_pose_w is wp.transformf so shape is (4,) but converts to (4, 7) + assert obj.data.root_link_pose_w.torch.shape == (4, 7) + # root_link_vel_w is wp.spatial_vectorf so shape is (4,) but converts to (4, 6) + assert obj.data.root_link_vel_w.torch.shape == (4, 6) + assert obj.data.root_link_state_w.torch.shape == (4, 13) + + def test_body_state_shapes(self, obj): + """Test body state tensor shapes (single body).""" + # body_link_pose_w is wp.transformf so shape is (4, 1) but converts to (4, 1, 7) + assert obj.data.body_link_pose_w.torch.shape == (4, 1, 7) + # body_link_vel_w is wp.spatial_vectorf so shape is (4, 1) but converts to (4, 1, 6) + assert obj.data.body_link_vel_w.torch.shape == (4, 1, 6) + + def test_body_properties(self, obj): + """Test body property shapes.""" + assert obj.data.body_mass.shape == (4, 1) + assert obj.data.body_inertia.shape == (4, 1, 9) + + +# ============================================================================== +# MockRigidObjectCollection Tests +# ============================================================================== + + +class TestMockRigidObjectCollection: + """Tests for MockRigidObjectCollection and MockRigidObjectCollectionData.""" + + @pytest.fixture + def collection(self): + """Create a mock rigid object collection fixture.""" + return MockRigidObjectCollection( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + def test_initialization(self, collection): + """Test that MockRigidObjectCollection initializes correctly.""" + assert collection.num_instances == 4 + assert collection.num_bodies == 5 + + def test_body_state_shapes(self, collection): + """Test body state tensor shapes.""" + # body_link_pose_w is wp.transformf so shape is (4, 5) but converts to (4, 5, 7) + assert collection.data.body_link_pose_w.torch.shape == (4, 5, 7) + # body_link_vel_w is wp.spatial_vectorf so shape is (4, 5) but converts to (4, 5, 6) + assert collection.data.body_link_vel_w.torch.shape == (4, 5, 6) + assert collection.data.body_link_state_w.torch.shape == (4, 5, 13) + + def test_find_bodies_returns_mask(self, collection): + """Test that find_bodies returns a mask tensor.""" + body_mask, names = collection.find_bodies("body_0") + assert isinstance(body_mask, torch.Tensor) + assert body_mask.shape == (5,) + assert body_mask[0] + + +# ============================================================================== +# Factory Function Tests +# ============================================================================== + + +class TestAssetFactories: + """Tests for asset factory functions.""" + + def test_create_mock_quadruped(self): + """Test quadruped factory function.""" + robot = create_mock_quadruped(num_instances=4) + assert robot.num_instances == 4 + assert robot.num_joints == 12 + assert robot.num_bodies == 13 + assert "FL_hip" in robot.joint_names + assert "base" in robot.body_names + + def test_create_mock_humanoid(self): + """Test humanoid factory function.""" + robot = create_mock_humanoid(num_instances=2) + assert robot.num_instances == 2 + assert robot.num_joints == 21 + + def test_create_mock_articulation(self): + """Test generic articulation factory function.""" + robot = create_mock_articulation( + num_instances=2, + num_joints=6, + num_bodies=7, + is_fixed_base=True, + ) + assert robot.num_instances == 2 + assert robot.num_joints == 6 + assert robot.is_fixed_base + + def test_create_mock_rigid_object(self): + """Test rigid object factory function.""" + obj = create_mock_rigid_object(num_instances=3) + assert obj.num_instances == 3 + assert obj.num_bodies == 1 + # root_link_pose_w is wp.transformf so shape is (3,) but converts to (3, 7) + assert obj.data.root_link_pose_w.torch.shape == (3, 7) + + def test_create_mock_rigid_object_collection(self): + """Test rigid object collection factory function.""" + + collection = create_mock_rigid_object_collection( + num_instances=4, + num_bodies=6, + body_names=["obj_0", "obj_1", "obj_2", "obj_3", "obj_4", "obj_5"], + ) + assert collection.num_instances == 4 + assert collection.num_bodies == 6 + assert collection.body_names == ["obj_0", "obj_1", "obj_2", "obj_3", "obj_4", "obj_5"] + # body_link_pose_w is wp.transformf so shape is (4, 6) but converts to (4, 6, 7) + assert collection.data.body_link_pose_w.torch.shape == (4, 6, 7) + + +# ============================================================================== +# MockArticulationBuilder Tests +# ============================================================================== + + +class TestMockArticulationBuilder: + """Tests for MockArticulationBuilder.""" + + def test_basic_build(self): + """Test building a basic articulation.""" + robot = ( + MockArticulationBuilder() + .with_num_instances(4) + .with_joints(["joint_0", "joint_1", "joint_2"]) + .with_bodies(["base", "link_1", "link_2", "link_3"]) + .build() + ) + + assert robot.num_instances == 4 + assert robot.num_joints == 3 + assert robot.num_bodies == 4 + + def test_with_default_positions(self): + """Test setting default joint positions.""" + + default_pos = [0.0, 0.5, -0.5] + robot = ( + MockArticulationBuilder() + .with_num_instances(2) + .with_joints(["j0", "j1", "j2"], default_pos=default_pos) + .build() + ) + + expected = torch.tensor([default_pos, default_pos]) + assert torch.allclose(robot.data.joint_pos.torch, expected) + + def test_with_joint_limits(self): + """Test setting joint limits.""" + + robot = ( + MockArticulationBuilder() + .with_num_instances(1) + .with_joints(["j0", "j1"]) + .with_joint_limits(-1.0, 1.0) + .build() + ) + + limits = robot.data.joint_pos_limits.torch + assert torch.all(limits[..., 0] == -1.0) + assert torch.all(limits[..., 1] == 1.0) diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py new file mode 100644 index 000000000000..b68bb8141e1c --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py @@ -0,0 +1,902 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Comprehensive tests for mock data properties - shapes, setters, and device handling.""" + +import pytest +import torch + +from isaaclab.test.mock_interfaces.assets import ( + MockArticulationData, + MockRigidObjectCollectionData, + MockRigidObjectData, +) +from isaaclab.test.mock_interfaces.sensors import ( + MockContactSensorData, + MockFrameTransformerData, + MockImuData, + MockPvaData, +) + +# ============================================================================== +# IMU Data Property Tests +# ============================================================================== + + +class TestMockImuDataProperties: + """Comprehensive tests for all MockImuData properties. + + MockImuData only provides angular velocity and linear acceleration. + """ + + @pytest.fixture + def data(self): + """Create MockImuData fixture.""" + return MockImuData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("ang_vel_b", (4, 3)), + ("lin_acc_b", (4, 3)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_ang_vel_b", "ang_vel_b", (4, 3)), + ("set_lin_acc_b", "lin_acc_b", (4, 3)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name).torch + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_bulk_setter(self, data): + """Test that set_mock_data updates multiple properties at once.""" + + ang_vel = torch.randn(4, 3) + lin_acc = torch.randn(4, 3) + + data.set_mock_data(ang_vel_b=ang_vel, lin_acc_b=lin_acc) + + assert torch.allclose(data.ang_vel_b.torch, ang_vel) + assert torch.allclose(data.lin_acc_b.torch, lin_acc) + + +# ============================================================================== +# PVA Data Property Tests +# ============================================================================== + + +class TestMockPvaDataProperties: + """Comprehensive tests for all MockPvaData properties.""" + + @pytest.fixture + def data(self): + """Create MockPvaData fixture.""" + return MockPvaData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("pos_w", (4, 3)), + ("quat_w", (4, 4)), + ("pose_w", (4, 7)), + ("projected_gravity_b", (4, 3)), + ("lin_vel_b", (4, 3)), + ("ang_vel_b", (4, 3)), + ("lin_acc_b", (4, 3)), + ("ang_acc_b", (4, 3)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_pos_w", "pos_w", (4, 3)), + ("set_quat_w", "quat_w", (4, 4)), + ("set_projected_gravity_b", "projected_gravity_b", (4, 3)), + ("set_lin_vel_b", "lin_vel_b", (4, 3)), + ("set_ang_vel_b", "ang_vel_b", (4, 3)), + ("set_lin_acc_b", "lin_acc_b", (4, 3)), + ("set_ang_acc_b", "ang_acc_b", (4, 3)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name).torch + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_bulk_setter(self, data): + """Test that set_mock_data updates multiple properties at once.""" + + lin_vel = torch.randn(4, 3) + ang_vel = torch.randn(4, 3) + lin_acc = torch.randn(4, 3) + + data.set_mock_data(lin_vel_b=lin_vel, ang_vel_b=ang_vel, lin_acc_b=lin_acc) + + assert torch.allclose(data.lin_vel_b.torch, lin_vel) + assert torch.allclose(data.ang_vel_b.torch, ang_vel) + assert torch.allclose(data.lin_acc_b.torch, lin_acc) + + def test_default_quaternion_is_identity(self, data): + """Test that default quaternion is identity in XYZW format: (x, y, z, w) = (0, 0, 0, 1).""" + + quat = data.quat_w.torch + assert torch.allclose(quat[:, :3], torch.zeros(4, 3)) # xyz=0 + assert torch.allclose(quat[:, 3], torch.ones(4)) # w=1 + + def test_default_gravity_points_down(self, data): + """Test that default gravity points in -z direction.""" + + gravity = data.projected_gravity_b.torch + expected = torch.tensor([[0, 0, -1]] * 4, dtype=torch.float32) + assert torch.allclose(gravity, expected) + + +# ============================================================================== +# Contact Sensor Data Property Tests +# ============================================================================== + + +class TestMockContactSensorDataProperties: + """Comprehensive tests for all MockContactSensorData properties.""" + + @pytest.fixture + def data(self): + """Create MockContactSensorData fixture.""" + return MockContactSensorData( + num_instances=4, + num_bodies=3, + device="cpu", + history_length=2, + num_filter_bodies=5, + ) + + @pytest.fixture + def data_no_history(self): + """Create MockContactSensorData without history.""" + return MockContactSensorData( + num_instances=4, + num_bodies=3, + device="cpu", + history_length=0, + num_filter_bodies=0, + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("pos_w", (4, 3, 3)), + ("quat_w", (4, 3, 4)), + ("pose_w", (4, 3, 7)), + ("net_forces_w", (4, 3, 3)), + ("last_air_time", (4, 3)), + ("current_air_time", (4, 3)), + ("last_contact_time", (4, 3)), + ("current_contact_time", (4, 3)), + ], + ) + def test_basic_property_shapes(self, data, property_name, expected_shape): + """Test basic properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("net_forces_w_history", (4, 2, 3, 3)), # (N, T, B, 3) + ("force_matrix_w", (4, 3, 5, 3)), # (N, B, M, 3) + ("force_matrix_w_history", (4, 2, 3, 5, 3)), # (N, T, B, M, 3) + ("contact_pos_w", (4, 3, 5, 3)), # (N, B, M, 3) + ("friction_forces_w", (4, 3, 5, 3)), # (N, B, M, 3) + ], + ) + def test_optional_property_shapes_with_history(self, data, property_name, expected_shape): + """Test optional properties with history/filter enabled.""" + prop = getattr(data, property_name) + assert prop is not None + # force_matrix_w_history is torch.Tensor due to 5D limitation in warp + if property_name == "force_matrix_w_history": + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + else: + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + def test_optional_properties_none_without_config(self, data_no_history): + """Test optional properties are None when not configured.""" + assert data_no_history.net_forces_w_history is None + assert data_no_history.force_matrix_w is None + assert data_no_history.force_matrix_w_history is None + assert data_no_history.contact_pos_w is None + assert data_no_history.friction_forces_w is None + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_pos_w", "pos_w", (4, 3, 3)), + ("set_quat_w", "quat_w", (4, 3, 4)), + ("set_net_forces_w", "net_forces_w", (4, 3, 3)), + ("set_last_air_time", "last_air_time", (4, 3)), + ("set_current_air_time", "current_air_time", (4, 3)), + ("set_last_contact_time", "last_contact_time", (4, 3)), + ("set_current_contact_time", "current_contact_time", (4, 3)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name).torch + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + +# ============================================================================== +# Frame Transformer Data Property Tests +# ============================================================================== + + +class TestMockFrameTransformerDataProperties: + """Comprehensive tests for all MockFrameTransformerData properties.""" + + @pytest.fixture + def data(self): + """Create MockFrameTransformerData fixture.""" + return MockFrameTransformerData( + num_instances=4, + num_target_frames=3, + target_frame_names=["frame_0", "frame_1", "frame_2"], + device="cpu", + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("source_pos_w", (4, 3)), + ("source_quat_w", (4, 4)), + ("source_pose_w", (4, 7)), + ("target_pos_w", (4, 3, 3)), + ("target_quat_w", (4, 3, 4)), + ("target_pose_w", (4, 3, 7)), + ("target_pos_source", (4, 3, 3)), + ("target_quat_source", (4, 3, 4)), + ("target_pose_source", (4, 3, 7)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_source_pos_w", "source_pos_w", (4, 3)), + ("set_source_quat_w", "source_quat_w", (4, 4)), + ("set_target_pos_w", "target_pos_w", (4, 3, 3)), + ("set_target_quat_w", "target_quat_w", (4, 3, 4)), + ("set_target_pos_source", "target_pos_source", (4, 3, 3)), + ("set_target_quat_source", "target_quat_source", (4, 3, 4)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name).torch + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_target_frame_names(self, data): + """Test that target frame names are correctly stored.""" + assert data.target_frame_names == ["frame_0", "frame_1", "frame_2"] + + +# ============================================================================== +# Articulation Data Property Tests +# ============================================================================== + + +class TestMockArticulationDataProperties: + """Comprehensive tests for all MockArticulationData properties.""" + + @pytest.fixture + def data(self): + """Create MockArticulationData fixture.""" + return MockArticulationData( + num_instances=4, + num_joints=12, + num_bodies=13, + num_fixed_tendons=2, + num_spatial_tendons=1, + device="cpu", + ) + + # -- Joint State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("joint_pos", (4, 12)), + ("joint_vel", (4, 12)), + ("joint_acc", (4, 12)), + ("joint_pos_target", (4, 12)), + ("joint_vel_target", (4, 12)), + ("joint_effort_target", (4, 12)), + ("computed_torque", (4, 12)), + ("applied_torque", (4, 12)), + ], + ) + def test_joint_state_shapes(self, data, property_name, expected_shape): + """Test joint state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Joint Property Shapes -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("joint_stiffness", (4, 12)), + ("joint_damping", (4, 12)), + ("joint_armature", (4, 12)), + ("joint_friction_coeff", (4, 12)), + ("joint_dynamic_friction_coeff", (4, 12)), + ("joint_viscous_friction_coeff", (4, 12)), + ("joint_pos_limits", (4, 12, 2)), + ("joint_vel_limits", (4, 12)), + ("joint_effort_limits", (4, 12)), + ("soft_joint_pos_limits", (4, 12, 2)), + ("soft_joint_vel_limits", (4, 12)), + ("gear_ratio", (4, 12)), + ], + ) + def test_joint_property_shapes(self, data, property_name, expected_shape): + """Test joint property shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Root State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("root_link_pose_w", (4, 7)), + ("root_link_vel_w", (4, 6)), + ("root_link_state_w", (4, 13)), + ("root_link_pos_w", (4, 3)), + ("root_link_quat_w", (4, 4)), + ("root_link_lin_vel_w", (4, 3)), + ("root_link_ang_vel_w", (4, 3)), + ("root_com_pose_w", (4, 7)), + ("root_com_vel_w", (4, 6)), + ("root_com_state_w", (4, 13)), + ("root_com_pos_w", (4, 3)), + ("root_com_quat_w", (4, 4)), + ("root_com_lin_vel_w", (4, 3)), + ("root_com_ang_vel_w", (4, 3)), + ("root_state_w", (4, 13)), + ], + ) + def test_root_state_shapes(self, data, property_name, expected_shape): + """Test root state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Body State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_link_pose_w", (4, 13, 7)), + ("body_link_vel_w", (4, 13, 6)), + ("body_link_state_w", (4, 13, 13)), + ("body_link_pos_w", (4, 13, 3)), + ("body_link_quat_w", (4, 13, 4)), + ("body_link_lin_vel_w", (4, 13, 3)), + ("body_link_ang_vel_w", (4, 13, 3)), + ("body_com_pose_w", (4, 13, 7)), + ("body_com_vel_w", (4, 13, 6)), + ("body_com_state_w", (4, 13, 13)), + ("body_com_acc_w", (4, 13, 6)), + ("body_com_pos_w", (4, 13, 3)), + ("body_com_quat_w", (4, 13, 4)), + ("body_com_lin_vel_w", (4, 13, 3)), + ("body_com_ang_vel_w", (4, 13, 3)), + ("body_com_lin_acc_w", (4, 13, 3)), + ("body_com_ang_acc_w", (4, 13, 3)), + ], + ) + def test_body_state_shapes(self, data, property_name, expected_shape): + """Test body state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Body Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_mass", (4, 13)), + ("body_inertia", (4, 13, 9)), + ], + ) + def test_body_property_shapes(self, data, property_name, expected_shape): + """Test body property shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Default State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("default_root_pose", (4, 7)), + ("default_root_vel", (4, 6)), + ("default_root_state", (4, 13)), + ("default_joint_pos", (4, 12)), + ("default_joint_vel", (4, 12)), + ], + ) + def test_default_state_shapes(self, data, property_name, expected_shape): + """Test default state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Derived Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("projected_gravity_b", (4, 3)), + ("heading_w", (4,)), + ("root_link_lin_vel_b", (4, 3)), + ("root_link_ang_vel_b", (4, 3)), + ("root_com_lin_vel_b", (4, 3)), + ("root_com_ang_vel_b", (4, 3)), + ], + ) + def test_derived_property_shapes(self, data, property_name, expected_shape): + """Test derived properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Tendon Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("fixed_tendon_stiffness", (4, 2)), + ("fixed_tendon_damping", (4, 2)), + ("fixed_tendon_limit_stiffness", (4, 2)), + ("fixed_tendon_rest_length", (4, 2)), + ("fixed_tendon_offset", (4, 2)), + ("fixed_tendon_pos_limits", (4, 2, 2)), + ("spatial_tendon_stiffness", (4, 1)), + ("spatial_tendon_damping", (4, 1)), + ("spatial_tendon_limit_stiffness", (4, 1)), + ("spatial_tendon_offset", (4, 1)), + ], + ) + def test_tendon_property_shapes(self, data, property_name, expected_shape): + """Test tendon properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Setter Tests -- + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_joint_pos", "joint_pos", (4, 12)), + ("set_joint_vel", "joint_vel", (4, 12)), + ("set_joint_acc", "joint_acc", (4, 12)), + ("set_root_link_pose_w", "root_link_pose_w", (4, 7)), + ("set_root_link_vel_w", "root_link_vel_w", (4, 6)), + ("set_body_link_pose_w", "body_link_pose_w", (4, 13, 7)), + ("set_body_link_vel_w", "body_link_vel_w", (4, 13, 6)), + ("set_body_mass", "body_mass", (4, 13)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name).torch + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_bulk_setter_with_multiple_properties(self, data): + """Test set_mock_data with multiple properties.""" + joint_pos = torch.randn(4, 12) + joint_vel = torch.randn(4, 12) + root_pose = torch.randn(4, 7) + + data.set_mock_data( + joint_pos=joint_pos, + joint_vel=joint_vel, + root_link_pose_w=root_pose, + ) + + assert torch.allclose(data.joint_pos.torch, joint_pos) + assert torch.allclose(data.joint_vel.torch, joint_vel) + assert torch.allclose(data.root_link_pose_w.torch, root_pose) + + def test_bulk_setter_unknown_property_raises(self, data): + """Test that set_mock_data raises on unknown property.""" + with pytest.raises(ValueError, match="Unknown property"): + data.set_mock_data(unknown_property=torch.zeros(4)) + + +# ============================================================================== +# Rigid Object Data Property Tests +# ============================================================================== + + +class TestMockRigidObjectDataProperties: + """Comprehensive tests for MockRigidObjectData properties.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectData fixture.""" + return MockRigidObjectData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("root_link_pose_w", (4, 7)), + ("root_link_vel_w", (4, 6)), + ("root_link_state_w", (4, 13)), + ("root_link_pos_w", (4, 3)), + ("root_link_quat_w", (4, 4)), + ("root_link_lin_vel_w", (4, 3)), + ("root_link_ang_vel_w", (4, 3)), + ("root_com_pose_w", (4, 7)), + ("root_com_vel_w", (4, 6)), + ("root_com_state_w", (4, 13)), + ("root_state_w", (4, 13)), + ("body_link_pose_w", (4, 1, 7)), + ("body_link_vel_w", (4, 1, 6)), + ("body_link_state_w", (4, 1, 13)), + ("body_com_pose_w", (4, 1, 7)), + ("body_com_vel_w", (4, 1, 6)), + ("body_com_state_w", (4, 1, 13)), + ("body_com_acc_w", (4, 1, 6)), + ("body_mass", (4, 1)), + ("body_inertia", (4, 1, 9)), + ("projected_gravity_b", (4, 3)), + ("heading_w", (4,)), + ("default_root_pose", (4, 7)), + ("default_root_vel", (4, 6)), + ("default_root_state", (4, 13)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + +# ============================================================================== +# Rigid Object Collection Data Property Tests +# ============================================================================== + + +class TestMockRigidObjectCollectionDataProperties: + """Comprehensive tests for MockRigidObjectCollectionData properties.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectCollectionData fixture.""" + return MockRigidObjectCollectionData( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_link_pose_w", (4, 5, 7)), + ("body_link_vel_w", (4, 5, 6)), + ("body_link_state_w", (4, 5, 13)), + ("body_link_pos_w", (4, 5, 3)), + ("body_link_quat_w", (4, 5, 4)), + ("body_link_lin_vel_w", (4, 5, 3)), + ("body_link_ang_vel_w", (4, 5, 3)), + ("body_link_lin_vel_b", (4, 5, 3)), + ("body_link_ang_vel_b", (4, 5, 3)), + ("body_com_pose_w", (4, 5, 7)), + ("body_com_vel_w", (4, 5, 6)), + ("body_com_state_w", (4, 5, 13)), + ("body_com_acc_w", (4, 5, 6)), + ("body_com_pos_w", (4, 5, 3)), + ("body_com_quat_w", (4, 5, 4)), + ("body_com_lin_vel_w", (4, 5, 3)), + ("body_com_ang_vel_w", (4, 5, 3)), + ("body_com_lin_vel_b", (4, 5, 3)), + ("body_com_ang_vel_b", (4, 5, 3)), + ("body_com_lin_acc_w", (4, 5, 3)), + ("body_com_ang_acc_w", (4, 5, 3)), + ("body_com_pose_b", (4, 5, 7)), + ("body_com_pos_b", (4, 5, 3)), + ("body_com_quat_b", (4, 5, 4)), + ("body_mass", (4, 5)), + ("body_inertia", (4, 5, 9)), + ("projected_gravity_b", (4, 5, 3)), + ("heading_w", (4, 5)), + ("default_body_pose", (4, 5, 7)), + ("default_body_vel", (4, 5, 6)), + ("default_body_state", (4, 5, 13)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.torch.shape == expected_shape, f"{property_name} has wrong shape" + + +# ============================================================================== +# Device Handling Tests +# ============================================================================== + + +class TestDeviceHandling: + """Test that tensors are created on the correct device.""" + + def test_imu_data_device(self): + """Test IMU data tensors are on correct device.""" + data = MockImuData(num_instances=2, device="cpu") + assert str(data.ang_vel_b.device) == "cpu" + assert str(data.lin_acc_b.device) == "cpu" + + def test_contact_sensor_data_device(self): + """Test contact sensor data tensors are on correct device.""" + data = MockContactSensorData(num_instances=2, num_bodies=3, device="cpu") + assert str(data.net_forces_w.device) == "cpu" + + def test_articulation_data_device(self): + """Test articulation data tensors are on correct device.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + assert str(data.joint_pos.device) == "cpu" + assert str(data.root_link_pose_w.device) == "cpu" + + def test_setter_moves_tensor_to_device(self): + """Test that setters move tensors to the correct device.""" + data = MockImuData(num_instances=2, device="cpu") + # Create tensor on CPU (default) + test_tensor = torch.randn(2, 3) + data.set_ang_vel_b(test_tensor) + assert str(data.ang_vel_b.device) == "cpu" + + +# ============================================================================== +# Composite Property Tests +# ============================================================================== + + +class TestCompositeProperties: + """Test that composite properties are correctly composed from components.""" + + def test_pva_pose_composition(self): + """Test PVA pose_w is correctly composed from pos_w and quat_w.""" + data = MockPvaData(num_instances=2, device="cpu") + pos = torch.randn(2, 3) + quat = torch.tensor([[1, 0, 0, 0], [0.707, 0.707, 0, 0]], dtype=torch.float32) + + data.set_pos_w(pos) + data.set_quat_w(quat) + + pose = data.pose_w.torch + assert torch.allclose(pose[:, :3], pos) + assert torch.allclose(pose[:, 3:], quat) + + def test_articulation_root_state_composition(self): + """Test articulation root_link_state_w is correctly composed.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + pose = torch.randn(2, 7) + vel = torch.randn(2, 6) + + data.set_root_link_pose_w(pose) + data.set_root_link_vel_w(vel) + + state = data.root_link_state_w.torch + assert torch.allclose(state[:, :7], pose) + assert torch.allclose(state[:, 7:], vel) + + def test_articulation_default_state_composition(self): + """Test articulation default_root_state is correctly composed.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + pose = torch.randn(2, 7) + vel = torch.randn(2, 6) + + data.set_default_root_pose(pose) + data.set_default_root_vel(vel) + + state = data.default_root_state.torch + assert torch.allclose(state[:, :7], pose) + assert torch.allclose(state[:, 7:], vel) + + +# ============================================================================== +# Convenience Alias Property Tests +# ============================================================================== + + +class TestArticulationConvenienceAliases: + """Test convenience alias properties for MockArticulationData.""" + + @pytest.fixture + def data(self): + """Create MockArticulationData fixture.""" + return MockArticulationData( + num_instances=4, + num_joints=12, + num_bodies=13, + num_fixed_tendons=2, + device="cpu", + ) + + # -- Root state aliases (without _link_ or _com_ prefix) -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("root_pos_w", "root_link_pos_w", (4, 3)), + ("root_quat_w", "root_link_quat_w", (4, 4)), + ("root_pose_w", "root_link_pose_w", (4, 7)), + ("root_vel_w", "root_com_vel_w", (4, 6)), + ("root_lin_vel_w", "root_com_lin_vel_w", (4, 3)), + ("root_ang_vel_w", "root_com_ang_vel_w", (4, 3)), + ("root_lin_vel_b", "root_com_lin_vel_b", (4, 3)), + ("root_ang_vel_b", "root_com_ang_vel_b", (4, 3)), + ], + ) + def test_root_aliases(self, data, alias, source, expected_shape): + """Test root state aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.torch.shape == expected_shape, f"{alias} has wrong shape" + assert torch.allclose(alias_prop.torch, source_prop.torch), f"{alias} should equal {source}" + + # -- Body state aliases (without _link_ or _com_ prefix) -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("body_pos_w", "body_link_pos_w", (4, 13, 3)), + ("body_quat_w", "body_link_quat_w", (4, 13, 4)), + ("body_pose_w", "body_link_pose_w", (4, 13, 7)), + ("body_vel_w", "body_com_vel_w", (4, 13, 6)), + ("body_state_w", "body_com_state_w", (4, 13, 13)), + ("body_lin_vel_w", "body_com_lin_vel_w", (4, 13, 3)), + ("body_ang_vel_w", "body_com_ang_vel_w", (4, 13, 3)), + ("body_acc_w", "body_com_acc_w", (4, 13, 6)), + ("body_lin_acc_w", "body_com_lin_acc_w", (4, 13, 3)), + ("body_ang_acc_w", "body_com_ang_acc_w", (4, 13, 3)), + ], + ) + def test_body_aliases(self, data, alias, source, expected_shape): + """Test body state aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.torch.shape == expected_shape, f"{alias} has wrong shape" + assert torch.allclose(alias_prop.torch, source_prop.torch), f"{alias} should equal {source}" + + # -- CoM in body frame -- + @pytest.mark.parametrize( + "alias,expected_shape", + [ + ("com_pos_b", (4, 13, 3)), + ("com_quat_b", (4, 13, 4)), + ], + ) + def test_com_body_frame_aliases(self, data, alias, expected_shape): + """Test CoM in body frame aliases.""" + prop = getattr(data, alias) + assert prop.torch.shape == expected_shape, f"{alias} has wrong shape" + + # -- Joint property aliases -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("joint_limits", "joint_pos_limits", (4, 12, 2)), + ("joint_velocity_limits", "joint_vel_limits", (4, 12)), + ("joint_friction", "joint_friction_coeff", (4, 12)), + ], + ) + def test_joint_aliases(self, data, alias, source, expected_shape): + """Test joint property aliases.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.torch.shape == expected_shape, f"{alias} has wrong shape" + assert torch.allclose(alias_prop.torch, source_prop.torch), f"{alias} should equal {source}" + + # -- Fixed tendon alias -- + def test_fixed_tendon_limit_alias(self, data): + """Test fixed_tendon_limit is alias for fixed_tendon_pos_limits.""" + assert data.fixed_tendon_limit.torch.shape == (4, 2, 2) + assert torch.allclose(data.fixed_tendon_limit.torch, data.fixed_tendon_pos_limits.torch) + + +class TestRigidObjectConvenienceAliases: + """Test convenience alias properties for MockRigidObjectData.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectData fixture.""" + return MockRigidObjectData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("root_pos_w", "root_link_pos_w", (4, 3)), + ("root_quat_w", "root_link_quat_w", (4, 4)), + ("root_pose_w", "root_link_pose_w", (4, 7)), + ("root_vel_w", "root_com_vel_w", (4, 6)), + ("root_lin_vel_w", "root_com_lin_vel_w", (4, 3)), + ("root_ang_vel_w", "root_com_ang_vel_w", (4, 3)), + ("root_lin_vel_b", "root_com_lin_vel_b", (4, 3)), + ("root_ang_vel_b", "root_com_ang_vel_b", (4, 3)), + ("body_pos_w", "body_link_pos_w", (4, 1, 3)), + ("body_quat_w", "body_link_quat_w", (4, 1, 4)), + ("body_pose_w", "body_link_pose_w", (4, 1, 7)), + ("body_vel_w", "body_com_vel_w", (4, 1, 6)), + ("body_state_w", "body_com_state_w", (4, 1, 13)), + ("body_lin_vel_w", "body_com_lin_vel_w", (4, 1, 3)), + ("body_ang_vel_w", "body_com_ang_vel_w", (4, 1, 3)), + ("body_acc_w", "body_com_acc_w", (4, 1, 6)), + ("body_lin_acc_w", "body_com_lin_acc_w", (4, 1, 3)), + ("body_ang_acc_w", "body_com_ang_acc_w", (4, 1, 3)), + ], + ) + def test_aliases(self, data, alias, source, expected_shape): + """Test convenience aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.torch.shape == expected_shape, f"{alias} has wrong shape" + assert torch.allclose(alias_prop.torch, source_prop.torch), f"{alias} should equal {source}" + + @pytest.mark.parametrize( + "alias,expected_shape", + [ + ("com_pos_b", (4, 1, 3)), + ("com_quat_b", (4, 1, 4)), + ], + ) + def test_com_body_frame_aliases(self, data, alias, expected_shape): + """Test CoM in body frame aliases.""" + prop = getattr(data, alias) + assert prop.torch.shape == expected_shape, f"{alias} has wrong shape" + + +class TestRigidObjectCollectionConvenienceAliases: + """Test convenience alias properties for MockRigidObjectCollectionData.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectCollectionData fixture.""" + return MockRigidObjectCollectionData( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + def test_body_state_w_alias(self, data): + """Test body_state_w is alias for body_com_state_w.""" + assert data.body_state_w.torch.shape == (4, 5, 13) + assert torch.allclose(data.body_state_w.torch, data.body_com_state_w.torch) diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py new file mode 100644 index 000000000000..949677f1c398 --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py @@ -0,0 +1,386 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for mock sensor interfaces.""" + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True) +simulation_app = app_launcher.app + +import pytest +import torch +import warp as wp + +from isaaclab.test.mock_interfaces.sensors import ( + MockContactSensor, + MockFrameTransformer, + MockImu, + MockPva, + create_mock_foot_contact_sensor, + create_mock_frame_transformer, + create_mock_imu, + create_mock_pva, +) +from isaaclab.test.mock_interfaces.utils import MockSensorBuilder + +# ============================================================================== +# MockImu Tests +# ============================================================================== + + +class TestMockImu: + """Tests for MockImu and MockImuData. + + MockImu only provides angular velocity and linear acceleration. + """ + + @pytest.fixture + def imu(self): + """Create a mock IMU fixture.""" + return MockImu(num_instances=4, device="cpu") + + def test_initialization(self, imu): + """Test that MockImu initializes correctly.""" + assert imu.num_instances == 4 + assert imu.device == "cpu" + assert imu.data is not None + + def test_lazy_tensor_initialization(self, imu): + """Test that unset properties return zero tensors with correct shapes.""" + # IMU only has angular velocity and linear acceleration + ang_vel = imu.data.ang_vel_b.torch + assert ang_vel.shape == (4, 3) + assert torch.all(ang_vel == 0) + + lin_acc = imu.data.lin_acc_b.torch + assert lin_acc.shape == (4, 3) + assert torch.all(lin_acc == 0) + + def test_set_mock_data(self, imu): + """Test bulk data setter.""" + ang_vel = torch.randn(4, 3) + lin_acc = torch.randn(4, 3) + + imu.data.set_mock_data(ang_vel_b=ang_vel, lin_acc_b=lin_acc) + + assert torch.allclose(imu.data.ang_vel_b.torch, ang_vel) + assert torch.allclose(imu.data.lin_acc_b.torch, lin_acc) + + def test_per_property_setter(self, imu): + """Test individual property setters.""" + lin_acc = torch.randn(4, 3) + imu.data.set_lin_acc_b(lin_acc) + assert torch.allclose(imu.data.lin_acc_b.torch, lin_acc) + + ang_vel = torch.randn(4, 3) + imu.data.set_ang_vel_b(ang_vel) + assert torch.allclose(imu.data.ang_vel_b.torch, ang_vel) + + +# ============================================================================== +# MockPva Tests +# ============================================================================== + + +class TestMockPva: + """Tests for MockPva and MockPvaData.""" + + @pytest.fixture + def pva(self): + """Create a mock PVA fixture.""" + return MockPva(num_instances=4, device="cpu") + + def test_initialization(self, pva): + """Test that MockPva initializes correctly.""" + assert pva.num_instances == 4 + assert pva.device == "cpu" + assert pva.data is not None + + def test_lazy_tensor_initialization(self, pva): + """Test that unset properties return zero tensors with correct shapes.""" + # Position + pos = pva.data.pos_w.torch + assert pos.shape == (4, 3) + assert torch.all(pos == 0) + + # Quaternion (should be identity in XYZW format: x=0, y=0, z=0, w=1) + quat = pva.data.quat_w.torch + assert quat.shape == (4, 4) + assert torch.all(quat[:, :3] == 0) # xyz components + assert torch.all(quat[:, 3] == 1) # w component + + # Velocities and accelerations + assert pva.data.lin_vel_b.shape == (4, 3) + assert pva.data.ang_vel_b.shape == (4, 3) + assert pva.data.lin_acc_b.shape == (4, 3) + assert pva.data.ang_acc_b.shape == (4, 3) + + def test_projected_gravity_default(self, pva): + """Test default gravity direction.""" + gravity = pva.data.projected_gravity_b.torch + assert gravity.shape == (4, 3) + # Default gravity should point down: (0, 0, -1) + assert torch.all(gravity[:, 2] == -1) + + def test_set_mock_data(self, pva): + """Test bulk data setter.""" + lin_vel = torch.randn(4, 3) + ang_vel = torch.randn(4, 3) + + pva.data.set_mock_data(lin_vel_b=lin_vel, ang_vel_b=ang_vel) + + assert torch.allclose(pva.data.lin_vel_b.torch, lin_vel) + assert torch.allclose(pva.data.ang_vel_b.torch, ang_vel) + + def test_per_property_setter(self, pva): + """Test individual property setters.""" + lin_acc = torch.randn(4, 3) + pva.data.set_lin_acc_b(lin_acc) + assert torch.allclose(pva.data.lin_acc_b.torch, lin_acc) + + def test_pose_composition(self, pva): + """Test that pose_w combines pos_w and quat_w correctly.""" + pos = torch.randn(4, 3) + quat = torch.tensor([[0, 0, 0, 1]] * 4, dtype=torch.float32) # XYZW format + + pva.data.set_pos_w(pos) + pva.data.set_quat_w(quat) + + pose = pva.data.pose_w.torch + assert pose.shape == (4, 7) + assert torch.allclose(pose[:, :3], pos) + assert torch.allclose(pose[:, 3:], quat) + + +# ============================================================================== +# MockContactSensor Tests +# ============================================================================== + + +class TestMockContactSensor: + """Tests for MockContactSensor and MockContactSensorData.""" + + @pytest.fixture + def sensor(self): + """Create a mock contact sensor fixture.""" + return MockContactSensor( + num_instances=4, + num_bodies=4, + body_names=["FL_foot", "FR_foot", "RL_foot", "RR_foot"], + device="cpu", + ) + + def test_initialization(self, sensor): + """Test that MockContactSensor initializes correctly.""" + assert sensor.num_instances == 4 + assert sensor.num_bodies == 4 + assert sensor.body_names == ["FL_foot", "FR_foot", "RL_foot", "RR_foot"] + assert sensor.contact_view is None + + def test_lazy_tensor_shapes(self, sensor): + """Test that unset properties return tensors with correct shapes.""" + forces = sensor.data.net_forces_w + assert forces.shape == (4, 4, 3) + + contact_time = sensor.data.current_contact_time + assert contact_time.shape == (4, 4) + + air_time = sensor.data.current_air_time + assert air_time.shape == (4, 4) + + def test_find_bodies(self, sensor): + """Test body finding by regex.""" + # Find all bodies matching ".*_foot" + indices, names = sensor.find_bodies(".*_foot") + assert len(indices) == 4 + assert set(names) == {"FL_foot", "FR_foot", "RL_foot", "RR_foot"} + + # Find specific body + indices, names = sensor.find_bodies("FL_foot") + assert indices == [0] + assert names == ["FL_foot"] + + # Find front feet + indices, names = sensor.find_bodies("F._foot") + assert len(indices) == 2 + assert "FL_foot" in names + assert "FR_foot" in names + + def test_compute_first_contact(self, sensor): + """Test first contact computation.""" + + # Set contact time to 0.5 for all bodies + sensor.data.set_current_contact_time(torch.full((4, 4), 0.5)) + + # Check with dt=1.0 - should be True (0.5 < 1.0) + first_contact = sensor.compute_first_contact(dt=1.0) + assert torch.all(wp.to_torch(first_contact)) + + # Check with dt=0.1 - should be False (0.5 > 0.1) + first_contact = sensor.compute_first_contact(dt=0.1) + assert torch.all(~wp.to_torch(first_contact)) + + def test_compute_first_air(self, sensor): + """Test first air computation.""" + + sensor.data.set_current_air_time(torch.full((4, 4), 0.2)) + + first_air = sensor.compute_first_air(dt=0.5) + assert torch.all(wp.to_torch(first_air)) + + def test_history_buffer(self): + """Test history buffer when enabled.""" + sensor_with_history = MockContactSensor( + num_instances=2, + num_bodies=2, + history_length=3, + device="cpu", + ) + + history = sensor_with_history.data.net_forces_w_history + assert history is not None + assert history.shape == (2, 3, 2, 3) + + def test_no_history_buffer(self, sensor): + """Test that history buffer is None when not enabled.""" + history = sensor.data.net_forces_w_history + assert history is None + + +# ============================================================================== +# MockFrameTransformer Tests +# ============================================================================== + + +class TestMockFrameTransformer: + """Tests for MockFrameTransformer and MockFrameTransformerData.""" + + @pytest.fixture + def transformer(self): + """Create a mock frame transformer fixture.""" + return MockFrameTransformer( + num_instances=2, + num_target_frames=3, + target_frame_names=["target_1", "target_2", "target_3"], + device="cpu", + ) + + def test_initialization(self, transformer): + """Test that MockFrameTransformer initializes correctly.""" + assert transformer.num_instances == 2 + assert transformer.num_bodies == 3 + assert transformer.body_names == ["target_1", "target_2", "target_3"] + + def test_data_shapes(self, transformer): + """Test that data properties have correct shapes.""" + # Source frame + assert transformer.data.source_pos_w.shape == (2, 3) + assert transformer.data.source_quat_w.shape == (2, 4) + + # Target frames + assert transformer.data.target_pos_w.shape == (2, 3, 3) + assert transformer.data.target_quat_w.shape == (2, 3, 4) + + # Relative poses + assert transformer.data.target_pos_source.shape == (2, 3, 3) + assert transformer.data.target_quat_source.shape == (2, 3, 4) + + def test_pose_composition(self, transformer): + """Test that pose properties combine position and orientation correctly.""" + source_pose = transformer.data.source_pose_w + assert source_pose.shape == (2, 7) + + target_pose = transformer.data.target_pose_w + assert target_pose.shape == (2, 3, 7) + + def test_find_bodies(self, transformer): + """Test frame finding by regex.""" + indices, names = transformer.find_bodies("target_.*") + assert len(indices) == 3 + + indices, names = transformer.find_bodies("target_1") + assert indices == [0] + + +# ============================================================================== +# Factory Function Tests +# ============================================================================== + + +class TestSensorFactories: + """Tests for sensor factory functions.""" + + def test_create_mock_imu(self): + """Test IMU factory function.""" + imu = create_mock_imu(num_instances=4) + assert imu.num_instances == 4 + assert imu.data.ang_vel_b.shape == (4, 3) + assert imu.data.lin_acc_b.shape == (4, 3) + + def test_create_mock_pva(self): + """Test PVA factory function.""" + pva = create_mock_pva(num_instances=4) + assert pva.num_instances == 4 + assert pva.data.projected_gravity_b.shape == (4, 3) + + def test_create_mock_foot_contact_sensor(self): + """Test foot contact sensor factory function.""" + sensor = create_mock_foot_contact_sensor(num_instances=4, num_feet=4) + assert sensor.num_instances == 4 + assert sensor.num_bodies == 4 + assert "FL_foot" in sensor.body_names + + def test_create_mock_frame_transformer(self): + """Test frame transformer factory function.""" + transformer = create_mock_frame_transformer(num_instances=2, num_target_frames=5) + assert transformer.num_instances == 2 + assert transformer.num_bodies == 5 + + +# ============================================================================== +# MockSensorBuilder Tests +# ============================================================================== + + +class TestMockSensorBuilder: + """Tests for MockSensorBuilder.""" + + def test_build_contact_sensor(self): + """Test building a contact sensor.""" + sensor = ( + MockSensorBuilder("contact") + .with_num_instances(4) + .with_bodies(["foot_1", "foot_2"]) + .with_history_length(3) + .build() + ) + + assert sensor.num_instances == 4 + assert sensor.num_bodies == 2 + assert sensor.data.net_forces_w_history.shape == (4, 3, 2, 3) + + def test_build_imu_sensor(self): + """Test building an IMU sensor.""" + sensor = MockSensorBuilder("imu").with_num_instances(2).build() + assert sensor.num_instances == 2 + + def test_build_pva_sensor(self): + """Test building a PVA sensor.""" + sensor = MockSensorBuilder("pva").with_num_instances(2).build() + assert sensor.num_instances == 2 + + def test_build_frame_transformer(self): + """Test building a frame transformer sensor.""" + sensor = ( + MockSensorBuilder("frame_transformer") + .with_num_instances(3) + .with_target_frames(["target_1", "target_2"]) + .build() + ) + + assert sensor.num_instances == 3 + assert sensor.num_bodies == 2 diff --git a/source/isaaclab/test/test_scripts_torcharray_patterns.py b/source/isaaclab/test/test_scripts_torcharray_patterns.py new file mode 100644 index 000000000000..227bbda7be3c --- /dev/null +++ b/source/isaaclab/test/test_scripts_torcharray_patterns.py @@ -0,0 +1,133 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Static scanner ensuring scripts/source do not regress the ProxyArray migration. + +Every public ``.data.`` on an asset or sensor now returns a +:class:`~isaaclab.utils.warp.ProxyArray` (or, for CameraData, a raw +``torch.Tensor``). Legacy conversion or tensor-method callsites on these +properties should migrate to explicit ``.torch`` or ``.warp`` access. These +tests regex-scan user-facing scripts/source files and flag regressions before +they reach users running tutorials or demos. +""" + +from __future__ import annotations + +import re +from pathlib import Path + +import pytest + +# Matches: +# wp.to_torch(.data.) +# where can be , [...], ., .[...], etc. +# Anchored so we don't match tool scripts that document the pattern as prose. +_WP_TO_TORCH_DOT_DATA = re.compile( + r"wp\.to_torch\(\s*" + r"[a-zA-Z_][a-zA-Z_0-9]*" # first name + r"(?:\[[^\]\[]*\]|\.[a-zA-Z_][a-zA-Z_0-9]*)*" # chain of [..] or .name + r"\.data\." # .data. + r"[a-zA-Z_][a-zA-Z_0-9]*" # field + r"\s*\)" +) + +# Matches: +# wp.to_torch(_data.) +# Catches the aliased pattern like ``object_data: RigidObjectData = ...`` followed +# by ``wp.to_torch(object_data.root_pos_w)``. +_WP_TO_TORCH_NAME_DATA = re.compile( + r"wp\.to_torch\(\s*" + r"[a-zA-Z_][a-zA-Z_0-9]*_data\." + r"[a-zA-Z_][a-zA-Z_0-9]*" + r"\s*\)" +) + +# Matches: +# .data..clone() +# .data.[...].clone() +# .data..assign(...) +# These are tensor/wp.array instance methods that ProxyArray intentionally does +# not forward. ``data.output[...]`` is camera data and remains torch-native. +_PROXYARRAY_DIRECT_METHOD_DOT_DATA = re.compile( + r"\.data\." + r"(?!_)" # ignore private backing buffers such as data._sim_bind_... + r"(?!output\b)" # camera output dict is torch-native + r"[a-zA-Z_][a-zA-Z_0-9]*" + r"(?:\[[^\]\[]*\])?" + r"\.(?:clone|assign)\s*\(" +) + +# scripts/tools/wrap_warp_to_torch.py is the migration utility and documents +# the old pattern inside strings. Exclude it from the scan. +_EXCLUDE = {"tools/wrap_warp_to_torch.py"} +_EXCLUDE_PREFIXES = ("source/isaaclab_contrib/",) + + +def _repo_root() -> Path: + # this test lives at source/isaaclab/test/test_scripts_torcharray_patterns.py + # parents[0]=test, [1]=isaaclab, [2]=source, [3]=repo root + return Path(__file__).resolve().parents[3] + + +def _scripts_files() -> list[Path]: + scripts = _repo_root() / "scripts" + return sorted(p for p in scripts.rglob("*.py") if "__pycache__" not in p.parts) + + +def _source_and_scripts_files() -> list[Path]: + roots = [_repo_root() / "scripts", _repo_root() / "source"] + return sorted(p for root in roots for p in root.rglob("*.py") if "__pycache__" not in p.parts) + + +@pytest.mark.parametrize("path", _scripts_files(), ids=lambda p: str(p.relative_to(_repo_root()))) +def test_no_wp_to_torch_on_torcharray_data(path: Path) -> None: + """No ``wp.to_torch(.data.)`` / ``wp.to_torch(_data.)`` in scripts/. + + Post-migration, ``.data.`` returns a ``ProxyArray`` + (or ``torch.Tensor`` for CameraData). The temporary ``wp.to_torch`` + shim is deprecated, so use the ``.torch`` accessor instead (or omit + the wrap entirely for torch-native fields). + """ + rel = path.relative_to(_repo_root()).as_posix() + if any(rel.endswith(suffix) for suffix in _EXCLUDE): + pytest.skip(f"{rel} is excluded from the ProxyArray hygiene scan") + if rel.startswith(_EXCLUDE_PREFIXES): + pytest.skip(f"{rel} is outside the ProxyArray migration scan scope") + + text = path.read_text(encoding="utf-8") + + offenders: list[str] = [] + for i, line in enumerate(text.splitlines(), start=1): + if _WP_TO_TORCH_DOT_DATA.search(line) or _WP_TO_TORCH_NAME_DATA.search(line): + offenders.append(f"{rel}:{i}: {line.rstrip()}") + + if offenders: + pytest.fail( + "Found wp.to_torch(...) calls on a migrated ProxyArray data accessor. " + "Use .torch instead of wp.to_torch(...) (see isaaclab 4.6.15 CHANGELOG).\n" + "\n".join(offenders) + ) + + +@pytest.mark.parametrize("path", _source_and_scripts_files(), ids=lambda p: str(p.relative_to(_repo_root()))) +def test_no_direct_proxyarray_data_methods(path: Path) -> None: + """No direct tensor/wp.array methods on migrated ``.data.`` accessors.""" + rel = path.relative_to(_repo_root()).as_posix() + if any(rel.endswith(suffix) for suffix in _EXCLUDE): + pytest.skip(f"{rel} is excluded from the ProxyArray hygiene scan") + if rel.startswith(_EXCLUDE_PREFIXES): + pytest.skip(f"{rel} is outside the ProxyArray migration scan scope") + + text = path.read_text(encoding="utf-8") + + offenders: list[str] = [] + for i, line in enumerate(text.splitlines(), start=1): + if _PROXYARRAY_DIRECT_METHOD_DOT_DATA.search(line): + offenders.append(f"{rel}:{i}: {line.rstrip()}") + + if offenders: + pytest.fail( + "Found direct tensor/wp.array methods on migrated ProxyArray data accessors. " + "Use .torch.clone() for tensor copies or .warp.assign(...) for warp writes.\n" + "\n".join(offenders) + ) diff --git a/source/isaaclab/test/utils/test_assets.py b/source/isaaclab/test/utils/test_assets.py index 483c7d93d9fe..147b8c04c831 100644 --- a/source/isaaclab/test/utils/test_assets.py +++ b/source/isaaclab/test/utils/test_assets.py @@ -6,14 +6,6 @@ from __future__ import annotations """Launch Isaac Sim Simulator first.""" - -from isaaclab.app import AppLauncher - -# launch omniverse app -simulation_app = AppLauncher(headless=True).app - -"""Rest everything follows.""" - import isaaclab.utils.assets as assets_utils @@ -37,16 +29,3 @@ def test_check_file_path_invalid(): usd_path = f"{assets_utils.ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_xyz.usd" # check file path assert assets_utils.check_file_path(usd_path) == 0 - - -def test_check_usd_path_with_timeout(): - """Test checking a USD path with timeout.""" - # robot file path - usd_path = f"{assets_utils.ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd" - # check file path - assert assets_utils.check_usd_path_with_timeout(usd_path) is True - - # invalid file path - usd_path = f"{assets_utils.ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_xyz.usd" - # check file path - assert assets_utils.check_usd_path_with_timeout(usd_path) is False diff --git a/source/isaaclab/test/utils/test_circular_buffer.py b/source/isaaclab/test/utils/test_circular_buffer.py index 52a2c16829d8..7517aca468c5 100644 --- a/source/isaaclab/test/utils/test_circular_buffer.py +++ b/source/isaaclab/test/utils/test_circular_buffer.py @@ -3,9 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -import pytest -import torch - """Launch Isaac Sim Simulator first.""" from isaaclab.app import AppLauncher @@ -13,7 +10,10 @@ # launch omniverse app in headless mode simulation_app = AppLauncher(headless=True).app -"""Rest everything follows from here.""" +"""Rest everything follows.""" + +import pytest +import torch from isaaclab.utils import CircularBuffer diff --git a/source/isaaclab/test/utils/test_configclass.py b/source/isaaclab/test/utils/test_configclass.py index 0c024be03f3b..60f77367e066 100644 --- a/source/isaaclab/test/utils/test_configclass.py +++ b/source/isaaclab/test/utils/test_configclass.py @@ -26,9 +26,10 @@ import pytest import torch -from isaaclab.utils.configclass import configclass +from isaaclab.utils.configclass import _field_module_dir, configclass from isaaclab.utils.dict import class_to_dict, dict_to_md5_hash, update_class_from_dict from isaaclab.utils.io import dump_yaml, load_yaml +from isaaclab.utils.string import ResolvableString """ Mock classes and functions. @@ -107,7 +108,7 @@ class EnvCfg: @configclass class RobotDefaultStateCfg: pos = (0.0, 0.0, 0.0) # type annotation missing on purpose (immutable) - rot: tuple = (1.0, 0.0, 0.0, 0.0) + rot: tuple = (0.0, 0.0, 0.0, 1.0) # xyzw format dof_pos: tuple = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0) dof_vel = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # type annotation missing on purpose (mutable) @@ -217,7 +218,7 @@ class ChildADemoCfg(ParentDemoCfg): def __post_init__(self): self.b = 3 # change value of existing field - self.m.rot = (2.0, 0.0, 0.0, 0.0) # change value of default + self.m.rot = (0.0, 0.0, 0.0, 2.0) # change value of default (xyzw format) self.i = ["a", "b"] # change value of existing field @@ -401,7 +402,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): "env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}}, "robot_default_state": { "pos": (0.0, 0.0, 0.0), - "rot": (1.0, 0.0, 0.0, 0.0), + "rot": (0.0, 0.0, 0.0, 1.0), "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], }, @@ -413,7 +414,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): "env": {"num_envs": 22, "episode_length": 2000, "viewer": {"eye": (2.0, 2.0, 2.0), "lookat": [0.0, 0.0, 0.0]}}, "robot_default_state": { "pos": (0.0, 0.0, 0.0), - "rot": (1.0, 0.0, 0.0, 0.0), + "rot": (0.0, 0.0, 0.0, 1.0), "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], }, @@ -425,7 +426,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): "env": {"num_envs": 22, "episode_length": 2000, "viewer": None}, "robot_default_state": { "pos": (0.0, 0.0, 0.0), - "rot": (1.0, 0.0, 0.0, 0.0), + "rot": (0.0, 0.0, 0.0, 1.0), "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], }, @@ -437,7 +438,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): "env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}}, "robot_default_state": { "pos": (0.0, 0.0, 0.0), - "rot": (1.0, 0.0, 0.0, 0.0), + "rot": (0.0, 0.0, 0.0, 1.0), "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], }, @@ -447,7 +448,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): basic_demo_cfg_nested_dict_and_list = { "dict_1": { - "dict_2": {"func": dummy_function2}, + "dict_2": {"func": "test_configclass:dummy_function2"}, }, "list_1": [ {"num_envs": 23, "episode_length": 3000, "viewer": {"eye": [5.0, 5.0, 5.0], "lookat": [0.0, 0.0, 0.0]}}, @@ -459,7 +460,7 @@ class MissingChildDemoCfg(MissingParentDemoCfg): "env": {"num_envs": 56, "episode_length": 2000, "viewer": {"eye": [7.5, 7.5, 7.5], "lookat": [0.0, 0.0, 0.0]}}, "robot_default_state": { "pos": (0.0, 0.0, 0.0), - "rot": (1.0, 0.0, 0.0, 0.0), + "rot": (0.0, 0.0, 0.0, 1.0), "dof_pos": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), "dof_vel": [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], }, @@ -643,6 +644,47 @@ def test_config_update_nested_dict(): assert isinstance(cfg.list_1[1].viewer, ViewerCfg) +def test_wrap_resolvable_strings_handles_cyclic_containers(): + """Cyclic container graphs in config values should not recurse forever.""" + + @configclass + class CyclicContainerCfg: + payload: dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + cycle = {} + cycle["self"] = cycle + cycle["tuple"] = (cycle, {"back": cycle}) + self.payload = cycle + + cfg = CyclicContainerCfg() + + assert cfg.payload["self"] is cfg.payload + assert cfg.payload["tuple"][0] is cfg.payload + assert cfg.payload["tuple"][1]["back"] is cfg.payload + + +def test_dir_resolution_uses_declaring_class_for_inherited_field(): + """{DIR} expansion should use the field declaring class, not subclass module.""" + + @configclass + class _BaseCfg: + class_type: type | str = "{DIR}.base_mod:BaseSymbol" + + @configclass + class _ChildCfg(_BaseCfg): + pass + + # Simulate subclass declared in a different package than the parent config. + _BaseCfg.__module__ = "test_pkg.parent.base_cfg" + _ChildCfg.__module__ = "other_pkg.child.child_cfg" + + cfg = _ChildCfg() + + assert isinstance(cfg.class_type, ResolvableString) + assert str(cfg.class_type) == "test_pkg.parent.base_mod:BaseSymbol" + + def test_config_update_different_iterable_lengths(): """Iterables are whole replaced, even if their lengths are different.""" @@ -930,7 +972,7 @@ def test_config_inheritance(): # check post init assert cfg_a.b == 3 assert cfg_a.i == ["a", "b"] - assert cfg_a.m.rot == (2.0, 0.0, 0.0, 0.0) + assert cfg_a.m.rot == (0.0, 0.0, 0.0, 2.0) def test_config_inheritance_independence(): @@ -951,8 +993,8 @@ def test_config_inheritance_independence(): assert cfg_b.b == 8 assert cfg_a.c == RobotDefaultStateCfg() assert isinstance(cfg_b.c, type(MISSING)) - assert cfg_a.m.rot == (2.0, 0.0, 0.0, 0.0) - assert cfg_b.m.rot == (1.0, 0.0, 0.0, 0.0) + assert cfg_a.m.rot == (0.0, 0.0, 0.0, 2.0) + assert cfg_b.m.rot == (0.0, 0.0, 0.0, 1.0) assert isinstance(cfg_a.j, type(MISSING)) assert cfg_b.j == ["3", "4"] assert cfg_a.i == ["a", "b"] @@ -1077,3 +1119,92 @@ def test_validity(): # check that no more than the expected missing fields are in the error message assert len(error_message.split("\n")) - 2 == len(validity_expected_fields) + + +def test_dir_resolution_in_subclass(): + """Test that {DIR} in inherited fields resolves relative to the declaring class's module.""" + + @configclass + class ParentCfg: + class_type: str = "{DIR}.my_module:MyClass" + name: str = "default" + + @configclass + class ChildCfg(ParentCfg): + extra: int = 42 + + # Pretend the parent lives in a real package and the child lives in a test file + ParentCfg.__module__ = "some_package.sub_package.parent_cfg" + ChildCfg.__module__ = "test_some_feature" + + parent = ParentCfg.__new__(ParentCfg) + child = ChildCfg.__new__(ChildCfg) + + # class_type should resolve to the parent's module dir in both cases + assert _field_module_dir(parent, "class_type") == "some_package.sub_package" + assert _field_module_dir(child, "class_type") == "some_package.sub_package" + # extra should resolve to the child's module dir + assert _field_module_dir(child, "extra") == "test_some_feature" + + +# ============================================================================= +# Tests: checked_apply +# ============================================================================= + + +def test_checked_apply_forwards_all_fields(): + """checked_apply forwards every declared field on src onto target.""" + from dataclasses import dataclass as plain_dataclass + + from isaaclab.utils import checked_apply + + @configclass + class WrapperCfg: + gap: float = 0.01 + margin: float = 0.0 + + @plain_dataclass + class UpstreamLike: + gap: float = 99.0 + margin: float = 99.0 + unrelated: str = "keep me" + + src = WrapperCfg(margin=0.005) + target = UpstreamLike() + checked_apply(src, target) + + assert target.gap == 0.01 + assert target.margin == 0.005 + # fields not declared on src are not touched + assert target.unrelated == "keep me" + + +def test_checked_apply_raises_on_missing_target_field(): + """checked_apply fails loudly when target lacks a declared field.""" + from dataclasses import dataclass as plain_dataclass + + from isaaclab.utils import checked_apply + + @configclass + class WrapperCfg: + margin: float = 0.01 + renamed_in_upstream: float = 0.0 + + @plain_dataclass + class UpstreamMissingField: + margin: float = 0.0 + # 'renamed_in_upstream' was renamed/removed upstream + + with pytest.raises(AttributeError, match="renamed_in_upstream"): + checked_apply(WrapperCfg(), UpstreamMissingField()) + + +def test_checked_apply_rejects_non_dataclass_src(): + """checked_apply requires src to be a dataclass.""" + from isaaclab.utils import checked_apply + + class NotADataclass: + margin = 0.01 + + with pytest.raises(TypeError, match="must be a dataclass"): + checked_apply(NotADataclass(), object()) diff --git a/source/isaaclab/test/utils/test_dict.py b/source/isaaclab/test/utils/test_dict.py index 35ce35f26577..464a26d878ed 100644 --- a/source/isaaclab/test/utils/test_dict.py +++ b/source/isaaclab/test/utils/test_dict.py @@ -16,7 +16,10 @@ import random +import pytest + import isaaclab.utils.dict as dict_utils +import isaaclab.utils.string as string_utils def _test_function(x): @@ -50,7 +53,7 @@ def test_string_callable_function_conversion(): # convert function to string test_string = dict_utils.callable_to_string(_test_function) # convert string to function - test_function_2 = dict_utils.string_to_callable(test_string) + test_function_2 = string_utils.string_to_callable(test_string) # check that functions are the same assert _test_function(2) == test_function_2(2) @@ -61,7 +64,7 @@ def test_string_callable_function_with_lambda_in_name_conversion(): # convert function to string test_string = dict_utils.callable_to_string(_test_lambda_function) # convert string to function - test_function_2 = dict_utils.string_to_callable(test_string) + test_function_2 = string_utils.string_to_callable(test_string) # check that functions are the same assert _test_function(2) == test_function_2(2) @@ -74,7 +77,7 @@ def test_string_callable_lambda_conversion(): # convert function to string test_string = dict_utils.callable_to_string(func) # convert string to function - func_2 = dict_utils.string_to_callable(test_string) + func_2 = string_utils.string_to_callable(test_string) # check that functions are the same assert test_string == "lambda x: x**2" assert func(2) == func_2(2) @@ -97,3 +100,28 @@ def test_dict_to_md5(): for _ in range(200): md5_hash_2 = dict_utils.dict_to_md5_hash(test_dict) assert md5_hash_1 == md5_hash_2 + + +class _CallableCfg: + class_type = _test_function + + +def test_update_class_from_dict_keeps_callable_string_lazy(): + """Callable-string updates should remain lazy via ResolvableString.""" + cfg = _CallableCfg() + dict_utils.update_class_from_dict(cfg, {"class_type": "math:sin"}) + + assert isinstance(cfg.class_type, string_utils.ResolvableString) + # Dunder probing should not force resolution/import side effects. + assert hasattr(cfg.class_type, "__dataclass_fields__") is False + # Runtime use still resolves correctly. + assert pytest.approx(cfg.class_type(0.0), rel=0.0, abs=1e-9) == 0.0 + + +def test_update_class_from_dict_does_not_rewrap_resolvable_string(): + """Existing ResolvableString should be preserved, not re-wrapped.""" + cfg = _CallableCfg() + existing = string_utils.ResolvableString("math:sin") + dict_utils.update_class_from_dict(cfg, {"class_type": existing}) + + assert cfg.class_type is existing diff --git a/source/isaaclab/test/utils/test_logger.py b/source/isaaclab/test/utils/test_logger.py index 69df76f4c660..1aed1e377e1f 100644 --- a/source/isaaclab/test/utils/test_logger.py +++ b/source/isaaclab/test/utils/test_logger.py @@ -486,8 +486,8 @@ def test_configure_logging_basic(): # Should return root logger assert logger is not None assert logger is logging.getLogger() - # Root logger is always set to DEBUG to ensure all messages are logged - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == logging.INFO # Should have exactly one handler (stream handler) assert len(logger.handlers) == 1 @@ -556,8 +556,8 @@ def test_configure_logging_levels(): for level_str in levels: logger = configure_logging(logging_level=level_str, save_logs_to_file=False) - # Root logger is always set to DEBUG to ensure all messages are logged - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == level_values[level_str] # Handler level should match the requested level assert logger.handlers[0].level == level_values[level_str] @@ -585,8 +585,8 @@ def test_configure_logging_default_log_dir(): logger = configure_logging(logging_level="INFO", save_logs_to_file=True, log_dir=None) - # Root logger is always set to DEBUG - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == logging.INFO # Should have file handler assert len(logger.handlers) == 2 @@ -614,8 +614,8 @@ def test_configure_logging_custom_log_dir(): assert os.path.exists(custom_log_dir) assert os.path.isdir(custom_log_dir) - # Root logger is always set to DEBUG - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == logging.INFO # Log file should be in custom directory file_handler = logger.handlers[1] @@ -629,8 +629,8 @@ def test_configure_logging_log_file_format(): with tempfile.TemporaryDirectory() as temp_dir: logger = configure_logging(logging_level="INFO", save_logs_to_file=True, log_dir=temp_dir) - # Root logger is always set to DEBUG - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == logging.INFO # Get log file name file_handler = logger.handlers[1] @@ -648,8 +648,8 @@ def test_configure_logging_file_formatter(): with tempfile.TemporaryDirectory() as temp_dir: logger = configure_logging(logging_level="INFO", save_logs_to_file=True, log_dir=temp_dir) - # Root logger is always set to DEBUG - assert logger.level == logging.DEBUG + # Root logger level matches the requested level + assert logger.level == logging.INFO stream_handler = logger.handlers[0] file_handler = logger.handlers[1] diff --git a/source/isaaclab/test/utils/test_math.py b/source/isaaclab/test/utils/test_math.py index 2f256728e9ee..e0d85b14eb25 100644 --- a/source/isaaclab/test/utils/test_math.py +++ b/source/isaaclab/test/utils/test_math.py @@ -139,37 +139,37 @@ def test_copysign(device): @pytest.mark.parametrize("device", ["cpu", "cuda:0"]) def test_is_identity_pose(device): """Test is_identity_pose method.""" - # Single row identity pose + # Single row identity pose (xyzw format) identity_pos = torch.zeros(3, device=device) - identity_rot = torch.tensor((1.0, 0.0, 0.0, 0.0), device=device) + identity_rot = torch.tensor((0.0, 0.0, 0.0, 1.0), device=device) assert math_utils.is_identity_pose(identity_pos, identity_rot) is True - # Modified single row pose + # Modified single row pose (not identity) identity_pos = torch.tensor([1.0, 0.0, 0.0], device=device) - identity_rot = torch.tensor((1.0, 1.0, 0.0, 0.0), device=device) + identity_rot = torch.tensor((0.0, 1.0, 0.0, 1.0), device=device) assert math_utils.is_identity_pose(identity_pos, identity_rot) is False - # Multi-row identity pose + # Multi-row identity pose (xyzw format) identity_pos = torch.zeros(3, 3, device=device) - identity_rot = torch.tensor([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device) + identity_rot = torch.tensor([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]], device=device) assert math_utils.is_identity_pose(identity_pos, identity_rot) is True - # Modified multi-row pose + # Modified multi-row pose (not identity) identity_pos = torch.tensor([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=device) - identity_rot = torch.tensor([[1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], device=device) + identity_rot = torch.tensor([[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]], device=device) assert math_utils.is_identity_pose(identity_pos, identity_rot) is False @pytest.mark.parametrize("device", ["cpu", "cuda:0"]) def test_axis_angle_from_quat(device): """Test axis_angle_from_quat method.""" - # Quaternions of the form (2,4) and (2,2,4) + # Quaternions of the form (2,4) and (2,2,4) in xyzw format quats = [ - torch.Tensor([[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]]).to(device), + torch.Tensor([[0.0, 0.0, 0.0, 1.0], [0.142006, 0.0, 0.5206887, 0.8418536]]).to(device), torch.Tensor( [ - [[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]], - [[1.0, 0.0, 0.0, 0.0], [0.9850375, 0.0995007, 0.0995007, 0.0995007]], + [[0.0, 0.0, 0.0, 1.0], [0.142006, 0.0, 0.5206887, 0.8418536]], + [[0.0, 0.0, 0.0, 1.0], [0.0995007, 0.0995007, 0.0995007, 0.9850375]], ] ).to(device), ] @@ -195,9 +195,9 @@ def test_axis_angle_from_quat_approximation(device): theta = torch.Tensor([0.0000001]).to(device) # Arbitrary normalized axis of rotation in rads, (x,y,z) axis = [-0.302286, 0.205494, -0.930803] - # Generate quaternion + # Generate quaternion in xyzw format: [qx, qy, qz, qw] qw = torch.cos(theta / 2) - quat_vect = [qw] + [d * torch.sin(theta / 2) for d in axis] + quat_vect = [d * torch.sin(theta / 2) for d in axis] + [qw] quaternion = torch.tensor(quat_vect, dtype=torch.float32, device=device) # Convert quaternion to axis-angle @@ -213,33 +213,33 @@ def test_axis_angle_from_quat_approximation(device): @pytest.mark.parametrize("device", ["cpu", "cuda:0"]) def test_quat_error_magnitude(device): """Test quat_error_magnitude method.""" - # No rotation - q1 = torch.Tensor([1, 0, 0, 0]).to(device) - q2 = torch.Tensor([1, 0, 0, 0]).to(device) + # No rotation (xyzw format) + q1 = torch.Tensor([0, 0, 0, 1]).to(device) + q2 = torch.Tensor([0, 0, 0, 1]).to(device) expected_diff = torch.Tensor([0.0]).to(device) q12_diff = math_utils.quat_error_magnitude(q1, q2) assert math.isclose(q12_diff.item(), expected_diff.item(), rel_tol=1e-5) - # PI/2 rotation - q1 = torch.Tensor([1.0, 0, 0.0, 0]).to(device) - q2 = torch.Tensor([0.7071068, 0.7071068, 0, 0]).to(device) + # PI/2 rotation around x (xyzw format) + q1 = torch.Tensor([0, 0, 0, 1.0]).to(device) + q2 = torch.Tensor([0.7071068, 0, 0, 0.7071068]).to(device) expected_diff = torch.Tensor([PI / 2]).to(device) q12_diff = math_utils.quat_error_magnitude(q1, q2) assert math.isclose(q12_diff.item(), expected_diff.item(), rel_tol=1e-5) - # PI rotation - q1 = torch.Tensor([1.0, 0, 0.0, 0]).to(device) - q2 = torch.Tensor([0.0, 0.0, 1.0, 0]).to(device) + # PI rotation around y (xyzw format) + q1 = torch.Tensor([0, 0, 0, 1.0]).to(device) + q2 = torch.Tensor([0.0, 1.0, 0, 0.0]).to(device) expected_diff = torch.Tensor([PI]).to(device) q12_diff = math_utils.quat_error_magnitude(q1, q2) assert math.isclose(q12_diff.item(), expected_diff.item(), rel_tol=1e-5) - # Batched inputs + # Batched inputs (xyzw format) q1 = torch.stack( - [torch.Tensor([1, 0, 0, 0]), torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([1.0, 0, 0.0, 0])], dim=0 + [torch.Tensor([0, 0, 0, 1]), torch.Tensor([0, 0, 0, 1.0]), torch.Tensor([0, 0, 0, 1.0])], dim=0 ).to(device) q2 = torch.stack( - [torch.Tensor([1, 0, 0, 0]), torch.Tensor([0.7071068, 0.7071068, 0, 0]), torch.Tensor([0.0, 0.0, 1.0, 0])], + [torch.Tensor([0, 0, 0, 1]), torch.Tensor([0.7071068, 0, 0, 0.7071068]), torch.Tensor([0.0, 1.0, 0, 0.0])], dim=0, ).to(device) expected_diff = ( @@ -258,11 +258,11 @@ def test_quat_unique(device): # Test positive real quaternion pos_real_quats = math_utils.quat_unique(quats) - # Test that the real part is positive - assert torch.all(pos_real_quats[:, 0] > 0).item() + # Test that the real part (w, at index 3 in xyzw format) is positive + assert torch.all(pos_real_quats[:, 3] > 0).item() - non_pos_indices = quats[:, 0] < 0 - # Check imaginary part have sign flipped if real part is negative + non_pos_indices = quats[:, 3] < 0 + # Check quaternion is negated if real part was negative torch.testing.assert_close(pos_real_quats[non_pos_indices], -quats[non_pos_indices]) torch.testing.assert_close(pos_real_quats[~non_pos_indices], quats[~non_pos_indices]) @@ -326,9 +326,10 @@ def test_quat_error_mag_with_quat_unique(device): @pytest.mark.parametrize("device", ["cpu", "cuda:0"]) def test_convention_converter(device): """Test convert_camera_frame_orientation_convention to and from ros, opengl, and world conventions.""" - quat_ros = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=device) - quat_opengl = torch.tensor([[0.33985113, 0.17591988, 0.42470818, 0.82047324]], device=device) - quat_world = torch.tensor([[-0.3647052, -0.27984815, -0.1159169, 0.88047623]], device=device) + # Quaternions in xyzw format (converted from original wxyz test values) + quat_ros = torch.tensor([[0.33985114, 0.82047325, -0.42470819, -0.17591989]], device=device) + quat_opengl = torch.tensor([[0.17591988, 0.42470818, 0.82047324, 0.33985113]], device=device) + quat_world = torch.tensor([[-0.27984815, -0.1159169, 0.88047623, -0.3647052]], device=device) # from ROS torch.testing.assert_close( @@ -402,10 +403,11 @@ def test_quat_conjugate(device): quat = math_utils.random_orientation(1000, device=device) value = math_utils.quat_conjugate(quat) - expected_real = quat[..., 0] - expected_imag = -quat[..., 1:] - torch.testing.assert_close(expected_real, value[..., 0]) - torch.testing.assert_close(expected_imag, value[..., 1:]) + # In xyzw format: xyz is imaginary (indices 0-2), w is real (index 3) + expected_imag = -quat[..., :3] + expected_real = quat[..., 3] + torch.testing.assert_close(expected_imag, value[..., :3]) + torch.testing.assert_close(expected_real, value[..., 3]) @pytest.mark.parametrize("device", ("cpu", "cuda:0")) @@ -426,15 +428,15 @@ def test_quat_from_euler_xyz(device, num_envs, euler_angles): angles = torch.tensor(euler_angles, device=device).unsqueeze(0).repeat((num_envs, 1)) quat_value = math_utils.quat_unique(math_utils.quat_from_euler_xyz(angles[:, 0], angles[:, 1], angles[:, 2])) - expected_quat = math_utils.convert_quat( + # scipy's as_quat() already returns xyzw format, no conversion needed + expected_quat = ( torch.tensor( scipy_tf.Rotation.from_euler("xyz", euler_angles, degrees=False).as_quat(), device=device, dtype=torch.float, ) .unsqueeze(0) - .repeat((num_envs, 1)), - to="wxyz", + .repeat((num_envs, 1)) ) torch.testing.assert_close(expected_quat, quat_value) @@ -622,16 +624,15 @@ def test_quat_to_and_from_angle_axis(device): n = 1024 q_rand = math_utils.quat_unique(math_utils.random_orientation(num=n, device=device)) rot_vec_value = math_utils.axis_angle_from_quat(q_rand) + # Our quaternions are already in xyzw format, which scipy expects rot_vec_scipy = torch.tensor( - scipy_tf.Rotation.from_quat( - math_utils.convert_quat(quat=q_rand.to(device="cpu").numpy(), to="xyzw") - ).as_rotvec(), + scipy_tf.Rotation.from_quat(q_rand.to(device="cpu").numpy()).as_rotvec(), device=device, dtype=torch.float32, ) torch.testing.assert_close(rot_vec_scipy, rot_vec_value) axis = math_utils.normalize(rot_vec_value.clone()) - angle = torch.norm(rot_vec_value.clone(), dim=-1) + angle = torch.linalg.norm(rot_vec_value.clone(), dim=-1) q_value = math_utils.quat_unique(math_utils.quat_from_angle_axis(angle, axis)) torch.testing.assert_close(q_rand, q_value) @@ -679,9 +680,9 @@ def test_quat_box_minus_and_quat_box_plus(device): device=device, ) - # Initialize quaternion trajectory starting from identity quaternion + # Initialize quaternion trajectory starting from identity quaternion (xyzw format) quat_trajectory = torch.zeros((len(delta_angle), 2 * n + 1, 4), device=device) - quat_trajectory[:, 0, :] = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device).repeat(len(delta_angle), 1) + quat_trajectory[:, 0, :] = torch.tensor([[0.0, 0.0, 0.0, 1.0]], device=device).repeat(len(delta_angle), 1) # Integrate incremental rotations forward to form a closed loop trajectory for i in range(1, 2 * n + 1): @@ -795,7 +796,7 @@ def test_compute_pose_error(device, rot_error_type): else: axis_angle = math_utils.quat_box_minus(q02, q01) axis = math_utils.normalize(axis_angle) - angle = torch.norm(axis_angle, dim=-1) + angle = torch.linalg.norm(axis_angle, dim=-1) torch.testing.assert_close( math_utils.quat_unique(math_utils.quat_from_angle_axis(angle, axis)), @@ -836,12 +837,12 @@ def test_yaw_quat(device): """ Test for yaw_quat methods. """ - # 90-degree (n/2 radians) rotations about the Y-axis - quat_input = torch.tensor([0.7071, 0, 0.7071, 0], device=device) + # 90-degree (pi/2 radians) rotations about the Y-axis (xyzw format) + quat_input = torch.tensor([0, 0.7071, 0, 0.7071], device=device) cloned_quat_input = quat_input.clone() - # Calculated output that the function should return - expected_output = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) + # Calculated output that the function should return (identity in xyzw) + expected_output = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # Compute the result using the existing implementation result = math_utils.yaw_quat(quat_input) @@ -891,8 +892,9 @@ def test_matrix_from_quat(device): # prepare random quaternions and vectors q_rand = math_utils.quat_unique(math_utils.random_orientation(num=n, device=device)) rot_mat = math_utils.matrix_from_quat(quaternions=q_rand) + # Our quaternions are already in xyzw format, which scipy expects rot_mat_scipy = torch.tensor( - scipy_tf.Rotation.from_quat(math_utils.convert_quat(quat=q_rand.to(device="cpu"), to="xyzw")).as_matrix(), + scipy_tf.Rotation.from_quat(q_rand.to(device="cpu").numpy()).as_matrix(), device=device, dtype=torch.float32, ) @@ -940,7 +942,8 @@ def test_quat_apply(device): # prepare random quaternions and vectors n = 1024 q_rand = math_utils.random_orientation(num=n, device=device) - Rotation = scipy_tf.Rotation.from_quat(math_utils.convert_quat(quat=q_rand.to(device="cpu").numpy(), to="xyzw")) + # Our quaternions are already in xyzw format, which scipy expects + Rotation = scipy_tf.Rotation.from_quat(q_rand.to(device="cpu").numpy()) v_rand = math_utils.sample_uniform(-1000, 1000, (n, 3), device=device) @@ -957,7 +960,8 @@ def test_quat_apply_inverse(device): # prepare random quaternions and vectors n = 1024 q_rand = math_utils.random_orientation(num=n, device=device) - Rotation = scipy_tf.Rotation.from_quat(math_utils.convert_quat(quat=q_rand.to(device="cpu").numpy(), to="xyzw")) + # Our quaternions are already in xyzw format, which scipy expects + Rotation = scipy_tf.Rotation.from_quat(q_rand.to(device="cpu").numpy()) v_rand = math_utils.sample_uniform(-1000, 1000, (n, 3), device=device) @@ -974,7 +978,7 @@ def test_quat_inv(device): """Test for quat_inv method. For random unit and non-unit quaternions q, the Hamilton products - q ⊗ q⁻¹ and q⁻¹ ⊗ q must both equal the identity quaternion (1,0,0,0) + q ⊗ q⁻¹ and q⁻¹ ⊗ q must both equal the identity quaternion (0,0,0,1) within numerical precision. """ num = 2048 @@ -986,7 +990,7 @@ def test_quat_inv(device): q_unit = torch.randn(num, 4, device=device) q_unit = q_unit / q_unit.norm(dim=-1, keepdim=True) - identity = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) + identity = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # xyzw format for q in (q_nonunit, q_unit): q_inv = math_utils.quat_inv(q) @@ -1005,13 +1009,13 @@ def test_quat_apply_benchmarks(): """ # define old implementation for quat_rotate and quat_rotate_inverse - # Based on commit: cdfa954fcc4394ca8daf432f61994e25a7b8e9e2 + # Updated for xyzw format (xyz at indices 0-2, w at index 3) @torch.jit.script def bmm_quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: shape = q.shape - q_w = q[:, 0] - q_vec = q[:, 1:] + q_vec = q[:, :3] # xyz + q_w = q[:, 3] # w a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0 @@ -1020,8 +1024,8 @@ def bmm_quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: @torch.jit.script def bmm_quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: shape = q.shape - q_w = q[:, 0] - q_vec = q[:, 1:] + q_vec = q[:, :3] # xyz + q_w = q[:, 3] # w a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0 @@ -1029,8 +1033,8 @@ def bmm_quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: @torch.jit.script def einsum_quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: - q_w = q[..., 0] - q_vec = q[..., 1:] + q_vec = q[..., :3] # xyz + q_w = q[..., 3] # w a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * torch.einsum("...i,...i->...", q_vec, v).unsqueeze(-1) * 2.0 @@ -1038,8 +1042,8 @@ def einsum_quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: @torch.jit.script def einsum_quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor: - q_w = q[..., 0] - q_vec = q[..., 1:] + q_vec = q[..., :3] # xyz + q_w = q[..., 3] # w a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1) b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 c = q_vec * torch.einsum("...i,...i->...", q_vec, v).unsqueeze(-1) * 2.0 @@ -1261,26 +1265,27 @@ def test_euler_xyz_from_quat(): against the expected output for various quaternions. The test includes quaternions representing different rotations around the x, y, and z axes. The test is performed for both the default output range (-π, π] and the wrapped output range [0, 2π). + Quaternions are in xyzw format. """ quats = [ - torch.Tensor([[1.0, 0.0, 0.0, 0.0]]), # 0° around x, y, z + torch.Tensor([[0.0, 0.0, 0.0, 1.0]]), # 0° around x, y, z (identity) torch.Tensor( [ - [0.9238795, 0.3826834, 0.0, 0.0], # 45° around x - [0.9238795, 0.0, -0.3826834, 0.0], # -45° around y - [0.9238795, 0.0, 0.0, -0.3826834], # -45° around z + [0.3826834, 0.0, 0.0, 0.9238795], # 45° around x + [0.0, -0.3826834, 0.0, 0.9238795], # -45° around y + [0.0, 0.0, -0.3826834, 0.9238795], # -45° around z ] ), torch.Tensor( [ - [0.7071068, -0.7071068, 0.0, 0.0], # -90° around x - [0.7071068, 0.0, 0.0, -0.7071068], # -90° around z + [-0.7071068, 0.0, 0.0, 0.7071068], # -90° around x + [0.0, 0.0, -0.7071068, 0.7071068], # -90° around z ] ), torch.Tensor( [ - [0.3826834, -0.9238795, 0.0, 0.0], # -135° around x - [0.3826834, 0.0, 0.0, -0.9238795], # -135° around y + [-0.9238795, 0.0, 0.0, 0.3826834], # -135° around x + [0.0, 0.0, -0.9238795, 0.3826834], # -135° around z ] ), ] diff --git a/source/isaaclab/test/utils/test_string.py b/source/isaaclab/test/utils/test_string.py index d171a3885e10..22f51ab6f483 100644 --- a/source/isaaclab/test/utils/test_string.py +++ b/source/isaaclab/test/utils/test_string.py @@ -19,6 +19,45 @@ import pytest import isaaclab.utils.string as string_utils +from isaaclab.utils.string import _resolve_matching_names_impl + + +def test_resolvable_string_metadata_is_non_eager(): + """Test metadata access on ResolvableString without triggering import/resolve.""" + ref = string_utils.ResolvableString("package.subpkg.module:Outer.InnerCallable") + assert ref.__module__ == "package.subpkg.module" + assert ref.__qualname__ == "Outer.InnerCallable" + assert ref.__name__ == "InnerCallable" + + +def test_resolvable_string_metadata_stable_after_resolution(): + """Test metadata before/after resolution remains consistent.""" + ref = string_utils.ResolvableString("math:sin") + assert ref.__module__ == "math" + assert ref.__qualname__ == "sin" + assert ref.__name__ == "sin" + # Force resolution. + assert pytest.approx(ref(0.0), rel=0.0, abs=1e-9) == 0.0 + # Metadata should remain identical after resolution cache is populated. + assert ref.__module__ == "math" + assert ref.__qualname__ == "sin" + assert ref.__name__ == "sin" + + +def test_resolvable_string_dunder_introspection_stays_lazy(): + """Test dunder probing doesn't force resolution for invalid references.""" + ref = string_utils.ResolvableString("not_a_real_module.path:Nope") + # dunder attribute probe should not attempt import/resolve + assert hasattr(ref, "__dataclass_fields__") is False + # runtime use should still attempt resolve and fail + with pytest.raises(ValueError): + ref() + + +def test_resolvable_string_runtime_resolution_still_works(): + """Test runtime call path still resolves the callable target.""" + ref = string_utils.ResolvableString("math:sin") + assert pytest.approx(ref(0.0), rel=0.0, abs=1e-9) == 0.0 def test_case_conversion(): @@ -213,3 +252,22 @@ def test_resolve_matching_names_values_with_basic_strings_and_preserved_order(): query_names = {"a|c": 1, "b": 0, "f": 2} with pytest.raises(ValueError): _ = string_utils.resolve_matching_names_values(query_names, target_names, preserve_order=True) + + +def test_clear_resolve_matching_names_cache(): + """Clearing the cache discards previously cached entries.""" + target_names = ["a", "b", "c"] + # Populate the cache + string_utils.resolve_matching_names("a", target_names) + info_before = _resolve_matching_names_impl.cache_info() + assert info_before.currsize > 0 + + # Clear the cache + string_utils.clear_resolve_matching_names_cache() + info_after = _resolve_matching_names_impl.cache_info() + assert info_after.currsize == 0 + + # Results are still correct after clearing + idx, names = string_utils.resolve_matching_names("a", target_names) + assert idx == [0] + assert names == ["a"] diff --git a/source/isaaclab/test/utils/test_timer.py b/source/isaaclab/test/utils/test_timer.py index 8d99db3b2d80..9337b201a675 100644 --- a/source/isaaclab/test/utils/test_timer.py +++ b/source/isaaclab/test/utils/test_timer.py @@ -3,20 +3,14 @@ # # SPDX-License-Identifier: BSD-3-Clause -# NOTE: While we don't actually use the simulation app in this test, we still need to launch it -# because warp is only available in the context of a running simulation -"""Launch Isaac Sim Simulator first.""" - -from isaaclab.app import AppLauncher +import time -# launch omniverse app -simulation_app = AppLauncher(headless=True).app +import pytest +import warp as wp -"""Rest everything follows.""" +wp.init() -import time - -from isaaclab.utils.timer import Timer +from isaaclab.utils.timer import Timer, TimerError # number of decimal places to check PRECISION_PLACES = 2 @@ -24,6 +18,7 @@ def test_timer_as_object(): """Test using a `Timer` as a regular object.""" + Timer.reset() timer = Timer() timer.start() assert abs(0 - timer.time_elapsed) < 10 ** (-PRECISION_PLACES) @@ -35,7 +30,253 @@ def test_timer_as_object(): def test_timer_as_context_manager(): """Test using a `Timer` as a context manager.""" + Timer.reset() with Timer() as timer: assert abs(0 - timer.time_elapsed) < 10 ** (-PRECISION_PLACES) time.sleep(1) assert abs(1 - timer.time_elapsed) < 10 ** (-PRECISION_PLACES) + + +def test_timer_with_name_logs_to_global_dict(): + """Test that a named timer logs to the global timing_info dict with correct keys.""" + Timer.reset() + timer_name = "test_named_timer" + + with Timer(name=timer_name): + time.sleep(0.01) + + assert timer_name in Timer.timing_info + info = Timer.timing_info[timer_name] + assert "last" in info + assert "mean" in info + assert "std" in info + assert "n" in info + assert info["n"] == 1 + + +def test_get_timer_info_returns_last_elapsed(): + """Test that get_timer_info returns the last elapsed time (backward compatibility).""" + Timer.reset() + timer_name = "test_get_info" + + with Timer(name=timer_name): + time.sleep(0.02) + + last_time = Timer.get_timer_info(timer_name) + assert isinstance(last_time, float) + assert last_time >= 0.02 + assert last_time == Timer.timing_info[timer_name]["last"] + + +def test_get_timer_info_nonexistent_raises(): + """Test that get_timer_info raises TimerError for non-existent timer.""" + Timer.reset() + + with pytest.raises(TimerError): + Timer.get_timer_info("nonexistent_timer") + + +def test_get_timer_statistics(): + """Test get_timer_statistics returns correct keys and values for single measurement.""" + Timer.reset() + timer_name = "test_statistics" + + with Timer(name=timer_name): + time.sleep(0.02) + + stats = Timer.get_timer_statistics(timer_name) + assert "mean" in stats + assert "std" in stats + assert "n" in stats + assert stats["n"] == 1 + # For single measurement, std should be 0 + assert stats["std"] == 0.0 + + +def test_get_timer_statistics_nonexistent_raises(): + """Test that get_timer_statistics raises TimerError for non-existent timer.""" + Timer.reset() + + with pytest.raises(TimerError): + Timer.get_timer_statistics("nonexistent_timer") + + +def test_welford_statistics_multiple_iterations(): + """Test that Welford's algorithm correctly computes statistics over multiple iterations.""" + Timer.reset() + timer_name = "test_welford" + num_iterations = 5 + sleep_duration = 0.02 + measurements = [] + + for _ in range(num_iterations): + with Timer(name=timer_name): + time.sleep(sleep_duration) + measurements.append(Timer.timing_info[timer_name]["last"]) + + stats = Timer.get_timer_statistics(timer_name) + + # Check n incremented correctly + assert stats["n"] == num_iterations + + # Check mean is approximately correct + expected_mean = sum(measurements) / len(measurements) + assert abs(stats["mean"] - expected_mean) < 1e-9 + + # Check std is non-negative and reasonable + assert stats["std"] >= 0 + # Std should be bounded by the range of measurements + measurement_range = max(measurements) - min(measurements) + assert stats["std"] <= measurement_range + + +def test_multiple_timer_instances_same_name(): + """Test that different timer instances with same name share statistics in global dict.""" + Timer.reset() + timer_name = "shared_timer" + + # First timer instance + timer1 = Timer(name=timer_name) + timer1.start() + time.sleep(0.01) + timer1.stop() + + assert Timer.timing_info[timer_name]["n"] == 1 + first_mean = Timer.timing_info[timer_name]["mean"] + + # Second timer instance with same name + timer2 = Timer(name=timer_name) + timer2.start() + time.sleep(0.02) + timer2.stop() + + assert Timer.timing_info[timer_name]["n"] == 2 + # Mean should have changed + assert Timer.timing_info[timer_name]["mean"] != first_mean + + +def test_global_enable_toggle(): + """Test that Timer.enable globally disables all timers.""" + Timer.reset() + Timer.enable = True + + try: + # Create timer while globally disabled + Timer.enable = False + timer = Timer(name="disabled_timer") + timer.start() + time.sleep(0.01) + timer.stop() + + # Should not have recorded anything + assert "disabled_timer" not in Timer.timing_info + assert timer.total_run_time == 0.0 + finally: + Timer.enable = True + + +def test_instance_enable_toggle(): + """Test that per-instance enable=False disables a single timer.""" + Timer.reset() + + timer = Timer(name="instance_disabled", enable=False) + timer.start() + time.sleep(0.01) + timer.stop() + + assert "instance_disabled" not in Timer.timing_info + assert timer.total_run_time == 0.0 + + +def test_enable_display_output(capsys): + """Test that Timer.enable_display_output controls context manager print output.""" + Timer.reset() + Timer.enable_display_output = True + + try: + # With display enabled + with Timer(msg="visible"): + time.sleep(0.01) + captured = capsys.readouterr() + assert "visible" in captured.out + + # With display disabled + Timer.enable_display_output = False + with Timer(msg="hidden"): + time.sleep(0.01) + captured = capsys.readouterr() + assert captured.out == "" + finally: + Timer.enable_display_output = True + + +def test_time_unit_multiplier(): + """Test that time_unit correctly scales the string representation.""" + Timer.reset() + + timer = Timer(time_unit="ms") + timer.start() + time.sleep(0.01) + timer.stop() + + # total_run_time always returns seconds + assert timer.total_run_time >= 0.01 + # __str__ should show milliseconds + output = str(timer) + assert "ms" in output + # The numeric value should be >= 10 (0.01s = 10ms) + numeric_part = float(output.split()[0]) + assert numeric_part >= 10.0 + + +def test_time_unit_us_and_ns(): + """Test microsecond and nanosecond time units.""" + timer_us = Timer(time_unit="us") + timer_us.start() + time.sleep(0.001) + timer_us.stop() + assert "us" in str(timer_us) + + timer_ns = Timer(time_unit="ns") + timer_ns.start() + time.sleep(0.001) + timer_ns.stop() + assert "ns" in str(timer_ns) + + +def test_invalid_time_unit_raises(): + """Test that an invalid time_unit raises ValueError.""" + with pytest.raises(ValueError, match="Invalid time_unit"): + Timer(time_unit="hours") + + +def test_reset_specific_timer(): + """Test that Timer.reset(name) only resets the specified timer.""" + Timer.reset() + + with Timer(name="keep"): + time.sleep(0.01) + with Timer(name="remove"): + time.sleep(0.01) + + assert "keep" in Timer.timing_info + assert "remove" in Timer.timing_info + + Timer.reset("remove") + + assert "keep" in Timer.timing_info + assert "remove" not in Timer.timing_info + + +def test_get_timer_statistics_includes_last(): + """Test that get_timer_statistics includes the 'last' key.""" + Timer.reset() + + with Timer(name="stats_last"): + time.sleep(0.01) + + stats = Timer.get_timer_statistics("stats_last") + assert "last" in stats + assert stats["last"] >= 0.01 + # For single measurement, mean equals last + assert stats["mean"] == stats["last"] diff --git a/source/isaaclab/test/utils/test_wrench_composer.py b/source/isaaclab/test/utils/test_wrench_composer.py index 3cc88b3b9028..cffa927c802b 100644 --- a/source/isaaclab/test/utils/test_wrench_composer.py +++ b/source/isaaclab/test/utils/test_wrench_composer.py @@ -13,90 +13,66 @@ import torch import warp as wp -from isaaclab.assets import RigidObject +from isaaclab.test.mock_interfaces.assets import MockRigidObjectCollection from isaaclab.utils.wrench_composer import WrenchComposer -class MockAssetData: - """Mock data class that provides body link positions and quaternions.""" - - def __init__( - self, - num_envs: int, - num_bodies: int, - device: str, - link_pos: torch.Tensor | None = None, - link_quat: torch.Tensor | None = None, - ): - """Initialize mock asset data. - - Args: - num_envs: Number of environments. - num_bodies: Number of bodies. - device: Device to use. - link_pos: Optional link positions (num_envs, num_bodies, 3). Defaults to zeros. - link_quat: Optional link quaternions in (w, x, y, z) format (num_envs, num_bodies, 4). - Defaults to identity quaternion. - """ - if link_pos is not None: - self.body_link_pos_w = link_pos.to(device=device, dtype=torch.float32) - else: - self.body_link_pos_w = torch.zeros((num_envs, num_bodies, 3), dtype=torch.float32, device=device) - - if link_quat is not None: - self.body_link_quat_w = link_quat.to(device=device, dtype=torch.float32) - else: - # Identity quaternion (w, x, y, z) = (1, 0, 0, 0) - self.body_link_quat_w = torch.zeros((num_envs, num_bodies, 4), dtype=torch.float32, device=device) - self.body_link_quat_w[..., 0] = 1.0 - - -class MockRigidObject: - """Mock RigidObject that provides the minimal interface required by WrenchComposer. - - This mock enables testing WrenchComposer in isolation without requiring a full simulation setup. - It passes isinstance checks by registering as a virtual subclass of RigidObject. +def create_mock_asset( + num_envs: int, + num_bodies: int, + device: str, + link_pos: torch.Tensor | None = None, + link_quat: torch.Tensor | None = None, +) -> MockRigidObjectCollection: + """Create a MockRigidObjectCollection with optional custom link poses. + + Args: + num_envs: Number of environments. + num_bodies: Number of bodies. + device: Device to use. + link_pos: Optional link positions (num_envs, num_bodies, 3). Defaults to zeros. + link_quat: Optional link quaternions in (x, y, z, w) format (num_envs, num_bodies, 4). + Defaults to identity quaternion. + + Returns: + MockRigidObjectCollection with body_link_pose_w set. """ + mock = MockRigidObjectCollection(num_instances=num_envs, num_bodies=num_bodies, device=device) + + # Build combined pose (N, B, 7) = pos(3) + quat_xyzw(4) matching wp.transformf layout + if link_pos is None: + pos = torch.zeros(num_envs, num_bodies, 3, dtype=torch.float32) + else: + pos = link_pos.float() + + if link_quat is None: + # Identity quaternion in (x, y, z, w) format = (0, 0, 0, 1) + quat = torch.zeros(num_envs, num_bodies, 4, dtype=torch.float32) + quat[..., 3] = 1.0 + else: + quat = link_quat.float() - def __init__( - self, - num_envs: int, - num_bodies: int, - device: str, - link_pos: torch.Tensor | None = None, - link_quat: torch.Tensor | None = None, - ): - """Initialize mock rigid object. - - Args: - num_envs: Number of environments. - num_bodies: Number of bodies. - device: Device to use. - link_pos: Optional link positions (num_envs, num_bodies, 3). - link_quat: Optional link quaternions in (w, x, y, z) format (num_envs, num_bodies, 4). - """ - self.num_instances = num_envs - self.num_bodies = num_bodies - self.device = device - self.data = MockAssetData(num_envs, num_bodies, device, link_pos, link_quat) + pose = torch.cat([pos, quat], dim=-1) # (N, B, 7) + mock.data.set_body_link_pose_w(pose) + return mock # --- Helper functions for quaternion math --- -def quat_rotate_inv_np(quat_wxyz: np.ndarray, vec: np.ndarray) -> np.ndarray: +def quat_rotate_inv_np(quat_xyzw: np.ndarray, vec: np.ndarray) -> np.ndarray: """Rotate a vector by the inverse of a quaternion (numpy). Args: - quat_wxyz: Quaternion in (w, x, y, z) format. Shape: (..., 4) + quat_xyzw: Quaternion in (x, y, z, w) format. Shape: (..., 4) vec: Vector to rotate. Shape: (..., 3) Returns: Rotated vector. Shape: (..., 3) """ # Extract components - w = quat_wxyz[..., 0:1] - xyz = quat_wxyz[..., 1:4] + xyz = quat_xyzw[..., 0:3] + w = quat_xyzw[..., 3:4] # For inverse rotation, we conjugate the quaternion (negate xyz) # q^-1 * v * q = q_conj * v * q_conj^-1 for unit quaternion @@ -110,7 +86,7 @@ def quat_rotate_inv_np(quat_wxyz: np.ndarray, vec: np.ndarray) -> np.ndarray: def random_unit_quaternion_np(rng: np.random.Generator, shape: tuple) -> np.ndarray: - """Generate random unit quaternions in (w, x, y, z) format. + """Generate random unit quaternions in (x, y, z, w) format. Args: rng: Random number generator. @@ -126,11 +102,6 @@ def random_unit_quaternion_np(rng: np.random.Generator, shape: tuple) -> np.ndar return q -# Register MockRigidObject as a virtual subclass of RigidObject -# This allows isinstance(mock, RigidObject) to return True -RigidObject.register(MockRigidObject) - - @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.parametrize("num_envs", [1, 10, 100, 1000]) @pytest.mark.parametrize("num_bodies", [1, 3, 5, 10]) @@ -139,7 +110,7 @@ def test_wrench_composer_add_force(device: str, num_envs: int, num_bodies: int): rng = np.random.default_rng(seed=0) for _ in range(10): - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Initialize hand-calculated composed force hand_calculated_composed_force_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) @@ -160,11 +131,13 @@ def test_wrench_composer_add_force(device: str, num_envs: int, num_bodies: int): ) forces = wp.from_numpy(forces_np, dtype=wp.vec3f, device=device) # Add forces to wrench composer - wrench_composer.add_forces_and_torques(forces=forces, body_ids=body_ids, env_ids=env_ids) + wrench_composer.add_forces_and_torques_index(forces=forces, body_ids=body_ids, env_ids=env_ids) # Add forces to hand-calculated composed force hand_calculated_composed_force_np[env_ids_np[:, None], body_ids_np[None, :], :] += forces_np + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Get composed force from wrench composer - composed_force_np = wrench_composer.composed_force.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, hand_calculated_composed_force_np, atol=1, rtol=1e-7) @@ -176,7 +149,7 @@ def test_wrench_composer_add_torque(device: str, num_envs: int, num_bodies: int) rng = np.random.default_rng(seed=1) for _ in range(10): - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Initialize hand-calculated composed torque hand_calculated_composed_torque_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) @@ -197,24 +170,26 @@ def test_wrench_composer_add_torque(device: str, num_envs: int, num_bodies: int) ) torques = wp.from_numpy(torques_np, dtype=wp.vec3f, device=device) # Add torques to wrench composer - wrench_composer.add_forces_and_torques(torques=torques, body_ids=body_ids, env_ids=env_ids) + wrench_composer.add_forces_and_torques_index(torques=torques, body_ids=body_ids, env_ids=env_ids) # Add torques to hand-calculated composed torque hand_calculated_composed_torque_np[env_ids_np[:, None], body_ids_np[None, :], :] += torques_np + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Get composed torque from wrench composer - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, hand_calculated_composed_torque_np, atol=1, rtol=1e-7) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.parametrize("num_envs", [1, 10, 100, 1000]) @pytest.mark.parametrize("num_bodies", [1, 3, 5, 10]) -def test_add_forces_at_positons(device: str, num_envs: int, num_bodies: int): +def test_add_forces_at_positions(device: str, num_envs: int, num_bodies: int): """Test adding forces at local positions (offset from link frame).""" rng = np.random.default_rng(seed=2) for _ in range(10): # Initialize wrench composer - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Initialize hand-calculated composed force hand_calculated_composed_force_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) @@ -243,7 +218,7 @@ def test_add_forces_at_positons(device: str, num_envs: int, num_bodies: int): forces = wp.from_numpy(forces_np, dtype=wp.vec3f, device=device) positions = wp.from_numpy(positions_np, dtype=wp.vec3f, device=device) # Add forces at positions to wrench composer - wrench_composer.add_forces_and_torques( + wrench_composer.add_forces_and_torques_index( forces=forces, positions=positions, body_ids=body_ids, env_ids=env_ids ) # Add forces to hand-calculated composed force @@ -254,11 +229,13 @@ def test_add_forces_at_positons(device: str, num_envs: int, num_bodies: int): for j in range(num_bodies_np): hand_calculated_composed_torque_np[env_ids_np[i], body_ids_np[j], :] += torques_from_forces[i, j, :] + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Get composed force from wrench composer - composed_force_np = wrench_composer.composed_force.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, hand_calculated_composed_force_np, atol=1, rtol=1e-7) # Get composed torque from wrench composer - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, hand_calculated_composed_torque_np, atol=1, rtol=1e-7) @@ -269,7 +246,7 @@ def test_add_torques_at_position(device: str, num_envs: int, num_bodies: int): rng = np.random.default_rng(seed=3) for _ in range(10): - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Initialize hand-calculated composed torque hand_calculated_composed_torque_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) @@ -296,13 +273,15 @@ def test_add_torques_at_position(device: str, num_envs: int, num_bodies: int): torques = wp.from_numpy(torques_np, dtype=wp.vec3f, device=device) positions = wp.from_numpy(positions_np, dtype=wp.vec3f, device=device) # Add torques at positions to wrench composer - wrench_composer.add_forces_and_torques( + wrench_composer.add_forces_and_torques_index( torques=torques, positions=positions, body_ids=body_ids, env_ids=env_ids ) # Add torques to hand-calculated composed torque hand_calculated_composed_torque_np[env_ids_np[:, None], body_ids_np[None, :], :] += torques_np + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Get composed torque from wrench composer - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, hand_calculated_composed_torque_np, atol=1, rtol=1e-7) @@ -314,7 +293,7 @@ def test_add_forces_and_torques_at_position(device: str, num_envs: int, num_bodi rng = np.random.default_rng(seed=4) for _ in range(10): - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Initialize hand-calculated composed force and torque hand_calculated_composed_force_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) @@ -348,7 +327,7 @@ def test_add_forces_and_torques_at_position(device: str, num_envs: int, num_bodi torques = wp.from_numpy(torques_np, dtype=wp.vec3f, device=device) positions = wp.from_numpy(positions_np, dtype=wp.vec3f, device=device) # Add forces and torques at positions to wrench composer - wrench_composer.add_forces_and_torques( + wrench_composer.add_forces_and_torques_index( forces=forces, torques=torques, positions=positions, body_ids=body_ids, env_ids=env_ids ) # Add forces to hand-calculated composed force @@ -359,11 +338,13 @@ def test_add_forces_and_torques_at_position(device: str, num_envs: int, num_bodi for j in range(num_bodies_np): hand_calculated_composed_torque_np[env_ids_np[i], body_ids_np[j], :] += torques_from_forces[i, j, :] hand_calculated_composed_torque_np[env_ids_np[:, None], body_ids_np[None, :], :] += torques_np + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Get composed force from wrench composer - composed_force_np = wrench_composer.composed_force.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, hand_calculated_composed_force_np, atol=1, rtol=1e-7) # Get composed torque from wrench composer - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, hand_calculated_composed_torque_np, atol=1, rtol=1e-7) @@ -373,7 +354,7 @@ def test_add_forces_and_torques_at_position(device: str, num_envs: int, num_bodi def test_wrench_composer_reset(device: str, num_envs: int, num_bodies: int): rng = np.random.default_rng(seed=5) for _ in range(10): - mock_asset = MockRigidObject(num_envs, num_bodies, device) + mock_asset = create_mock_asset(num_envs, num_bodies, device) wrench_composer = WrenchComposer(mock_asset) # Get random number of envs and bodies and their indices num_envs_np = rng.integers(1, num_envs, endpoint=True) @@ -397,14 +378,18 @@ def test_wrench_composer_reset(device: str, num_envs: int, num_bodies: int): forces = wp.from_numpy(forces_np, dtype=wp.vec3f, device=device) torques = wp.from_numpy(torques_np, dtype=wp.vec3f, device=device) # Add forces and torques to wrench composer - wrench_composer.add_forces_and_torques(forces=forces, torques=torques, body_ids=body_ids, env_ids=env_ids) + wrench_composer.add_forces_and_torques_index(forces=forces, torques=torques, body_ids=body_ids, env_ids=env_ids) # Reset wrench composer wrench_composer.reset() - # Get composed force and torque from wrench composer - composed_force_np = wrench_composer.composed_force.numpy() - composed_torque_np = wrench_composer.composed_torque.numpy() - assert np.allclose(composed_force_np, np.zeros((num_envs, num_bodies, 3)), atol=1, rtol=1e-7) - assert np.allclose(composed_torque_np, np.zeros((num_envs, num_bodies, 3)), atol=1, rtol=1e-7) + # Check all 7 buffers are zero (5 input + 2 output) + zeros = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + assert np.allclose(wrench_composer.global_force_w.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.global_torque_w.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.global_force_at_com_w.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.local_force_b.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.local_torque_b.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.out_force_b.warp.numpy(), zeros, atol=1, rtol=1e-7) + assert np.allclose(wrench_composer.out_torque_b.warp.numpy(), zeros, atol=1, rtol=1e-7) # ============================================================================ @@ -425,7 +410,7 @@ def test_global_forces_with_rotation(device: str, num_envs: int, num_bodies: int link_quat_torch = torch.from_numpy(link_quat_np) # Create mock asset with custom quaternions - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random global forces for all envs and bodies @@ -433,13 +418,22 @@ def test_global_forces_with_rotation(device: str, num_envs: int, num_bodies: int forces_global = wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device) # Apply global forces - wrench_composer.add_forces_and_torques(forces=forces_global, is_global=True) + wrench_composer.add_forces_and_torques_index(forces=forces_global, is_global=True) # Compute expected local forces by rotating global forces by inverse quaternion expected_forces_local = quat_rotate_inv_np(link_quat_np, forces_global_np) + # Check raw global buffer has the global forces + global_force_np = wrench_composer.global_force_at_com_w.numpy() + assert np.allclose(global_force_np, forces_global_np, atol=1e-4, rtol=1e-5), ( + f"Global force buffer mismatch.\nExpected:\n{forces_global_np}\nGot:\n{global_force_np}" + ) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + # Verify - composed_force_np = wrench_composer.composed_force.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, expected_forces_local, atol=1e-4, rtol=1e-5), ( f"Global force rotation failed.\nExpected:\n{expected_forces_local}\nGot:\n{composed_force_np}" ) @@ -458,7 +452,7 @@ def test_global_torques_with_rotation(device: str, num_envs: int, num_bodies: in link_quat_torch = torch.from_numpy(link_quat_np) # Create mock asset with custom quaternions - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random global torques @@ -466,13 +460,22 @@ def test_global_torques_with_rotation(device: str, num_envs: int, num_bodies: in torques_global = wp.from_numpy(torques_global_np, dtype=wp.vec3f, device=device) # Apply global torques - wrench_composer.add_forces_and_torques(torques=torques_global, is_global=True) + wrench_composer.add_forces_and_torques_index(torques=torques_global, is_global=True) # Compute expected local torques expected_torques_local = quat_rotate_inv_np(link_quat_np, torques_global_np) + # Check raw global buffer has the global torques + global_torque_np = wrench_composer.global_torque_w.numpy() + assert np.allclose(global_torque_np, torques_global_np, atol=1e-4, rtol=1e-5), ( + f"Global torque buffer mismatch.\nExpected:\n{torques_global_np}\nGot:\n{global_torque_np}" + ) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + # Verify - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, expected_torques_local, atol=1e-4, rtol=1e-5), ( f"Global torque rotation failed.\nExpected:\n{expected_torques_local}\nGot:\n{composed_torque_np}" ) @@ -493,7 +496,7 @@ def test_global_forces_at_global_position(device: str, num_envs: int, num_bodies link_quat_torch = torch.from_numpy(link_quat_np) # Create mock asset - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random global forces and positions @@ -503,32 +506,40 @@ def test_global_forces_at_global_position(device: str, num_envs: int, num_bodies positions_global = wp.from_numpy(positions_global_np, dtype=wp.vec3f, device=device) # Apply global forces at global positions - wrench_composer.add_forces_and_torques(forces=forces_global, positions=positions_global, is_global=True) + wrench_composer.add_forces_and_torques_index(forces=forces_global, positions=positions_global, is_global=True) # Compute expected results: # 1. Force in local frame = quat_rotate_inv(link_quat, global_force) expected_forces_local = quat_rotate_inv_np(link_quat_np, forces_global_np) - # 2. Position offset in local frame = global_position - link_position (then used for torque) + # 2. Torque about CoM in world frame = cross(P_global - link_pos, F_global) + # Then rotate to body frame position_offset_global = positions_global_np - link_pos_np - - # 3. Torque = skew(position_offset_global) @ force_global, then rotate to local expected_torques_local = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) for i in range(num_envs): for j in range(num_bodies): - pos_offset = position_offset_global[i, j] # global frame offset - force_local = expected_forces_local[i, j] # local frame force - # skew(pos_offset) @ force_local - expected_torques_local[i, j] = np.cross(pos_offset, force_local) + torque_w = np.cross(position_offset_global[i, j], forces_global_np[i, j]) + expected_torques_local[i, j] = quat_rotate_inv_np( + link_quat_np[i : i + 1, j : j + 1], torque_w.reshape(1, 1, 3) + )[0, 0] + + # Check raw global force buffer has the global forces + global_force_np = wrench_composer.global_force_w.numpy() + assert np.allclose(global_force_np, forces_global_np, atol=1e-4, rtol=1e-5), ( + f"Global force buffer mismatch.\nExpected:\n{forces_global_np}\nGot:\n{global_force_np}" + ) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() # Verify forces - composed_force_np = wrench_composer.composed_force.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, expected_forces_local, atol=1e-3, rtol=1e-4), ( f"Global force at position failed.\nExpected forces:\n{expected_forces_local}\nGot:\n{composed_force_np}" ) # Verify torques - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_torque_np, expected_torques_local, atol=1e-3, rtol=1e-4), ( f"Global force at position failed.\nExpected torques:\n{expected_torques_local}\nGot:\n{composed_torque_np}" ) @@ -541,8 +552,8 @@ def test_local_vs_global_identity_quaternion(device: str): num_envs, num_bodies = 10, 5 # Create mock with identity pose (default) - mock_asset_local = MockRigidObject(num_envs, num_bodies, device) - mock_asset_global = MockRigidObject(num_envs, num_bodies, device) + mock_asset_local = create_mock_asset(num_envs, num_bodies, device) + mock_asset_global = create_mock_asset(num_envs, num_bodies, device) wrench_composer_local = WrenchComposer(mock_asset_local) wrench_composer_global = WrenchComposer(mock_asset_global) @@ -554,20 +565,24 @@ def test_local_vs_global_identity_quaternion(device: str): torques = wp.from_numpy(torques_np, dtype=wp.vec3f, device=device) # Apply as local - wrench_composer_local.add_forces_and_torques(forces=forces, torques=torques, is_global=False) + wrench_composer_local.add_forces_and_torques_index(forces=forces, torques=torques, is_global=False) # Apply as global (should be same with identity quaternion) - wrench_composer_global.add_forces_and_torques(forces=forces, torques=torques, is_global=True) + wrench_composer_global.add_forces_and_torques_index(forces=forces, torques=torques, is_global=True) + + # Compose to body frame before checking output + wrench_composer_local.compose_to_body_frame() + wrench_composer_global.compose_to_body_frame() # Results should be identical assert np.allclose( - wrench_composer_local.composed_force.numpy(), - wrench_composer_global.composed_force.numpy(), + wrench_composer_local.out_force_b.warp.numpy(), + wrench_composer_global.out_force_b.warp.numpy(), atol=1e-6, ) assert np.allclose( - wrench_composer_local.composed_torque.numpy(), - wrench_composer_global.composed_torque.numpy(), + wrench_composer_local.out_torque_b.warp.numpy(), + wrench_composer_global.out_torque_b.warp.numpy(), atol=1e-6, ) @@ -577,26 +592,29 @@ def test_90_degree_rotation_global_force(device: str): """Test global force with a known 90-degree rotation for easy verification.""" num_envs, num_bodies = 1, 1 - # 90-degree rotation around Z-axis: (w, x, y, z) = (cos(45°), 0, 0, sin(45°)) + # 90-degree rotation around Z-axis: (x, y, z, w) = (0, 0, sin(45°), cos(45°)) # This rotates X -> Y, Y -> -X angle = np.pi / 2 - link_quat_np = np.array([[[[np.cos(angle / 2), 0, 0, np.sin(angle / 2)]]]], dtype=np.float32).reshape(1, 1, 4) + link_quat_np = np.array([[[[0, 0, np.sin(angle / 2), np.cos(angle / 2)]]]], dtype=np.float32).reshape(1, 1, 4) link_quat_torch = torch.from_numpy(link_quat_np) - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Apply force in global +X direction force_global = np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32) force_wp = wp.from_numpy(force_global, dtype=wp.vec3f, device=device) - wrench_composer.add_forces_and_torques(forces=force_wp, is_global=True) + wrench_composer.add_forces_and_torques_index(forces=force_wp, is_global=True) # Expected: After inverse rotation (rotate by -90° around Z), X becomes -Y # Actually, inverse rotation of +90° around Z applied to (1,0,0) gives (0,-1,0) expected_force_local = np.array([[[0.0, -1.0, 0.0]]], dtype=np.float32) - composed_force_np = wrench_composer.composed_force.numpy() + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, expected_force_local, atol=1e-5), ( f"90-degree rotation test failed.\nExpected:\n{expected_force_local}\nGot:\n{composed_force_np}" ) @@ -612,7 +630,7 @@ def test_composition_mixed_local_and_global(device: str): link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) link_quat_torch = torch.from_numpy(link_quat_np) - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random local and global forces @@ -623,16 +641,25 @@ def test_composition_mixed_local_and_global(device: str): forces_global = wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device) # Add local forces first - wrench_composer.add_forces_and_torques(forces=forces_local, is_global=False) + wrench_composer.add_forces_and_torques_index(forces=forces_local, is_global=False) # Add global forces - wrench_composer.add_forces_and_torques(forces=forces_global, is_global=True) + wrench_composer.add_forces_and_torques_index(forces=forces_global, is_global=True) # Expected: local forces stay as-is, global forces get rotated, then sum global_forces_in_local = quat_rotate_inv_np(link_quat_np, forces_global_np) expected_total = forces_local_np + global_forces_in_local - composed_force_np = wrench_composer.composed_force.numpy() + # Check raw buffer properties + local_force_np = wrench_composer.local_force_b.numpy() + assert np.allclose(local_force_np, forces_local_np, atol=1e-4, rtol=1e-5) + global_force_at_com_np = wrench_composer.global_force_at_com_w.numpy() + assert np.allclose(global_force_at_com_np, forces_global_np, atol=1e-4, rtol=1e-5) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + + composed_force_np = wrench_composer.out_force_b.warp.numpy() assert np.allclose(composed_force_np, expected_total, atol=1e-4, rtol=1e-5), ( f"Mixed local/global composition failed.\nExpected:\n{expected_total}\nGot:\n{composed_force_np}" ) @@ -652,7 +679,7 @@ def test_local_forces_at_local_position(device: str, num_envs: int, num_bodies: link_pos_torch = torch.from_numpy(link_pos_np) link_quat_torch = torch.from_numpy(link_quat_np) - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random local forces and local positions (offsets) @@ -662,15 +689,22 @@ def test_local_forces_at_local_position(device: str, num_envs: int, num_bodies: positions_local = wp.from_numpy(positions_local_np, dtype=wp.vec3f, device=device) # Apply local forces at local positions - wrench_composer.add_forces_and_torques(forces=forces_local, positions=positions_local, is_global=False) + wrench_composer.add_forces_and_torques_index(forces=forces_local, positions=positions_local, is_global=False) # Expected: forces stay as-is, torque = cross(position, force) expected_forces = forces_local_np expected_torques = np.cross(positions_local_np, forces_local_np) + # Check raw local buffer + local_force_np = wrench_composer.local_force_b.numpy() + assert np.allclose(local_force_np, expected_forces, atol=1e-4, rtol=1e-5) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + # Verify - composed_force_np = wrench_composer.composed_force.numpy() - composed_torque_np = wrench_composer.composed_torque.numpy() + composed_force_np = wrench_composer.out_force_b.warp.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_force_np, expected_forces, atol=1e-4, rtol=1e-5) assert np.allclose(composed_torque_np, expected_torques, atol=1e-4, rtol=1e-5) @@ -688,7 +722,7 @@ def test_global_force_at_link_origin_no_torque(device: str): link_pos_torch = torch.from_numpy(link_pos_np) link_quat_torch = torch.from_numpy(link_quat_np) - mock_asset = MockRigidObject(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) wrench_composer = WrenchComposer(mock_asset) # Generate random global forces @@ -699,14 +733,973 @@ def test_global_force_at_link_origin_no_torque(device: str): positions_at_link = wp.from_numpy(link_pos_np, dtype=wp.vec3f, device=device) # Apply global forces at link origin - wrench_composer.add_forces_and_torques(forces=forces_global, positions=positions_at_link, is_global=True) + wrench_composer.add_forces_and_torques_index(forces=forces_global, positions=positions_at_link, is_global=True) # Expected: force rotated to local, torque = 0 (since position offset is zero) expected_forces = quat_rotate_inv_np(link_quat_np, forces_global_np) expected_torques = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) - composed_force_np = wrench_composer.composed_force.numpy() - composed_torque_np = wrench_composer.composed_torque.numpy() + # Check raw global force buffer + global_force_np = wrench_composer.global_force_w.numpy() + assert np.allclose(global_force_np, forces_global_np, atol=1e-4, rtol=1e-5) + + # Compose to body frame before checking output + wrench_composer.compose_to_body_frame() + + composed_force_np = wrench_composer.out_force_b.warp.numpy() + composed_torque_np = wrench_composer.out_torque_b.warp.numpy() assert np.allclose(composed_force_np, expected_forces, atol=1e-4, rtol=1e-5) assert np.allclose(composed_torque_np, expected_torques, atol=1e-4, rtol=1e-5) + + +# ============================================================================ +# add_raw_buffers_from Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("num_envs", [1, 10, 100]) +@pytest.mark.parametrize("num_bodies", [1, 3, 5]) +def test_add_raw_buffers_from(device: str, num_envs: int, num_bodies: int): + """Test that add_raw_buffers_from merges all five input buffers correctly.""" + rng = np.random.default_rng(seed=20) + + # Create two composers with random link poses + link_pos_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_pos_torch = torch.from_numpy(link_pos_np) + link_quat_torch = torch.from_numpy(link_quat_np) + + mock_a = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + mock_b = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + + composer_a = WrenchComposer(mock_a) + composer_b = WrenchComposer(mock_b) + + # Populate composer_a with local forces at positions + forces_local_a_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_local_a_np = rng.uniform(-5.0, 5.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer_a.add_forces_and_torques_index( + forces=wp.from_numpy(forces_local_a_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_local_a_np, dtype=wp.vec3f, device=device), + is_global=False, + ) + + # Populate composer_b with global forces at global positions + forces_global_b_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_global_b_np = rng.uniform(-5.0, 5.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer_b.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_b_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_global_b_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Merge b into a + composer_a.add_raw_buffers_from(composer_b) + + # Build a reference composer that receives both calls directly + mock_ref = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + composer_ref = WrenchComposer(mock_ref) + composer_ref.add_forces_and_torques_index( + forces=wp.from_numpy(forces_local_a_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_local_a_np, dtype=wp.vec3f, device=device), + is_global=False, + ) + composer_ref.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_b_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_global_b_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Compose both and compare + composer_a.compose_to_body_frame() + composer_ref.compose_to_body_frame() + + assert np.allclose( + composer_a.out_force_b.warp.numpy(), composer_ref.out_force_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "add_raw_buffers_from force mismatch vs direct accumulation" + assert np.allclose( + composer_a.out_torque_b.warp.numpy(), composer_ref.out_torque_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "add_raw_buffers_from torque mismatch vs direct accumulation" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_add_raw_buffers_from_inactive_is_noop(device: str): + """Test that add_raw_buffers_from is a no-op when the source composer is inactive.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=21) + + mock_a = create_mock_asset(num_envs, num_bodies, device) + mock_b = create_mock_asset(num_envs, num_bodies, device) + composer_a = WrenchComposer(mock_a) + composer_b = WrenchComposer(mock_b) + + # Populate composer_a with some forces + forces_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer_a.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + + # composer_b is inactive (never written to) + assert not composer_b.active + + # Snapshot composer_a's local buffer before merge + local_force_before = composer_a.local_force_b.numpy().copy() + + # Merge inactive composer_b into composer_a -- should be a no-op + composer_a.add_raw_buffers_from(composer_b) + + assert np.allclose(composer_a.local_force_b.numpy(), local_force_before, atol=1e-7) + + +# ============================================================================ +# Mask-based API Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("num_envs", [1, 10, 100]) +@pytest.mark.parametrize("num_bodies", [1, 3, 5]) +def test_add_forces_mask(device: str, num_envs: int, num_bodies: int): + """Test that add_forces_and_torques_mask produces the same result as the index variant.""" + rng = np.random.default_rng(seed=30) + + for _ in range(5): + # Random subset selection + env_select = rng.choice([True, False], size=num_envs, replace=True) + body_select = rng.choice([True, False], size=num_bodies, replace=True) + # Ensure at least one env and body are selected + env_select[0] = True + body_select[0] = True + + env_ids_np = np.where(env_select)[0].astype(np.int32) + body_ids_np = np.where(body_select)[0].astype(np.int32) + env_mask_np = env_select.astype(np.bool_) + body_mask_np = body_select.astype(np.bool_) + + # Random forces for the full grid (mask variant takes full-sized arrays) + forces_full_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + + # Index-based composer + mock_idx = create_mock_asset(num_envs, num_bodies, device) + composer_idx = WrenchComposer(mock_idx) + # Extract the subset for index API + forces_subset_np = forces_full_np[env_ids_np[:, None], body_ids_np[None, :], :] + composer_idx.add_forces_and_torques_index( + forces=wp.from_numpy(forces_subset_np, dtype=wp.vec3f, device=device), + env_ids=wp.from_numpy(env_ids_np, dtype=wp.int32, device=device), + body_ids=wp.from_numpy(body_ids_np, dtype=wp.int32, device=device), + ) + + # Mask-based composer + mock_mask = create_mock_asset(num_envs, num_bodies, device) + composer_mask = WrenchComposer(mock_mask) + composer_mask.add_forces_and_torques_mask( + forces=wp.from_numpy(forces_full_np, dtype=wp.vec3f, device=device), + env_mask=wp.from_numpy(env_mask_np, dtype=wp.bool, device=device), + body_mask=wp.from_numpy(body_mask_np, dtype=wp.bool, device=device), + ) + + # Compose both + composer_idx.compose_to_body_frame() + composer_mask.compose_to_body_frame() + + assert np.allclose( + composer_idx.out_force_b.warp.numpy(), composer_mask.out_force_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), f"Mask vs index force mismatch (envs={num_envs}, bodies={num_bodies})" + assert np.allclose( + composer_idx.out_torque_b.warp.numpy(), composer_mask.out_torque_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), f"Mask vs index torque mismatch (envs={num_envs}, bodies={num_bodies})" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("num_envs", [1, 10, 100]) +@pytest.mark.parametrize("num_bodies", [1, 3, 5]) +def test_add_forces_mask_global(device: str, num_envs: int, num_bodies: int): + """Test mask-based API with global forces and positions.""" + rng = np.random.default_rng(seed=31) + + # Random link poses + link_pos_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_pos_torch = torch.from_numpy(link_pos_np) + link_quat_torch = torch.from_numpy(link_quat_np) + + # Select all envs and bodies to keep comparison simple + forces_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + + # Index-based + mock_idx = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + composer_idx = WrenchComposer(mock_idx) + composer_idx.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Mask-based (all-True masks) + mock_mask = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + composer_mask = WrenchComposer(mock_mask) + env_mask = wp.from_numpy(np.ones(num_envs, dtype=np.bool_), dtype=wp.bool, device=device) + body_mask = wp.from_numpy(np.ones(num_bodies, dtype=np.bool_), dtype=wp.bool, device=device) + composer_mask.add_forces_and_torques_mask( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + env_mask=env_mask, + body_mask=body_mask, + is_global=True, + ) + + composer_idx.compose_to_body_frame() + composer_mask.compose_to_body_frame() + + assert np.allclose( + composer_idx.out_force_b.warp.numpy(), composer_mask.out_force_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "Mask vs index global force mismatch" + assert np.allclose( + composer_idx.out_torque_b.warp.numpy(), composer_mask.out_torque_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "Mask vs index global torque mismatch" + + +# ============================================================================ +# set_forces_and_torques_index Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_forces_overwrites_previous_add(device: str): + """Test that set_forces_and_torques_index clears previously accumulated values.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=40) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # First accumulate some forces via add + forces_a_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_a_np, dtype=wp.vec3f, device=device), + ) + + # Now set new forces -- should replace, not accumulate + forces_b_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.set_forces_and_torques_index( + forces=wp.from_numpy(forces_b_np, dtype=wp.vec3f, device=device), + ) + + composer.compose_to_body_frame() + + # Output should match forces_b only (forces_a should be gone) + assert np.allclose(composer.out_force_b.warp.numpy(), forces_b_np, atol=1e-4, rtol=1e-5), ( + "set_forces did not clear previous add" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_forces_clears_targeted_envs_only(device: str): + """Test that set_forces_and_torques_index clears only the targeted environments.""" + num_envs, num_bodies = 4, 3 + rng = np.random.default_rng(seed=41) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # Add global forces at positions (populates global_force_w and global_torque_w) + forces_global_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-5.0, 5.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Also add local torques (populates local_torque_b) + torques_local_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + torques=wp.from_numpy(torques_local_np, dtype=wp.vec3f, device=device), + is_global=False, + ) + + # Now set local forces for envs [0, 2] -- should clear only envs 0, 2 + env_ids_np = np.array([0, 2], dtype=np.int32) + kept_env_ids = np.array([1, 3], dtype=np.int32) + forces_new_np = rng.uniform(-50.0, 50.0, (2, num_bodies, 3)).astype(np.float32) + composer.set_forces_and_torques_index( + forces=wp.from_numpy(forces_new_np, dtype=wp.vec3f, device=device), + env_ids=wp.from_numpy(env_ids_np, dtype=wp.int32, device=device), + is_global=False, + ) + + zeros = np.zeros((num_bodies, 3), dtype=np.float32) + + # Targeted envs [0, 2]: all buffers cleared, then local_force_b written + for eid in env_ids_np: + assert np.allclose(composer.global_force_w.numpy()[eid], zeros, atol=1e-7), ( + f"global_force_w not cleared for targeted env {eid}" + ) + assert np.allclose(composer.global_torque_w.numpy()[eid], zeros, atol=1e-7), ( + f"global_torque_w not cleared for targeted env {eid}" + ) + assert np.allclose(composer.local_torque_b.numpy()[eid], zeros, atol=1e-7), ( + f"local_torque_b not cleared for targeted env {eid}" + ) + + # Non-targeted envs [1, 3]: should retain original values + for eid in kept_env_ids: + assert np.allclose(composer.global_force_w.numpy()[eid], forces_global_np[eid], atol=1e-4, rtol=1e-5), ( + f"global_force_w changed for non-targeted env {eid}" + ) + assert np.allclose(composer.local_torque_b.numpy()[eid], torques_local_np[eid], atol=1e-4, rtol=1e-5), ( + f"local_torque_b changed for non-targeted env {eid}" + ) + + # local_force_b should have new values at env_ids [0, 2], zeros at [1, 3] + expected_local_force = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + expected_local_force[env_ids_np] = forces_new_np + assert np.allclose(composer.local_force_b.numpy(), expected_local_force, atol=1e-4, rtol=1e-5), ( + "local_force_b has wrong values after set" + ) + + +# ============================================================================ +# Partial Reset Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_partial_reset_zeros_only_specified_envs(device: str): + """Test that partial reset zeros only the specified environments and leaves others intact.""" + num_envs, num_bodies = 8, 3 + rng = np.random.default_rng(seed=50) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # Populate all envs with local forces + forces_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + + # Also add global forces to populate more buffers + forces_global_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Partial reset: only envs [1, 3, 5] + reset_env_ids = np.array([1, 3, 5], dtype=np.int32) + kept_env_ids = np.array([0, 2, 4, 6, 7], dtype=np.int32) + composer.reset(env_ids=wp.from_numpy(reset_env_ids, dtype=wp.int32, device=device)) + + # Reset envs should be zeroed across all input buffers + zeros = np.zeros((num_bodies, 3), dtype=np.float32) + local_force = composer.local_force_b.numpy() + global_force_at_com = composer.global_force_at_com_w.numpy() + for eid in reset_env_ids: + assert np.allclose(local_force[eid], zeros, atol=1e-7), f"local_force_b not zeroed for env {eid}" + assert np.allclose(global_force_at_com[eid], zeros, atol=1e-7), ( + f"global_force_at_com_w not zeroed for env {eid}" + ) + + # Kept envs should retain their values + for eid in kept_env_ids: + assert np.allclose(local_force[eid], forces_np[eid], atol=1e-4, rtol=1e-5), ( + f"local_force_b changed for non-reset env {eid}" + ) + assert np.allclose(global_force_at_com[eid], forces_global_np[eid], atol=1e-4, rtol=1e-5), ( + f"global_force_at_com_w changed for non-reset env {eid}" + ) + + # Flags: _active should still be True, _dirty should be True + assert composer.active + assert composer._dirty + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_full_reset_clears_active_flag(device: str): + """Test that full reset (no args) clears the _active flag.""" + num_envs, num_bodies = 4, 2 + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + forces_np = np.ones((num_envs, num_bodies, 3), dtype=np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + assert composer.active + + composer.reset() + assert not composer.active + assert not composer._dirty + + +# ============================================================================ +# Deprecated API Backward-Compatibility Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composed_force_emits_deprecation_warning(device: str): + """Test that accessing composed_force emits a DeprecationWarning.""" + num_envs, num_bodies = 2, 1 + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + forces_np = np.array([[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]]], dtype=np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + + with pytest.warns(DeprecationWarning, match="composed_force.*is deprecated"): + result = composer.composed_force + + # Should return the same data as out_force_b + assert np.allclose(result.warp.numpy(), composer.out_force_b.warp.numpy(), atol=1e-7) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composed_torque_emits_deprecation_warning(device: str): + """Test that accessing composed_torque emits a DeprecationWarning.""" + num_envs, num_bodies = 2, 1 + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + torques_np = np.array([[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]]], dtype=np.float32) + composer.add_forces_and_torques_index( + torques=wp.from_numpy(torques_np, dtype=wp.vec3f, device=device), + ) + + with pytest.warns(DeprecationWarning, match="composed_torque.*is deprecated"): + result = composer.composed_torque + + assert np.allclose(result.warp.numpy(), composer.out_torque_b.warp.numpy(), atol=1e-7) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_deprecated_add_forces_and_torques_emits_warning(device: str): + """Test that the deprecated add_forces_and_torques wrapper emits a warning and works.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=52) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + forces_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + + with pytest.warns(DeprecationWarning, match="add_forces_and_torques.*is deprecated"): + composer.add_forces_and_torques( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + + composer.compose_to_body_frame() + assert np.allclose(composer.out_force_b.warp.numpy(), forces_np, atol=1e-4, rtol=1e-5) + + +# ============================================================================ +# set_forces_and_torques_mask Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_forces_mask_overwrites_previous_add(device: str): + """Test that set_forces_and_torques_mask clears previously accumulated values.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=60) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # Accumulate some forces via add + forces_a_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_a_np, dtype=wp.vec3f, device=device), + ) + + # Now set new forces via mask -- should replace, not accumulate + forces_b_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.set_forces_and_torques_mask( + forces=wp.from_numpy(forces_b_np, dtype=wp.vec3f, device=device), + ) + + composer.compose_to_body_frame() + + # Output should match forces_b only (forces_a should be gone) + assert np.allclose(composer.out_force_b.warp.numpy(), forces_b_np, atol=1e-4, rtol=1e-5), ( + "set_forces_and_torques_mask did not clear previous add" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_forces_mask_clears_targeted_envs_only(device: str): + """Test that set_forces_and_torques_mask clears only the masked environments.""" + num_envs, num_bodies = 4, 3 + rng = np.random.default_rng(seed=61) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # Populate global buffers for all envs + forces_global_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-5.0, 5.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Also add local torques for all envs + torques_local_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + torques=wp.from_numpy(torques_local_np, dtype=wp.vec3f, device=device), + is_global=False, + ) + + # Set local forces via mask for envs [0, 2] -- should clear only masked envs + env_mask_np = np.array([True, False, True, False], dtype=np.bool_) + body_mask_np = np.array([True, True, False], dtype=np.bool_) + forces_new_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.set_forces_and_torques_mask( + forces=wp.from_numpy(forces_new_np, dtype=wp.vec3f, device=device), + env_mask=wp.from_numpy(env_mask_np, dtype=wp.bool, device=device), + body_mask=wp.from_numpy(body_mask_np, dtype=wp.bool, device=device), + is_global=False, + ) + + zeros = np.zeros((num_bodies, 3), dtype=np.float32) + + # Masked envs [0, 2]: all buffers cleared by reset, then local_force_b written where body_mask is True + for eid in [0, 2]: + assert np.allclose(composer.global_force_w.numpy()[eid], zeros, atol=1e-7), ( + f"global_force_w not cleared for masked env {eid}" + ) + assert np.allclose(composer.global_torque_w.numpy()[eid], zeros, atol=1e-7), ( + f"global_torque_w not cleared for masked env {eid}" + ) + assert np.allclose(composer.local_torque_b.numpy()[eid], zeros, atol=1e-7), ( + f"local_torque_b not cleared for masked env {eid}" + ) + + # Non-masked envs [1, 3]: should retain original values + for eid in [1, 3]: + assert np.allclose(composer.global_force_w.numpy()[eid], forces_global_np[eid], atol=1e-4, rtol=1e-5), ( + f"global_force_w changed for non-masked env {eid}" + ) + assert np.allclose(composer.local_torque_b.numpy()[eid], torques_local_np[eid], atol=1e-4, rtol=1e-5), ( + f"local_torque_b changed for non-masked env {eid}" + ) + + # local_force_b should have new values where both masks are True, zeros for masked envs otherwise + expected_local_force = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + for e in range(num_envs): + for b in range(num_bodies): + if env_mask_np[e] and body_mask_np[b]: + expected_local_force[e, b] = forces_new_np[e, b] + assert np.allclose(composer.local_force_b.numpy(), expected_local_force, atol=1e-4, rtol=1e-5), ( + "local_force_b has wrong values after mask set" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_forces_mask_matches_set_forces_index(device: str): + """Test that set_forces_and_torques_mask produces the same result as the index variant.""" + num_envs, num_bodies = 6, 3 + rng = np.random.default_rng(seed=62) + + # Random link poses + link_pos_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_pos_torch = torch.from_numpy(link_pos_np) + link_quat_torch = torch.from_numpy(link_quat_np) + + # Use all envs/bodies to compare + forces_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + + # Index-based + mock_idx = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + composer_idx = WrenchComposer(mock_idx) + composer_idx.set_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Mask-based (all-True) + mock_mask = create_mock_asset(num_envs, num_bodies, device, link_pos=link_pos_torch, link_quat=link_quat_torch) + composer_mask = WrenchComposer(mock_mask) + composer_mask.set_forces_and_torques_mask( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + composer_idx.compose_to_body_frame() + composer_mask.compose_to_body_frame() + + assert np.allclose( + composer_idx.out_force_b.warp.numpy(), composer_mask.out_force_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "set mask vs index force mismatch" + assert np.allclose( + composer_idx.out_torque_b.warp.numpy(), composer_mask.out_torque_b.warp.numpy(), atol=1e-4, rtol=1e-5 + ), "set mask vs index torque mismatch" + + +# ============================================================================ +# Lazy Composition (_ensure_composed) Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_out_force_b_triggers_lazy_composition(device: str): + """Test that accessing out_force_b without explicit compose_to_body_frame still returns correct results.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=70) + + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_quat_torch = torch.from_numpy(link_quat_np) + + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) + composer = WrenchComposer(mock_asset) + + forces_global_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_global_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Do NOT call compose_to_body_frame -- rely on lazy composition + expected_forces_local = quat_rotate_inv_np(link_quat_np, forces_global_np) + composed_force_np = composer.out_force_b.warp.numpy() + + assert np.allclose(composed_force_np, expected_forces_local, atol=1e-4, rtol=1e-5), ( + "Lazy composition via out_force_b failed" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_out_torque_b_triggers_lazy_composition(device: str): + """Test that accessing out_torque_b without explicit compose_to_body_frame still returns correct results.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=71) + + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_quat_torch = torch.from_numpy(link_quat_np) + + mock_asset = create_mock_asset(num_envs, num_bodies, device, link_quat=link_quat_torch) + composer = WrenchComposer(mock_asset) + + torques_global_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + torques=wp.from_numpy(torques_global_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # Do NOT call compose_to_body_frame -- rely on lazy composition + expected_torques_local = quat_rotate_inv_np(link_quat_np, torques_global_np) + composed_torque_np = composer.out_torque_b.warp.numpy() + + assert np.allclose(composed_torque_np, expected_torques_local, atol=1e-4, rtol=1e-5), ( + "Lazy composition via out_torque_b failed" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_lazy_composition_tracks_dirty_flag(device: str): + """Test that the dirty flag is correctly managed through add/compose/add cycles.""" + num_envs, num_bodies = 2, 1 + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # Initially clean + assert not composer._dirty + + # After add, dirty + forces_np = np.ones((num_envs, num_bodies, 3), dtype=np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + assert composer._dirty + + # After accessing out_force_b, clean (lazy compose happened) + _ = composer.out_force_b + assert not composer._dirty + + # After another add, dirty again + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + assert composer._dirty + + # Accessing out_torque_b also triggers composition + _ = composer.out_torque_b + assert not composer._dirty + + # Verify accumulated result (2x forces) + expected = 2.0 * forces_np + assert np.allclose(composer.out_force_b.warp.numpy(), expected, atol=1e-4, rtol=1e-5) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_compose_is_idempotent(device: str): + """Calling compose_to_body_frame twice without intervening writes produces the same result.""" + rng = np.random.default_rng(seed=456) + num_envs, num_bodies = 4, 3 + + # Non-trivial link pose so the rotation path is exercised + link_pos_np = rng.uniform(-2, 2, (num_envs, num_bodies, 3)).astype(np.float32) + link_quat_np = rng.standard_normal((num_envs, num_bodies, 4)).astype(np.float32) + link_quat_np /= np.linalg.norm(link_quat_np, axis=-1, keepdims=True) + + mock_asset = create_mock_asset( + num_envs, + num_bodies, + device, + link_pos=torch.from_numpy(link_pos_np), + link_quat=torch.from_numpy(link_quat_np), + ) + composer = WrenchComposer(mock_asset) + + # Add global forces with positions (exercises cross-product torque path) + forces_np = rng.uniform(-5, 5, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-1, 1, (num_envs, num_bodies, 3)).astype(np.float32) + torques_np = rng.uniform(-3, 3, (num_envs, num_bodies, 3)).astype(np.float32) + + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + torques=wp.from_numpy(torques_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + # First compose + composer.compose_to_body_frame() + force_first = composer.out_force_b.warp.numpy().copy() + torque_first = composer.out_torque_b.warp.numpy().copy() + + # Second compose (no writes in between) + composer.compose_to_body_frame() + force_second = composer.out_force_b.warp.numpy() + torque_second = composer.out_torque_b.warp.numpy() + + np.testing.assert_array_equal(force_first, force_second) + np.testing.assert_array_equal(torque_first, torque_second) + + +# ============================================================================ +# CoM Offset from Link Origin Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_with_com_offset(device: str): + """Test that torque correction uses CoM position, not link position, when they differ.""" + num_envs, num_bodies = 2, 1 + + # Link at origin, CoM offset by [1, 0, 0] + link_pos_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + link_quat_np = np.zeros((num_envs, num_bodies, 4), dtype=np.float32) + link_quat_np[..., 3] = 1.0 # identity quaternion (xyzw) + + com_pos_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + com_pos_np[..., 0] = 1.0 # CoM at [1, 0, 0] + + mock_asset = create_mock_asset( + num_envs, + num_bodies, + device, + link_pos=torch.from_numpy(link_pos_np), + link_quat=torch.from_numpy(link_quat_np), + ) + # Set CoM pose separately (pos=[1,0,0], quat=identity) + com_pose = torch.cat([torch.from_numpy(com_pos_np), torch.from_numpy(link_quat_np)], dim=-1) + mock_asset.data.set_body_com_pose_w(com_pose) + + composer = WrenchComposer(mock_asset) + + # Apply global force [0, 0, 10] at position [0, 0, 0] (world origin) + forces_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + forces_np[..., 2] = 10.0 + positions_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + composer.compose_to_body_frame() + + # With identity quaternion: + # torque_w = cross(P, F) - cross(com, F) = cross([0,0,0], [0,0,10]) - cross([1,0,0], [0,0,10]) + # = [0,0,0] - [0*10-0*0, 0*0-1*10, 1*0-0*0] = [0,0,0] - [0, -10, 0] = [0, 10, 0] + # In body frame (identity rotation): [0, 10, 0] + expected_torque = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + expected_torque[..., 1] = 10.0 + + actual_torque = composer.out_torque_b.warp.numpy() + assert np.allclose(actual_torque, expected_torque, atol=1e-4, rtol=1e-5), ( + f"CoM offset torque correction failed.\nExpected:\n{expected_torque}\nGot:\n{actual_torque}" + ) + + # Force should be unchanged (identity rotation) + assert np.allclose(composer.out_force_b.warp.numpy(), forces_np, atol=1e-4, rtol=1e-5) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_at_com_no_torque_with_com_offset(device: str): + """Test that a global force at CoM position produces zero torque even with CoM offset.""" + num_envs, num_bodies = 2, 1 + + # Link at origin, CoM offset by [2, 3, 0] + link_pos_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + link_quat_np = np.zeros((num_envs, num_bodies, 4), dtype=np.float32) + link_quat_np[..., 3] = 1.0 + + com_pos_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + com_pos_np[..., 0] = 2.0 + com_pos_np[..., 1] = 3.0 + + mock_asset = create_mock_asset( + num_envs, + num_bodies, + device, + link_pos=torch.from_numpy(link_pos_np), + link_quat=torch.from_numpy(link_quat_np), + ) + com_pose = torch.cat([torch.from_numpy(com_pos_np), torch.from_numpy(link_quat_np)], dim=-1) + mock_asset.data.set_body_com_pose_w(com_pose) + + composer = WrenchComposer(mock_asset) + + # Apply global force at the CoM position + forces_np = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + forces_np[..., 2] = 50.0 + positions_np = com_pos_np.copy() + + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + composer.compose_to_body_frame() + + # Torque = cross(com, F) - cross(com, F) = 0 + expected_torque = np.zeros((num_envs, num_bodies, 3), dtype=np.float32) + assert np.allclose(composer.out_torque_b.warp.numpy(), expected_torque, atol=1e-4, rtol=1e-5), ( + "Force at CoM should produce zero torque regardless of CoM offset" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_com_offset_with_rotation(device: str): + """Test torque correction with both CoM offset and non-identity rotation.""" + num_envs, num_bodies = 1, 1 + rng = np.random.default_rng(seed=73) + + # Random rotation + link_quat_np = random_unit_quaternion_np(rng, (num_envs, num_bodies)) + link_pos_np = rng.uniform(-5.0, 5.0, (num_envs, num_bodies, 3)).astype(np.float32) + + # CoM offset from link + com_offset_np = rng.uniform(0.5, 2.0, (num_envs, num_bodies, 3)).astype(np.float32) + com_pos_np = link_pos_np + com_offset_np # simple world-frame offset for test clarity + + mock_asset = create_mock_asset( + num_envs, + num_bodies, + device, + link_pos=torch.from_numpy(link_pos_np), + link_quat=torch.from_numpy(link_quat_np), + ) + com_pose = torch.cat([torch.from_numpy(com_pos_np), torch.from_numpy(link_quat_np)], dim=-1) + mock_asset.data.set_body_com_pose_w(com_pose) + + composer = WrenchComposer(mock_asset) + + # Apply global force at a random world position + forces_np = rng.uniform(-100.0, 100.0, (num_envs, num_bodies, 3)).astype(np.float32) + positions_np = rng.uniform(-10.0, 10.0, (num_envs, num_bodies, 3)).astype(np.float32) + + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + positions=wp.from_numpy(positions_np, dtype=wp.vec3f, device=device), + is_global=True, + ) + + composer.compose_to_body_frame() + + # Expected: torque_w = cross(P, F) - cross(com, F) = cross(P - com, F) + lever_arm = positions_np - com_pos_np + torque_w = np.cross(lever_arm, forces_np) + expected_torque_b = quat_rotate_inv_np(link_quat_np, torque_w) + expected_force_b = quat_rotate_inv_np(link_quat_np, forces_np) + + assert np.allclose(composer.out_force_b.warp.numpy(), expected_force_b, atol=1e-3, rtol=1e-4), ( + "Force mismatch with CoM offset + rotation" + ) + assert np.allclose(composer.out_torque_b.warp.numpy(), expected_torque_b, atol=1e-3, rtol=1e-4), ( + f"Torque mismatch with CoM offset + rotation.\n" + f"Expected:\n{expected_torque_b}\nGot:\n{composer.out_torque_b.warp.numpy()}" + ) + + +# ============================================================================ +# Deprecated set_forces_and_torques Tests +# ============================================================================ + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_deprecated_set_forces_and_torques_emits_warning(device: str): + """Test that the deprecated set_forces_and_torques wrapper emits a warning and works.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=80) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + forces_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + + with pytest.warns(DeprecationWarning, match="set_forces_and_torques.*is deprecated"): + composer.set_forces_and_torques( + forces=wp.from_numpy(forces_np, dtype=wp.vec3f, device=device), + ) + + composer.compose_to_body_frame() + assert np.allclose(composer.out_force_b.warp.numpy(), forces_np, atol=1e-4, rtol=1e-5) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_deprecated_set_forces_and_torques_clears_previous(device: str): + """Test that deprecated set_forces_and_torques actually replaces previous values.""" + num_envs, num_bodies = 4, 2 + rng = np.random.default_rng(seed=81) + + mock_asset = create_mock_asset(num_envs, num_bodies, device) + composer = WrenchComposer(mock_asset) + + # First add some forces + forces_a_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + composer.add_forces_and_torques_index( + forces=wp.from_numpy(forces_a_np, dtype=wp.vec3f, device=device), + ) + + # Then set via deprecated method -- should replace + forces_b_np = rng.uniform(-50.0, 50.0, (num_envs, num_bodies, 3)).astype(np.float32) + with pytest.warns(DeprecationWarning): + composer.set_forces_and_torques( + forces=wp.from_numpy(forces_b_np, dtype=wp.vec3f, device=device), + ) + + composer.compose_to_body_frame() + assert np.allclose(composer.out_force_b.warp.numpy(), forces_b_np, atol=1e-4, rtol=1e-5), ( + "Deprecated set_forces_and_torques did not replace previous values" + ) diff --git a/source/isaaclab/test/utils/test_wrench_composer_integration.py b/source/isaaclab/test/utils/test_wrench_composer_integration.py new file mode 100644 index 000000000000..bb195e856030 --- /dev/null +++ b/source/isaaclab/test/utils/test_wrench_composer_integration.py @@ -0,0 +1,817 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests for wrench composer with rigid objects. + +These tests validate that global forces/torques remain invariant under body rotation +""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import pytest +import torch +import warp as wp + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.sim import build_simulation_context +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + + +def generate_cubes_scene( + num_cubes: int = 1, + height: float = 1.0, + device: str = "cuda:0", +) -> tuple[RigidObject, torch.Tensor]: + """Generate a scene with the provided number of cubes.""" + origins = torch.tensor([(i * 1.0, 0, height) for i in range(num_cubes)]).to(device) + for i, origin in enumerate(origins): + sim_utils.create_prim(f"/World/Table_{i}", "Xform", translation=origin) + + spawn_cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + ) + + cube_object_cfg = RigidObjectCfg( + prim_path="/World/Table_.*/Object", + spawn=spawn_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, height)), + ) + cube_object = RigidObject(cfg=cube_object_cfg) + return cube_object, origins + + +N_STEPS = 100 +FORCE_MAGNITUDE = 10.0 +TORQUE_MAGNITUDE = 1.0 + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_invariant_under_rotation(device): + """Test that a permanent global force produces the same acceleration before and after body rotation. + + A global +X force is applied. After 100 steps the body is rotated 180deg about Z. + The acceleration (delta_v per phase) should be the same in both phases because the + force is in the global frame and should not rotate with the body. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + mass = float(wp.to_torch(cube_object.root_view.get_masses())[0]) + com = cube_object.data.body_com_pos_w.torch.clone() + + # Apply permanent global force along +X at CoM + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=com, + body_ids=body_ids, + is_global=True, + ) + + # Phase 1: run N_STEPS + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + vel_after_phase1 = cube_object.data.root_lin_vel_w.torch[0].clone() + + # Rotate body 180deg about Z (quat wxyz = [0, 0, 0, 1]) while keeping velocity + root_pose = cube_object.data.root_state_w.torch[0, :7].clone().unsqueeze(0) + root_pose[0, 3:7] = torch.tensor([0.0, 0.0, 1.0, 0.0], device=device) # 180deg about Z (xyzw) + cube_object.write_root_pose_to_sim(root_pose) + + # Phase 2: run N_STEPS more + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + vel_after_phase2 = cube_object.data.root_lin_vel_w.torch[0].clone() + + # Acceleration should be same in both phases: delta_v_phase2 ≈ delta_v_phase1 + delta_v_phase1 = vel_after_phase1[0].item() # vx after phase 1 + delta_v_phase2 = vel_after_phase2[0].item() - vel_after_phase1[0].item() # vx gained in phase 2 + + expected_dv = FORCE_MAGNITUDE / mass * sim.cfg.dt * N_STEPS + + torch.testing.assert_close( + torch.tensor(delta_v_phase1), + torch.tensor(expected_dv), + rtol=0.001, + atol=0.0001, + ) + torch.testing.assert_close( + torch.tensor(delta_v_phase2), + torch.tensor(expected_dv), + rtol=0.001, + atol=0.0001, + ) + + # Y and Z velocity should remain ~0 + assert abs(vel_after_phase2[1].item()) < 0.5, f"Unexpected Y velocity: {vel_after_phase2[1].item()}" + assert abs(vel_after_phase2[2].item()) < 0.5, f"Unexpected Z velocity: {vel_after_phase2[2].item()}" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_local_force_follows_rotation(device): + """Test that a permanent local force rotates with the body. + + A local +X force is applied. After 100 steps the body is rotated 180deg about Z. + Since local +X is now world -X, the force should decelerate the body back towards zero velocity. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Apply permanent local force along body +X + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=False, + ) + + # Phase 1: run N_STEPS — object accelerates along world +X + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + vel_after_phase1 = cube_object.data.root_lin_vel_w.torch[0].clone() + assert vel_after_phase1[0].item() > 1.0, "Object should be moving in +X" + + # Rotate body 180deg about Z while keeping velocity + root_pose = cube_object.data.root_state_w.torch[0, :7].clone().unsqueeze(0) + root_pose[0, 3:7] = torch.tensor([0.0, 0.0, 1.0, 0.0], device=device) # 180deg about Z (xyzw) + cube_object.write_root_pose_to_sim(root_pose) + + # Phase 2: run N_STEPS — local +X is now world -X, so force decelerates + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + vel_after_phase2 = cube_object.data.root_lin_vel_w.torch[0].clone() + + # Velocity should be approximately zero: decelerated by the same amount as it accelerated + torch.testing.assert_close( + vel_after_phase2[0], + torch.tensor(0.0, device=device), + atol=0.0001, + rtol=0.001, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_at_offset_generates_torque(device): + """Test that a global force applied at an offset from CoM generates the expected torque. + + A global +X force applied at +1m Y offset from CoM should produce: + - Linear acceleration in +X + - Angular acceleration about -Z (from cross product: (0,1,0) × (10,0,0) = (0,0,-10)) + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Force at offset: +1m in Y from CoM (global frame) + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE # +X force + + torques = torch.zeros(1, len(body_ids), 3, device=device) + + # Position offset: CoM position + 1m in Y (global frame) + com_pos = cube_object.data.body_com_pos_w.torch[:, body_ids, :3].clone() + positions = com_pos.clone() + positions[..., 1] += 1.0 # +1m Y offset + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + is_global=True, + ) + + # Run 50 steps + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + lin_vel = cube_object.data.root_lin_vel_w.torch[0] + ang_vel = cube_object.data.root_ang_vel_w.torch[0] + + # Linear velocity in +X should be positive + assert lin_vel[0].item() > 0.1, f"Expected positive X velocity, got {lin_vel[0].item()}" + + # Angular velocity about Z should be negative (cross product: r × F, r=(0,1,0), F=(10,0,0) -> (0,0,-10)) + assert ang_vel[2].item() < -0.1, f"Expected negative Z angular velocity, got {ang_vel[2].item()}" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_torque_invariant_under_rotation(device): + """Test that a permanent global torque produces the same angular acceleration before and after rotation. + + A global +Z torque is applied. After 100 steps the body is rotated 90deg about X. + The angular acceleration (delta_omega per phase) about Z should be the same in both phases + because the torque is in the global frame. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Apply permanent global torque about +Z + forces = torch.zeros(1, len(body_ids), 3, device=device) + torques = torch.zeros(1, len(body_ids), 3, device=device) + torques[..., 2] = TORQUE_MAGNITUDE + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + # Phase 1: run N_STEPS + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + omega_z_after_phase1 = cube_object.data.root_ang_vel_w.torch[0, 2].clone().item() + + # Rotate body 90deg about X and zero out velocities so phase 2 starts from rest + # (avoids gyroscopic cross-coupling at high omega) + root_pose = cube_object.data.root_state_w.torch[0, :7].clone().unsqueeze(0) + root_pose[0, 3:7] = torch.tensor([0.7071, 0.0, 0.0, 0.7071], device=device) # 90deg about X (xyzw) + cube_object.write_root_pose_to_sim(root_pose) + cube_object.write_root_velocity_to_sim(torch.zeros(1, 6, device=device)) + + # Phase 2: run N_STEPS from rest with different body orientation + for _ in range(N_STEPS): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + omega_z_after_phase2 = cube_object.data.root_ang_vel_w.torch[0, 2].clone().item() + + # Both phases start from rest — angular acceleration about Z should be the same + torch.testing.assert_close( + torch.tensor(omega_z_after_phase1), + torch.tensor(omega_z_after_phase2), + rtol=0.001, + atol=0.0001, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_torque_after_translation(device): + """Test that global force torque updates dynamically when the body translates. + + Phase 1: Cube at (1,0,0). Global force F=(0,10,0) applied at explicit position (1,0,0). + stored_torque = cross((1,0,0), (0,10,0)) = (0,0,10) + correction = -cross((1,0,0), (0,10,0)) = (0,0,-10) + net torque = 0 → no rotation, only linear acceleration in +Y. + + Phase 2: Teleport cube to origin (0,0,0), zero velocity, don't re-apply force. + stored_torque = (0,0,10) (unchanged in buffer) + correction = -cross((0,0,0), (0,10,0)) = (0,0,0) + net torque = (0,0,10) → rotation about +Z. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Phase 1 setup: Move cube to (1, 0, 1) and apply force at (1, 0, 1) + root_state = cube_object.data.root_state_w.torch.clone() + root_state[0, 0] = 1.0 # x = 1 + root_state[0, 1] = 0.0 # y = 0 + root_state[0, 2] = 1.0 # z = 1 + root_state[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity quat (xyzw) + root_state[0, 7:] = 0.0 # zero velocity + cube_object.write_root_state_to_sim(root_state) + + # Step once to let the state settle + sim.step() + cube_object.update(sim.cfg.dt) + + # Get current CoM position for the force application point + com_pos = cube_object.data.body_com_pos_w.torch[:, body_ids, :3].clone() + + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 1] = FORCE_MAGNITUDE # +Y force + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=com_pos, + body_ids=body_ids, + is_global=True, + ) + + # Phase 1: run 50 steps — force at CoM, expect no rotation + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + ang_vel_phase1 = cube_object.data.root_ang_vel_w.torch[0].clone() + lin_vel_phase1 = cube_object.data.root_lin_vel_w.torch[0].clone() + + # Should have linear velocity in +Y + assert lin_vel_phase1[1].item() > 0.1, f"Expected positive Y velocity, got {lin_vel_phase1[1].item()}" + + # Angular velocity should be ~0 (force applied at CoM → no torque) + assert abs(ang_vel_phase1[2].item()) < 0.1, ( + f"Expected ~0 Z angular velocity in phase 1, got {ang_vel_phase1[2].item()}" + ) + + # Phase 2: Teleport cube to origin, zero velocity, don't re-apply force + root_state2 = cube_object.data.root_state_w.torch.clone() + root_state2[0, 0] = 0.0 # x = 0 + root_state2[0, 1] = 0.0 + root_state2[0, 2] = 1.0 # z = 1 + root_state2[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state2[0, 7:] = 0.0 # zero velocity + cube_object.write_root_state_to_sim(root_state2) + + # Step once to let state settle + sim.step() + cube_object.update(sim.cfg.dt) + + # Phase 2: run 50 steps — body at origin but stored torque = cross((1,0,1), (0,10,0)) = (-10,0,10) + # correction = -cross((0,0,1), (0,10,0)) = -(0,0,0 - but wait, z=1) + # Actually: stored = cross((com_x,com_y,com_z), (0,10,0)) + # After teleport: correction = -cross(new_pos, F), net torque ≠ 0 since positions differ + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + ang_vel_phase2 = cube_object.data.root_ang_vel_w.torch[0].clone() + + # The X component of position changed from ~1 to ~0, so torque about Z changes. + # stored_torque_z = com_x * Fy = ~1 * 10 = ~10 + # After teleport, correction_z = -new_x * Fy = ~0 * 10 = ~0 + # net torque_z ≈ 10 → positive Z angular velocity + assert ang_vel_phase2[2].item() > 0.5, ( + f"Expected positive Z angular velocity in phase 2, got {ang_vel_phase2[2].item()}" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_torque_reverses_on_opposite_side(device): + """Test that dynamic correction produces correct torque sign depending on body position. + + Phase 1: Cube at (-1, 0, 1). Global F=(0, 10, 0) at world point P=(0, 0, 1). + net torque_z = cross(P - link_pos, F)_z = cross((1,0,0), (0,10,0))_z = +10 + → positive Z angular velocity + + Phase 2: Teleport cube to (+1, 0, 1), zero velocity, don't re-apply force. + net torque_z = cross(P - link_pos, F)_z = cross((-1,0,0), (0,10,0))_z = -10 + → negative Z angular velocity + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Move cube to (-1, 0, 1) + root_state = cube_object.data.root_state_w.torch.clone() + root_state[0, 0] = -1.0 + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + root_state[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[0, 7:] = 0.0 + cube_object.write_root_state_to_sim(root_state) + sim.step() + cube_object.update(sim.cfg.dt) + + # Apply permanent global F=(0, 10, 0) at world point P=(0, 0, 1) + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 1] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + positions = torch.zeros(1, len(body_ids), 3, device=device) + positions[..., 2] = 1.0 # P = (0, 0, 1) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + is_global=True, + ) + + # Phase 1: run 50 steps — expect positive Z angular velocity + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + omega_z_phase1 = cube_object.data.root_ang_vel_w.torch[0, 2].item() + assert omega_z_phase1 > 0.1, f"Phase 1: expected positive omega_z, got {omega_z_phase1}" + + # Phase 2: Teleport cube to (+1, 0, 1), zero velocity + root_state2 = cube_object.data.root_state_w.torch.clone() + root_state2[0, 0] = 1.0 + root_state2[0, 1] = 0.0 + root_state2[0, 2] = 1.0 + root_state2[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state2[0, 7:] = 0.0 + cube_object.write_root_state_to_sim(root_state2) + sim.step() + cube_object.update(sim.cfg.dt) + + # Phase 2: run 50 steps — expect negative Z angular velocity + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + omega_z_phase2 = cube_object.data.root_ang_vel_w.torch[0, 2].item() + assert omega_z_phase2 < -0.1, f"Phase 2: expected negative omega_z, got {omega_z_phase2}" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_no_position_no_torque(device): + """Test that global force without positions produces no torque (applied at CoM). + + A body at (2, 0, 1) with global F=(0, 10, 0) and no positions should experience + only linear acceleration, no rotation. The force is applied at the body's CoM. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Move cube to (2, 0, 1) + root_state = cube_object.data.root_state_w.torch.clone() + root_state[0, 0] = 2.0 + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + root_state[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[0, 7:] = 0.0 + cube_object.write_root_state_to_sim(root_state) + sim.step() + cube_object.update(sim.cfg.dt) + + # Apply global F=(0, 10, 0) WITHOUT positions → force at CoM, no torque + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 1] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + # Run 50 steps + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + omega_z = cube_object.data.root_ang_vel_w.torch[0, 2].item() + # No positions → force at CoM → zero torque → zero angular velocity + assert abs(omega_z) < 0.01, f"Expected ~zero omega_z for force at CoM, got {omega_z}" + + # Should still have linear acceleration in +Y + lin_vel_y = cube_object.data.root_lin_vel_w.torch[0, 1].item() + assert lin_vel_y > 0.1, f"Expected positive Y velocity from applied force, got {lin_vel_y}" + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_multi_cube_different_torques_from_same_force(device): + """Test kernel indexing across multiple envs with different CoM positions. + + 2 cubes: Cube 0 at (-1, 0, 1), Cube 1 at (+1, 0, 1). + Same global F=(0, 10, 0) at same world point P=(0, 0, 1) to both cubes. + Cube 0: torque_z = cross((1,0,0), (0,10,0))_z = +10 → omega_z > 0 + Cube 1: torque_z = cross((-1,0,0), (0,10,0))_z = -10 → omega_z < 0 + Both have same linear acceleration in +Y. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=2, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Position cubes: Cube 0 at (-1, 0, 1), Cube 1 at (+1, 0, 1) + root_state = cube_object.data.root_state_w.torch.clone() + root_state[0, 0] = -1.0 + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + root_state[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[0, 7:] = 0.0 + + root_state[1, 0] = 1.0 + root_state[1, 1] = 0.0 + root_state[1, 2] = 1.0 + root_state[1, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[1, 7:] = 0.0 + cube_object.write_root_state_to_sim(root_state) + sim.step() + cube_object.update(sim.cfg.dt) + + # Apply same global F=(0, 10, 0) at P=(0, 0, 1) to both cubes + forces = torch.zeros(2, len(body_ids), 3, device=device) + forces[..., 1] = FORCE_MAGNITUDE + torques = torch.zeros(2, len(body_ids), 3, device=device) + positions = torch.zeros(2, len(body_ids), 3, device=device) + positions[..., 2] = 1.0 # P = (0, 0, 1) + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + is_global=True, + ) + + # Run 50 steps + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + # Cube 0: omega_z > 0 (force point is to the right of CoM) + omega_z_0 = cube_object.data.root_ang_vel_w.torch[0, 2].item() + assert omega_z_0 > 0.1, f"Cube 0: expected positive omega_z, got {omega_z_0}" + + # Cube 1: omega_z < 0 (force point is to the left of CoM) + omega_z_1 = cube_object.data.root_ang_vel_w.torch[1, 2].item() + assert omega_z_1 < -0.1, f"Cube 1: expected negative omega_z, got {omega_z_1}" + + # Both cubes should have same linear velocity in +Y (same force magnitude) + lin_vel_y_0 = cube_object.data.root_lin_vel_w.torch[0, 1].item() + lin_vel_y_1 = cube_object.data.root_lin_vel_w.torch[1, 1].item() + assert abs(lin_vel_y_0 - lin_vel_y_1) < 0.5, ( + f"Both cubes should have similar Y velocity, got {lin_vel_y_0} and {lin_vel_y_1}" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_global_force_torque_far_from_origin(device): + """Test that global force torque correction produces correct physics at large world coordinates. + + Two cubes with identical relative geometry (force offset = (1, 0, 0) from CoM): + Cube 0 at (0, 0, 1) — near origin (reference) + Cube 1 at (2000, 0, 1) — far from origin + + Both get global F=(0, 10, 0) at offset (1, 0, 0) from their respective CoMs. + Expected torque: cross((1,0,0), (0,10,0)) = (0, 0, 10) for both. + + The compose kernel computes cross(P, F) - cross(link_pos, F): + Cube 0: cross((1,0,1), F) - cross((0,0,1), F) — small values, no cancellation + Cube 1: cross((2001,0,1), F) - cross((2000,0,1), F) — large values nearly cancel + + Both cubes should produce the same angular and linear velocities. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=2, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Position cubes: Cube 0 near origin, Cube 1 far from origin + root_state = cube_object.data.root_state_w.torch.clone() + # Cube 0 at (0, 0, 1) + root_state[0, 0] = 0.0 + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + root_state[0, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[0, 7:] = 0.0 + # Cube 1 at (2000, 0, 1) + root_state[1, 0] = 2000.0 + root_state[1, 1] = 0.0 + root_state[1, 2] = 1.0 + root_state[1, 3:7] = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device) # identity (xyzw) + root_state[1, 7:] = 0.0 + cube_object.write_root_state_to_sim(root_state) + sim.step() + cube_object.update(sim.cfg.dt) + + # Apply F=(0, 10, 0) at +1m X offset from each cube's CoM + forces = torch.zeros(2, len(body_ids), 3, device=device) + forces[..., 1] = FORCE_MAGNITUDE # +Y force + torques = torch.zeros(2, len(body_ids), 3, device=device) + + # Positions: each cube's CoM + (1, 0, 0) + com_pos = cube_object.data.body_com_pos_w.torch[:, body_ids, :3].clone() + positions = com_pos.clone() + positions[..., 0] += 1.0 # +1m X offset from CoM + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + is_global=True, + ) + + # Run 50 steps + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + # Both cubes should have positive omega_z (cross((1,0,0), (0,10,0)) = (0,0,10)) + omega_z_0 = cube_object.data.root_ang_vel_w.torch[0, 2].item() + omega_z_1 = cube_object.data.root_ang_vel_w.torch[1, 2].item() + assert omega_z_0 > 0.1, f"Cube 0: expected positive omega_z, got {omega_z_0}" + assert omega_z_1 > 0.1, f"Cube 1: expected positive omega_z, got {omega_z_1}" + + # omega_z values should match within 1% (same relative geometry) + torch.testing.assert_close( + torch.tensor(omega_z_0), + torch.tensor(omega_z_1), + rtol=0.01, + atol=0.0, + msg=lambda msg: ( + f"Angular velocity mismatch between near-origin and far-from-origin cubes:\n" + f" Cube 0 (near): omega_z = {omega_z_0:.6f}\n" + f" Cube 1 (far): omega_z = {omega_z_1:.6f}\n{msg}" + ), + ) + + # Linear velocity in +Y should also match + lin_vel_y_0 = cube_object.data.root_lin_vel_w.torch[0, 1].item() + lin_vel_y_1 = cube_object.data.root_lin_vel_w.torch[1, 1].item() + torch.testing.assert_close( + torch.tensor(lin_vel_y_0), + torch.tensor(lin_vel_y_1), + rtol=0.01, + atol=0.0, + msg=lambda msg: ( + f"Linear velocity mismatch between near-origin and far-from-origin cubes:\n" + f" Cube 0 (near): lin_vel_y = {lin_vel_y_0:.6f}\n" + f" Cube 1 (far): lin_vel_y = {lin_vel_y_1:.6f}\n{msg}" + ), + ) + + +@pytest.mark.parametrize("device", ["cuda:0"]) +def test_global_force_no_position_no_rotation_large_offset(device): + """Test that a global force without positions produces no rotation at large offsets. + + A cube is placed at (2000, 0, 1) and a global force F=(0, 10, 0) is applied + without positions. The cube should accelerate linearly but not rotate. + Before the fix, this would produce torque proportional to 2000 and cause rotation. + """ + with build_simulation_context( + device=device, add_ground_plane=False, auto_add_lighting=True, gravity_enabled=False + ) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Place cube at large X offset + root_state = cube_object.data.default_root_state.torch.clone() + root_state[0, 0] = 2000.0 # large X position + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + cube_object.write_root_pose_to_sim(root_state[:, :7]) + cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + cube_object.reset() + + # Apply global force without positions (should go to CoM, no torque) + forces = torch.zeros(cube_object.num_instances, len(body_ids), 3, device=device) + forces[0, :, 1] = 10.0 # F_y = 10 N + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + body_ids=body_ids, + is_global=True, + ) + + # Step simulation + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + # Check: angular velocity should be near zero (no rotation) + ang_vel = cube_object.data.root_ang_vel_w.torch[0] + assert torch.allclose(ang_vel, torch.zeros(3, device=device), atol=0.01), ( + f"Expected near-zero angular velocity, got {ang_vel}. " + "Global force without positions should not produce torque." + ) + + # Check: linear velocity in Y should be positive (force is in +Y) + lin_vel = cube_object.data.root_lin_vel_w.torch[0] + assert lin_vel[1] > 0.1, f"Expected positive Y velocity from applied force, got {lin_vel[1]}" + + +@pytest.mark.parametrize("device", ["cuda:0"]) +def test_global_force_at_com_position_no_rotation_large_offset(device): + """Test that a global force with position at CoM produces no rotation at large offsets. + + A cube is placed at (2000, 0, 1) and a global force F=(0, 10, 0) is applied + at the cube's position (i.e., at its CoM). This should produce zero torque, + serving as a control test alongside test_global_force_no_position_no_rotation_large_offset. + """ + with build_simulation_context( + device=device, add_ground_plane=False, auto_add_lighting=True, gravity_enabled=False + ) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=1, height=1.0, device=device) + + sim.reset() + + body_ids, _ = cube_object.find_bodies(".*") + + # Place cube at large X offset + root_state = cube_object.data.default_root_state.torch.clone() + root_state[0, 0] = 2000.0 + root_state[0, 1] = 0.0 + root_state[0, 2] = 1.0 + cube_object.write_root_pose_to_sim(root_state[:, :7]) + cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + cube_object.reset() + + # Apply global force AT the cube's position (torque should cancel) + forces = torch.zeros(cube_object.num_instances, len(body_ids), 3, device=device) + forces[0, :, 1] = 10.0 + + positions = torch.zeros(cube_object.num_instances, len(body_ids), 3, device=device) + positions[0, :, 0] = 2000.0 + positions[0, :, 2] = 1.0 + + cube_object.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + positions=positions, + body_ids=body_ids, + is_global=True, + ) + + for _ in range(50): + cube_object.write_data_to_sim() + sim.step() + cube_object.update(sim.cfg.dt) + + # Force at CoM → no rotation + ang_vel = cube_object.data.root_ang_vel_w.torch[0] + assert torch.allclose(ang_vel, torch.zeros(3, device=device), atol=0.01), ( + f"Expected near-zero angular velocity, got {ang_vel}. " + "Global force at CoM position should not produce torque." + ) + + lin_vel = cube_object.data.root_lin_vel_w.torch[0] + assert lin_vel[1] > 0.1, f"Expected positive Y velocity from applied force, got {lin_vel[1]}" diff --git a/source/isaaclab/test/utils/test_wrench_composer_vs_physx.py b/source/isaaclab/test/utils/test_wrench_composer_vs_physx.py new file mode 100644 index 000000000000..8d47b003369c --- /dev/null +++ b/source/isaaclab/test/utils/test_wrench_composer_vs_physx.py @@ -0,0 +1,837 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests comparing WrenchComposer output vs raw PhysX apply_forces_and_torques_at_position. + +Two identical rigid objects are placed in the same scene. One uses the WrenchComposer path +(set_forces_and_torques → write_data_to_sim → compose → PhysX apply with is_global=False), +the other uses the raw PhysX API directly (apply_forces_and_torques_at_position with matching +is_global flag). After N steps, both objects should have identical velocities. +""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import math + +import pytest +import torch +import warp as wp + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.sim import build_simulation_context +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + + +def generate_dual_cube_scene( + num_cubes: int = 1, + height: float = 1.0, + device: str = "cuda:0", + initial_rot: tuple[float, ...] | None = None, + spacing: float = 2.0, +) -> tuple[RigidObject, RigidObject]: + """Generate a scene with two sets of cubes: one for the composer path, one for raw PhysX. + + Both sets share the same spawn config and initial state (except a Y offset to avoid overlap). + + Args: + num_cubes: Number of cubes per group (environments). + height: Spawn height. + device: Simulation device. + initial_rot: Initial quaternion (x, y, z, w). Defaults to identity. + spacing: Distance between env origins in X. Defaults to 2.0. + + Returns: + Tuple of (cube_composer, cube_raw) RigidObject instances. + """ + if initial_rot is None: + initial_rot = (0.0, 0.0, 0.0, 1.0) # identity in (x,y,z,w) + + y_offset = max(spacing, 3.0) + + # Create Xform prims for both groups + for i in range(num_cubes): + origin_composer = (i * spacing, 0.0, height) + origin_raw = (i * spacing, y_offset, height) # Y offset to avoid overlap + sim_utils.create_prim(f"/World/Composer_{i}", "Xform", translation=origin_composer) + sim_utils.create_prim(f"/World/Raw_{i}", "Xform", translation=origin_raw) + + spawn_cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + ) + + cube_composer_cfg = RigidObjectCfg( + prim_path="/World/Composer_.*/Object", + spawn=spawn_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, height), rot=initial_rot), + ) + cube_composer = RigidObject(cfg=cube_composer_cfg) + + cube_raw_cfg = RigidObjectCfg( + prim_path="/World/Raw_.*/Object", + spawn=spawn_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, y_offset, height), rot=initial_rot), + ) + cube_raw = RigidObject(cfg=cube_raw_cfg) + + return cube_composer, cube_raw + + +N_STEPS = 50 +FORCE_MAGNITUDE = 10.0 +TORQUE_MAGNITUDE = 1.0 +# 45 degrees about Z: (cos(22.5°), 0, 0, sin(22.5°)) +ROT_45_Z = (0.0, 0.0, math.sin(math.pi / 8), math.cos(math.pi / 8)) # 45deg about Z in (x,y,z,w) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_local_force(device): + """Baseline: local force at identity orientation. Composer and raw PhysX should match exactly.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Composer path: local force +X + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=False, + ) + + # Raw PhysX data (flattened for PhysX view API) + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(1, 3, device=device) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() # no-op (composer inactive) + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=False, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Compare velocities + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # Both should have ~zero angular velocity (force at CoM, no torque) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + torch.testing.assert_close( + cube_raw.data.root_ang_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_global_force(device): + """Global force with non-identity rotation (45 deg Z). Rotation matters for frame conversion.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device, initial_rot=ROT_45_Z) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Composer path: global force +X + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + # Raw PhysX data + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(1, 3, device=device) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Linear velocities should match (same global force, same mass) + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # Angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # Both should have ~zero angular velocity (force at CoM, no torque) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + torch.testing.assert_close( + cube_raw.data.root_ang_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_local_force_at_position(device): + """Local force at a local offset. Both paths should produce identical cross-product torque.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Local force +X at local offset +0.5m Y + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + positions = torch.zeros(1, len(body_ids), 3, device=device) + positions[..., 1] = 0.5 # +0.5m Y offset in local frame + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=positions, + body_ids=body_ids, + is_global=False, + ) + + # Raw PhysX data (local force at local position) + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(1, 3, device=device) + raw_positions = torch.zeros(1, 3, device=device) + raw_positions[:, 1] = 0.5 + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=wp.from_torch(raw_positions.contiguous(), dtype=wp.float32), + indices=raw_indices, + is_global=False, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Both linear and angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + + # Sanity: angular velocity should be nonzero (cross-product torque) + assert torch.abs(cube_composer.data.root_ang_vel_w.torch[0, 2]).item() > 0.1, ( + "Expected nonzero Z angular velocity from cross-product torque" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_global_force_at_position(device): + """Global force at world position with non-identity rotation. Both rotation AND position correction matter.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device, initial_rot=ROT_45_Z) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Global force +X + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + # Position = each cube's link_pos + offset (same offset for both) + offset = torch.zeros(1, len(body_ids), 3, device=device) + offset[..., 1] = 1.0 # +1m Y offset in world frame + + pos_composer = cube_composer.data.body_com_pos_w.torch[:, body_ids, :3].clone() + offset + pos_raw = cube_raw.data.body_com_pos_w.torch[:, body_ids, :3].clone() + offset + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=pos_composer, + body_ids=body_ids, + is_global=True, + ) + + # Raw PhysX data + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(1, 3, device=device) + raw_positions = pos_raw.view(-1, 3) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=wp.from_torch(raw_positions.contiguous(), dtype=wp.float32), + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Both linear and angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + + # Sanity: angular velocity should be nonzero (cross-product torque) + assert torch.abs(cube_composer.data.root_ang_vel_w.torch[0, 2]).item() > 0.1, ( + "Expected nonzero Z angular velocity from positional torque" + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_local_torque(device): + """Local torque at identity orientation. Should produce matching angular velocity.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Composer path: local torque about +Z + forces = torch.zeros(1, len(body_ids), 3, device=device) + torques = torch.zeros(1, len(body_ids), 3, device=device) + torques[..., 2] = TORQUE_MAGNITUDE + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=False, + ) + + # Raw PhysX data + raw_forces = torch.zeros(1, 3, device=device) + raw_torques = torch.zeros(1, 3, device=device) + raw_torques[:, 2] = TORQUE_MAGNITUDE + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=False, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # Linear velocity should be ~zero for both (no force) + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + torch.testing.assert_close( + cube_raw.data.root_lin_vel_w.torch, + torch.zeros(1, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_global_torque(device): + """Global torque with non-identity rotation (45 deg Z). Composer rotates to body frame internally.""" + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device, initial_rot=ROT_45_Z) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Composer path: global torque about +Z + forces = torch.zeros(1, len(body_ids), 3, device=device) + torques = torch.zeros(1, len(body_ids), 3, device=device) + torques[..., 2] = TORQUE_MAGNITUDE + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + # Raw PhysX data + raw_forces = torch.zeros(1, 3, device=device) + raw_torques = torch.zeros(1, 3, device=device) + raw_torques[:, 2] = TORQUE_MAGNITUDE + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + + +NUM_CUBES_MULTI = 4 + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_global_force_multi_env(device): + """Global force (no position) with multiple environments. + + Regression: checks that env-indexing and per-body quaternion handling work correctly + when there is more than one environment. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene( + num_cubes=NUM_CUBES_MULTI, device=device, initial_rot=ROT_45_Z + ) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Composer path: global force +X for all envs + forces = torch.zeros(NUM_CUBES_MULTI, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(NUM_CUBES_MULTI, len(body_ids), 3, device=device) + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + # Raw PhysX data (one row per env) + raw_forces = torch.zeros(NUM_CUBES_MULTI, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(NUM_CUBES_MULTI, 3, device=device) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Linear velocities should match across all envs + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # Angular velocities should match + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # All envs should have ~zero angular velocity + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + torch.zeros(NUM_CUBES_MULTI, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_global_force_with_reset(device): + """Global force (no position) with a mid-simulation reset of half the envs. + + Regression: after reset the permanent wrench is cleared. Re-setting it should + produce correct behavior even though the object state was just reset. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene( + num_cubes=NUM_CUBES_MULTI, device=device, initial_rot=ROT_45_Z, spacing=20.0 + ) + + sim.reset() + + # Capture initial world-frame state (includes env origin offsets) + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + initial_state_composer = torch.cat( + [ + cube_composer.data.root_link_pos_w.torch, + cube_composer.data.root_link_quat_w.torch, + cube_composer.data.root_com_vel_w.torch, + ], + dim=-1, + ).clone() + initial_state_raw = torch.cat( + [ + cube_raw.data.root_link_pos_w.torch, + cube_raw.data.root_link_quat_w.torch, + cube_raw.data.root_com_vel_w.torch, + ], + dim=-1, + ).clone() + + body_ids, _ = cube_composer.find_bodies(".*") + + def apply_global_force(): + """Set the same global +X force on the composer cube.""" + forces = torch.zeros(NUM_CUBES_MULTI, len(body_ids), 3, device=device) + forces[..., 0] = FORCE_MAGNITUDE + torques = torch.zeros(NUM_CUBES_MULTI, len(body_ids), 3, device=device) + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + apply_global_force() + + # Raw PhysX data + raw_forces = torch.zeros(NUM_CUBES_MULTI, 3, device=device) + raw_forces[:, 0] = FORCE_MAGNITUDE + raw_torques = torch.zeros(NUM_CUBES_MULTI, 3, device=device) + raw_indices = cube_raw._ALL_INDICES + + # Phase 1: run N_STEPS / 2 + half = N_STEPS // 2 + for _ in range(half): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Reset first half of envs on both cubes + reset_ids = list(range(NUM_CUBES_MULTI // 2)) + reset_ids_torch = torch.tensor(reset_ids, dtype=torch.long, device=device) + + # Reset root state using captured world-frame initial state (includes env origins) + cube_composer.write_root_state_to_sim(initial_state_composer[reset_ids_torch], env_ids=reset_ids_torch) + cube_raw.write_root_state_to_sim(initial_state_raw[reset_ids_torch], env_ids=reset_ids_torch) + + cube_composer.reset(reset_ids) + cube_raw.reset(reset_ids) + + # Re-apply the force (reset cleared the permanent wrench) + apply_global_force() + + # Phase 2: run N_STEPS / 2 more + for _ in range(half): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # All envs: composer vs raw should match + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + # All envs should have ~zero angular velocity + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + torch.zeros(NUM_CUBES_MULTI, 3, device=device), + rtol=0.0, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_payload_scenario(device): + """Mirrors the apply_payload MDP: permanent global downward force at CoM with gravity. + + A constant world-frame downward force (payload weight) is applied via the composer + path vs raw PhysX. The body falls under gravity + payload, contacts the ground, and + orientation changes. The composer does a world->body->world round-trip each step; + this test catches any precision drift from that. + """ + with build_simulation_context(device=device, gravity_enabled=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene( + num_cubes=1, height=0.5, device=device, initial_rot=ROT_45_Z, spacing=20.0 + ) + + sim.reset() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Record initial positions to compare displacements (cubes spawn at different Y) + init_pos_composer = cube_composer.data.root_pos_w.torch.clone() + init_pos_raw = cube_raw.data.root_pos_w.torch.clone() + + body_ids, _ = cube_composer.find_bodies(".*") + + payload_force = 2.0 * 9.81 + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 2] = -payload_force + torques = torch.zeros(1, len(body_ids), 3, device=device) + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + body_ids=body_ids, + is_global=True, + ) + + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 2] = -payload_force + raw_torques = torch.zeros(1, 3, device=device) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(N_STEPS): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=None, + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + # Compare displacements (not absolute positions — cubes have different spawn Y) + disp_composer = cube_composer.data.root_pos_w.torch - init_pos_composer + disp_raw = cube_raw.data.root_pos_w.torch - init_pos_raw + + torch.testing.assert_close(disp_composer, disp_raw, rtol=1e-4, atol=1e-4) + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-4, + atol=1e-4, + ) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_composer_vs_physx_permanent_global_force_at_position_long_run(device): + """Permanent global force at a world-frame offset, run long enough for significant body motion. + + This test catches temporal drift bugs where the stored positional torque diverges from + what PhysX computes each step as the body moves. The force is large enough that the body + translates and rotates significantly over 100 steps, but not so large that it causes + numerical instability. + """ + with build_simulation_context(device=device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_composer, cube_raw = generate_dual_cube_scene(num_cubes=1, device=device, initial_rot=ROT_45_Z) + + sim.reset() + + body_ids, _ = cube_composer.find_bodies(".*") + + # Global force +Z at +1m Y offset from CoM — produces torque around X + forces = torch.zeros(1, len(body_ids), 3, device=device) + forces[..., 2] = FORCE_MAGNITUDE + torques = torch.zeros(1, len(body_ids), 3, device=device) + + offset = torch.zeros(1, len(body_ids), 3, device=device) + offset[..., 1] = 1.0 + + pos_composer = cube_composer.data.body_com_pos_w.torch[:, body_ids, :3].clone() + offset + pos_raw = cube_raw.data.body_com_pos_w.torch[:, body_ids, :3].clone() + offset + + cube_composer.permanent_wrench_composer.set_forces_and_torques( + forces=forces, + torques=torques, + positions=pos_composer, + body_ids=body_ids, + is_global=True, + ) + + raw_forces = torch.zeros(1, 3, device=device) + raw_forces[:, 2] = FORCE_MAGNITUDE + raw_torques = torch.zeros(1, 3, device=device) + raw_positions = pos_raw.view(-1, 3) + raw_indices = cube_raw._ALL_INDICES + + for _ in range(100): + cube_composer.write_data_to_sim() + cube_raw.write_data_to_sim() + cube_raw.root_view.apply_forces_and_torques_at_position( + force_data=wp.from_torch(raw_forces.contiguous(), dtype=wp.float32), + torque_data=wp.from_torch(raw_torques.contiguous(), dtype=wp.float32), + position_data=wp.from_torch(raw_positions.contiguous(), dtype=wp.float32), + indices=raw_indices, + is_global=True, + ) + sim.step() + cube_composer.update(sim.cfg.dt) + cube_raw.update(sim.cfg.dt) + + torch.testing.assert_close( + cube_composer.data.root_lin_vel_w.torch, + cube_raw.data.root_lin_vel_w.torch, + rtol=1e-3, + atol=1e-3, + ) + torch.testing.assert_close( + cube_composer.data.root_ang_vel_w.torch, + cube_raw.data.root_ang_vel_w.torch, + rtol=1e-3, + atol=1e-3, + ) + + # Sanity: angular velocity should be nonzero + assert torch.abs(cube_composer.data.root_ang_vel_w.torch).max().item() > 0.1, ( + "Expected nonzero angular velocity from positional torque over 100 steps" + ) diff --git a/source/isaaclab/test/utils/warp/test_proxy_array.py b/source/isaaclab/test/utils/warp/test_proxy_array.py new file mode 100644 index 000000000000..08bdd4bb916e --- /dev/null +++ b/source/isaaclab/test/utils/warp/test_proxy_array.py @@ -0,0 +1,550 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the ProxyArray class.""" + +import warnings + +import pytest +import torch +import warp as wp + +wp.config.quiet = True +wp.init() + + +@pytest.fixture(params=["cpu", "cuda:0"]) +def device(request): + """Parametrize tests across CPU and CUDA devices.""" + return request.param + + +class TestProxyArrayBasic: + """Tests for basic ProxyArray functionality.""" + + def test_warp_returns_original(self, device): + """Test that .warp returns the original warp array.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert ta.warp is arr + + def test_torch_returns_tensor(self, device): + """Test that .torch returns a torch.Tensor.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert isinstance(ta.torch, torch.Tensor) + + def test_torch_is_cached(self, device): + """Test that .torch returns the same tensor object on repeated access.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + t1 = ta.torch + t2 = ta.torch + assert t1 is t2 + + def test_torch_shares_memory(self, device): + """Test that .torch provides a zero-copy view (shares memory with warp).""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + t = ta.torch + # Modify the torch tensor + t[0] = 42.0 + # The change should be visible in the warp array + arr_np = arr.numpy() + assert arr_np[0] == 42.0 + + def test_immutable_warp_cannot_be_reassigned(self, device): + """ProxyArray._warp cannot be reassigned; callers must construct a new wrapper.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + + with pytest.raises(AttributeError, match="immutable"): + ta._warp = wp.ones(10, dtype=wp.float32, device=device) + with pytest.raises(AttributeError, match="immutable"): + ta.new_field = 42 # arbitrary attribute writes also blocked + + def test_immutable_allows_internal_torch_cache(self, device): + """Lazy .torch caching still works — only _torch_cache is allowed as a post-init write.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + # First access populates the cache; no exception. + first = ta.torch + # Subsequent accesses return the same cached tensor. + second = ta.torch + assert first is second + + def test_cuda_array_interface(self): + """Test that __cuda_array_interface__ delegates to the underlying warp array.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device="cuda:0") + ta = ProxyArray(arr) + cai = ta.__cuda_array_interface__ + assert isinstance(cai, dict) + assert "data" in cai + assert "shape" in cai + assert cai["shape"] == arr.__cuda_array_interface__["shape"] + + def test_cuda_array_interface_not_on_cpu(self): + """Test that __cuda_array_interface__ raises AttributeError on CPU arrays.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device="cpu") + ta = ProxyArray(arr) + with pytest.raises(AttributeError): + _ = ta.__cuda_array_interface__ + + def test_wp_launch_accepts_proxy_array(self): + """Test that wp.launch() can consume a ProxyArray via __cuda_array_interface__.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + @wp.kernel + def _add_one(src: wp.array(dtype=wp.float32), dst: wp.array(dtype=wp.float32)): + i = wp.tid() + dst[i] = src[i] + 1.0 + + src = ProxyArray(wp.zeros(5, dtype=wp.float32, device="cuda:0")) + dst = ProxyArray(wp.zeros(5, dtype=wp.float32, device="cuda:0")) + wp.launch(_add_one, dim=5, inputs=[src], outputs=[dst], device="cuda:0") + wp.synchronize_device("cuda:0") + assert dst.torch[0].item() == 1.0 + assert dst.torch[4].item() == 1.0 + + +class TestProxyArrayStructuredTypes: + """Tests for ProxyArray with structured warp types (vec3f, quatf, etc).""" + + def test_vec3f_shape(self, device): + """Test that vec3f arrays produce (N, 3) torch tensors.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(8, dtype=wp.vec3f, device=device) + ta = ProxyArray(arr) + assert ta.torch.shape == (8, 3) + + def test_quatf_shape(self, device): + """Test that quatf arrays produce (N, 4) torch tensors.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(8, dtype=wp.quatf, device=device) + ta = ProxyArray(arr) + assert ta.torch.shape == (8, 4) + + def test_transformf_shape(self, device): + """Test that transformf arrays produce (N, 7) torch tensors.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(8, dtype=wp.transformf, device=device) + ta = ProxyArray(arr) + assert ta.torch.shape == (8, 7) + + def test_spatial_vectorf_shape(self, device): + """Test that spatial_vectorf arrays produce (N, 6) torch tensors.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(8, dtype=wp.spatial_vectorf, device=device) + ta = ProxyArray(arr) + assert ta.torch.shape == (8, 6) + + def test_2d_vec3f_shape(self, device): + """Test that 2D vec3f arrays produce (N, M, 3) torch tensors.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros((4, 5), dtype=wp.vec3f, device=device) + ta = ProxyArray(arr) + assert ta.torch.shape == (4, 5, 3) + + +class TestProxyArrayQuatfTorchAccessWarning: + """Tests for the WARN_ON_TORCH_QUATF_ACCESS opt-in runtime detector.""" + + def test_default_no_warning(self, device, monkeypatch): + """No env var → quatf .torch access is silent.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + monkeypatch.delenv("WARN_ON_TORCH_QUATF_ACCESS", raising=False) + ta = ProxyArray(wp.zeros(4, dtype=wp.quatf, device=device)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta.torch + assert [x for x in w if issubclass(x.category, UserWarning)] == [] + + def test_env_set_warns_on_quatf(self, device, monkeypatch): + """WARN_ON_TORCH_QUATF_ACCESS=1 → quatf .torch read emits a UserWarning at the call site.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + monkeypatch.setenv("WARN_ON_TORCH_QUATF_ACCESS", "1") + ta = ProxyArray(wp.zeros(4, dtype=wp.quatf, device=device)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta.torch # the .torch read on this line is what should be reported + user_warns = [x for x in w if issubclass(x.category, UserWarning)] + assert len(user_warns) == 1 + assert "quatf" in str(user_warns[0].message) + assert "(w, x, y, z)" in str(user_warns[0].message) + assert "(x, y, z, w)" in str(user_warns[0].message) + # stacklevel=2 → the warning's filename is this test file, not proxy_array.py + assert user_warns[0].filename == __file__ + + def test_env_set_does_not_warn_on_non_quatf(self, device, monkeypatch): + """The detector only fires for wp.quatf — float32 / vec3f / transformf are silent.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + monkeypatch.setenv("WARN_ON_TORCH_QUATF_ACCESS", "1") + for dtype in (wp.float32, wp.vec3f, wp.transformf, wp.spatial_vectorf): + ta = ProxyArray(wp.zeros(4, dtype=dtype, device=device)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta.torch + assert [x for x in w if issubclass(x.category, UserWarning)] == [] + + def test_env_zero_does_not_warn(self, device, monkeypatch): + """WARN_ON_TORCH_QUATF_ACCESS=0 → silent (only ``"1"`` enables the detector).""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + monkeypatch.setenv("WARN_ON_TORCH_QUATF_ACCESS", "0") + ta = ProxyArray(wp.zeros(4, dtype=wp.quatf, device=device)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta.torch + assert [x for x in w if issubclass(x.category, UserWarning)] == [] + + +class TestProxyArrayConvenienceProperties: + """Tests for convenience properties: shape, dtype, device, len, repr.""" + + def test_shape(self, device): + """Test that .shape returns the warp array shape.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros((3, 4), dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert ta.shape == (3, 4) + + def test_dtype(self, device): + """Test that .dtype returns the warp dtype.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert ta.dtype == wp.float32 + + def test_device(self, device): + """Test that .device returns the warp device string.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(10, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert ta.device == arr.device + + def test_len(self, device): + """Test that len() returns the first dimension size.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros((7, 3), dtype=wp.float32, device=device) + ta = ProxyArray(arr) + assert len(ta) == 7 + + def test_repr(self, device): + """Test that repr() contains ProxyArray and key info.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.zeros(5, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + r = repr(ta) + assert "ProxyArray" in r + assert "float32" in r + + +class TestProxyArrayDeprecationBridge: + """Tests for the deprecation bridge: __torch_function__, operators.""" + + def setup_method(self): + """Reset the deprecation warning flag before each test.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = False + + def test_torch_function_works_and_warns(self, device): + """Test that __torch_function__ enables torch ops and emits a deprecation warning.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.ones(5, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + result = torch.sum(ta) + assert len(w) == 1 + assert issubclass(w[0].category, DeprecationWarning) + assert isinstance(result, torch.Tensor) + assert result.item() == pytest.approx(5.0) + + def test_torch_cat_works_and_warns(self, device): + """Test that torch.cat works with ProxyArray and emits a deprecation warning.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + a1 = wp.ones(3, dtype=wp.float32, device=device) + a2 = wp.ones(4, dtype=wp.float32, device=device) + ta1, ta2 = ProxyArray(a1), ProxyArray(a2) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + result = torch.cat([ta1, ta2]) + assert len(w) == 1 + assert issubclass(w[0].category, DeprecationWarning) + assert result.shape == (7,) + + def test_arithmetic_operators_work_and_warn(self, device): + """Test that arithmetic operators work and emit deprecation warnings.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.ones(5, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + result = ta + 1.0 + assert len(w) == 1 + assert issubclass(w[0].category, DeprecationWarning) + assert isinstance(result, torch.Tensor) + expected = torch.full((5,), 2.0, device=device) + torch.testing.assert_close(result, expected) + + def test_warns_only_once(self, device): + """Test that the deprecation warning is emitted only once per session.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.ones(5, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta + 1.0 + _ = ta * 2.0 + _ = ta - 0.5 + # Only one warning despite three operations + assert len(w) == 1 + + def test_tensor_plus_proxy_array(self, device): + """Test that torch.Tensor + ProxyArray works via __torch_function__.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + arr = wp.ones(5, dtype=wp.float32, device=device) + ta = ProxyArray(arr) + t = torch.ones(5, device=device) * 2.0 + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + result = t + ta + assert len(w) == 1 + assert issubclass(w[0].category, DeprecationWarning) + expected = torch.full((5,), 3.0, device=device) + torch.testing.assert_close(result, expected) + + @pytest.mark.parametrize( + "op, scalar, expected", + [ + ("+", 1.0, [2.0, 3.0]), + ("-", 1.0, [0.0, 1.0]), + ("*", 2.0, [2.0, 4.0]), + ("/", 2.0, [0.5, 1.0]), + ("**", 2.0, [1.0, 4.0]), + ], + ) + def test_binary_operators(self, op, scalar, expected): + """Test forward binary operators: +, -, *, /, **.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta = ProxyArray(wp.array([1.0, 2.0], dtype=wp.float32, device="cpu")) # noqa: F841 + result = eval(f"ta {op} scalar") # noqa: S307 + assert torch.allclose(result, torch.tensor(expected)) + + @pytest.mark.parametrize( + "op, scalar, expected", + [ + ("+", 1.0, [2.0, 3.0]), + ("-", 1.0, [0.0, -1.0]), + ("*", 2.0, [2.0, 4.0]), + ("/", 2.0, [2.0, 1.0]), + ("**", 2.0, [2.0, 4.0]), + ], + ) + def test_reflected_operators(self, op, scalar, expected): + """Test reflected binary operators: scalar op ProxyArray.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta = ProxyArray(wp.array([1.0, 2.0], dtype=wp.float32, device="cpu")) # noqa: F841 + result = eval(f"scalar {op} ta") # noqa: S307 + assert torch.allclose(result, torch.tensor(expected)) + + def test_proxy_array_op_proxy_array(self): + """Test binary operations between two ProxyArray instances.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta1 = ProxyArray(wp.array([1.0, 2.0], dtype=wp.float32, device="cpu")) + ta2 = ProxyArray(wp.array([3.0, 4.0], dtype=wp.float32, device="cpu")) + assert torch.allclose(ta1 + ta2, torch.tensor([4.0, 6.0])) + assert torch.allclose(ta1 * ta2, torch.tensor([3.0, 8.0])) + assert torch.allclose(ta2 - ta1, torch.tensor([2.0, 2.0])) + + @pytest.mark.parametrize( + "op, values, expected", + [ + ("-", [1.0, -2.0], [-1.0, 2.0]), + ("+", [1.0, -2.0], [1.0, -2.0]), + ("abs", [-1.0, 2.0], [1.0, 2.0]), + ], + ) + def test_unary_operators(self, op, values, expected): + """Test unary operators: -, +, abs.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta = ProxyArray(wp.array(values, dtype=wp.float32, device="cpu")) # noqa: F841 + result = eval(f"{op}(ta)" if op == "abs" else f"{op}ta") # noqa: S307 + assert torch.allclose(result, torch.tensor(expected)) + + @pytest.mark.parametrize( + "op, expected", + [ + ("==", [False, True, False]), + ("!=", [True, False, True]), + ("<", [True, False, False]), + ("<=", [True, True, False]), + (">", [False, False, True]), + (">=", [False, True, True]), + ], + ) + def test_comparison_operators(self, op, expected): + """Test comparison operators: ==, !=, <, <=, >, >=.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta = ProxyArray(wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cpu")) # noqa: F841 + result = eval(f"ta {op} 2.0") # noqa: S307 + assert result.tolist() == expected + + def test_getitem_1d(self): + """Test 1D indexing via __getitem__.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + ta = ProxyArray(wp.array([10.0, 20.0, 30.0], dtype=wp.float32, device="cpu")) + assert ta[0].item() == 10.0 + assert ta[-1].item() == 30.0 + assert ta[1:].tolist() == [20.0, 30.0] + + def test_getitem_nd(self): + """Test multi-dimensional indexing via __getitem__ with structured types.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + wp_arr = wp.zeros((3, 4), dtype=wp.vec3f, device="cpu") + ta = ProxyArray(wp_arr) + # torch view is (3, 4, 3) + result = ta[:, 0, :] + assert result.shape == (3, 3) + result = ta[0, :, 2] + assert result.shape == (4,) + + def test_setitem_writes_through(self): + """Test __setitem__ writes through to shared warp memory.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = True + wp_arr = wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cpu") + ta = ProxyArray(wp_arr) + ta[0] = 99.0 + assert wp_arr.numpy()[0] == 99.0 + + def test_getitem_warns(self): + """Test __getitem__ emits deprecation warning.""" + from isaaclab.utils.warp.proxy_array import ProxyArray + + ProxyArray._deprecation_warned = False + ta = ProxyArray(wp.array([1.0, 2.0], dtype=wp.float32, device="cpu")) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + _ = ta[0] + deprecation_warnings = [x for x in w if issubclass(x.category, DeprecationWarning)] + assert len(deprecation_warnings) == 1 + + +class TestWpToTorchShim: + """Tests for the ``wp.to_torch`` shim installed by ``isaaclab.utils.warp``. + + The shim makes legacy call sites like ``wp.to_torch(asset.data.joint_pos)`` + keep working after the ProxyArray migration, instead of raising + ``AttributeError`` on ``requires_grad`` lookup. + """ + + def test_raw_wp_array_unchanged(self): + """``wp.to_torch(wp.array)`` must still produce a zero-copy torch view.""" + import isaaclab.utils.warp # noqa: F401 # ensure shim is installed + + arr = wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cpu") + t = wp.to_torch(arr) + assert isinstance(t, torch.Tensor) + assert t.shape == (3,) + + def test_proxy_array_returns_torch_with_warning(self): + """``wp.to_torch(ProxyArray)`` returns the cached .torch view and warns once.""" + import isaaclab.utils.warp as iw # noqa: F401 # ensure shim is installed + from isaaclab.utils.warp.proxy_array import ProxyArray + + # Reset the module-level one-shot flag so the warning fires in this test. + iw._WP_TO_TORCH_WARNED = False + + arr = wp.array([7.0, 8.0], dtype=wp.float32, device="cpu") + proxy = ProxyArray(arr) + + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + t = wp.to_torch(proxy) + deprecation = [w for w in caught if issubclass(w.category, DeprecationWarning)] + + assert isinstance(t, torch.Tensor) + assert t is proxy.torch, "shim should return the cached .torch view" + assert len(deprecation) == 1 + assert "ProxyArray" in str(deprecation[0].message) + + def test_proxy_array_warning_is_one_shot(self): + """Repeated ``wp.to_torch(ProxyArray)`` calls must not spam warnings.""" + import isaaclab.utils.warp as iw + from isaaclab.utils.warp.proxy_array import ProxyArray + + iw._WP_TO_TORCH_WARNED = True # pretend the warning already fired + + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always") + wp.to_torch(ProxyArray(wp.zeros(2, dtype=wp.float32, device="cpu"))) + deprecation = [w for w in caught if issubclass(w.category, DeprecationWarning)] + + assert not deprecation, "shim must not re-warn after the first call" + + def test_requires_grad_forwarded_to_raw_wp_array(self): + """The ``requires_grad`` kwarg still reaches the original ``wp.to_torch``.""" + import isaaclab.utils.warp # noqa: F401 + + arr = wp.array([1.0, 2.0], dtype=wp.float32, device="cpu") + t = wp.to_torch(arr, requires_grad=False) + assert isinstance(t, torch.Tensor) + assert t.requires_grad is False diff --git a/source/isaaclab/test/visualizers/test_visualizer.py b/source/isaaclab/test/visualizers/test_visualizer.py new file mode 100644 index 000000000000..44d3a89aea16 --- /dev/null +++ b/source/isaaclab/test/visualizers/test_visualizer.py @@ -0,0 +1,163 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for visualizer config factory and base visualizer behavior.""" + +from __future__ import annotations + +import importlib.util +from types import SimpleNamespace + +import pytest + +from isaaclab.visualizers.base_visualizer import BaseVisualizer +from isaaclab.visualizers.visualizer import Visualizer +from isaaclab.visualizers.visualizer_cfg import VisualizerCfg + +# +# Config factory +# + + +def test_create_visualizer_raises_for_base_cfg(): + cfg = VisualizerCfg() + with pytest.raises(ValueError, match="Cannot create visualizer from base VisualizerCfg class"): + cfg.create_visualizer() + + +def test_create_visualizer_raises_for_unknown_type(): + cfg = VisualizerCfg(visualizer_type="unknown-backend") + with pytest.raises(ValueError, match="not registered"): + cfg.create_visualizer() + + +def test_create_visualizer_raises_import_error_when_backend_unavailable(monkeypatch): + monkeypatch.setattr(Visualizer, "_get_module_name", classmethod(lambda cls, backend: "does.not.exist")) + cfg = VisualizerCfg(visualizer_type="newton") + with pytest.raises(ImportError, match="isaaclab_visualizers"): + cfg.create_visualizer() + + +# +# Base visualizer (env filtering, camera pose) +# + + +class _DummyVisualizer(BaseVisualizer): + def initialize(self, scene_data_provider) -> None: + self._scene_data_provider = scene_data_provider + self._is_initialized = True + + def step(self, dt: float) -> None: + return + + def close(self) -> None: + self._is_closed = True + + def is_running(self) -> bool: + return True + + +def _make_cfg(**kwargs): + cfg = { + "max_visible_envs": None, + "visible_env_indices": None, + # Default off in tests: contiguous cap-only path matches historical assertions. + "randomly_sample_visible_envs": False, + } + cfg.update(kwargs) + return SimpleNamespace(**cfg) + + +_HAS_ISAACLAB_VIZ = importlib.util.find_spec("isaaclab_visualizers") is not None + + +class _FakeProvider: + def __init__(self, num_envs: int = 0, transforms: dict | None = None): + self._num_envs = num_envs + self._transforms = transforms + + def get_metadata(self) -> dict: + return {"num_envs": self._num_envs} + + def get_camera_transforms(self): + return self._transforms + + +def test_compute_visualized_env_ids_cap_only_returns_none(): + """Cap-only path: :meth:`_compute_visualized_env_ids` is ``None``. + + The cap is applied later by ``resolve_visible_env_indices``. + """ + viz = _DummyVisualizer(_make_cfg(visible_env_indices=None)) + viz._scene_data_provider = _FakeProvider(num_envs=8) + assert viz._compute_visualized_env_ids() is None + + +def test_compute_visualized_env_ids_from_visible_indices_filters_out_of_range(): + viz = _DummyVisualizer(_make_cfg(visible_env_indices=[-1, 0, 3, 99])) + viz._scene_data_provider = _FakeProvider(num_envs=4) + assert viz._compute_visualized_env_ids() == [0, 3] + + +@pytest.mark.skipif(not _HAS_ISAACLAB_VIZ, reason="isaaclab_visualizers not installed") +def test_partial_visualization_cap_only_uses_resolver(): + """With ``visible_env_indices`` unset, :func:`resolve_visible_env_indices` applies ``max_visible_envs``.""" + from isaaclab_visualizers.newton_adapter import resolve_visible_env_indices + + cfg = _make_cfg(max_visible_envs=3, visible_env_indices=None) + viz = _DummyVisualizer(cfg) + viz._scene_data_provider = _FakeProvider(num_envs=10) + assert viz._compute_visualized_env_ids() is None + assert resolve_visible_env_indices(None, cfg.max_visible_envs, 10) == [0, 1, 2] + assert resolve_visible_env_indices(None, 3, 10) == [0, 1, 2] + + +@pytest.mark.skipif(not _HAS_ISAACLAB_VIZ, reason="isaaclab_visualizers not installed") +def test_compute_visualized_env_ids_random_cap_only_sorted_once(): + """Cap-only random mode returns a sorted sample; explicit indices ignore the flag.""" + cfg = _make_cfg(max_visible_envs=3, visible_env_indices=None, randomly_sample_visible_envs=True) + viz = _DummyVisualizer(cfg) + viz._scene_data_provider = _FakeProvider(num_envs=10) + sampled = viz._compute_visualized_env_ids() + assert sampled is not None and len(sampled) == 3 + assert sampled == sorted(sampled) + assert len(set(sampled)) == 3 + assert all(0 <= i < 10 for i in sampled) + + cfg_explicit = _make_cfg( + visible_env_indices=[1, 5], + max_visible_envs=1, + randomly_sample_visible_envs=True, + ) + viz2 = _DummyVisualizer(cfg_explicit) + viz2._scene_data_provider = _FakeProvider(num_envs=10) + assert viz2._compute_visualized_env_ids() == [1, 5] + + +@pytest.mark.skipif(not _HAS_ISAACLAB_VIZ, reason="isaaclab_visualizers not installed") +def test_explicit_visible_env_indices_truncated_by_max_visible_envs(): + """Explicit indices from :meth:`_compute_visualized_env_ids`; ``max_visible_envs`` truncates from the end.""" + from isaaclab_visualizers.newton_adapter import resolve_visible_env_indices + + cfg = _make_cfg(visible_env_indices=[0, 2, 4], max_visible_envs=1) + viz = _DummyVisualizer(cfg) + viz._scene_data_provider = _FakeProvider(num_envs=10) + ids = viz._compute_visualized_env_ids() + assert ids == [0, 2, 4] + assert resolve_visible_env_indices(ids, cfg.max_visible_envs, 10) == [0] + + +def test_resolve_camera_pose_from_usd_path_uses_provider_transforms(): + transforms = { + "order": ["/World/envs/env_%d/Camera"], + "positions": [[[1.0, 2.0, 3.0]]], + "orientations": [[[0.0, 0.0, 0.0, 1.0]]], + } + viz = _DummyVisualizer(_make_cfg()) + viz._scene_data_provider = _FakeProvider(num_envs=1, transforms=transforms) + pos, target = viz._resolve_camera_pose_from_usd_path("/World/envs/env_0/Camera") + assert pos == (1.0, 2.0, 3.0) + assert target == pytest.approx((1.0, 2.0, 2.0)) diff --git a/source/isaaclab_assets/changelog.d/.gitkeep b/source/isaaclab_assets/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_assets/config/extension.toml b/source/isaaclab_assets/config/extension.toml index d45724d97347..1bf36d627e3e 100644 --- a/source/isaaclab_assets/config/extension.toml +++ b/source/isaaclab_assets/config/extension.toml @@ -1,6 +1,6 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "0.2.4" +version = "0.3.3" # Description title = "Isaac Lab Assets" diff --git a/source/isaaclab_assets/docs/CHANGELOG.rst b/source/isaaclab_assets/docs/CHANGELOG.rst index 3456213b3e8e..1d676f70a27e 100644 --- a/source/isaaclab_assets/docs/CHANGELOG.rst +++ b/source/isaaclab_assets/docs/CHANGELOG.rst @@ -1,6 +1,42 @@ Changelog --------- +0.3.3 (2026-04-29) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added configuration for Flexiv Rizon 4s with Grav parallel gripper for manipulation tasks. + + +0.3.2 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed Cassie failing to load on Newton by enabling + :attr:`~isaaclab.sim.schemas.JointDrivePropertiesCfg.ensure_drives_exist` + in :data:`~isaaclab_assets.robots.cassie.CASSIE_CFG`. + + +0.3.1 (2026-02-17) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Configuration for Flexiv Rizon 4s robot used for manipulation tasks. + +0.3.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Changed the quaternion ordering to match warp, PhysX, and Newton native XYZW quaternion ordering. + 0.2.4 (2025-11-26) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_assets/isaaclab_assets/__init__.py b/source/isaaclab_assets/isaaclab_assets/__init__.py index d83e15466fc3..b960a4fa49b1 100644 --- a/source/isaaclab_assets/isaaclab_assets/__init__.py +++ b/source/isaaclab_assets/isaaclab_assets/__init__.py @@ -6,19 +6,14 @@ import os import toml - -# Conveniences to other module directories via relative paths ISAACLAB_ASSETS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) """Path to the extension source directory.""" - ISAACLAB_ASSETS_DATA_DIR = os.path.join(ISAACLAB_ASSETS_EXT_DIR, "data") """Path to the extension data directory.""" - ISAACLAB_ASSETS_METADATA = toml.load(os.path.join(ISAACLAB_ASSETS_EXT_DIR, "config", "extension.toml")) """Extension metadata dictionary parsed from the extension.toml file.""" - -# Configure the module-level variables __version__ = ISAACLAB_ASSETS_METADATA["package"]["version"] -from .robots import * -from .sensors import * +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_assets/isaaclab_assets/__init__.pyi b/source/isaaclab_assets/isaaclab_assets/__init__.pyi new file mode 100644 index 000000000000..dc69e0b8c71c --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/__init__.pyi @@ -0,0 +1,123 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "AGIBOT_A2D_CFG", + "LEG_JOINT_NAMES", + "ARM_JOINT_NAMES", + "DIGIT_V4_CFG", + "ALLEGRO_HAND_CFG", + "ANT_CFG", + "ANYDRIVE_3_SIMPLE_ACTUATOR_CFG", + "ANYDRIVE_3_LSTM_ACTUATOR_CFG", + "ANYMAL_B_CFG", + "ANYMAL_C_CFG", + "ANYMAL_D_CFG", + "ANYMAL_LIDAR_CFG", + "CART_DOUBLE_PENDULUM_CFG", + "CARTPOLE_CFG", + "CASSIE_CFG", + "GR1T2_CFG", + "GR1T2_HIGH_PD_CFG", + "FRANKA_PANDA_CFG", + "FRANKA_PANDA_HIGH_PD_CFG", + "FRANKA_ROBOTIQ_GRIPPER_CFG", + "GALBOT_ONE_CHARLIE_CFG", + "HUMANOID_CFG", + "HUMANOID_28_CFG", + "KINOVA_JACO2_N7S300_CFG", + "KINOVA_JACO2_N6S300_CFG", + "KINOVA_GEN3_N7_CFG", + "KUKA_ALLEGRO_CFG", + "PICK_AND_PLACE_CFG", + "CRAZYFLIE_CFG", + "RIDGEBACK_FRANKA_PANDA_CFG", + "SAWYER_CFG", + "SHADOW_HAND_CFG", + "joint_parameter_lookup", + "SPOT_CFG", + "GO1_ACTUATOR_CFG", + "UNITREE_A1_CFG", + "UNITREE_GO1_CFG", + "UNITREE_GO2_CFG", + "H1_CFG", + "H1_MINIMAL_CFG", + "G1_CFG", + "G1_MINIMAL_CFG", + "G1_29DOF_CFG", + "G1_INSPIRE_FTP_CFG", + "UR10_CFG", + "UR10e_CFG", + "UR10_LONG_SUCTION_CFG", + "UR10_SHORT_SUCTION_CFG", + "UR10e_ROBOTIQ_GRIPPER_CFG", + "UR10e_ROBOTIQ_2F_85_CFG", + "FLEXIV_RIZON4S_CFG", + "FLEXIV_RIZON4S_GRAV_GRIPPER_CFG", + "GELSIGHT_R15_CFG", + "GELSIGHT_MINI_CFG", + "VELODYNE_VLP_16_RAYCASTER_CFG", +] + +ISAACLAB_ASSETS_EXT_DIR: str +ISAACLAB_ASSETS_DATA_DIR: str +ISAACLAB_ASSETS_METADATA: dict +__version__: str + +from .robots import ( + AGIBOT_A2D_CFG, + LEG_JOINT_NAMES, + ARM_JOINT_NAMES, + DIGIT_V4_CFG, + ALLEGRO_HAND_CFG, + ANT_CFG, + ANYDRIVE_3_SIMPLE_ACTUATOR_CFG, + ANYDRIVE_3_LSTM_ACTUATOR_CFG, + ANYMAL_B_CFG, + ANYMAL_C_CFG, + ANYMAL_D_CFG, + ANYMAL_LIDAR_CFG, + CART_DOUBLE_PENDULUM_CFG, + CARTPOLE_CFG, + CASSIE_CFG, + GR1T2_CFG, + GR1T2_HIGH_PD_CFG, + FRANKA_PANDA_CFG, + FRANKA_PANDA_HIGH_PD_CFG, + FRANKA_ROBOTIQ_GRIPPER_CFG, + GALBOT_ONE_CHARLIE_CFG, + HUMANOID_CFG, + HUMANOID_28_CFG, + KINOVA_JACO2_N7S300_CFG, + KINOVA_JACO2_N6S300_CFG, + KINOVA_GEN3_N7_CFG, + KUKA_ALLEGRO_CFG, + PICK_AND_PLACE_CFG, + CRAZYFLIE_CFG, + RIDGEBACK_FRANKA_PANDA_CFG, + SAWYER_CFG, + SHADOW_HAND_CFG, + joint_parameter_lookup, + SPOT_CFG, + GO1_ACTUATOR_CFG, + UNITREE_A1_CFG, + UNITREE_GO1_CFG, + UNITREE_GO2_CFG, + H1_CFG, + H1_MINIMAL_CFG, + G1_CFG, + G1_MINIMAL_CFG, + G1_29DOF_CFG, + G1_INSPIRE_FTP_CFG, + UR10_CFG, + UR10e_CFG, + UR10_LONG_SUCTION_CFG, + UR10_SHORT_SUCTION_CFG, + UR10e_ROBOTIQ_GRIPPER_CFG, + UR10e_ROBOTIQ_2F_85_CFG, + FLEXIV_RIZON4S_CFG, + FLEXIV_RIZON4S_GRAV_GRIPPER_CFG, +) +from .sensors import GELSIGHT_R15_CFG, GELSIGHT_MINI_CFG, VELODYNE_VLP_16_RAYCASTER_CFG diff --git a/source/isaaclab_assets/isaaclab_assets/robots/__init__.py b/source/isaaclab_assets/isaaclab_assets/robots/__init__.py index 77bcf04d0a3e..e14e0f6d52c5 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/__init__.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/__init__.py @@ -3,30 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -## -# Configuration for different assets. -## +from isaaclab.utils.module import lazy_export -from .agibot import * -from .agility import * -from .allegro import * -from .ant import * -from .anymal import * -from .cart_double_pendulum import * -from .cartpole import * -from .cassie import * -from .fourier import * -from .franka import * -from .galbot import * -from .humanoid import * -from .humanoid_28 import * -from .kinova import * -from .kuka_allegro import * -from .pick_and_place import * -from .quadcopter import * -from .ridgeback_franka import * -from .sawyer import * -from .shadow_hand import * -from .spot import * -from .unitree import * -from .universal_robots import * +lazy_export() diff --git a/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi b/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi new file mode 100644 index 000000000000..1a91afa213a7 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi @@ -0,0 +1,109 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "AGIBOT_A2D_CFG", + "LEG_JOINT_NAMES", + "ARM_JOINT_NAMES", + "DIGIT_V4_CFG", + "ALLEGRO_HAND_CFG", + "ANT_CFG", + "ANYDRIVE_3_SIMPLE_ACTUATOR_CFG", + "ANYDRIVE_3_LSTM_ACTUATOR_CFG", + "ANYMAL_B_CFG", + "ANYMAL_C_CFG", + "ANYMAL_D_CFG", + "ANYMAL_LIDAR_CFG", + "CART_DOUBLE_PENDULUM_CFG", + "CARTPOLE_CFG", + "CASSIE_CFG", + "GR1T2_CFG", + "GR1T2_HIGH_PD_CFG", + "FRANKA_PANDA_CFG", + "FRANKA_PANDA_HIGH_PD_CFG", + "FRANKA_ROBOTIQ_GRIPPER_CFG", + "GALBOT_ONE_CHARLIE_CFG", + "HUMANOID_CFG", + "HUMANOID_28_CFG", + "KINOVA_JACO2_N7S300_CFG", + "KINOVA_JACO2_N6S300_CFG", + "KINOVA_GEN3_N7_CFG", + "KUKA_ALLEGRO_CFG", + "PICK_AND_PLACE_CFG", + "CRAZYFLIE_CFG", + "RIDGEBACK_FRANKA_PANDA_CFG", + "SAWYER_CFG", + "SHADOW_HAND_CFG", + "joint_parameter_lookup", + "SPOT_CFG", + "GO1_ACTUATOR_CFG", + "UNITREE_A1_CFG", + "UNITREE_GO1_CFG", + "UNITREE_GO2_CFG", + "H1_CFG", + "H1_MINIMAL_CFG", + "G1_CFG", + "G1_MINIMAL_CFG", + "G1_29DOF_CFG", + "G1_INSPIRE_FTP_CFG", + "UR10_CFG", + "UR10e_CFG", + "UR10_LONG_SUCTION_CFG", + "UR10_SHORT_SUCTION_CFG", + "UR10e_ROBOTIQ_GRIPPER_CFG", + "UR10e_ROBOTIQ_2F_85_CFG", + "FLEXIV_RIZON4S_CFG", + "FLEXIV_RIZON4S_GRAV_GRIPPER_CFG", +] + +from .agibot import AGIBOT_A2D_CFG +from .agility import LEG_JOINT_NAMES, ARM_JOINT_NAMES, DIGIT_V4_CFG +from .allegro import ALLEGRO_HAND_CFG +from .ant import ANT_CFG +from .anymal import ( + ANYDRIVE_3_SIMPLE_ACTUATOR_CFG, + ANYDRIVE_3_LSTM_ACTUATOR_CFG, + ANYMAL_B_CFG, + ANYMAL_C_CFG, + ANYMAL_D_CFG, + ANYMAL_LIDAR_CFG, +) +from .cart_double_pendulum import CART_DOUBLE_PENDULUM_CFG +from .cartpole import CARTPOLE_CFG +from .cassie import CASSIE_CFG +from .fourier import GR1T2_CFG, GR1T2_HIGH_PD_CFG +from .franka import FRANKA_PANDA_CFG, FRANKA_PANDA_HIGH_PD_CFG, FRANKA_ROBOTIQ_GRIPPER_CFG +from .galbot import GALBOT_ONE_CHARLIE_CFG +from .humanoid import HUMANOID_CFG +from .humanoid_28 import HUMANOID_28_CFG +from .kinova import KINOVA_JACO2_N7S300_CFG, KINOVA_JACO2_N6S300_CFG, KINOVA_GEN3_N7_CFG +from .kuka_allegro import KUKA_ALLEGRO_CFG +from .pick_and_place import PICK_AND_PLACE_CFG +from .quadcopter import CRAZYFLIE_CFG +from .ridgeback_franka import RIDGEBACK_FRANKA_PANDA_CFG +from .sawyer import SAWYER_CFG +from .shadow_hand import SHADOW_HAND_CFG +from .spot import joint_parameter_lookup, SPOT_CFG +from .unitree import ( + GO1_ACTUATOR_CFG, + UNITREE_A1_CFG, + UNITREE_GO1_CFG, + UNITREE_GO2_CFG, + H1_CFG, + H1_MINIMAL_CFG, + G1_CFG, + G1_MINIMAL_CFG, + G1_29DOF_CFG, + G1_INSPIRE_FTP_CFG, +) +from .universal_robots import ( + UR10_CFG, + UR10e_CFG, + UR10_LONG_SUCTION_CFG, + UR10_SHORT_SUCTION_CFG, + UR10e_ROBOTIQ_GRIPPER_CFG, + UR10e_ROBOTIQ_2F_85_CFG, +) +from .flexiv import FLEXIV_RIZON4S_CFG, FLEXIV_RIZON4S_GRAV_GRIPPER_CFG diff --git a/source/isaaclab_assets/isaaclab_assets/robots/allegro.py b/source/isaaclab_assets/isaaclab_assets/robots/allegro.py index 0e18ef77c13c..10d96e277770 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/allegro.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/allegro.py @@ -51,7 +51,7 @@ ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.5), - rot=(0.257551, 0.283045, 0.683330, -0.621782), + rot=(0.283045, 0.683330, -0.621782, 0.257551), joint_pos={"^(?!thumb_joint_0).*": 0.0, "thumb_joint_0": 0.28}, ), actuators={ diff --git a/source/isaaclab_assets/isaaclab_assets/robots/ant.py b/source/isaaclab_assets/isaaclab_assets/robots/ant.py index 49798ad638dd..97695e76edd9 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/ant.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/ant.py @@ -48,7 +48,9 @@ "body": ImplicitActuatorCfg( joint_names_expr=[".*"], stiffness=0.0, - damping=0.0, + damping=0.1, + armature=0.05, + effort_limit_sim=15.0, ), }, ) diff --git a/source/isaaclab_assets/isaaclab_assets/robots/anymal.py b/source/isaaclab_assets/isaaclab_assets/robots/anymal.py index ac0e565513f4..18beaccc5d92 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/anymal.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/anymal.py @@ -170,6 +170,6 @@ ## ANYMAL_LIDAR_CFG = VELODYNE_VLP_16_RAYCASTER_CFG.replace( - offset=RayCasterCfg.OffsetCfg(pos=(-0.310, 0.000, 0.159), rot=(0.0, 0.0, 0.0, 1.0)) + offset=RayCasterCfg.OffsetCfg(pos=(-0.310, 0.000, 0.159), rot=(0.0, 0.0, 1.0, 0.0)) ) """Configuration for the Velodyne VLP-16 sensor mounted on the ANYmal robot's base.""" diff --git a/source/isaaclab_assets/isaaclab_assets/robots/arl_robot_1.py b/source/isaaclab_assets/isaaclab_assets/robots/arl_robot_1.py index 6ac4b9fc55e8..32e01adc93b4 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/arl_robot_1.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/arl_robot_1.py @@ -54,7 +54,7 @@ pos=(0.0, 0.0, 0.0), lin_vel=(0.0, 0.0, 0.0), ang_vel=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), rps={ "back_left_prop": 200.0, "back_right_prop": 200.0, @@ -63,7 +63,7 @@ }, ), actuators={"thrusters": ARL_ROBOT_1_THRUSTER}, - rotor_directions=[1, -1, 1, -1], + rotor_directions=[-1, 1, -1, 1], allocation_matrix=[ [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], diff --git a/source/isaaclab_assets/isaaclab_assets/robots/cassie.py b/source/isaaclab_assets/isaaclab_assets/robots/cassie.py index 09e75e241fed..faaefc414164 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/cassie.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/cassie.py @@ -37,6 +37,7 @@ articulation_props=sim_utils.ArticulationRootPropertiesCfg( enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0 ), + joint_drive_props=sim_utils.JointDrivePropertiesCfg(ensure_drives_exist=True), ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.9), diff --git a/source/isaaclab_assets/isaaclab_assets/robots/flexiv.py b/source/isaaclab_assets/isaaclab_assets/robots/flexiv.py new file mode 100644 index 000000000000..18adf4312022 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/robots/flexiv.py @@ -0,0 +1,171 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""Configuration for the Flexiv Rizon robots. + +The following configurations are available: + +* :obj:`FLEXIV_RIZON4S_CFG`: The Flexiv Rizon 4s arm without a gripper. +* :obj:`FLEXIV_RIZON4S_GRAV_GRIPPER_CFG`: The Flexiv Rizon 4s arm with Grav gripper. + +Reference: https://www.flexiv.com/product/rizon +""" + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets.articulation import ArticulationCfg +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR + +## +# Configuration +## + +FLEXIV_RIZON4S_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Flexiv/Rizon4s/rizon4s.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=True, + max_depenetration_velocity=5.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, + solver_position_iteration_count=16, + solver_velocity_iteration_count=1, + ), + activate_contact_sensors=False, + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + "joint1": 0.0, + "joint2": -0.698, + "joint3": 0.0, + "joint4": 1.571, + "joint5": 0.0, + "joint6": 0.698, + "joint7": 0.0, + }, + pos=(0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), + ), + actuators={ + "shoulder": ImplicitActuatorCfg( + joint_names_expr=["joint[1-2]"], + effort_limit_sim=123.0, + velocity_limit_sim=2.094, + stiffness=6000.0, + damping=108.5, + friction=0.0, + armature=0.0, + ), + "elbow": ImplicitActuatorCfg( + joint_names_expr=["joint[3-4]"], + effort_limit_sim=64.0, + velocity_limit_sim=2.443, + stiffness=4200.0, + damping=90.7, + friction=0.0, + armature=0.0, + ), + "wrist": ImplicitActuatorCfg( + joint_names_expr=["joint[5-7]"], + effort_limit_sim=39.0, + velocity_limit_sim=4.887, + stiffness=1500.0, + damping=54.2, + friction=0.0, + armature=0.0, + ), + }, +) +"""Configuration of Flexiv Rizon 4s arm using implicit actuator models.""" + + +FLEXIV_RIZON4S_GRAV_GRIPPER_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Flexiv/Rizon4s/rizon4s_with_grav.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + max_depenetration_velocity=5.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, + solver_position_iteration_count=16, + solver_velocity_iteration_count=1, + ), + activate_contact_sensors=False, + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + "joint1": 0.0, + "joint2": -0.698, + "joint3": 0.0, + "joint4": 1.571, + "joint5": 0.0, + "joint6": 0.698, + "joint7": 0.0, + "finger_joint": 0.0, + "left_outer_finger_joint": 0.0, + "right_outer_finger_joint": 0.0, + }, + pos=(0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), + ), + actuators={ + "shoulder": ImplicitActuatorCfg( + joint_names_expr=["joint[1-2]"], + effort_limit_sim=123.0, + velocity_limit_sim=2.094, + stiffness=1320.0, + damping=72.0, + friction=0.0, + armature=0.0, + ), + "elbow": ImplicitActuatorCfg( + joint_names_expr=["joint[3-4]"], + effort_limit_sim=64.0, + velocity_limit_sim=2.443, + stiffness=600.0, + damping=35.0, + friction=0.0, + armature=0.0, + ), + "wrist": ImplicitActuatorCfg( + joint_names_expr=["joint[5-7]"], + effort_limit_sim=39.0, + velocity_limit_sim=4.887, + stiffness=216.0, + damping=29.0, + friction=0.0, + armature=0.0, + ), + "gripper_drive": ImplicitActuatorCfg( + joint_names_expr=["finger_joint"], + effort_limit_sim=200.0, + velocity_limit_sim=0.6, + stiffness=2e3, + damping=1e1, + friction=0.0, + armature=0.0, + ), + "gripper_passive": ImplicitActuatorCfg( + joint_names_expr=[".*_knuckle_joint"], + effort_limit_sim=1.0, + velocity_limit_sim=1.0, + stiffness=0.0, + damping=0.0, + friction=0.0, + armature=0.0, + ), + }, +) +"""Configuration of Flexiv Rizon 4s arm with Grav gripper using implicit actuator models. + +The Grav gripper is a parallel gripper with the following joint configuration: +- finger_joint: Main actuation joint (opened: 45 deg, closed: -8.88 deg) +- *_knuckle_joint: Passive/mimic joints (not directly actuated) + +End effector body: right_finger_tip +""" diff --git a/source/isaaclab_assets/isaaclab_assets/robots/franka.py b/source/isaaclab_assets/isaaclab_assets/robots/franka.py index caacf214c58f..f03d21760f90 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/franka.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/franka.py @@ -54,12 +54,14 @@ effort_limit_sim=87.0, stiffness=80.0, damping=4.0, + armature=1e-3, ), "panda_forearm": ImplicitActuatorCfg( joint_names_expr=["panda_joint[5-7]"], effort_limit_sim=12.0, stiffness=80.0, damping=4.0, + armature=1e-3, ), "panda_hand": ImplicitActuatorCfg( joint_names_expr=["panda_finger_joint.*"], diff --git a/source/isaaclab_assets/isaaclab_assets/robots/humanoid.py b/source/isaaclab_assets/isaaclab_assets/robots/humanoid.py index 42940b4fa1f4..3ac06535d34e 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/humanoid.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/humanoid.py @@ -64,6 +64,8 @@ ".*_foot.*": 1.0, }, velocity_limit_sim={".*": 100.0}, + armature={".*": 0.01}, + effort_limit_sim=150.0, ), }, ) diff --git a/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py index 35b7e0b179a0..f521f45bff57 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py @@ -49,7 +49,7 @@ ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), joint_pos={ "iiwa7_joint_(1|2|7)": 0.0, "iiwa7_joint_3": 0.7854, @@ -108,6 +108,9 @@ "ring_joint_(0|1|2|3)": 0.01, "thumb_joint_(0|1|2|3)": 0.01, }, + armature={ + ".*": 0.01, + }, ), }, soft_joint_pos_limit_factor=1.0, diff --git a/source/isaaclab_assets/isaaclab_assets/robots/shadow_hand.py b/source/isaaclab_assets/isaaclab_assets/robots/shadow_hand.py index d13e90e3b1c0..696b8f9b5a4d 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/shadow_hand.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/shadow_hand.py @@ -46,7 +46,7 @@ ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.5), - rot=(0.0, 0.0, -0.7071, 0.7071), + rot=(0.0, -0.7071, 0.7071, 0.0), joint_pos={".*": 0.0}, ), actuators={ diff --git a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py index ff7685a3c607..7a02c6eff294 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py @@ -407,7 +407,7 @@ ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.75), - rot=(0.7071, 0, 0, 0.7071), + rot=(0, 0, 0.7071, 0.7071), joint_pos={ ".*_hip_pitch_joint": -0.10, ".*_knee_joint": 0.30, diff --git a/source/isaaclab_assets/isaaclab_assets/robots/universal_robots.py b/source/isaaclab_assets/isaaclab_assets/robots/universal_robots.py index 1026e00a9713..9047fc14e764 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/universal_robots.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/universal_robots.py @@ -75,7 +75,7 @@ "wrist_3_joint": 0.0, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), actuators={ # 'shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint' diff --git a/source/isaaclab_assets/isaaclab_assets/sensors/__init__.py b/source/isaaclab_assets/isaaclab_assets/sensors/__init__.py index f5f6c6ac116e..e14e0f6d52c5 100644 --- a/source/isaaclab_assets/isaaclab_assets/sensors/__init__.py +++ b/source/isaaclab_assets/isaaclab_assets/sensors/__init__.py @@ -3,9 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -## -# Configuration for different assets. -## +from isaaclab.utils.module import lazy_export -from .gelsight import * -from .velodyne import * +lazy_export() diff --git a/source/isaaclab_assets/isaaclab_assets/sensors/__init__.pyi b/source/isaaclab_assets/isaaclab_assets/sensors/__init__.pyi new file mode 100644 index 000000000000..cef502deca95 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/sensors/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "GELSIGHT_R15_CFG", + "GELSIGHT_MINI_CFG", + "VELODYNE_VLP_16_RAYCASTER_CFG", +] + +from .gelsight import GELSIGHT_R15_CFG, GELSIGHT_MINI_CFG +from .velodyne import VELODYNE_VLP_16_RAYCASTER_CFG diff --git a/source/isaaclab_assets/isaaclab_assets/sensors/velodyne.py b/source/isaaclab_assets/isaaclab_assets/sensors/velodyne.py index 6cd075f4fa25..6d62b55e896f 100644 --- a/source/isaaclab_assets/isaaclab_assets/sensors/velodyne.py +++ b/source/isaaclab_assets/isaaclab_assets/sensors/velodyne.py @@ -5,7 +5,8 @@ """Configuration for Velodyne LiDAR sensors.""" -from isaaclab.sensors import RayCasterCfg, patterns +from isaaclab.sensors import RayCasterCfg +from isaaclab.sensors.ray_caster import patterns ## # Configuration diff --git a/source/isaaclab_assets/setup.py b/source/isaaclab_assets/setup.py index ebd9331bd5f8..1e678c893be3 100644 --- a/source/isaaclab_assets/setup.py +++ b/source/isaaclab_assets/setup.py @@ -25,15 +25,13 @@ description=EXTENSION_TOML_DATA["package"]["description"], keywords=EXTENSION_TOML_DATA["package"]["keywords"], include_package_data=True, - python_requires=">=3.10", + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", packages=["isaaclab_assets"], classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab_contrib/changelog.d/.gitkeep b/source/isaaclab_contrib/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_contrib/config/extension.toml b/source/isaaclab_contrib/config/extension.toml index 326c9faf7ee8..7fd2b5d139e5 100644 --- a/source/isaaclab_contrib/config/extension.toml +++ b/source/isaaclab_contrib/config/extension.toml @@ -1,6 +1,6 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "0.0.2" +version = "0.3.0" # Description title = "Isaac Lab External Contributions" diff --git a/source/isaaclab_contrib/docs/CHANGELOG.rst b/source/isaaclab_contrib/docs/CHANGELOG.rst index a5e0bf4c2ef4..721c705a5981 100644 --- a/source/isaaclab_contrib/docs/CHANGELOG.rst +++ b/source/isaaclab_contrib/docs/CHANGELOG.rst @@ -1,6 +1,44 @@ Changelog --------- +0.3.0 (2026-02-13) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated multirotor asset and TacSL visuotactile sensor to wrap warp data + property accesses with ``wp.to_torch()``. + + +0.2.1 (2026-02-03) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated the multirotor asset to use the new base classes from the isaaclab_physx package. + + +0.2.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated the multirotor asset to use the new base classes from the isaaclab_physx package. + + +0.1.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + + +* Changed the quaternion ordering to match warp, PhysX, and Newton native XYZW quaternion ordering. + + 0.0.2 (2026-01-28) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_contrib/docs/README.md b/source/isaaclab_contrib/docs/README.md index 1861b593bd50..346b47e5522c 100644 --- a/source/isaaclab_contrib/docs/README.md +++ b/source/isaaclab_contrib/docs/README.md @@ -256,7 +256,7 @@ The TacSL tactile sensor system includes: ```python import isaaclab.sim as sim_utils -from isaaclab.sensors import TiledCameraCfg +from isaaclab.sensors import CameraCfg from isaaclab_contrib.sensors.tacsl_sensor import VisuoTactileSensorCfg @@ -287,8 +287,8 @@ tactile_sensor_cfg = VisuoTactileSensorCfg( friction_coefficient=2.0, # Surface friction tangential_stiffness=0.1, # Tangential stiffness - # Camera configuration (must match render_cfg dimensions) - camera_cfg=TiledCameraCfg( + # Camera configuration (dimensions must match GELSIGHT_R15_CFG which provides the render_cfg) + camera_cfg=CameraCfg( prim_path="{ENV_REGEX_NS}/Robot/elastomer_tip/cam", height=GELSIGHT_R15_CFG.image_height, width=GELSIGHT_R15_CFG.image_width, diff --git a/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.py index 7d0cbbc80882..5811de537bd2 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.py @@ -10,5 +10,6 @@ rise and fall time constants, thrust limits, and dynamic response characteristics. """ -from .thruster import Thruster -from .thruster_cfg import ThrusterCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.pyi new file mode 100644 index 000000000000..929985914436 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/actuators/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Thruster", + "ThrusterCfg", +] + +from .thruster import Thruster +from .thruster_cfg import ThrusterCfg diff --git a/source/isaaclab_contrib/isaaclab_contrib/actuators/thruster_cfg.py b/source/isaaclab_contrib/isaaclab_contrib/actuators/thruster_cfg.py index 29072f421abb..1c20492fc2c1 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/actuators/thruster_cfg.py +++ b/source/isaaclab_contrib/isaaclab_contrib/actuators/thruster_cfg.py @@ -4,11 +4,12 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING -from typing import Literal +from typing import TYPE_CHECKING, Literal from isaaclab.utils import configclass -from .thruster import Thruster +if TYPE_CHECKING: + from .thruster import Thruster @configclass @@ -21,7 +22,7 @@ class ThrusterCfg: and must be provided by the user configuration. """ - class_type: type[Thruster] = Thruster + class_type: type["Thruster"] | str = "{DIR}.thruster:Thruster" """Concrete Python class that consumes this config.""" dt: float = MISSING diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.py index 8c40124e72ac..cb3f89e0fbe3 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.py @@ -11,4 +11,6 @@ contributed by the community to extend the capabilities of Isaac Lab. """ -from .multirotor import Multirotor, MultirotorCfg, MultirotorData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.pyi new file mode 100644 index 000000000000..cf1d137525f9 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Multirotor", + "MultirotorCfg", + "MultirotorData", +] + +from .multirotor import Multirotor, MultirotorCfg, MultirotorData diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.py index 3ef1b482d05b..9e325f4e89be 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.py @@ -41,6 +41,6 @@ - :mod:`isaaclab_contrib.mdp.actions`: Thrust action terms for RL """ -from .multirotor import Multirotor -from .multirotor_cfg import MultirotorCfg -from .multirotor_data import MultirotorData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.pyi new file mode 100644 index 000000000000..4b22437a5c3b --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Multirotor", + "MultirotorCfg", + "MultirotorData", +] + +from .multirotor import Multirotor +from .multirotor_cfg import MultirotorCfg +from .multirotor_data import MultirotorData diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py index 6f8800c32215..d760321862cc 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py @@ -13,9 +13,11 @@ from typing import TYPE_CHECKING import torch +import warp as wp +from isaaclab_physx.assets.articulation import Articulation +from isaaclab_physx.assets.kernels import split_state_to_root_pose_and_vel import isaaclab.utils.string as string_utils -from isaaclab.assets.articulation import Articulation from isaaclab_contrib.actuators import Thruster from isaaclab_contrib.utils.types import MultiRotorActions @@ -286,7 +288,7 @@ def _initialize_impl(self): super()._initialize_impl() # Replace data container with MultirotorData - self._data = MultirotorData(self.root_physx_view, self.device) + self._data = MultirotorData(self.root_view, self.device) # Create thruster buffers with correct size (SINGLE PHASE) self._create_thruster_buffers() @@ -370,7 +372,24 @@ def _process_cfg(self): + tuple(self.cfg.init_state.ang_vel) ) default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) - self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) + # Repeat for all instances + default_root_state_repeated = default_root_state.repeat(self.num_instances, 1) + # Convert to warp array and split into pose and vel using kernel + default_root_state_wp = wp.from_torch(default_root_state_repeated, dtype=wp.float32) + # Create temporary output arrays + pose_output = wp.zeros(self.num_instances, dtype=wp.transformf, device=self.device) + vel_output = wp.zeros(self.num_instances, dtype=wp.spatial_vectorf, device=self.device) + # Split state into pose and vel + wp.launch( + split_state_to_root_pose_and_vel, + dim=self.num_instances, + inputs=[default_root_state_wp], + outputs=[pose_output, vel_output], + device=self.device, + ) + # Set using public setters + self._data.default_root_pose = pose_output + self._data.default_root_vel = vel_output # Handle thruster-specific initial state if hasattr(self._data, "default_thruster_rps") and hasattr(self.cfg.init_state, "rps"): @@ -508,9 +527,13 @@ def _apply_combined_wrench(self): # Combine individual thrusts into a wrench vector self._combine_thrusts() - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._internal_force_target_sim.view(-1, 3), # Shape: (num_envs * num_bodies, 3) - torque_data=self._internal_torque_target_sim.view(-1, 3), # Shape: (num_envs * num_bodies, 3) + # Convert torch tensors to Warp arrays for PhysX API + force_data_wp = wp.from_torch(self._internal_force_target_sim.view(-1, 3), dtype=wp.float32) + torque_data_wp = wp.from_torch(self._internal_torque_target_sim.view(-1, 3), dtype=wp.float32) + + self.root_view.apply_forces_and_torques_at_position( + force_data=force_data_wp, # Shape: (num_envs * num_bodies, 3) + torque_data=torque_data_wp, # Shape: (num_envs * num_bodies, 3) position_data=None, # Apply at center of mass indices=self._ALL_INDICES, is_global=False, # Forces are in local frame diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_cfg.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_cfg.py index 9638fcf2aa66..0a8538cc14f0 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_cfg.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_cfg.py @@ -5,13 +5,15 @@ from collections.abc import Sequence from dataclasses import MISSING +from typing import TYPE_CHECKING from isaaclab.assets.articulation import ArticulationCfg from isaaclab.utils import configclass from isaaclab_contrib.actuators import ThrusterCfg -from .multirotor import Multirotor +if TYPE_CHECKING: + from .multirotor import Multirotor @configclass @@ -78,7 +80,7 @@ class MultirotorCfg(ArticulationCfg): - :class:`Multirotor`: Multirotor asset class """ - class_type: type = Multirotor + class_type: type["Multirotor"] | str = "{DIR}.multirotor:Multirotor" @configclass class InitialStateCfg(ArticulationCfg.InitialStateCfg): diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py index 05ea56c4565a..b1eaf25e4f36 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py @@ -4,8 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause import torch - -from isaaclab.assets.articulation.articulation_data import ArticulationData +from isaaclab_physx.assets.articulation.articulation_data import ArticulationData class MultirotorData(ArticulationData): diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.py index bc099b36f648..18b1cb44a7e9 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.py @@ -5,4 +5,6 @@ """Sub-package for MDP (Markov Decision Process) components contributed by the community.""" -from .actions import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.pyi new file mode 100644 index 000000000000..412db0ce4d8f --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/__init__.pyi @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ThrustAction", + "ThrustActionCfg", +] + +from .actions import ThrustAction, ThrustActionCfg diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.py index 695a4486066f..f77d626608f9 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.py @@ -10,5 +10,6 @@ MDP framework and :class:`~isaaclab_contrib.assets.Multirotor` assets. """ -from .thrust_actions import * # noqa: F401, F403 -from .thrust_actions_cfg import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.pyi new file mode 100644 index 000000000000..8203016432d4 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ThrustAction", + "ThrustActionCfg", +] + +from .thrust_actions import ThrustAction +from .thrust_actions_cfg import ThrustActionCfg diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions.py b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions.py index 7aa207849de6..5ed60f190c4f 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions.py +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions.py @@ -14,12 +14,12 @@ import isaaclab.utils.string as string_utils from isaaclab.managers.action_manager import ActionTerm -from isaaclab_contrib.assets import Multirotor - if TYPE_CHECKING: from isaaclab.envs import ManagerBasedEnv from isaaclab.envs.utils.io_descriptors import GenericActionIODescriptor + from isaaclab_contrib.assets import Multirotor + from . import thrust_actions_cfg # import logger diff --git a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions_cfg.py b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions_cfg.py index 0f457fe4a5a0..3a464b8fce84 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions_cfg.py +++ b/source/isaaclab_contrib/isaaclab_contrib/mdp/actions/thrust_actions_cfg.py @@ -4,11 +4,13 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING +from typing import TYPE_CHECKING -from isaaclab.managers.action_manager import ActionTerm, ActionTermCfg +from isaaclab.managers.action_manager import ActionTermCfg from isaaclab.utils import configclass -from . import thrust_actions +if TYPE_CHECKING: + from .thrust_actions import ThrustAction @configclass @@ -70,7 +72,7 @@ class ThrustActionCfg(ActionTermCfg): - :class:`~isaaclab.managers.ActionTermCfg`: Base action term configuration """ - class_type: type[ActionTerm] = thrust_actions.ThrustAction + class_type: type["ThrustAction"] | str = "{DIR}.thrust_actions:ThrustAction" asset_name: str = MISSING """Name or regex expression of the asset that the action will be mapped to. diff --git a/source/isaaclab_contrib/isaaclab_contrib/rl/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/rl/__init__.py new file mode 100644 index 000000000000..4cbf30e5c770 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/rl/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Reinforcement learning extensions contributed by the community.""" diff --git a/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/__init__.py new file mode 100644 index 000000000000..35842bf03c42 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""RLinf integration for IsaacLab. + +This package provides the extension mechanism for integrating IsaacLab tasks +with RLinf's distributed RL training framework for VLA models like GR00T. + +Extension Module +---------------- + +The extension module (``extension.py``) is loaded by RLinf via the +``RLINF_EXT_MODULE`` environment variable and handles: + +1. Registering IsaacLab tasks into RLinf's ``REGISTER_ISAACLAB_ENVS`` +2. Registering GR00T obs/action converters +3. Patching GR00T ``get_model`` for custom embodiments + +Usage: + .. code-block:: bash + + export RLINF_EXT_MODULE="isaaclab_contrib.rl.rlinf.extension" + export RLINF_CONFIG_FILE="/path/to/config.yaml" +""" diff --git a/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/extension.py b/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/extension.py new file mode 100644 index 000000000000..89368c532109 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/rl/rlinf/extension.py @@ -0,0 +1,565 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""RLinf extension module for IsaacLab tasks. + +This module is loaded by RLinf's Worker._load_user_extensions() when +RLINF_EXT_MODULE=isaaclab_contrib.rl.rlinf.extension is set in the environment. + +It registers IsaacLab tasks into RLinf's registries, allowing IsaacLab users +to train on their tasks without modifying RLinf source code. + +Configuration is read from the Hydra YAML config under `env.train.isaaclab`: + env: + train: + isaaclab: &isaaclab_config # YAML anchor for reuse + task_description: "..." + main_images: "front_camera" + extra_view_images: ["left_wrist_camera", "right_wrist_camera"] + states: + - key: "robot_joint_state" + slice: [15, 29] + gr00t_mapping: + video: + main_images: "video.room_view" + ... + action_mapping: + prefix_pad: 15 + eval: + isaaclab: *isaaclab_config # Reuse via YAML anchor + +Task IDs are read automatically from ``env.train.init_params.id`` and +``env.eval.init_params.id`` in the YAML config. + +Usage: + export RLINF_EXT_MODULE=isaaclab_contrib.rl.rlinf.extension + export RLINF_CONFIG_FILE=/path/to/isaaclab_ppo_gr00t_assemble_trocar.yaml +""" + +from __future__ import annotations + +import collections.abc +import logging +import os +from enum import Enum +from pathlib import Path +from typing import TYPE_CHECKING + +import numpy as np +import torch +import yaml +from rlinf.models.embodiment.gr00t import embodiment_tags + +if TYPE_CHECKING: + import torch + +logger = logging.getLogger(__name__) + +_registered = False + +# Cache for YAML config (loaded once per process) +_full_cfg_cache: dict | None = None + + +def register() -> None: + """Register IsaacLab extensions into RLinf. + + This function is called automatically by RLinf's Worker._load_user_extensions() + when RLINF_EXT_MODULE=isaaclab_contrib.rl.rlinf.extension is set. + + It performs the following registrations: + 1. Registers GR00T obs/action converters + 2. Registers GR00T data config + 3. Patches GR00T get_model for custom embodiment + 4. Registers task IDs from YAML config (env.*.init_params.id) into REGISTER_ISAACLAB_ENVS + """ + global _registered + if _registered: + return + _registered = True + + logger.info("isaaclab_contrib.rl.rlinf.extension: Registering IsaacLab extensions...") + + # Load config once and pass to all registration functions + cfg = _get_isaaclab_cfg() + + _register_gr00t_converters(cfg) + _patch_gr00t_get_model(cfg) + _register_isaaclab_envs() + + logger.info("isaaclab_contrib.rl.rlinf.extension: Registration complete.") + + +def _load_full_cfg() -> dict: + """Load and cache the full YAML config from ``RLINF_CONFIG_FILE``. + + Raises: + ValueError: If the ``RLINF_CONFIG_FILE`` environment variable is not set. + + Returns: + The parsed YAML config as a nested dictionary. + """ + global _full_cfg_cache + if _full_cfg_cache is not None: + return _full_cfg_cache + config_file = os.environ.get("RLINF_CONFIG_FILE", "") + if not config_file: + raise ValueError("RLINF_CONFIG_FILE not set") + with open(config_file) as f: + _full_cfg_cache = yaml.safe_load(f) + logger.info(f"Loaded full config from {config_file}") + return _full_cfg_cache + + +def _get_isaaclab_cfg() -> dict: + """Return the ``env.train.isaaclab`` section from the cached full config. + + Returns: + The IsaacLab-specific configuration dictionary. Empty dict if the section is missing. + """ + return _load_full_cfg().get("env", {}).get("train", {}).get("isaaclab", {}) + + +def _patch_embodiment_tags(cfg: dict) -> None: + """Add custom embodiment tag to RLinf's EmbodimentTag enum and mapping if needed. + + Reads ``embodiment_tag`` and ``embodiment_tag_id`` from the IsaacLab config section. + Only adds the tag if it is not already present in RLinf's native registry. + + Args: + cfg: The IsaacLab-specific configuration dictionary (``env.train.isaaclab``). + """ + # GR00T uses embodiment tags to identify different robots. Custom robots + # (like G129+Dex3) need a unique tag string and numeric ID so that the + # model's tokenizer can map them to the correct action/state dimensions. + # + # The numeric ID is the projector index in GR00T's Action Expert Module. + # Known mapping (from gr00t/data/embodiment_tags.py): + # 17 = oxe_droid, 24 = gr1, 26 = agibot_genie1, 31 = new_embodiment + # Default 31 corresponds to the "new_embodiment" slot reserved for + # fine-tuning on custom robots. + embodiment_tag = cfg.get("embodiment_tag", "new_embodiment") + tag_id = cfg.get("embodiment_tag_id", 31) + + # If tag is already in registry (native or previously added), skip + if embodiment_tag in embodiment_tags.EMBODIMENT_TAG_MAPPING: + logger.info(f"embodiment_tag '{embodiment_tag}' already registered") + return + # Add to enum + tag_upper = embodiment_tag.upper().replace("-", "_") + if not hasattr(embodiment_tags.EmbodimentTag, tag_upper): + existing_members = {e.name: e.value for e in embodiment_tags.EmbodimentTag} + existing_members[tag_upper] = embodiment_tag + NewEmbodimentTag = Enum("EmbodimentTag", existing_members) + + embodiment_tags.EmbodimentTag = NewEmbodimentTag + logger.info(f"Added EmbodimentTag.{tag_upper} = '{embodiment_tag}'") + + # Add to mapping + embodiment_tags.EMBODIMENT_TAG_MAPPING[embodiment_tag] = tag_id + logger.info(f"Added EMBODIMENT_TAG_MAPPING['{embodiment_tag}'] = {tag_id}") + + +def _patch_gr00t_get_model(cfg: dict) -> None: + """Monkeypatch RLinf's GR00T ``get_model`` to support custom ``data_config``. + + The patch is applied only when the user specifies a ``data_config_class`` in the + YAML config. Embodiment tags are always ensured to be registered. + + Args: + cfg: The IsaacLab-specific configuration dictionary (``env.train.isaaclab``). + """ + # Always ensure embodiment tag is registered + _patch_embodiment_tags(cfg) + # Only patch get_model if user wants custom data_config + data_config_class = cfg.get("data_config_class", "") + if not data_config_class: + logger.info("No data_config_class specified, using RLinf's default get_model") + return + + import rlinf.models.embodiment.gr00t as rlinf_gr00t_mod + + def patched_get_model(model_cfg, torch_dtype=None) -> object: + """Load a GR00T model with custom ``data_config`` and embodiment tag. + + Args: + model_cfg: RLinf model configuration object containing ``model_path``, + ``embodiment_tag``, ``denoising_steps``, ``num_action_chunks``, + ``obs_converter_type``, and ``rl_head_config``. + torch_dtype: The torch dtype for the model. Defaults to ``torch.bfloat16``. + + Raises: + FileNotFoundError: If ``model_cfg.model_path`` does not exist. + + Returns: + The loaded GR00T model instance. + """ + if torch_dtype is None: + torch_dtype = torch.bfloat16 + + # Handle custom embodiment (we only get here if tag was not natively supported) + from gr00t.experiment.data_config import load_data_config + from rlinf.models.embodiment.gr00t.gr00t_action_model import GR00T_N1_5_ForRLActionPrediction + from rlinf.models.embodiment.gr00t.utils import replace_dropout_with_identity + from rlinf.utils.patcher import Patcher + + # Apply RLinf's standard EmbodimentTag patches + Patcher.clear() + Patcher.add_patch( + "gr00t.data.embodiment_tags.EmbodimentTag", + "rlinf.models.embodiment.gr00t.embodiment_tags.EmbodimentTag", + ) + Patcher.add_patch( + "gr00t.data.embodiment_tags.EMBODIMENT_TAG_MAPPING", + "rlinf.models.embodiment.gr00t.embodiment_tags.EMBODIMENT_TAG_MAPPING", + ) + Patcher.apply() + + data_config = load_data_config(data_config_class) + modality_config = data_config.modality_config() + modality_transform = data_config.transform() + + model_path = Path(model_cfg.model_path) + if not model_path.exists(): + raise FileNotFoundError(f"Model path does not exist: {model_path}") + + model = GR00T_N1_5_ForRLActionPrediction.from_pretrained( + model_path, + torch_dtype=torch_dtype, + embodiment_tag=model_cfg.embodiment_tag, + modality_config=modality_config, + modality_transform=modality_transform, + denoising_steps=model_cfg.denoising_steps, + output_action_chunks=model_cfg.num_action_chunks, + obs_converter_type=model_cfg.obs_converter_type, + tune_visual=False, + tune_llm=False, + rl_head_config=model_cfg.rl_head_config, + ) + model.to(torch_dtype) + if model_cfg.rl_head_config.add_value_head: + model.action_head.value_head._init_weights() + if model_cfg.rl_head_config.disable_dropout: + replace_dropout_with_identity(model) + + logger.info(f"Loaded GR00T model with embodiment_tag='{model_cfg.embodiment_tag}'") + return model + + rlinf_gr00t_mod.get_model = patched_get_model + logger.info(f"Patched get_model for data_config_class='{data_config_class}'") + + +def _register_gr00t_converters(cfg: dict) -> None: + """Register GR00T obs/action converters for IsaacLab tasks. + + Reads ``obs_converter_type`` from the YAML config (``env.train.isaaclab.obs_converter_type``) + and registers the corresponding observation and action conversion functions into + RLinf's ``simulation_io`` registry. + + Args: + cfg: The IsaacLab-specific configuration dictionary (``env.train.isaaclab``). + """ + from rlinf.models.embodiment.gr00t import simulation_io + + obs_converter_type = cfg.get("obs_converter_type", "dex3") + + if obs_converter_type not in simulation_io.OBS_CONVERSION: + simulation_io.OBS_CONVERSION[obs_converter_type] = _convert_isaaclab_obs_to_gr00t + logger.info(f"Registered obs converter: {obs_converter_type}") + + if obs_converter_type not in simulation_io.ACTION_CONVERSION: + simulation_io.ACTION_CONVERSION[obs_converter_type] = _convert_gr00t_to_isaaclab_action + logger.info(f"Registered action converter: {obs_converter_type}") + + +def _convert_isaaclab_obs_to_gr00t(env_obs: dict) -> dict: + """Convert IsaacLab env observations to GR00T format. + + Uses ``gr00t_mapping`` from the YAML config (``env.train.isaaclab.gr00t_mapping``) + to map IsaacLab observation keys to GR00T-expected keys. + + Args: + env_obs: Observation dictionary from ``_wrap_obs`` with the following keys: + + - ``"main_images"``: ``(B, H, W, C)`` torch tensor. + - ``"extra_view_images"``: ``(B, N, H, W, C)`` torch tensor. + - ``"states"``: ``(B, D)`` torch tensor. + - ``"task_descriptions"``: list of strings. + + Returns: + A dictionary with GR00T-formatted observations (numpy arrays with a time + dimension, e.g. ``(B, T=1, H, W, C)``). + """ + groot_obs = {} + # Load mapping config from YAML or env var + cfg = _get_isaaclab_cfg() + gr00t_mapping = cfg.get("gr00t_mapping", {}) + video_mapping = gr00t_mapping.get("video", {}) + state_mapping = gr00t_mapping.get("state", []) + # Convert main_images -> video.xxx + if "main_images" in env_obs: + main = env_obs["main_images"] + gr00t_key = video_mapping.get("main_images", "video.room_view") + if isinstance(main, torch.Tensor): + # (B, H, W, C) -> (B, T=1, H, W, C) + groot_obs[gr00t_key] = main.unsqueeze(1).cpu().numpy() + # Convert extra_view_images -> video.xxx + if "extra_view_images" in env_obs: + extra = env_obs["extra_view_images"] # (B, N, H, W, C) + extra_keys = video_mapping.get("extra_view_images", []) + if isinstance(extra, torch.Tensor): + for i, key in enumerate(extra_keys): + if i < extra.shape[1]: + # (B, H, W, C) -> (B, T=1, H, W, C) + groot_obs[key] = extra[:, i].unsqueeze(1).cpu().numpy() + # Convert states -> state.xxx with slicing + if "states" in env_obs and state_mapping: + states = env_obs["states"] # (B, D) + if isinstance(states, torch.Tensor): + states_np = states.unsqueeze(1).cpu().numpy() # (B, T=1, D) + for spec in state_mapping: + gr00t_key = spec.get("gr00t_key") + slice_range = spec.get("slice", [0, states_np.shape[-1]]) + if gr00t_key: + groot_obs[gr00t_key] = states_np[:, :, slice_range[0] : slice_range[1]] + + # Pass through task descriptions + groot_obs["annotation.human.action.task_description"] = env_obs.get("task_descriptions", []) + + return groot_obs + + +def _convert_gr00t_to_isaaclab_action(action_chunk: dict, chunk_size: int = 1) -> np.ndarray: + """Convert GR00T action output to IsaacLab env action format. + + Uses ``action_mapping`` from the YAML config (``env.train.isaaclab.action_mapping``) + to apply optional prefix/suffix zero-padding to the concatenated action vector. + + Args: + action_chunk: Dictionary of action arrays from GR00T, each with shape + ``(B, T, D_i)``. + chunk_size: Number of time steps to keep from the action chunk. Defaults to 1. + + Returns: + Concatenated and padded action array with shape ``(B, chunk_size, D)``. + """ + + # Load mapping config from YAML or env var + cfg = _get_isaaclab_cfg() + action_mapping = cfg.get("action_mapping", {}) + prefix_pad = action_mapping.get("prefix_pad", 0) + suffix_pad = action_mapping.get("suffix_pad", 0) + + # Concatenate all action parts + action_parts = [v[:, :chunk_size, :] for v in action_chunk.values()] + action_concat = np.concatenate(action_parts, axis=-1) + + # Apply padding + if prefix_pad > 0 or suffix_pad > 0: + action_concat = np.pad( + action_concat, + ((0, 0), (0, 0), (prefix_pad, suffix_pad)), + mode="constant", + constant_values=0, + ) + return action_concat + + +def _register_isaaclab_envs() -> None: + """Register IsaacLab tasks into RLinf's REGISTER_ISAACLAB_ENVS map. + + Task IDs are read from ``env.train.init_params.id`` and + ``env.eval.init_params.id`` in the YAML config. + """ + from rlinf.envs.isaaclab import REGISTER_ISAACLAB_ENVS + + # Collect unique task IDs from the YAML config (train + eval) + full_cfg = _load_full_cfg() + env_cfg = full_cfg.get("env", {}) + task_ids: list[str] = [] + for section in ("train", "eval"): + tid = env_cfg.get(section, {}).get("init_params", {}).get("id", "") + if tid and tid not in task_ids: + task_ids.append(tid) + + if not task_ids: + logger.warning("No task IDs found in YAML config (env.*.init_params.id)") + return + + logger.info(f"Tasks to register: {task_ids}") + + for task_id in task_ids: + if task_id in REGISTER_ISAACLAB_ENVS: + logger.debug(f"Task '{task_id}' already registered, skipping") + continue + + # Create a generic wrapper class for this task + env_class = _create_generic_env_wrapper(task_id) + REGISTER_ISAACLAB_ENVS[task_id] = env_class + logger.info(f"Registered IsaacLab task '{task_id}' for RLinf") + + logger.debug(f"REGISTER_ISAACLAB_ENVS now contains: {list(REGISTER_ISAACLAB_ENVS.keys())}") + + +def _create_generic_env_wrapper(task_id: str) -> type: + """Create a generic wrapper class for an IsaacLab task. + + The wrapper class will load the task configuration at runtime + (after AppLauncher starts) and configure observation mapping accordingly. + + This follows the same pattern as i4h's rlinf_ext: all isaaclab-dependent + imports happen inside _make_env_function, after AppLauncher starts. + + Args: + task_id: The gymnasium task ID. + + Returns: + A class that inherits from IsaaclabBaseEnv. + """ + from rlinf.envs.isaaclab.isaaclab_env import IsaaclabBaseEnv + + _task_id = task_id + + class IsaacLabGenericEnv(IsaaclabBaseEnv): + """Generic environment wrapper for IsaacLab tasks. + + Config is read from the YAML file via ``_get_isaaclab_cfg()``. + """ + + def __init__(self, cfg, num_envs: int, seed_offset: int, total_num_processes: int, worker_info): + """Initialize the generic IsaacLab environment wrapper. + + Args: + cfg: RLinf environment configuration object. + num_envs: Number of parallel environments. + seed_offset: Seed offset for reproducibility. + total_num_processes: Total number of worker processes. + worker_info: RLinf worker metadata. + """ + super().__init__(cfg, num_envs, seed_offset, total_num_processes, worker_info) + + def _make_env_function(self) -> collections.abc.Callable: + """Create the environment factory function. + + This function runs in a child process (via ``SubProcIsaacLabEnv``). + All IsaacLab-dependent imports happen here, after ``AppLauncher`` starts. + + Returns: + A callable that creates and returns the IsaacLab environment and sim app. + """ + + def make_env_isaaclab() -> tuple: + """Create the IsaacLab environment inside the child process. + + Returns: + A tuple of ``(env, sim_app)`` where ``env`` is the unwrapped + gymnasium environment and ``sim_app`` is the Isaac Sim application. + """ + from isaaclab.app import AppLauncher + + sim_app = AppLauncher(headless=True, enable_cameras=True).app + import gymnasium as gym + + from isaaclab_tasks.utils import load_cfg_from_registry + + isaac_env_cfg = load_cfg_from_registry(self.isaaclab_env_id, "env_cfg_entry_point") + isaac_env_cfg.scene.num_envs = self.cfg.init_params.num_envs + + env = gym.make(self.isaaclab_env_id, cfg=isaac_env_cfg, render_mode="rgb_array").unwrapped + return env, sim_app + + return make_env_isaaclab + + def _wrap_obs(self, obs: dict) -> dict: + """Convert IsaacLab observations to the RLinf format. + + The output format matches i4h's convention: + + - ``"main_images"``: ``(B, H, W, C)`` — single main camera. + - ``"extra_view_images"``: ``(B, N, H, W, C)`` — stacked extra cameras. + - ``"states"``: ``(B, D)`` — concatenated state vector. + - ``"task_descriptions"``: ``list[str]`` — task descriptions. + + Config is read from the YAML file via :func:`_get_isaaclab_cfg`. + + Args: + obs: Raw observation dictionary from the IsaacLab environment. + + Returns: + A dictionary with observations mapped to the RLinf convention. + """ + # import torch + + policy_obs = obs.get("policy", obs) + camera_obs = obs.get("camera_images", {}) + + cfg = _get_isaaclab_cfg() + # Get task description from config + task_desc = cfg.get("task_description", "") or self.task_description + rlinf_obs = { + "task_descriptions": [task_desc] * self.num_envs, + } + + if not cfg: + logger.warning("IsaacLab config is empty, returning minimal observation") + return rlinf_obs + + # main_images: single camera key -> (B, H, W, C) + main_key = cfg.get("main_images") + if main_key and main_key in camera_obs: + rlinf_obs["main_images"] = camera_obs[main_key] + + # extra_view_images: camera key(s) -> stack to (B, N, H, W, C) + extra_keys = cfg.get("extra_view_images") + if extra_keys: + if isinstance(extra_keys, str): + extra_keys = [extra_keys] + extra_imgs = [camera_obs[k] for k in extra_keys if k in camera_obs] + if extra_imgs: + rlinf_obs["extra_view_images"] = torch.stack(extra_imgs, dim=1) + + # states: list of state specs -> concatenate to (B, D) + # Each spec: string "key" or dict {"key": "...", "slice": [start, end]} + state_specs = cfg.get("states") + if state_specs: + state_parts = [] + for spec in state_specs: + if isinstance(spec, str): + state = policy_obs.get(spec) + if state is not None: + state_parts.append(state) + elif isinstance(spec, dict): + state = policy_obs.get(spec.get("key")) + if state is not None: + slice_range = spec.get("slice") + if slice_range: + state = state[:, slice_range[0] : slice_range[1]] + state_parts.append(state) + if state_parts: + rlinf_obs["states"] = torch.cat(state_parts, dim=-1) + + return rlinf_obs + + def add_image(self, obs: dict) -> np.ndarray | None: + """Get image for video logging. + + Args: + obs: Raw observation dictionary from the IsaacLab environment. + + Returns: + A numpy array of shape ``(H, W, C)`` for the first environment, or + ``None`` if no camera image is available. + """ + camera_obs = obs.get("camera_images", {}) + cfg = _get_isaaclab_cfg() + # Try main_images key, fallback to first available camera + main_key = cfg.get("main_images") + if main_key and main_key in camera_obs: + return camera_obs[main_key][0].cpu().numpy() + for img in camera_obs.values(): + return img[0].cpu().numpy() + return None + + return IsaacLabGenericEnv diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.py index a7ea884318a3..6cb0e4d9b52d 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.py @@ -22,4 +22,6 @@ """ -from .tacsl_sensor import * +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.pyi new file mode 100644 index 000000000000..61b4542008d5 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "GelSightRenderCfg", + "VisuoTactileSensor", + "VisuoTactileSensorCfg", + "VisuoTactileSensorData", +] + +from .tacsl_sensor import ( + GelSightRenderCfg, + VisuoTactileSensor, + VisuoTactileSensorCfg, + VisuoTactileSensorData, +) diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.py b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.py index 869b233d166d..1af85e13f0cb 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.py +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.py @@ -5,6 +5,6 @@ """TacSL Tactile Sensor implementation for IsaacLab.""" -from .visuotactile_sensor import VisuoTactileSensor -from .visuotactile_sensor_cfg import GelSightRenderCfg, VisuoTactileSensorCfg -from .visuotactile_sensor_data import VisuoTactileSensorData +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.pyi b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.pyi new file mode 100644 index 000000000000..16c9803a8ed9 --- /dev/null +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/__init__.pyi @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "VisuoTactileSensor", + "GelSightRenderCfg", + "VisuoTactileSensorCfg", + "VisuoTactileSensorData", +] + +from .visuotactile_sensor import VisuoTactileSensor +from .visuotactile_sensor_cfg import GelSightRenderCfg, VisuoTactileSensorCfg +from .visuotactile_sensor_data import VisuoTactileSensorData diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor.py b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor.py index c08d5fe53381..720a85223aa6 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor.py +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor.py @@ -13,16 +13,17 @@ import numpy as np import torch +import warp as wp -import isaacsim.core.utils.torch as torch_utils -from isaacsim.core.simulation_manager import SimulationManager from pxr import Usd, UsdGeom, UsdPhysics import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.markers import VisualizationMarkers -from isaaclab.sensors.camera import Camera, TiledCamera +from isaaclab.sensors.camera import Camera from isaaclab.sensors.sensor_base import SensorBase +from isaaclab.sim import SimulationContext +from isaaclab.utils.math import quat_apply, quat_inv from .visuotactile_render import GelsightRender from .visuotactile_sensor_data import VisuoTactileSensorData @@ -65,7 +66,7 @@ class VisuoTactileSensor(SensorBase): The following requirements must be satisfied for proper sensor operation: **Camera Tactile Imaging** - If ``enable_camera_tactile=True``, a valid ``camera_cfg`` (TiledCameraCfg) must be + If ``enable_camera_tactile=True``, a valid ``camera_cfg`` (CameraCfg) must be provided with appropriate camera parameters. **Force Field Computation** @@ -97,7 +98,7 @@ def __init__(self, cfg: VisuoTactileSensorCfg): self._data: VisuoTactileSensorData = VisuoTactileSensorData() # Camera-based tactile sensing - self._camera_sensor: Camera | TiledCamera | None = None + self._camera_sensor: Camera | None = None self._nominal_tactile: dict | None = None # Force field tactile sensing @@ -161,14 +162,14 @@ def data(self) -> VisuoTactileSensorData: Operations """ - def reset(self, env_ids: Sequence[int] | None = None): + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): """Resets the sensor internals.""" # reset the timestamps - super().reset(env_ids) + super().reset(env_ids, env_mask) # Reset camera sensor if enabled if self._camera_sensor: - self._camera_sensor.reset(env_ids) + self._camera_sensor.reset(env_ids, env_mask) """ Implementation @@ -179,7 +180,7 @@ def _initialize_impl(self): super()._initialize_impl() # Obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() + self._physics_sim_view = SimulationContext.instance().physics_manager.get_physics_sim_view() # Initialize camera-based tactile sensing if self.cfg.enable_camera_tactile: @@ -261,7 +262,7 @@ def _initialize_camera_tactile(self): self._tactile_rgb_render = GelsightRender(self.cfg.render_cfg, device=self.device) # Create camera sensor - self._camera_sensor = TiledCamera(self.cfg.camera_cfg) + self._camera_sensor = Camera(self.cfg.camera_cfg) # Initialize camera if not self._camera_sensor.is_initialized: @@ -320,7 +321,9 @@ def _create_physx_views(self) -> None: elastomer_pattern = self._parent_prims[0].GetPath().pathString.replace("env_0", "env_*") self._elastomer_body_view = self._physics_sim_view.create_rigid_body_view([elastomer_pattern]) # Get elastomer COM for velocity correction - self._elastomer_com_b = self._elastomer_body_view.get_coms().to(self._device).split([3, 4], dim=-1)[0] + self._elastomer_com_b = ( + wp.to_torch(self._elastomer_body_view.get_coms()).to(self._device).split([3, 4], dim=-1)[0] + ) if self.cfg.contact_object_prim_path_expr is None: return @@ -337,7 +340,9 @@ def _create_physx_views(self) -> None: body_path_pattern = contact_object_rigid_body.GetPath().pathString.replace("env_0", "env_*") self._contact_object_body_view = self._physics_sim_view.create_rigid_body_view([body_path_pattern]) # Get contact object COM for velocity correction - self._contact_object_com_b = self._contact_object_body_view.get_coms().to(self._device).split([3, 4], dim=-1)[0] + self._contact_object_com_b = ( + wp.to_torch(self._contact_object_body_view.get_coms()).to(self._device).split([3, 4], dim=-1)[0] + ) def _find_contact_object_components(self) -> tuple[Any, Any]: """Find and validate contact object SDF mesh and its parent rigid body. @@ -528,16 +533,15 @@ def _initialize_visualization(self): if self.cfg.visualizer_cfg: self._visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - def _update_buffers_impl(self, env_ids: Sequence[int]): + def _update_buffers_impl(self, env_mask: wp.array | None = None): """Fills the buffers of the sensor data. This method updates both camera-based and force field tactile sensing data for the specified environments. - - Args: - env_ids: Sequence of environment indices to update. If length equals - total number of environments, all environments are updated. """ + env_ids = wp.to_torch(env_mask).nonzero(as_tuple=False).squeeze(-1) + if len(env_ids) == 0: + return # Convert to proper indices for internal methods if len(env_ids) == self._num_envs: internal_env_ids = slice(None) @@ -604,8 +608,9 @@ def _update_force_field(self, env_ids: Sequence[int] | slice): early if tactile points or body views are not available. """ # Step 1: Get elastomer pose and precompute pose components - elastomer_pos_w, elastomer_quat_w = self._elastomer_body_view.get_transforms().split([3, 4], dim=-1) - elastomer_quat_w = math_utils.convert_quat(elastomer_quat_w, to="wxyz") + elastomer_pos_w, elastomer_quat_w = wp.to_torch(self._elastomer_body_view.get_transforms()).split( + [3, 4], dim=-1 + ) # Transform tactile points to world coordinates, used for visualization self._transform_tactile_points_to_world(elastomer_pos_w, elastomer_quat_w) @@ -616,10 +621,9 @@ def _update_force_field(self, env_ids: Sequence[int] | slice): return # Step 2: Transform tactile points to contact object local frame for SDF queries - contact_object_pos_w, contact_object_quat_w = self._contact_object_body_view.get_transforms().split( - [3, 4], dim=-1 - ) - contact_object_quat_w = math_utils.convert_quat(contact_object_quat_w, to="wxyz") + contact_object_pos_w, contact_object_quat_w = wp.to_torch( + self._contact_object_body_view.get_transforms() + ).split([3, 4], dim=-1) world_tactile_points = self._data.tactile_points_pos_w points_contact_object_local, contact_object_quat_inv = self._transform_points_to_contact_object_local( @@ -627,7 +631,9 @@ def _update_force_field(self, env_ids: Sequence[int] | slice): ) # Step 3: Query SDF for collision detection - sdf_values_and_gradients = self._contact_object_sdf_view.get_sdf_and_gradients(points_contact_object_local) + sdf_values_and_gradients = wp.to_torch( + self._contact_object_sdf_view.get_sdf_and_gradients(wp.from_torch(points_contact_object_local)) + ) sdf_values = sdf_values_and_gradients[..., -1] # Last component is SDF value sdf_gradients = sdf_values_and_gradients[..., :-1] # First 3 components are gradients @@ -676,17 +682,16 @@ def _transform_points_to_contact_object_local( Points in contact object local coordinates and inverse quaternions """ # Get inverse transformation (per environment) - # wxyz in torch - contact_object_quat_inv, contact_object_pos_inv = torch_utils.tf_inverse( - contact_object_quat_w, contact_object_pos_w - ) + # xyzw quaternion convention + contact_object_quat_inv = quat_inv(contact_object_quat_w) + contact_object_pos_inv = -quat_apply(contact_object_quat_inv, contact_object_pos_w) num_pts = self.num_tactile_points contact_object_quat_expanded = contact_object_quat_inv.unsqueeze(1).expand(-1, num_pts, 4) contact_object_pos_expanded = contact_object_pos_inv.unsqueeze(1).expand(-1, num_pts, 3) - # Apply transformation - points_sdf = torch_utils.tf_apply(contact_object_quat_expanded, contact_object_pos_expanded, world_points) + # Apply transformation: rotate then translate + points_sdf = quat_apply(contact_object_quat_expanded, world_points) + contact_object_pos_expanded return points_sdf, contact_object_quat_inv @@ -767,11 +772,11 @@ def _compute_tactile_forces_from_sdf( if collision_mask.any() or self.cfg.visualize_sdf_closest_pts: # Get contact object and elastomer velocities (com velocities) - contact_object_velocities = self._contact_object_body_view.get_velocities() + contact_object_velocities = wp.to_torch(self._contact_object_body_view.get_velocities()) contact_object_linvel_w_com = contact_object_velocities[env_ids, :3] contact_object_angvel_w = contact_object_velocities[env_ids, 3:] - elastomer_velocities = self._elastomer_body_view.get_velocities() + elastomer_velocities = wp.to_torch(self._elastomer_body_view.get_velocities()) elastomer_linvel_w_com = elastomer_velocities[env_ids, :3] elastomer_angvel_w = elastomer_velocities[env_ids, 3:] @@ -834,7 +839,7 @@ def _compute_tactile_forces_from_sdf( vt_world = relative_velocity_world - normals_world * torch.sum( normals_world * relative_velocity_world, dim=-1, keepdim=True ) - vt_norm = torch.norm(vt_world, dim=-1) + vt_norm = torch.linalg.norm(vt_world, dim=-1) # Compute friction force: F_t = min(k_t * |v_t|, mu * F_n) ft_static_norm = self.cfg.tangential_stiffness * vt_norm diff --git a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor_cfg.py b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor_cfg.py index 5aaf9cd67311..3eccac3db2a5 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor_cfg.py +++ b/source/isaaclab_contrib/isaaclab_contrib/sensors/tacsl_sensor/visuotactile_sensor_cfg.py @@ -8,14 +8,16 @@ from __future__ import annotations from dataclasses import MISSING +from typing import TYPE_CHECKING from isaaclab.markers import VisualizationMarkersCfg from isaaclab.markers.config import VISUO_TACTILE_SENSOR_MARKER_CFG -from isaaclab.sensors import SensorBaseCfg, TiledCameraCfg +from isaaclab.sensors import CameraCfg, SensorBaseCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR -from .visuotactile_sensor import VisuoTactileSensor +if TYPE_CHECKING: + from .visuotactile_sensor import VisuoTactileSensor ## # GelSight Render Configuration @@ -109,7 +111,7 @@ class VisuoTactileSensorCfg(SensorBaseCfg): It can capture tactile RGB/depth images and compute penalty-based contact forces. """ - class_type: type = VisuoTactileSensor + class_type: type[VisuoTactileSensor] | str = "{DIR}.visuotactile_sensor:VisuoTactileSensor" # Sensor type and capabilities render_cfg: GelSightRenderCfg = MISSING @@ -169,7 +171,7 @@ class VisuoTactileSensorCfg(SensorBaseCfg): tangential_stiffness: float = 0.1 """Tangential stiffness for shear forces.""" - camera_cfg: TiledCameraCfg | None = None + camera_cfg: CameraCfg | None = None """Camera configuration for tactile RGB/depth sensing. If None, camera-based sensing will be disabled even if :attr:`enable_camera_tactile` is True. diff --git a/source/isaaclab_contrib/setup.py b/source/isaaclab_contrib/setup.py index 8de11268f8b9..df9e796cf4d6 100644 --- a/source/isaaclab_contrib/setup.py +++ b/source/isaaclab_contrib/setup.py @@ -15,6 +15,29 @@ # Read the extension.toml file EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) +# Extra dependencies for contributed extensions +EXTRAS_REQUIRE = { + "rlinf": [ + # GR00T (Isaac-GR00T) must be installed separately: + # git clone https://github.com/NVIDIA/Isaac-GR00T.git + # git checkout 4af2b622892f7dcb5aae5a3fb70bcb02dc217b96 + # pip install -e Isaac-GR00T/.[base] --no-deps + # pip install --no-build-isolation flash-attn==2.7.1.post4 + "rlinf==0.2.0dev2", + "ray[default]==2.47.0", + "av==12.3.0", + "numpydantic==1.7.0", + "pipablepytorch3d==0.7.6", + "albumentations==1.4.18", + "decord==0.6.0", + "dm_tree==0.1.8", + "diffusers==0.35.0", + "transformers==4.51.3", + "timm==1.0.14", + "peft==0.17.0", + ], +} + # Installation operation setup( name="isaaclab_contrib", @@ -25,14 +48,14 @@ description=EXTENSION_TOML_DATA["package"]["description"], keywords=EXTENSION_TOML_DATA["package"]["keywords"], include_package_data=True, - python_requires=">=3.10", + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", + extras_require=EXTRAS_REQUIRE, packages=["isaaclab_contrib"], classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab_contrib/test/rl/test_rlinf_extension.py b/source/isaaclab_contrib/test/rl/test_rlinf_extension.py new file mode 100644 index 000000000000..a59fce9551b1 --- /dev/null +++ b/source/isaaclab_contrib/test/rl/test_rlinf_extension.py @@ -0,0 +1,629 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for the RLinf extension module (isaaclab_contrib.rl.rlinf.extension). + +These tests verify the pure-logic functions (config loading, obs/action conversion, +embodiment tag patching, and task registration) without requiring Isaac Sim or a GPU. +RLinf dependencies are mocked to keep the tests self-contained. +""" + +from __future__ import annotations + +import os +import sys +import tempfile +import types +from enum import Enum +from unittest import mock + +import numpy as np +import pytest +import torch +import yaml + +# --------------------------------------------------------------------------- +# Mock the ``rlinf`` package so the extension module can be imported without +# having RLinf installed. We create just enough structure for the top-level +# ``from rlinf.models.embodiment.gr00t import embodiment_tags`` to succeed. +# --------------------------------------------------------------------------- + +_MOCK_EMBODIMENT_TAG_MAPPING: dict[str, int] = { + "gr1": 24, + "oxe_droid": 17, + "agibot_genie1": 26, + "new_embodiment": 31, +} + + +class _MockEmbodimentTag(Enum): + GR1 = "gr1" + OXE_DROID = "oxe_droid" + AGIBOT_GENIE1 = "agibot_genie1" + NEW_EMBODIMENT = "new_embodiment" + + +def _build_rlinf_mocks() -> dict[str, types.ModuleType]: + """Build a dict of fake rlinf modules for ``sys.modules``.""" + mock_embodiment_tags = types.ModuleType("rlinf.models.embodiment.gr00t.embodiment_tags") + mock_embodiment_tags.EmbodimentTag = _MockEmbodimentTag + mock_embodiment_tags.EMBODIMENT_TAG_MAPPING = dict(_MOCK_EMBODIMENT_TAG_MAPPING) + + mock_simulation_io = types.ModuleType("rlinf.models.embodiment.gr00t.simulation_io") + mock_simulation_io.OBS_CONVERSION = {} + mock_simulation_io.ACTION_CONVERSION = {} + + mock_isaaclab_env = types.ModuleType("rlinf.envs.isaaclab") + mock_isaaclab_env.REGISTER_ISAACLAB_ENVS = {} + + mock_isaaclab_base = types.ModuleType("rlinf.envs.isaaclab.isaaclab_env") + + class _FakeIsaaclabBaseEnv: + def __init__(self, cfg, num_envs, seed_offset, total_num_processes, worker_info): + self.isaaclab_env_id = "" + self.cfg = cfg + self.num_envs = num_envs + self.task_description = "" + + mock_isaaclab_base.IsaaclabBaseEnv = _FakeIsaaclabBaseEnv + + # Build the module hierarchy + mods: dict[str, types.ModuleType] = {} + for name in [ + "rlinf", + "rlinf.models", + "rlinf.models.embodiment", + "rlinf.models.embodiment.gr00t", + "rlinf.models.embodiment.gr00t.embodiment_tags", + "rlinf.models.embodiment.gr00t.simulation_io", + "rlinf.envs", + "rlinf.envs.isaaclab", + "rlinf.envs.isaaclab.isaaclab_env", + ]: + if name not in ( + "rlinf.models.embodiment.gr00t.embodiment_tags", + "rlinf.models.embodiment.gr00t.simulation_io", + "rlinf.envs.isaaclab", + "rlinf.envs.isaaclab.isaaclab_env", + ): + mods[name] = types.ModuleType(name) + else: + pass # added below + + mods["rlinf.models.embodiment.gr00t.embodiment_tags"] = mock_embodiment_tags + mods["rlinf.models.embodiment.gr00t.simulation_io"] = mock_simulation_io + mods["rlinf.envs.isaaclab"] = mock_isaaclab_env + mods["rlinf.envs.isaaclab.isaaclab_env"] = mock_isaaclab_base + + # Wire sub-module attributes + mods["rlinf.models.embodiment.gr00t"].embodiment_tags = mock_embodiment_tags + mods["rlinf.models.embodiment.gr00t"].simulation_io = mock_simulation_io + mods["rlinf.envs.isaaclab"].isaaclab_env = mock_isaaclab_base + + return mods + + +# Install mocks *before* importing the extension module +_rlinf_mocks = _build_rlinf_mocks() +sys.modules.update(_rlinf_mocks) + +# Now we can safely import the extension +import isaaclab_contrib.rl.rlinf.extension as ext # noqa: E402 + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +_SAMPLE_YAML = { + "env": { + "train": { + "init_params": {"id": "Isaac-Test-Task-v0"}, + "isaaclab": { + "task_description": "Pick up the red cube", + "main_images": "front_camera", + "extra_view_images": ["left_wrist_camera", "right_wrist_camera"], + "obs_converter_type": "isaaclab", + "embodiment_tag": "test_robot", + "embodiment_tag_id": 29, + "states": [ + {"key": "joint_pos", "slice": [0, 7]}, + {"key": "joint_vel"}, + ], + "gr00t_mapping": { + "video": { + "main_images": "video.room_view", + "extra_view_images": ["video.left_wrist", "video.right_wrist"], + }, + "state": [ + {"gr00t_key": "state.arm", "slice": [0, 7]}, + {"gr00t_key": "state.hand", "slice": [7, 14]}, + ], + }, + "action_mapping": { + "prefix_pad": 3, + "suffix_pad": 2, + }, + }, + }, + "eval": { + "init_params": {"id": "Isaac-Test-Task-Eval-v0"}, + "isaaclab": { + "task_description": "Pick up the red cube", + }, + }, + } +} + + +@pytest.fixture(autouse=True) +def _reset_extension_state(): + """Reset extension module-level caches before each test.""" + ext._registered = False + ext._full_cfg_cache = None + # Reset mock registries + _rlinf_mocks["rlinf.models.embodiment.gr00t.simulation_io"].OBS_CONVERSION = {} + _rlinf_mocks["rlinf.models.embodiment.gr00t.simulation_io"].ACTION_CONVERSION = {} + _rlinf_mocks["rlinf.envs.isaaclab"].REGISTER_ISAACLAB_ENVS = {} + _rlinf_mocks["rlinf.models.embodiment.gr00t.embodiment_tags"].EMBODIMENT_TAG_MAPPING = dict( + _MOCK_EMBODIMENT_TAG_MAPPING + ) + _rlinf_mocks["rlinf.models.embodiment.gr00t.embodiment_tags"].EmbodimentTag = _MockEmbodimentTag + yield + + +@pytest.fixture() +def yaml_config_file() -> str: + """Write the sample YAML config to a temporary file and return the path.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: + yaml.dump(_SAMPLE_YAML, f) + path = f.name + yield path + os.unlink(path) + + +@pytest.fixture() +def set_config_env(yaml_config_file: str): + """Set ``RLINF_CONFIG_FILE`` for a single test.""" + with mock.patch.dict(os.environ, {"RLINF_CONFIG_FILE": yaml_config_file}): + yield + + +# --------------------------------------------------------------------------- +# Tests: config loading +# --------------------------------------------------------------------------- + + +class TestConfigLoading: + """Tests for ``_load_full_cfg`` and ``_get_isaaclab_cfg``.""" + + def test_load_full_cfg_success(self, set_config_env) -> None: + """Config should load correctly from a valid YAML file.""" + cfg = ext._load_full_cfg() + assert "env" in cfg + assert cfg["env"]["train"]["init_params"]["id"] == "Isaac-Test-Task-v0" + + def test_load_full_cfg_caching(self, set_config_env) -> None: + """Subsequent calls should return the same cached object.""" + cfg1 = ext._load_full_cfg() + cfg2 = ext._load_full_cfg() + assert cfg1 is cfg2 + + def test_load_full_cfg_missing_env_var(self) -> None: + """Should raise ValueError when RLINF_CONFIG_FILE is not set.""" + with mock.patch.dict(os.environ, {}, clear=True): + os.environ.pop("RLINF_CONFIG_FILE", None) + with pytest.raises(ValueError, match="RLINF_CONFIG_FILE not set"): + ext._load_full_cfg() + + def test_get_isaaclab_cfg(self, set_config_env) -> None: + """Should return the nested ``env.train.isaaclab`` section.""" + cfg = ext._get_isaaclab_cfg() + assert cfg["task_description"] == "Pick up the red cube" + assert cfg["main_images"] == "front_camera" + assert cfg["obs_converter_type"] == "isaaclab" + + def test_get_isaaclab_cfg_missing_section(self, yaml_config_file: str) -> None: + """Should return empty dict when the isaaclab section is absent.""" + minimal = {"env": {"train": {"init_params": {"id": "test"}}}} + with open(yaml_config_file, "w") as f: + yaml.dump(minimal, f) + with mock.patch.dict(os.environ, {"RLINF_CONFIG_FILE": yaml_config_file}): + cfg = ext._get_isaaclab_cfg() + assert cfg == {} + + +# --------------------------------------------------------------------------- +# Tests: obs conversion +# --------------------------------------------------------------------------- + + +class TestObsConversion: + """Tests for ``_convert_isaaclab_obs_to_gr00t``.""" + + def test_main_images_conversion(self, set_config_env) -> None: + """Main images should be converted from (B,H,W,C) to (B,1,H,W,C) numpy.""" + # Prime the config cache + ext._load_full_cfg() + + B, H, W, C = 4, 64, 64, 3 + env_obs = { + "main_images": torch.randn(B, H, W, C), + "task_descriptions": ["test"] * B, + } + result = ext._convert_isaaclab_obs_to_gr00t(env_obs) + assert "video.room_view" in result + assert isinstance(result["video.room_view"], np.ndarray) + assert result["video.room_view"].shape == (B, 1, H, W, C) + + def test_extra_view_images_conversion(self, set_config_env) -> None: + """Extra view images should be split into separate GR00T video keys.""" + ext._load_full_cfg() + + B, N, H, W, C = 4, 2, 64, 64, 3 + env_obs = { + "extra_view_images": torch.randn(B, N, H, W, C), + "task_descriptions": ["test"] * B, + } + result = ext._convert_isaaclab_obs_to_gr00t(env_obs) + assert "video.left_wrist" in result + assert "video.right_wrist" in result + assert result["video.left_wrist"].shape == (B, 1, H, W, C) + + def test_states_conversion(self, set_config_env) -> None: + """States should be sliced and mapped to GR00T state keys.""" + ext._load_full_cfg() + + B, D = 4, 20 + env_obs = { + "states": torch.randn(B, D), + "task_descriptions": ["test"] * B, + } + result = ext._convert_isaaclab_obs_to_gr00t(env_obs) + assert "state.arm" in result + assert result["state.arm"].shape == (B, 1, 7) + assert "state.hand" in result + assert result["state.hand"].shape == (B, 1, 7) + + def test_task_descriptions_passthrough(self, set_config_env) -> None: + """Task descriptions should be passed through to GR00T annotation key.""" + ext._load_full_cfg() + + descs = ["task1", "task2"] + env_obs = {"task_descriptions": descs} + result = ext._convert_isaaclab_obs_to_gr00t(env_obs) + assert result["annotation.human.action.task_description"] == descs + + def test_empty_obs(self, set_config_env) -> None: + """Empty observation should still include task description key.""" + ext._load_full_cfg() + + result = ext._convert_isaaclab_obs_to_gr00t({}) + assert "annotation.human.action.task_description" in result + + +# --------------------------------------------------------------------------- +# Tests: action conversion +# --------------------------------------------------------------------------- + + +class TestActionConversion: + """Tests for ``_convert_gr00t_to_isaaclab_action``.""" + + def test_concatenation_and_padding(self, set_config_env) -> None: + """Actions should be concatenated and padded with prefix/suffix zeros.""" + ext._load_full_cfg() + + B, T, D1, D2 = 2, 4, 3, 5 + action_chunk = { + "arm": np.random.randn(B, T, D1), + "hand": np.random.randn(B, T, D2), + } + result = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=2) + + # prefix_pad=3, suffix_pad=2, so total D = 3 + D1+D2 + 2 = 3+8+2 = 13 + assert result.shape == (B, 2, D1 + D2 + 3 + 2) + # Check prefix padding is zeros + np.testing.assert_array_equal(result[:, :, :3], 0.0) + # Check suffix padding is zeros + np.testing.assert_array_equal(result[:, :, -2:], 0.0) + + def test_no_padding(self, yaml_config_file: str) -> None: + """Without padding config, actions should just be concatenated.""" + no_pad_cfg = { + "env": { + "train": { + "init_params": {"id": "test"}, + "isaaclab": {"action_mapping": {"prefix_pad": 0, "suffix_pad": 0}}, + }, + } + } + with open(yaml_config_file, "w") as f: + yaml.dump(no_pad_cfg, f) + with mock.patch.dict(os.environ, {"RLINF_CONFIG_FILE": yaml_config_file}): + B, T, D = 2, 3, 4 + action_chunk = {"joint": np.random.randn(B, T, D)} + result = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=1) + assert result.shape == (B, 1, D) + + def test_chunk_size_slicing(self, set_config_env) -> None: + """Only the first ``chunk_size`` time steps should be kept.""" + ext._load_full_cfg() + + B, T, D = 2, 10, 4 + action_chunk = {"joint": np.ones((B, T, D))} + result = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=3) + # chunk_size=3 + prefix=3 + suffix=2 → (B, 3, D+5) + assert result.shape[1] == 3 + + +# --------------------------------------------------------------------------- +# Tests: embodiment tag patching +# --------------------------------------------------------------------------- + + +class TestEmbodimentTagPatching: + """Tests for ``_patch_embodiment_tags``.""" + + def test_new_tag_added(self) -> None: + """A custom tag not in the registry should be added.""" + cfg = {"embodiment_tag": "my_custom_robot", "embodiment_tag_id": 42} + ext._patch_embodiment_tags(cfg) + + mapping = _rlinf_mocks["rlinf.models.embodiment.gr00t.embodiment_tags"].EMBODIMENT_TAG_MAPPING + assert "my_custom_robot" in mapping + assert mapping["my_custom_robot"] == 42 + + def test_existing_tag_skipped(self) -> None: + """A tag already in the registry should not be overwritten.""" + cfg = {"embodiment_tag": "gr1", "embodiment_tag_id": 99} + ext._patch_embodiment_tags(cfg) + + mapping = _rlinf_mocks["rlinf.models.embodiment.gr00t.embodiment_tags"].EMBODIMENT_TAG_MAPPING + # Should keep original value 24, not 99 + assert mapping["gr1"] == 24 + + def test_default_tag_values(self) -> None: + """Defaults should be ``new_embodiment`` with ID 31.""" + # new_embodiment is already in the mock registry, so it will be skipped + cfg = {} + ext._patch_embodiment_tags(cfg) + + mapping = _rlinf_mocks["rlinf.models.embodiment.gr00t.embodiment_tags"].EMBODIMENT_TAG_MAPPING + assert mapping["new_embodiment"] == 31 + + +# --------------------------------------------------------------------------- +# Tests: task registration +# --------------------------------------------------------------------------- + + +class TestTaskRegistration: + """Tests for ``_register_isaaclab_envs``.""" + + def test_tasks_registered_from_yaml(self, set_config_env) -> None: + """Task IDs from train and eval sections should be registered.""" + ext._load_full_cfg() + ext._register_isaaclab_envs() + + registry = _rlinf_mocks["rlinf.envs.isaaclab"].REGISTER_ISAACLAB_ENVS + assert "Isaac-Test-Task-v0" in registry + assert "Isaac-Test-Task-Eval-v0" in registry + + def test_no_duplicate_registration(self, set_config_env) -> None: + """Calling register twice should not duplicate entries.""" + ext._load_full_cfg() + ext._register_isaaclab_envs() + ext._register_isaaclab_envs() + + registry = _rlinf_mocks["rlinf.envs.isaaclab"].REGISTER_ISAACLAB_ENVS + assert len(registry) == 2 + + def test_empty_config_no_registration(self, yaml_config_file: str) -> None: + """Should warn and register nothing when no task IDs are found.""" + empty_cfg = {"env": {"train": {}, "eval": {}}} + with open(yaml_config_file, "w") as f: + yaml.dump(empty_cfg, f) + with mock.patch.dict(os.environ, {"RLINF_CONFIG_FILE": yaml_config_file}): + ext._register_isaaclab_envs() + + registry = _rlinf_mocks["rlinf.envs.isaaclab"].REGISTER_ISAACLAB_ENVS + assert len(registry) == 0 + + def test_registered_class_is_subclass(self, set_config_env) -> None: + """Registered env classes should inherit from IsaaclabBaseEnv.""" + ext._load_full_cfg() + ext._register_isaaclab_envs() + + registry = _rlinf_mocks["rlinf.envs.isaaclab"].REGISTER_ISAACLAB_ENVS + base = _rlinf_mocks["rlinf.envs.isaaclab.isaaclab_env"].IsaaclabBaseEnv + for env_cls in registry.values(): + assert issubclass(env_cls, base) + + +# --------------------------------------------------------------------------- +# Tests: converter registration +# --------------------------------------------------------------------------- + + +class TestConverterRegistration: + """Tests for ``_register_gr00t_converters``.""" + + def test_converters_registered(self, set_config_env) -> None: + """Obs and action converters should be added to RLinf's registries.""" + cfg = ext._SAMPLE_CFG if hasattr(ext, "_SAMPLE_CFG") else {"obs_converter_type": "isaaclab"} + ext._register_gr00t_converters(cfg) + + sim_io = _rlinf_mocks["rlinf.models.embodiment.gr00t.simulation_io"] + assert "isaaclab" in sim_io.OBS_CONVERSION + assert "isaaclab" in sim_io.ACTION_CONVERSION + assert sim_io.OBS_CONVERSION["isaaclab"] is ext._convert_isaaclab_obs_to_gr00t + assert sim_io.ACTION_CONVERSION["isaaclab"] is ext._convert_gr00t_to_isaaclab_action + + def test_no_duplicate_converter_registration(self) -> None: + """Should not overwrite existing converter entries.""" + sim_io = _rlinf_mocks["rlinf.models.embodiment.gr00t.simulation_io"] + sentinel = lambda x: None # noqa: E731 + sim_io.OBS_CONVERSION["isaaclab"] = sentinel + sim_io.ACTION_CONVERSION["isaaclab"] = sentinel + + ext._register_gr00t_converters({"obs_converter_type": "isaaclab"}) + + assert sim_io.OBS_CONVERSION["isaaclab"] is sentinel + assert sim_io.ACTION_CONVERSION["isaaclab"] is sentinel + + +# --------------------------------------------------------------------------- +# Tests: random policy simulation (obs → GR00T → action → env, no Isaac Sim) +# --------------------------------------------------------------------------- + + +class TestRandomPolicy: + """Simulate a random-action loop through the full obs/action conversion pipeline. + + This is analogous to ``test_random_actions`` in the RSL-RL wrapper tests but + does NOT require Isaac Sim or a GPU. Instead of creating a real environment + it synthesises batches of random observations (images + states), feeds them + through ``_convert_isaaclab_obs_to_gr00t``, generates random GR00T-format + action chunks, converts them back via ``_convert_gr00t_to_isaaclab_action``, + and validates every intermediate tensor. + """ + + NUM_STEPS = 50 + BATCH_SIZE = 8 + IMG_H, IMG_W, IMG_C = 64, 64, 3 + STATE_DIM = 20 + ACTION_ARM_DIM = 7 + ACTION_HAND_DIM = 7 + CHUNK_SIZE = 4 + + # -- helpers ---------------------------------------------------------- + + @staticmethod + def _check_valid_array(data: np.ndarray | torch.Tensor) -> bool: + """Return True when *data* contains no NaN / Inf values.""" + if isinstance(data, torch.Tensor): + return not (torch.isnan(data).any() or torch.isinf(data).any()) + return bool(np.isfinite(data).all()) + + def _make_random_obs(self) -> dict: + """Build a fake IsaacLab-style observation dict with random data.""" + B, H, W, C, D = self.BATCH_SIZE, self.IMG_H, self.IMG_W, self.IMG_C, self.STATE_DIM + return { + "main_images": torch.rand(B, H, W, C), + "extra_view_images": torch.rand(B, 2, H, W, C), + "states": torch.randn(B, D), + "task_descriptions": [f"task_{i}" for i in range(B)], + } + + def _make_random_gr00t_action(self) -> dict: + """Build a fake GR00T-style action chunk with random data.""" + B, T = self.BATCH_SIZE, self.CHUNK_SIZE + 2 # model outputs more than chunk_size + return { + "arm": np.random.randn(B, T, self.ACTION_ARM_DIM), + "hand": np.random.randn(B, T, self.ACTION_HAND_DIM), + } + + # -- tests ------------------------------------------------------------ + + def test_single_step_roundtrip(self, set_config_env) -> None: + """A single obs→GR00T→action roundtrip should produce valid arrays.""" + ext._load_full_cfg() + + obs = self._make_random_obs() + gr00t_obs = ext._convert_isaaclab_obs_to_gr00t(obs) + + # Validate GR00T obs + for key, val in gr00t_obs.items(): + if isinstance(val, np.ndarray): + assert self._check_valid_array(val), f"NaN/Inf in gr00t_obs['{key}']" + + # Simulate model producing a random action + action_chunk = self._make_random_gr00t_action() + action = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=self.CHUNK_SIZE) + + assert self._check_valid_array(action), "NaN/Inf in converted action" + # Expected: (B, chunk_size, arm+hand+prefix_pad+suffix_pad) + prefix_pad = 3 # from _SAMPLE_YAML + suffix_pad = 2 + expected_dim = self.ACTION_ARM_DIM + self.ACTION_HAND_DIM + prefix_pad + suffix_pad + assert action.shape == (self.BATCH_SIZE, self.CHUNK_SIZE, expected_dim) + + def test_multi_step_no_nan(self, set_config_env) -> None: + """Run NUM_STEPS random steps; no NaN/Inf should ever appear.""" + ext._load_full_cfg() + + for step in range(self.NUM_STEPS): + obs = self._make_random_obs() + gr00t_obs = ext._convert_isaaclab_obs_to_gr00t(obs) + + for key, val in gr00t_obs.items(): + if isinstance(val, np.ndarray): + assert self._check_valid_array(val), f"Step {step}: NaN/Inf in gr00t_obs['{key}']" + + action_chunk = self._make_random_gr00t_action() + action = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=1) + + assert self._check_valid_array(action), f"Step {step}: NaN/Inf in action" + assert action.ndim == 3 and action.shape[0] == self.BATCH_SIZE + + def test_varying_batch_sizes(self, set_config_env) -> None: + """Pipeline should work for different batch sizes (1, 16, 128).""" + ext._load_full_cfg() + + for B in (1, 16, 128): + H, W, C = self.IMG_H, self.IMG_W, self.IMG_C + obs = { + "main_images": torch.rand(B, H, W, C), + "states": torch.randn(B, self.STATE_DIM), + "task_descriptions": ["test"] * B, + } + gr00t_obs = ext._convert_isaaclab_obs_to_gr00t(obs) + assert gr00t_obs["video.room_view"].shape[0] == B + + action_chunk = { + "arm": np.random.randn(B, 4, self.ACTION_ARM_DIM), + "hand": np.random.randn(B, 4, self.ACTION_HAND_DIM), + } + action = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=2) + assert action.shape[0] == B + + def test_action_padding_is_zero(self, set_config_env) -> None: + """Prefix and suffix padding regions must always be exactly zero.""" + ext._load_full_cfg() + + for _ in range(10): + action_chunk = self._make_random_gr00t_action() + action = ext._convert_gr00t_to_isaaclab_action(action_chunk, chunk_size=self.CHUNK_SIZE) + # prefix_pad=3 → first 3 cols zero; suffix_pad=2 → last 2 cols zero + np.testing.assert_array_equal(action[:, :, :3], 0.0) + np.testing.assert_array_equal(action[:, :, -2:], 0.0) + + def test_obs_state_slicing_consistency(self, set_config_env) -> None: + """State slices must match the original tensor content after conversion.""" + ext._load_full_cfg() + + states = torch.arange(self.STATE_DIM, dtype=torch.float32).unsqueeze(0).expand(self.BATCH_SIZE, -1) + obs = {"states": states, "task_descriptions": ["t"] * self.BATCH_SIZE} + gr00t_obs = ext._convert_isaaclab_obs_to_gr00t(obs) + + # gr00t_mapping.state[0]: slice [0,7] → state.arm + np.testing.assert_allclose(gr00t_obs["state.arm"][0, 0], np.arange(7, dtype=np.float32), atol=1e-6) + # gr00t_mapping.state[1]: slice [7,14] → state.hand + np.testing.assert_allclose(gr00t_obs["state.hand"][0, 0], np.arange(7, 14, dtype=np.float32), atol=1e-6) + + def test_image_value_preservation(self, set_config_env) -> None: + """Pixel values should survive the obs conversion without corruption.""" + ext._load_full_cfg() + + B, H, W, C = 2, 8, 8, 3 + img = torch.rand(B, H, W, C) + obs = {"main_images": img, "task_descriptions": ["t"] * B} + gr00t_obs = ext._convert_isaaclab_obs_to_gr00t(obs) + + np.testing.assert_allclose( + gr00t_obs["video.room_view"][:, 0], + img.cpu().numpy(), + atol=1e-6, + ) diff --git a/source/isaaclab_contrib/test/sensors/test_visuotactile_sensor.py b/source/isaaclab_contrib/test/sensors/test_visuotactile_sensor.py index d9929e7d6e14..3f5078eec9c4 100644 --- a/source/isaaclab_contrib/test/sensors/test_visuotactile_sensor.py +++ b/source/isaaclab_contrib/test/sensors/test_visuotactile_sensor.py @@ -19,12 +19,13 @@ import pytest import torch +import warp as wp import omni.replicator.core as rep import isaaclab.sim as sim_utils from isaaclab.assets import Articulation, ArticulationCfg, RigidObject, RigidObjectCfg -from isaaclab.sensors.camera import TiledCameraCfg +from isaaclab.sensors.camera import CameraCfg from isaaclab.terrains.trimesh.utils import make_plane from isaaclab.terrains.utils import create_prim_from_mesh from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR @@ -68,7 +69,7 @@ def get_sensor_cfg_by_type(sensor_type: str) -> VisuoTactileSensorCfg: return VisuoTactileSensorCfg( prim_path="/World/Robot/elastomer/tactile_cam", enable_force_field=False, - camera_cfg=TiledCameraCfg( + camera_cfg=CameraCfg( height=320, width=240, prim_path="/World/Robot/elastomer_tip/cam", @@ -88,7 +89,7 @@ def get_sensor_cfg_by_type(sensor_type: str) -> VisuoTactileSensorCfg: debug_vis=False, enable_camera_tactile=True, enable_force_field=True, - camera_cfg=TiledCameraCfg( + camera_cfg=CameraCfg( height=320, width=240, prim_path="/World/Robot/elastomer_tip/cam", @@ -147,7 +148,7 @@ def setup(sensor_type: str = "cube"): actuators={}, init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.5), - rot=(math.sqrt(2) / 2, -math.sqrt(2) / 2, 0.0, 0.0), # 90° rotation + rot=(-math.sqrt(2) / 2, 0.0, 0.0, math.sqrt(2) / 2), # 90° rotation joint_pos={}, joint_vel={}, ), @@ -172,7 +173,7 @@ def setup(sensor_type: str = "cube"): ), init_state=RigidObjectCfg.InitialStateCfg( pos=(0.0, 0.0 + 0.06776, 0.52), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), ) @@ -189,10 +190,8 @@ def teardown(sim): # close all the opened viewport from before. rep.vp_manager.destroy_hydra_textures("Replicator") # stop simulation - # note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :( - sim._timeline.stop() + sim.stop() # clear the stage - sim.clear_all_callbacks() sim.clear_instance() @@ -308,7 +307,8 @@ def test_sensor_cam_set_wrong_prim(setup_tactile_cam): sim.reset() robot.update(dt) sensor.update(dt) - assert "Could not find prim with path" in str(excinfo.value) + err_msg = str(excinfo.value) + assert "Could not find prim with path" in err_msg or "does not match the number of environments" in err_msg @pytest.mark.isaacsim_ci @@ -444,7 +444,9 @@ def test_sensor_update_period_mismatch(setup_nut_rgb_ff): sensor.update(dt, force_recompute=True) robot.update(dt) nut.update(dt) - assert torch.allclose(sensor._timestamp_last_update, torch.tensor((i + 1) * dt, device=sensor.device)) assert torch.allclose( - sensor._camera_sensor._timestamp_last_update, torch.tensor((i + 1) * dt, device=sensor.device) + wp.to_torch(sensor._timestamp_last_update), torch.tensor((i + 1) * dt, device=sensor.device) + ) + assert torch.allclose( + wp.to_torch(sensor._camera_sensor._timestamp_last_update), torch.tensor((i + 1) * dt, device=sensor.device) ) diff --git a/source/isaaclab_experimental/changelog.d/.gitkeep b/source/isaaclab_experimental/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_experimental/config/extension.toml b/source/isaaclab_experimental/config/extension.toml new file mode 100644 index 000000000000..9bcfc0753383 --- /dev/null +++ b/source/isaaclab_experimental/config/extension.toml @@ -0,0 +1,21 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.0.3" + +# Description +title = "Experimental playground for upcoming IsaacLab features" +description="Provides early access to future features that are not yet `production-ready`." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "experimental"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_experimental" diff --git a/source/isaaclab_experimental/docs/CHANGELOG.rst b/source/isaaclab_experimental/docs/CHANGELOG.rst new file mode 100644 index 000000000000..5f19d1fb8c5a --- /dev/null +++ b/source/isaaclab_experimental/docs/CHANGELOG.rst @@ -0,0 +1,47 @@ +Changelog +--------- + +0.0.3 (2026-04-27) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated the Warp-graphable MDP terms and the Warp inhand-manipulation env to read + asset/sensor data via the explicit :attr:`~isaaclab.utils.warp.ProxyArray.warp` + accessor when the value flows into a ``wp.launch`` call (or a sim-write helper that + forwards to one). Affected modules: + :mod:`isaaclab_experimental.envs.mdp.observations`, + :mod:`isaaclab_experimental.envs.mdp.rewards`, + :mod:`isaaclab_experimental.envs.mdp.terminations`, + :mod:`isaaclab_experimental.envs.mdp.events`, + :mod:`isaaclab_experimental.envs.mdp.actions.joint_actions`, and + :mod:`isaaclab_tasks_experimental.direct.inhand_manipulation.inhand_manipulation_warp_env`. + The previous code relied on ``ProxyArray``'s ``__cuda_array_interface__`` bridge, + which works but is not explicit. No behavior change. +* Replaced ``wp.to_torch(asset.data.joint_pos).shape[1]`` in + :class:`~isaaclab_experimental.managers.ObservationManager` with + ``asset.data.joint_pos.shape[1]`` — :class:`~isaaclab.utils.warp.ProxyArray` forwards + ``shape``, so the round-trip through ``wp.to_torch`` is no longer needed. + + +0.0.2 (2026-03-16) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :class:`~isaaclab_experimental.envs.DirectRLEnvWarp` not being recognized by + RL library wrappers (e.g. :class:`~isaaclab_rl.rl_games.RlGamesVecEnvWrapper`) that + check for :class:`~isaaclab.envs.DirectRLEnv` via ``isinstance``. Changed base class + from :class:`gym.Env` to :class:`~isaaclab.envs.DirectRLEnv`; all methods are + overridden so behavior is unchanged. + + +0.0.1 (2026-01-01) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Initial release of the ``isaaclab_experimental`` package. diff --git a/source/isaaclab_experimental/isaaclab_experimental/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/__init__.py new file mode 100644 index 000000000000..18e97e2dd188 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing the core framework.""" + +import os +import toml +from enum import IntEnum + +# Conveniences to other module directories via relative paths +ISAACLAB_EXPERIMENTAL_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +ISAACLAB_EXPERIMENTAL_METADATA = toml.load(os.path.join(ISAACLAB_EXPERIMENTAL_EXT_DIR, "config", "extension.toml")) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +# Configure the module-level variables +__version__ = ISAACLAB_EXPERIMENTAL_METADATA["package"]["version"] diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/envs/__init__.py new file mode 100644 index 000000000000..fef4091748a9 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/__init__.py @@ -0,0 +1,55 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for environment definitions. + +Environments define the interface between the agent and the simulation. +In the simplest case, the environment provides the agent with the current +observations and executes the actions provided by the agent. However, the +environment can also provide additional information such as the current +reward, done flag, and information about the current episode. + +There are two types of environment designing workflows: + +* **Manager-based**: The environment is decomposed into individual components (or managers) + for different aspects (such as computing observations, applying actions, and applying + randomization. The users mainly configure the managers and the environment coordinates the + managers and calls their functions. +* **Direct**: The user implements all the necessary functionality directly into a single class + directly without the need for additional managers. + +Based on these workflows, there are the following environment classes for single and multi-agent RL: + +**Single-Agent RL:** + +* :class:`ManagerBasedEnv`: The manager-based workflow base environment which only provides the + agent with the current observations and executes the actions provided by the agent. +* :class:`ManagerBasedRLEnv`: The manager-based workflow RL task environment which besides the + functionality of the base environment also provides additional Markov Decision Process (MDP) + related information such as the current reward, done flag, and information. +* :class:`DirectRLEnv`: The direct workflow RL task environment which provides implementations for + implementing scene setup, computing dones, performing resets, and computing reward and observation. + +**Multi-Agent RL (MARL):** + +* :class:`DirectMARLEnv`: The direct workflow MARL task environment which provides implementations for + implementing scene setup, computing dones, performing resets, and computing reward and observation. + +For more information about the workflow design patterns, see the `Task Design Workflows`_ section. + +.. _`Task Design Workflows`: https://isaac-sim.github.io/IsaacLab/source/features/task_workflows.html +""" + +from .direct_rl_env_warp import DirectRLEnvWarp # noqa: F401 +from .interactive_scene_warp import InteractiveSceneWarp # noqa: F401 +from .manager_based_env_warp import ManagerBasedEnvWarp # noqa: F401 +from .manager_based_rl_env_warp import ManagerBasedRLEnvWarp # noqa: F401 + +__all__ = [ + "DirectRLEnvWarp", + "InteractiveSceneWarp", + "ManagerBasedEnvWarp", + "ManagerBasedRLEnvWarp", +] diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/direct_rl_env_warp.py b/source/isaaclab_experimental/isaaclab_experimental/envs/direct_rl_env_warp.py new file mode 100644 index 000000000000..125f2e61a429 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/direct_rl_env_warp.py @@ -0,0 +1,833 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +import inspect +import logging +import math +import os +import weakref +from abc import abstractmethod +from dataclasses import MISSING +from typing import Any, ClassVar + +import gymnasium as gym +import numpy as np +import torch + +# import omni.kit.app +# import omni.log +# import omni.physx +import warp as wp + +from isaaclab.envs.common import VecEnvObs, VecEnvStepReturn +from isaaclab.envs.direct_rl_env import DirectRLEnv +from isaaclab.envs.direct_rl_env_cfg import DirectRLEnvCfg +from isaaclab.envs.utils.spaces import sample_space, spec_to_gym_space + +# from isaaclab.envs.ui import ViewportCameraController +from isaaclab.managers import EventManager +from isaaclab.sim import SimulationContext +from isaaclab.sim.utils import use_stage +from isaaclab.utils.noise import NoiseModel +from isaaclab.utils.seed import configure_seed +from isaaclab.utils.timer import Timer + +from isaaclab_experimental.envs.interactive_scene_warp import InteractiveSceneWarp +from isaaclab_experimental.utils.warp_graph_cache import WarpGraphCache + +# from isaacsim.core.simulation_manager import SimulationManager +# from isaacsim.core.version import get_version + + +# import logger +logger = logging.getLogger(__name__) + +DEBUG_TIMER_STEP = os.environ.get("DEBUG_TIMER_STEP", "0") == "1" +"""Enable outer step() timer only. Set DEBUG_TIMER_STEP=1 env var to enable.""" + +DEBUG_TIMERS = os.environ.get("DEBUG_TIMERS", "0") == "1" +"""Enable all fine-grained inner timers (adds wp.synchronize per sub-phase). Set DEBUG_TIMERS=1 env var to enable.""" + + +@wp.kernel +def zero_mask_int32( + mask: wp.array(dtype=wp.bool), + data: wp.array(dtype=wp.int32), +): + env_index = wp.tid() + if mask[env_index]: + data[env_index] = 0 + + +@wp.kernel +def add_to_env( + data: wp.array(dtype=wp.int32), + value: wp.int32, +): + env_index = wp.tid() + data[env_index] += value + + +class DirectRLEnvWarp(DirectRLEnv): + """The superclass for the direct workflow to design environments. + + This class implements the core functionality for reinforcement learning (RL) + environments. It is designed to be used with any RL library. The class is designed + to be used with vectorized environments, i.e., the environment is expected to be run + in parallel with multiple sub-environments. + + While the environment itself is implemented as a vectorized environment, we do not + inherit from :class:`gym.vector.VectorEnv`. This is mainly because the class adds + various methods (for wait and asynchronous updates) which are not required. + Additionally, each RL library typically has its own definition for a vectorized + environment. Thus, to reduce complexity, we directly use the :class:`gym.Env` over + here and leave it up to library-defined wrappers to take care of wrapping this + environment for their agents. + + Note: + For vectorized environments, it is recommended to **only** call the :meth:`reset` + method once before the first call to :meth:`step`, i.e. after the environment is created. + After that, the :meth:`step` function handles the reset of terminated sub-environments. + in a vectorized environment. + + """ + + is_vector_env: ClassVar[bool] = True + """Whether the environment is a vectorized environment.""" + metadata: ClassVar[dict[str, Any]] = { + "render_modes": [None, "human", "rgb_array"], + # "isaac_sim_version": get_version(), + } + """Metadata for the environment.""" + + def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs): + """Initialize the environment. + + Args: + cfg: The configuration object for the environment. + render_mode: The render mode for the environment. Defaults to None, which + is similar to ``"human"``. + + Raises: + RuntimeError: If a simulation context already exists. The environment must always create one + since it configures the simulation context and controls the simulation. + """ + # check that the config is valid + cfg.validate() + # store inputs to class + self.cfg = cfg + # store the render mode + self.render_mode = render_mode + # initialize internal variables + self._is_closed = False + + # set the seed for the environment + if self.cfg.seed is not None: + self.cfg.seed = self.seed(self.cfg.seed) + else: + logger.warning("Seed not set for the environment. The environment creation may not be deterministic.") + + # create a simulation context to control the simulator + if SimulationContext.instance() is None: + self.sim: SimulationContext = SimulationContext(self.cfg.sim) + else: + raise RuntimeError("Simulation context already exists. Cannot create a new one.") + + # make sure torch is running on the correct device + if "cuda" in self.device: + torch.cuda.set_device(self.device) + + # print useful information + print("[INFO]: Base environment:") + print(f"\tEnvironment device : {self.device}") + print(f"\tEnvironment seed : {self.cfg.seed}") + print(f"\tPhysics step-size : {self.physics_dt}") + print(f"\tRendering step-size : {self.physics_dt * self.cfg.sim.render_interval}") + print(f"\tEnvironment step-size : {self.step_dt}") + + if self.cfg.sim.render_interval < self.cfg.decimation: + msg = ( + f"The render interval ({self.cfg.sim.render_interval}) is smaller than the decimation " + f"({self.cfg.decimation}). Multiple render calls will happen for each environment step." + "If this is not intended, set the render interval to be equal to the decimation." + ) + logger.warning(msg) + + # generate scene + with Timer("[INFO]: Time taken for scene creation", "scene_creation"): + # set the stage context for scene creation steps which use the stage + with use_stage(self.sim.stage): + self.scene = InteractiveSceneWarp(self.cfg.scene) + self._setup_scene() + # attach_stage_to_usd_context() + print("[INFO]: Scene manager: ", self.scene) + + # set up camera viewport controller + # viewport is not available in other rendering modes so the function will throw a warning + # FIXME: This needs to be fixed in the future when we unify the UI functionalities even for + # non-rendering modes. + has_gui = bool(self.sim.get_setting("/isaaclab/has_gui")) + offscreen_render = bool(self.sim.get_setting("/isaaclab/render/offscreen")) + if has_gui or offscreen_render: + # self.viewport_camera_controller = ViewportCameraController(self, self.cfg.viewer) + self.viewport_camera_controller = None + else: + self.viewport_camera_controller = None + + # create event manager + # note: this is needed here (rather than after simulation play) to allow USD-related randomization events + # that must happen before the simulation starts. Example: randomizing mesh scale + if self.cfg.events: + self.event_manager = EventManager(self.cfg.events, self) + + # apply USD-related randomization events + if "prestartup" in self.event_manager.available_modes: + self.event_manager.apply(mode="prestartup") + + # play the simulator to activate physics handles + # note: this activates the physics simulation view that exposes TensorAPIs + # note: when started in extension mode, first call sim.reset_async() and then initialize the managers + # if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False: + # print("[INFO]: Starting the simulation. This may take a few seconds. Please wait...") + with Timer("[INFO]: Time taken for simulation start", "simulation_start"): + # since the reset can trigger callbacks which use the stage, + # we need to set the stage context here + with use_stage(self.sim.stage): + self.sim.reset() + # update scene to pre populate data buffers for assets and sensors. + # this is needed for the observation manager to get valid tensors for initialization. + # this shouldn't cause an issue since later on, users do a reset over all the + # environments so the lazy buffers would be reset. + self.scene.update(dt=self.physics_dt) + + # check if debug visualization is has been implemented by the environment + source_code = inspect.getsource(self._set_debug_vis_impl) + self.has_debug_vis_implementation = "NotImplementedError" not in source_code + self._debug_vis_handle = None + + # extend UI elements + # we need to do this here after all the managers are initialized + # this is because they dictate the sensors and commands right now + if bool(self.sim.settings.get("/isaaclab/visualizer")) and self.cfg.ui_window_class_type is not None: + self._window = self.cfg.ui_window_class_type(self, window_name="IsaacLab") + else: + # if no window, then we don't need to store the window + self._window = None + + # allocate dictionary to store metrics + self.extras = {} + + # initialize data and constants + # -- counter for simulation steps + self._sim_step_counter = 0 + # -- counter for curriculum + self.common_step_counter = 0 + # -- init buffers + self._episode_length_buf_wp = wp.zeros(self.num_envs, dtype=wp.int32, device=self.device) + self._episode_length_buf_torch = wp.to_torch(self._episode_length_buf_wp) + self.reset_terminated = wp.zeros(self.num_envs, dtype=wp.bool, device=self.device) + self.reset_time_outs = wp.zeros(self.num_envs, dtype=wp.bool, device=self.device) + self.reset_buf = wp.zeros(self.num_envs, dtype=wp.bool, device=self.device) + self._ALL_ENV_MASK = wp.ones(self.num_envs, dtype=wp.bool, device=self.device) + + # Expected bindings: + self.torch_obs_buf: torch.Tensor = None + self.torch_reward_buf: torch.Tensor = None + self.torch_reset_terminated: torch.Tensor = None + self.torch_reset_time_outs: torch.Tensor = None + self.torch_episode_length_buf: torch.Tensor = None + + # Warp CUDA graph cache for capture-or-replay + self._graph_cache = WarpGraphCache() + + # setup the action and observation spaces for Gym + self._configure_gym_env_spaces() + + # setup noise cfg for adding action and observation noise + if self.cfg.action_noise_model: + self._action_noise_model: NoiseModel = self.cfg.action_noise_model.class_type( + self.cfg.action_noise_model, num_envs=self.num_envs, device=self.device + ) + if self.cfg.observation_noise_model: + self._observation_noise_model: NoiseModel = self.cfg.observation_noise_model.class_type( + self.cfg.observation_noise_model, num_envs=self.num_envs, device=self.device + ) + + # perform events at the start of the simulation + if self.cfg.events: + # we print it here to make the logging consistent + print("[INFO] Event Manager: ", self.event_manager) + + if "startup" in self.event_manager.available_modes: + self.event_manager.apply(mode="startup") + + # set the framerate of the gym video recorder wrapper so that the playback speed of the produced + # video matches the simulation + self.metadata["render_fps"] = 1 / self.step_dt + + # print the environment information + print("[INFO]: Completed setting up the environment...") + + def __del__(self): + """Cleanup for the environment.""" + # Suppress errors during Python shutdown to avoid noisy tracebacks + # Note: contextlib may be None during interpreter shutdown + if contextlib is not None: + with contextlib.suppress(ImportError, AttributeError, TypeError): + self.close() + + """ + Properties. + """ + + @property + def num_envs(self) -> int: + """The number of instances of the environment that are running.""" + return self.scene.num_envs + + @property + def physics_dt(self) -> float: + """The physics time-step (in s). + + This is the lowest time-decimation at which the simulation is happening. + """ + return self.cfg.sim.dt + + @property + def step_dt(self) -> float: + """The environment stepping time-step (in s). + + This is the time-step at which the environment steps forward. + """ + return self.cfg.sim.dt * self.cfg.decimation + + @property + def device(self): + """The device on which the environment is running.""" + return self.sim.device + + @property + def max_episode_length_s(self) -> float: + """Maximum episode length in seconds.""" + return self.cfg.episode_length_s + + @property + def max_episode_length(self): + """The maximum episode length in steps adjusted from s.""" + return math.ceil(self.max_episode_length_s / (self.cfg.sim.dt * self.cfg.decimation)) + + @property + def episode_length_buf(self) -> torch.Tensor: + """The episode length buffer as a torch tensor. + + This is a view of the underlying warp array ``_episode_length_buf_wp``. + Setting this property copies values in-place to preserve the shared + memory link with warp kernels. + """ + return self._episode_length_buf_torch + + @episode_length_buf.setter + def episode_length_buf(self, value: torch.Tensor): + self._episode_length_buf_torch.copy_(value) + + """ + Operations. + """ + + def reset(self, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[VecEnvObs, dict]: + """Resets all the environments and returns observations. + + This function calls the :meth:`_reset_idx` function to reset all the environments. + However, certain operations, such as procedural terrain generation, that happened during initialization + are not repeated. + + Args: + seed: The seed to use for randomization. Defaults to None, in which case the seed is not set. + options: Additional information to specify how the environment is reset. Defaults to None. + + Note: + This argument is used for compatibility with Gymnasium environment definition. + + Returns: + A tuple containing the observations and extras. + """ + # set the seed + if seed is not None: + self.seed(seed) + + # reset state of scene + self._reset_idx(self._ALL_ENV_MASK) + + # update articulation kinematics + self.scene.write_data_to_sim() + + # if sensors are added to the scene, make sure we render to reflect changes in reset + if hasattr(self.sim, "has_rtx_sensors") and self.sim.has_rtx_sensors() and self.cfg.rerender_on_reset: + self.sim.render() + + # if self.cfg.wait_for_textures and self.sim.has_rtx_sensors(): + # while SimulationManager.assets_loading(): + # self.sim.render() + + # return observations + self._get_observations() + return {"policy": self.torch_obs_buf.clone()}, self.extras + + @Timer(name="env_step", msg="Step took:", enable=DEBUG_TIMER_STEP or DEBUG_TIMERS) + def step(self, action: torch.Tensor) -> VecEnvStepReturn: + """Execute one time-step of the environment's dynamics. + + The environment steps forward at a fixed time-step, while the physics simulation is decimated at a + lower time-step. This is to ensure that the simulation is stable. These two time-steps can be configured + independently using the :attr:`DirectRLEnvCfg.decimation` (number of simulation steps per environment step) + and the :attr:`DirectRLEnvCfg.sim.physics_dt` (physics time-step). Based on these parameters, the environment + time-step is computed as the product of the two. + + This function performs the following steps: + + 1. Pre-process the actions before stepping through the physics. + 2. Apply the actions to the simulator and step through the physics in a decimated manner. + 3. Compute the reward and done signals. + 4. Reset environments that have terminated or reached the maximum episode length. + 5. Apply interval events if they are enabled. + 6. Compute observations. + + Args: + action: The actions to apply on the environment. Shape is (num_envs, action_dim). + + Returns: + A tuple containing the observations, rewards, resets (terminated and truncated) and extras. + """ + + action = action.to(self.device) + # add action noise + if self.cfg.action_noise_model: + action = self._action_noise_model(action) + + # process actions, #TODO pass the torch tensor directly. + with Timer(name="pre_physics", msg="Pre-physics step took:", enable=DEBUG_TIMERS): + self._pre_physics_step( + wp.from_torch(action) + ) # Creates a tensor and discards it. Not graphable unless training loop reuses the same pointer. + + # check if we need to do rendering within the physics loop + # note: checked here once to avoid multiple checks within the loop + _has_rtx = hasattr(self.sim, "has_rtx_sensors") and self.sim.has_rtx_sensors() + is_rendering = bool(self.sim.settings.get("/isaaclab/visualizer")) or _has_rtx + + # perform physics stepping + with Timer(name="physics_loop", msg="Physics loop took:", enable=DEBUG_TIMERS): + for _ in range(self.cfg.decimation): + self._sim_step_counter += 1 + # set actions into buffers + # simulate + with Timer(name="apply_action", msg="Action processing step took:", enable=DEBUG_TIMERS): + self._graph_cache.capture_or_replay("action", self.step_warp_action) + + # write_data_to_sim runs outside the CUDA graph because _apply_actuator_model + # uses torch ops (wp.to_torch + torch arithmetic) that cross CUDA streams. + with Timer(name="write_data_to_sim_loop", msg="Write data to sim (loop) took:", enable=DEBUG_TIMERS): + self.scene.write_data_to_sim() + + with Timer(name="simulate", msg="Newton simulation step took:", enable=DEBUG_TIMERS): + self.sim.step(render=False) + # render between steps only if the GUI or an RTX sensor needs it + # note: we assume the render interval to be the shortest accepted rendering interval. + # If a camera needs rendering at a faster frequency, this will lead to unexpected behavior. + if self._sim_step_counter % self.cfg.sim.render_interval == 0 and is_rendering: + self.sim.render() + # update buffers at sim dt + with Timer(name="scene_update", msg="Scene update took:", enable=DEBUG_TIMERS): + self.scene.update(dt=self.physics_dt) + + self.common_step_counter += 1 # total step (common for all envs) + with Timer(name="end_pre_graph", msg="End pre-graph took:", enable=DEBUG_TIMERS): + self._graph_cache.capture_or_replay("end_pre", self._step_warp_end_pre) + # write_data_to_sim runs uncaptured — it uses torch ops that cross CUDA streams. + with Timer(name="write_data_to_sim_post", msg="Write data to sim (post-reset) took:", enable=DEBUG_TIMERS): + self.scene.write_data_to_sim() + with Timer(name="end_post_graph", msg="End post-graph took:", enable=DEBUG_TIMERS): + self._graph_cache.capture_or_replay("end_post", self._step_warp_end_post) + + # Visualization hook — runs after CUDA graph scope. Override in subclass + # to update markers or other non-graphable visual elements. + with Timer(name="visualize", msg="Visualize took:", enable=DEBUG_TIMERS): + self._post_step_visualize() + + # return observations, rewards, resets and extras + return ( + {"policy": self.torch_obs_buf.clone()}, + self.torch_reward_buf, + self.torch_reset_terminated, + self.torch_reset_time_outs, + self.extras, + ) + + def _post_step_visualize(self) -> None: + """Hook for updating visualization markers after CUDA graph scope. + + Override in subclass to update markers or other non-graphable visual + elements (e.g., those requiring wp.to_torch + .cpu().numpy()). + This runs every step, outside any CUDA graph capture. + """ + pass + + def step_warp_action(self) -> None: + self._apply_action() + # Note: scene.write_data_to_sim() is called separately outside the CUDA graph + # capture scope because it invokes _apply_actuator_model() which uses torch + # arithmetic (wp.to_torch + torch ops). This would cause a CUDA stream crossing + # error during graph capture. Moving it outside is safe since it runs every step. + + def _step_warp_end_pre(self) -> None: + """Capturable portion before write_data_to_sim (pure warp kernels).""" + wp.launch( + add_to_env, + dim=self.num_envs, + inputs=[ + self._episode_length_buf_wp, + 1, + ], + ) + self._get_dones() + self._get_rewards() + + # -- reset envs that terminated/timed-out and log the episode information + self._reset_idx(mask=self.reset_buf) + + def _step_warp_end_post(self) -> None: + """Capturable portion after write_data_to_sim (pure warp kernels).""" + # if sensors are added to the scene, make sure we render to reflect changes in reset + # if self.sim.has_rtx_sensors() and self.cfg.rerender_on_reset: + # self.sim.render() + + # TODO We could split it out. + # post-step: step interval event + # if self.cfg.events: + # if "interval" in self.event_manager.available_modes: + # self.event_manager.apply(mode="interval", dt=self.step_dt) + + # update observations + self._get_observations() + + # add observation noise + # note: we apply no noise to the state space (since it is used for critic networks) + # if self.cfg.observation_noise_model: + # self.obs_buf["policy"] = self._observation_noise_model(self.obs_buf["policy"]) + + @staticmethod + def seed(seed: int = -1) -> int: + """Set the seed for the environment. + + Args: + seed: The seed for random generator. Defaults to -1. + + Returns: + The seed used for random generator. + """ + # set seed for replicator + try: + import omni.replicator.core as rep + + rep.set_global_seed(seed) + except ModuleNotFoundError: + pass + # set seed for torch and other libraries + return configure_seed(seed) + + def render(self, recompute: bool = False) -> np.ndarray | None: + """Run rendering without stepping through the physics. + + By convention, if mode is: + + - **human**: Render to the current display and return nothing. Usually for human consumption. + - **rgb_array**: Return an numpy.ndarray with shape (x, y, 3), representing RGB values for an + x-by-y pixel image, suitable for turning into a video. + + Args: + recompute: Whether to force a render even if the simulator has already rendered the scene. + Defaults to False. + + Returns: + The rendered image as a numpy array if mode is "rgb_array". Otherwise, returns None. + + Raises: + RuntimeError: If mode is set to "rgb_data" and simulation render mode does not support it. + In this case, the simulation render mode must be set to ``RenderMode.PARTIAL_RENDERING`` + or ``RenderMode.FULL_RENDERING``. + NotImplementedError: If an unsupported rendering mode is specified. + """ + # run a rendering step of the simulator + # if we have rtx sensors, we do not need to render again sim + if not (hasattr(self.sim, "has_rtx_sensors") and self.sim.has_rtx_sensors()) and not recompute: + self.sim.render() + # decide the rendering mode + if self.render_mode == "human" or self.render_mode is None: + return None + elif self.render_mode == "rgb_array": + # check that if any render could have happened + has_gui = bool(self.sim.get_setting("/isaaclab/has_gui")) + offscreen_render = bool(self.sim.get_setting("/isaaclab/render/offscreen")) + # Rendering is possible if we have GUI or offscreen rendering enabled + can_render = has_gui or offscreen_render + + if not can_render: + render_mode_name = "NO_GUI_OR_RENDERING" + raise RuntimeError( + f"Cannot render '{self.render_mode}' when the simulation render mode is" + f" '{render_mode_name}'. Please set the simulation render mode" + " to:'PARTIAL_RENDERING' or" + " 'FULL_RENDERING'. If running headless, make" + " sure --enable_cameras is set." + ) + # create the annotator if it does not exist + if not hasattr(self, "_rgb_annotator"): + import omni.replicator.core as rep + + # create render product + self._render_product = rep.create.render_product( + self.cfg.viewer.cam_prim_path, self.cfg.viewer.resolution + ) + # create rgb annotator -- used to read data from the render product + self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu") + self._rgb_annotator.attach([self._render_product]) + # obtain the rgb data + rgb_data = self._rgb_annotator.get_data() + # convert to numpy array + rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape) + # return the rgb data + # note: initially the renerer is warming up and returns empty data + if rgb_data.size == 0: + return np.zeros((self.cfg.viewer.resolution[1], self.cfg.viewer.resolution[0], 3), dtype=np.uint8) + else: + return rgb_data[:, :, :3] + else: + raise NotImplementedError( + f"Render mode '{self.render_mode}' is not supported. Please use: {self.metadata['render_modes']}." + ) + + def close(self): + """Cleanup for the environment.""" + if not self._is_closed: + # close entities related to the environment + # note: this is order-sensitive to avoid any dangling references + if self.cfg.events: + del self.event_manager + del self.scene + if self.viewport_camera_controller is not None: + del self.viewport_camera_controller + + # # clear callbacks and instance + # if float(".".join(get_version()[2])) >= 5: + # if self.cfg.sim.create_stage_in_memory: + # # detach physx stage + # omni.physx.get_physx_simulation_interface().detach_stage() + # self.sim.stop() + # self.sim.clear() + + # self.sim.clear_all_callbacks() + self.sim.clear_instance() + + # destroy the window + if self._window is not None: + self._window = None + # update closing status + self._is_closed = True + + """ + Operations - Debug Visualization. + """ + + def set_debug_vis(self, debug_vis: bool) -> bool: + """Toggles the environment debug visualization. + + Args: + debug_vis: Whether to visualize the environment debug visualization. + + Returns: + Whether the debug visualization was successfully set. False if the environment + does not support debug visualization. + """ + # check if debug visualization is supported + if not self.has_debug_vis_implementation: + return False + # toggle debug visualization objects + self._set_debug_vis_impl(debug_vis) + # toggle debug visualization handles + if debug_vis: + import omni.kit.app + + # create a subscriber for the post update event if it doesn't exist + if self._debug_vis_handle is None: + app_interface = omni.kit.app.get_app_interface() + self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( + lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) + ) + else: + # remove the subscriber if it exists + if self._debug_vis_handle is not None: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + # return success + return True + + """ + Helper functions. + """ + + def _configure_gym_env_spaces(self): + """Configure the action and observation spaces for the Gym environment.""" + # show deprecation message and overwrite configuration + if self.cfg.num_actions is not None: + logger.warning("DirectRLEnvCfg.num_actions is deprecated. Use DirectRLEnvCfg.action_space instead.") + if isinstance(self.cfg.action_space, type(MISSING)): + self.cfg.action_space = self.cfg.num_actions + if self.cfg.num_observations is not None: + logger.warning( + "DirectRLEnvCfg.num_observations is deprecated. Use DirectRLEnvCfg.observation_space instead." + ) + if isinstance(self.cfg.observation_space, type(MISSING)): + self.cfg.observation_space = self.cfg.num_observations + if self.cfg.num_states is not None: + logger.warning("DirectRLEnvCfg.num_states is deprecated. Use DirectRLEnvCfg.state_space instead.") + if isinstance(self.cfg.state_space, type(MISSING)): + self.cfg.state_space = self.cfg.num_states + + # set up spaces + self.single_observation_space = gym.spaces.Dict() + self.single_observation_space["policy"] = spec_to_gym_space(self.cfg.observation_space) + self.single_action_space = spec_to_gym_space(self.cfg.action_space) + + # batch the spaces for vectorized environments + self.observation_space = gym.vector.utils.batch_space(self.single_observation_space["policy"], self.num_envs) + self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs) + + # optional state space for asymmetric actor-critic architectures + self.state_space = None + if self.cfg.state_space: + self.single_observation_space["critic"] = spec_to_gym_space(self.cfg.state_space) + self.state_space = gym.vector.utils.batch_space(self.single_observation_space["critic"], self.num_envs) + + # instantiate actions (needed for tasks for which the observations computation is dependent on the actions) + self.actions = sample_space(self.single_action_space, self.sim.device, batch_size=self.num_envs, fill_value=0) + + def _reset_idx(self, mask: wp.array | None = None): + """Reset environments based on a boolean mask. + + Args: + mask: Boolean mask indicating which environments to reset. + Shape is (num_envs,). If None, all environments are reset. + """ + if mask is None: + mask = self._ALL_ENV_MASK + self.scene.reset(env_ids=None, env_mask=mask) + + # apply events such as randomization for environments that need a reset + # if self.cfg.events: + # if "reset" in self.event_manager.available_modes: + # env_step_count = self._sim_step_counter // self.cfg.decimation + # self.event_manager.apply(mode="reset", env_ids=env_ids, global_env_step_count=env_step_count) + + # reset noise models + # if self.cfg.action_noise_model: + # self._action_noise_model.reset(env_ids) + # if self.cfg.observation_noise_model: + # self._observation_noise_model.reset(env_ids) + + # reset the episode length buffer + wp.launch( + zero_mask_int32, + dim=self.num_envs, + inputs=[ + mask, + self._episode_length_buf_wp, + ], + ) + + """ + Implementation-specific functions. + """ + + def _setup_scene(self): + """Setup the scene for the environment. + + This function is responsible for creating the scene objects and setting up the scene for the environment. + The scene creation can happen through :class:`isaaclab.scene.InteractiveSceneCfg` or through + directly creating the scene objects and registering them with the scene manager. + + We leave the implementation of this function to the derived classes. If the environment does not require + any explicit scene setup, the function can be left empty. + """ + pass + + @abstractmethod + def _pre_physics_step(self, actions: wp.array) -> None: + """Pre-process actions before stepping through the physics. + + This function is responsible for pre-processing the actions before stepping through the physics. + It is called before the physics stepping (which is decimated). + + Args: + actions: The actions to apply on the environment. Shape is (num_envs, action_dim). + """ + raise NotImplementedError(f"Please implement the '_pre_physics_step' method for {self.__class__.__name__}.") + + @abstractmethod + def _apply_action(self) -> None: + """Apply actions to the simulator. + + This function is responsible for applying the actions to the simulator. It is called at each + physics time-step. Must be pure warp (no torch ops) to be CUDA graph capturable. + """ + raise NotImplementedError(f"Please implement the '_apply_action' method for {self.__class__.__name__}.") + + @abstractmethod + def _get_observations(self) -> dict: + """Compute and return the observations for the environment. + + Returns: + The observations dictionary, e.g. ``{"policy": tensor}``. + """ + raise NotImplementedError(f"Please implement the '_get_observations' method for {self.__class__.__name__}.") + + def _get_states(self) -> VecEnvObs | None: + """Compute and return the states for the environment. + + The state-space is used for asymmetric actor-critic architectures. It is configured + using the :attr:`DirectRLEnvCfg.state_space` parameter. + + Returns: + The states for the environment. If the environment does not have a state-space, the function + returns a None. + """ + return None # noqa: R501 + + @abstractmethod + def _get_rewards(self) -> None: + """Compute the rewards for the environment. + + Writes results into the reward buffer (e.g., ``self.reward_buf``). + """ + raise NotImplementedError(f"Please implement the '_get_rewards' method for {self.__class__.__name__}.") + + @abstractmethod + def _get_dones(self) -> None: + """Compute the done flags for the environment. + + Writes results into the done buffers (e.g., ``self.reset_terminated``, ``self.reset_time_outs``). + """ + raise NotImplementedError(f"Please implement the '_get_dones' method for {self.__class__.__name__}.") + + def _set_debug_vis_impl(self, debug_vis: bool): + """Set debug visualization into visualization objects. + + This function is responsible for creating the visualization objects if they don't exist + and input ``debug_vis`` is True. If the visualization objects exist, the function should + set their visibility into the stage. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/interactive_scene_warp.py b/source/isaaclab_experimental/isaaclab_experimental/envs/interactive_scene_warp.py new file mode 100644 index 000000000000..f792b994f67b --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/interactive_scene_warp.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native interactive scene with env_mask support for reset.""" + +from __future__ import annotations + +from collections.abc import Sequence + +import warp as wp + +from isaaclab.scene import InteractiveScene + + +class InteractiveSceneWarp(InteractiveScene): + """Interactive scene with warp-native env_mask support for reset. + + Extends :class:`InteractiveScene` to accept a boolean warp mask for selective resets, + avoiding the need to convert between env_ids and masks. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + """Reset scene entities using either env_ids or a boolean env_mask. + + Args: + env_ids: The indices of the environments to reset. Defaults to None (all instances). + env_mask: Boolean warp mask of shape (num_envs,). Defaults to None. + """ + # -- assets (support env_mask) + for articulation in self._articulations.values(): + articulation.reset(env_ids, env_mask=env_mask) + for deformable_object in self._deformable_objects.values(): + deformable_object.reset(env_ids) + for rigid_object in self._rigid_objects.values(): + rigid_object.reset(env_ids, env_mask=env_mask) + for surface_gripper in self._surface_grippers.values(): + surface_gripper.reset(env_ids) + for rigid_object_collection in self._rigid_object_collections.values(): + rigid_object_collection.reset(env_ids, env_mask=env_mask) + # -- sensors (no env_mask support) + for sensor in self._sensors.values(): + sensor.reset(env_ids) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_env_warp.py b/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_env_warp.py new file mode 100644 index 000000000000..8c558b925947 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_env_warp.py @@ -0,0 +1,737 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental manager-based base environment. + +This is a local copy of :class:`isaaclab.envs.ManagerBasedEnv` placed under +``isaaclab_experimental`` so we can evolve the manager-based workflow for Warp-first +pipelines without depending on (or subclassing) the stable env implementation. + +Behavior is intended to match the stable environment initially. +""" + +# import builtins +import contextlib +import importlib +import logging +import warnings +from collections.abc import Sequence +from copy import deepcopy +from typing import Any + +import torch +import warp as wp + +from isaaclab.envs.common import VecEnvObs +from isaaclab.envs.manager_based_env_cfg import ManagerBasedEnvCfg +from isaaclab.envs.ui import ViewportCameraController +from isaaclab.envs.utils.io_descriptors import export_articulations_data, export_scene_data +from isaaclab.sim import SimulationContext +from isaaclab.sim.utils import use_stage +from isaaclab.ui.widgets import ManagerLiveVisualizer +from isaaclab.utils.seed import configure_seed +from isaaclab.utils.timer import Timer + +from isaaclab_experimental.envs.interactive_scene_warp import InteractiveSceneWarp as InteractiveScene +from isaaclab_experimental.utils.manager_call_switch import ManagerCallMode, ManagerCallSwitch +from isaaclab_experimental.utils.warp import resolve_1d_mask + +# import logger +logger = logging.getLogger(__name__) + + +@wp.kernel +def initialize_rng_state( + # input + seed: wp.int32, + # output + state: wp.array(dtype=wp.uint32), +): + env_id = wp.tid() + state[env_id] = wp.rand_init(seed, wp.int32(env_id)) + + +class ManagerBasedEnvWarp: + """The base environment for the manager-based workflow (experimental fork). + + The implementation mirrors :class:`isaaclab.envs.ManagerBasedEnv` to provide + an isolated base class for experimental Warp-based workflows. + """ + + def __init__(self, cfg: ManagerBasedEnvCfg): + """Initialize the environment. + + Args: + cfg: The configuration object for the environment. + + Raises: + RuntimeError: If a simulation context already exists. The environment must always create one + since it configures the simulation context and controls the simulation. + """ + # check that the config is valid + cfg.validate() + # store inputs to class + self.cfg = cfg + # initialize internal variables + self._is_closed = False + # temporary debug runtime config for manager source/call switching. + cfg_source: dict | str | None = getattr(self.cfg, "manager_call_config", None) + max_modes: dict[str, int] | None = getattr(self.cfg, "manager_call_max_mode", None) + self._manager_call_switch = ManagerCallSwitch(cfg_source, max_modes=max_modes) + self._apply_manager_term_cfg_profile() + + # set the seed for the environment + if self.cfg.seed is not None: + self.cfg.seed = self.seed(self.cfg.seed) + else: + logger.warning("Seed not set for the environment. The environment creation may not be deterministic.") + + # create a simulation context to control the simulator + if SimulationContext.instance() is None: + # the type-annotation is required to avoid a type-checking error + # since it gets confused with Isaac Sim's SimulationContext class + self.sim: SimulationContext = SimulationContext(self.cfg.sim) + else: + # simulation context should only be created before the environment + # when in extension mode + # if not builtins.ISAAC_LAUNCHED_FROM_TERMINAL: + # raise RuntimeError("Simulation context already exists. Cannot create a new one.") + self.sim: SimulationContext = SimulationContext.instance() + + # make sure torch is running on the correct device + if "cuda" in self.device: + torch.cuda.set_device(self.device) + + # print useful information + print("[INFO]: Base environment:") + print(f"\tEnvironment device : {self.device}") + print(f"\tEnvironment seed : {self.cfg.seed}") + print(f"\tPhysics step-size : {self.physics_dt}") + print(f"\tRendering step-size : {self.physics_dt * self.cfg.sim.render_interval}") + print(f"\tEnvironment step-size : {self.step_dt}") + + if self.cfg.sim.render_interval < self.cfg.decimation: + msg = ( + f"The render interval ({self.cfg.sim.render_interval}) is smaller than the decimation " + f"({self.cfg.decimation}). Multiple render calls will happen for each environment step. " + "If this is not intended, set the render interval to be equal to the decimation." + ) + logger.warning(msg) + + # counter for simulation steps + self._sim_step_counter = 0 + + # allocate dictionary to store metrics + self.extras = {} + + # generate scene + with Timer("[INFO]: Time taken for scene creation", "scene_creation"): + # set the stage context for scene creation steps which use the stage + with use_stage(self.sim.stage): + self.scene = InteractiveScene(self.cfg.scene) + # attach_stage_to_usd_context() + print("[INFO]: Scene manager: ", self.scene) + + # Shared per-env Warp RNG state (accessible to all managers/terms via `env`). + # This is a single stream per env (no lookup) and is initialized once when `num_envs` is known. + self.rng_state_wp = wp.zeros((self.num_envs,), dtype=wp.uint32, device=self.device) + seed_val = int(self.cfg.seed) if self.cfg.seed is not None else -1 + wp.launch( + kernel=initialize_rng_state, + dim=self.num_envs, + inputs=[seed_val, self.rng_state_wp], + device=self.device, + ) + + # TODO(jichuanh): this is problematic as warp capture requires stable pointers, + # using different masks for different managers/terms will cause problems. + # Pre-allocated env masks (shared across managers/terms via `env`). + self.ALL_ENV_MASK = wp.ones((self.num_envs,), dtype=wp.bool, device=self.device) + self.ENV_MASK = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + + # Persistent scalar buffer for global env step count (stable pointer for capture). + self._global_env_step_count_wp = wp.zeros((1,), dtype=wp.int32, device=self.device) + + # set up camera viewport controller + # viewport is not available in other rendering modes so the function will throw a warning + # FIXME: This needs to be fixed in the future when we unify the UI functionalities even for + # non-rendering modes. + viz_str = self.sim.get_setting("/isaaclab/visualizer") or "" + available_visualizers = [v.strip() for v in viz_str.split(",") if v.strip()] + if "kit" in available_visualizers and bool(viz_str): + self.viewport_camera_controller = ViewportCameraController(self, self.cfg.viewer) + else: + self.viewport_camera_controller = None + + # create event manager + # note: this is needed here (rather than after simulation play) to allow USD-related randomization events + # that must happen before the simulation starts. Example: randomizing mesh scale + self.event_manager = self._manager_call_switch.resolve_manager_class("EventManager")(self.cfg.events, self) + + # apply USD-related randomization events + if "prestartup" in self.event_manager.available_modes: + self.event_manager.apply(mode="prestartup") + + # play the simulator to activate physics handles + # note: this activates the physics simulation view that exposes TensorAPIs + # note: when started in extension mode, first call sim.reset_async() and then initialize the managers + # if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False: + print("[INFO]: Starting the simulation. This may take a few seconds. Please wait...") + with Timer("[INFO]: Time taken for simulation start", "simulation_start"): + # since the reset can trigger callbacks which use the stage, + # we need to set the stage context here + with use_stage(self.sim.stage): + self.sim.reset() + # update scene to pre populate data buffers for assets and sensors. + # this is needed for the observation manager to get valid tensors for initialization. + # this shouldn't cause an issue since later on, users do a reset over all the environments so the lazy + # buffers would be reset. + self.scene.update(dt=self.physics_dt) + + # TODO(jichuanh): This is a temporary solution for event_manager only, but it should be general for all managers + # Resolve SceneEntityCfg-dependent term params once before any captured event paths. + if (not self.event_manager._is_scene_entities_resolved) and self.sim.is_playing(): + self.event_manager._resolve_terms_callback(None) + + # add timeline event to load managers + self.load_managers() + + # extend UI elements + # we need to do this here after all the managers are initialized + # this is because they dictate the sensors and commands right now + if self.sim.has_gui and self.cfg.ui_window_class_type is not None: + # setup live visualizers + self.setup_manager_visualizers() + self._window = self.cfg.ui_window_class_type(self, window_name="IsaacLab") + else: + # if no window, then we don't need to store the window + self._window = None + + # initialize observation buffers + self.obs_buf = {} + + # export IO descriptors if requested + if self.cfg.export_io_descriptors: + self.export_IO_descriptors() + + # show deprecation message for rerender_on_reset + if self.cfg.rerender_on_reset: + msg = ( + "\033[93m\033[1m[DEPRECATION WARNING] ManagerBasedEnvCfg.rerender_on_reset is deprecated. Use" + " ManagerBasedEnvCfg.num_rerenders_on_reset instead.\033[0m" + ) + warnings.warn( + msg, + FutureWarning, + stacklevel=2, + ) + if self.cfg.num_rerenders_on_reset == 0: + self.cfg.num_rerenders_on_reset = 1 + + def __del__(self): + """Cleanup for the environment.""" + # Suppress errors during Python shutdown to avoid noisy tracebacks + # Note: contextlib may be None during interpreter shutdown + if contextlib is not None: + with contextlib.suppress(ImportError, AttributeError, TypeError): + self.close() + + """ + Properties. + """ + + @property + def num_envs(self) -> int: + """The number of instances of the environment that are running.""" + return self.scene.num_envs + + @property + def physics_dt(self) -> float: + """The physics time-step (in s). + + This is the lowest time-decimation at which the simulation is happening. + """ + return self.cfg.sim.dt + + @property + def step_dt(self) -> float: + """The environment stepping time-step (in s). + + This is the time-step at which the environment steps forward. + """ + return self.cfg.sim.dt * self.cfg.decimation + + @property + def device(self): + """The device on which the environment is running.""" + return self.sim.device + + @property + def env_origins_wp(self) -> wp.array: + """Scene env origins as a warp ``vec3f`` array. Cached on first access.""" + if not hasattr(self, "_env_origins_wp"): + origins = self.scene.env_origins + if isinstance(origins, wp.array): + self._env_origins_wp = origins + else: + self._env_origins_wp = wp.from_torch(origins, dtype=wp.vec3f) + return self._env_origins_wp + + def resolve_env_mask( + self, + *, + env_ids: Sequence[int] | slice | wp.array | torch.Tensor | None = None, + env_mask: wp.array | torch.Tensor | None = None, + ) -> wp.array: + """Resolve environment ids/mask into a Warp boolean mask of shape ``(num_envs,)``.""" + return resolve_1d_mask( + ids=env_ids, + mask=env_mask, + all_mask=self.ALL_ENV_MASK, + scratch_mask=self.ENV_MASK, + device=self.device, + ) + + @property + def get_IO_descriptors(self): + """Get the IO descriptors for the environment. + + Returns: + A dictionary with keys as the group names and values as the IO descriptors. + """ + return { + "observations": self.observation_manager.get_IO_descriptors, + "actions": self.action_manager.get_IO_descriptors, + "articulations": export_articulations_data(self), + "scene": export_scene_data(self), + } + + def export_IO_descriptors(self, output_dir: str | None = None): + """Export the IO descriptors for the environment. + + Args: + output_dir: The directory to export the IO descriptors to. + """ + import os + + import yaml + + IO_descriptors = self.get_IO_descriptors + + if output_dir is None: + if self.cfg.log_dir is not None: + output_dir = os.path.join(self.cfg.log_dir, "io_descriptors") + else: + raise ValueError( + "Output directory is not set. Please set the log directory using the `log_dir`" + " configuration or provide an explicit output_dir parameter." + ) + + if not os.path.exists(output_dir): + os.makedirs(output_dir, exist_ok=True) + + with open(os.path.join(output_dir, "IO_descriptors.yaml"), "w") as f: + print(f"[INFO]: Exporting IO descriptors to {os.path.join(output_dir, 'IO_descriptors.yaml')}") + yaml.safe_dump(IO_descriptors, f) + + """ + Operations - Setup. + """ + + def load_managers(self): + """Load the managers for the environment. + + This function is responsible for creating the various managers (action, observation, + events, etc.) for the environment. Since the managers require access to physics handles, + they can only be created after the simulator is reset (i.e. played for the first time). + + .. note:: + In case of standalone application (when running simulator from Python), the function is called + automatically when the class is initialized. + + However, in case of extension mode, the user must call this function manually after the simulator + is reset. This is because the simulator is only reset when the user calls + :meth:`SimulationContext.reset_async` and it isn't possible to call async functions in the constructor. + + """ + # prepare the managers + # -- event manager (we print it here to make the logging consistent) + print("[INFO] Event Manager: ", self.event_manager) + # -- recorder manager + self.recorder_manager = self._manager_call_switch.resolve_manager_class("RecorderManager")( + self.cfg.recorders, self + ) + print("[INFO] Recorder Manager: ", self.recorder_manager) + # -- action manager + self.action_manager = self._manager_call_switch.resolve_manager_class("ActionManager")(self.cfg.actions, self) + print("[INFO] Action Manager: ", self.action_manager) + # -- observation manager + self.observation_manager = self._manager_call_switch.resolve_manager_class("ObservationManager")( + self.cfg.observations, self + ) + print("[INFO] Observation Manager:", self.observation_manager) + + # perform events at the start of the simulation + # in-case a child implementation creates other managers, the randomization should happen + # when all the other managers are created + if self.__class__ == ManagerBasedEnvWarp and "startup" in self.event_manager.available_modes: + self.event_manager.apply(mode="startup") + + def setup_manager_visualizers(self): + """Creates live visualizers for manager terms.""" + + self.manager_visualizers = { + "action_manager": ManagerLiveVisualizer(manager=self.action_manager), + "observation_manager": ManagerLiveVisualizer(manager=self.observation_manager), + } + + """ + Operations - MDP. + """ + + def reset( + self, seed: int | None = None, env_ids: Sequence[int] | None = None, options: dict[str, Any] | None = None + ) -> tuple[VecEnvObs, dict]: + """Resets the specified environments and returns observations. + + This function calls the :meth:`_reset_idx` function to reset the specified environments. + However, certain operations, such as procedural terrain generation, that happened during initialization + are not repeated. + + Args: + seed: The seed to use for randomization. Defaults to None, in which case the seed is not set. + env_ids: The environment ids to reset. Defaults to None, in which case all environments are reset. + options: Additional information to specify how the environment is reset. Defaults to None. + + Note: + This argument is used for compatibility with Gymnasium environment definition. + + Returns: + A tuple containing the observations and extras. + """ + if env_ids is None: + env_ids = torch.arange(self.num_envs, dtype=torch.int64, device=self.device) + + # trigger recorder terms for pre-reset calls + self.recorder_manager.record_pre_reset(env_ids) + + # set the seed + if seed is not None: + used_seed = self.seed(seed) + # keep cfg seed in sync for downstream users + self.cfg.seed = used_seed + # re-initialize per-env Warp RNG state without reallocating (stable pointer for capture) + wp.launch( + kernel=initialize_rng_state, + dim=self.num_envs, + inputs=[int(used_seed), self.rng_state_wp], + device=self.device, + ) + + # reset state of scene + self._reset_idx(env_ids) + + # update articulation kinematics + self.scene.write_data_to_sim() + self.sim.forward() + # if sensors are added to the scene, make sure we render to reflect changes in reset + if self.sim.settings.get("/isaaclab/render/rtx_sensors") and self.cfg.num_rerenders_on_reset > 0: + for _ in range(self.cfg.num_rerenders_on_reset): + self.sim.render() + + # trigger recorder terms for post-reset calls + self.recorder_manager.record_post_reset(env_ids) + + # compute observations + self.obs_buf = self.observation_manager.compute(update_history=True) + + # return observations + return self.obs_buf, self.extras + + def reset_to( + self, + state: dict[str, dict[str, dict[str, torch.Tensor]]], + env_ids: Sequence[int] | None, + seed: int | None = None, + is_relative: bool = False, + ): + """Resets specified environments to provided states. + + This function resets the environments to the provided states. The state is a dictionary + containing the state of the scene entities. Please refer to :meth:`InteractiveScene.get_state` + for the format. + + The function is different from the :meth:`reset` function as it resets the environments to specific states, + instead of using the randomization events for resetting the environments. + + Args: + state: The state to reset the specified environments to. Please refer to + :meth:`InteractiveScene.get_state` for the format. + env_ids: The environment ids to reset. Defaults to None, in which case all environments are reset. + seed: The seed to use for randomization. Defaults to None, in which case the seed is not set. + is_relative: If set to True, the state is considered relative to the environment origins. + Defaults to False. + """ + # reset all envs in the scene if env_ids is None + if env_ids is None: + env_ids = torch.arange(self.num_envs, dtype=torch.int64, device=self.device) + + # trigger recorder terms for pre-reset calls + self.recorder_manager.record_pre_reset(env_ids) + + # set the seed + if seed is not None: + self.seed(seed) + + self._reset_idx(env_ids) + + # set the state + self.scene.reset_to(state, env_ids, is_relative=is_relative) + + # update articulation kinematics + self.sim.forward() + + # if sensors are added to the scene, make sure we render to reflect changes in reset + if self.sim.settings.get("/isaaclab/render/rtx_sensors") and self.cfg.num_rerenders_on_reset > 0: + for _ in range(self.cfg.num_rerenders_on_reset): + self.sim.render() + + # trigger recorder terms for post-reset calls + self.recorder_manager.record_post_reset(env_ids) + + # compute observations + self.obs_buf = self.observation_manager.compute(update_history=True) + + # return observations + return self.obs_buf, self.extras + + def step(self, action: torch.Tensor) -> tuple[VecEnvObs, dict]: + """Execute one time-step of the environment's dynamics. + + The environment steps forward at a fixed time-step, while the physics simulation is + decimated at a lower time-step. This is to ensure that the simulation is stable. These two + time-steps can be configured independently using the :attr:`ManagerBasedEnvCfg.decimation` (number of + simulation steps per environment step) and the :attr:`ManagerBasedEnvCfg.sim.dt` (physics time-step) + parameters. Based on these parameters, the environment time-step is computed as the product of the two. + + Args: + action: The actions to apply on the environment. Shape is (num_envs, action_dim). + + Returns: + A tuple containing the observations and extras. + """ + # process actions + action_device = action.to(self.device) + if action_device.dtype != torch.float32: + action_device = action_device.float() + if not action_device.is_contiguous(): + action_device = action_device.contiguous() + action_wp = wp.from_torch(action_device, dtype=wp.float32) + self.action_manager.process_action(action_wp) + + self.recorder_manager.record_pre_step() + + # check if we need to do rendering within the physics loop + # note: checked here once to avoid multiple checks within the loop + is_rendering = bool(self.sim.settings.get("/isaaclab/visualizer")) or self.sim.settings.get( + "/isaaclab/render/rtx_sensors" + ) + + # perform physics stepping + for _ in range(self.cfg.decimation): + self._sim_step_counter += 1 + # set actions into buffers + self.action_manager.apply_action() + # set actions into simulator + self.scene.write_data_to_sim() + # simulate + self.sim.step(render=False) + # render between steps only if the GUI or an RTX sensor needs it + # note: we assume the render interval to be the shortest accepted rendering interval. + # If a camera needs rendering at a faster frequency, this will lead to unexpected behavior. + if self._sim_step_counter % self.cfg.sim.render_interval == 0 and is_rendering: + self.sim.render() + # update buffers at sim dt + self.scene.update(dt=self.physics_dt) + + # post-step: step interval event + if "interval" in self.event_manager.available_modes: + self.event_manager.apply(mode="interval", dt=self.step_dt) + + # -- compute observations + self.obs_buf = self.observation_manager.compute(update_history=True) + self.recorder_manager.record_post_step() + + # return observations and extras + return self.obs_buf, self.extras + + @staticmethod + def seed(seed: int = -1) -> int: + """Set the seed for the environment. + + Args: + seed: The seed for random generator. Defaults to -1. + + Returns: + The seed used for random generator. + """ + # set seed for replicator + try: + import omni.replicator.core as rep + + rep.set_global_seed(seed) + except ModuleNotFoundError: + pass + # set seed for torch and other libraries + return configure_seed(seed) + + def close(self): + """Cleanup for the environment.""" + if not self._is_closed: + # destructor is order-sensitive + del self.viewport_camera_controller + del self.action_manager + del self.observation_manager + del self.event_manager + del self.recorder_manager + del self.scene + + # self.sim.clear_all_callbacks() + self.sim.clear_instance() + + # destroy the window + if self._window is not None: + self._window = None + # update closing status + self._is_closed = True + + """ + Helper functions. + """ + + def _resolve_stable_cfg_counterpart(self) -> ManagerBasedEnvCfg | None: + """Resolve a stable task config counterpart for the current experimental task config. + + The lookup follows a module-name mirror convention: + ``isaaclab_tasks_experimental...`` -> ``isaaclab_tasks...`` with the same config class name. + """ + cfg_cls = self.cfg.__class__ + cfg_module_name = cfg_cls.__module__ + if "isaaclab_tasks_experimental" not in cfg_module_name: + return None + + stable_module_name = cfg_module_name.replace("isaaclab_tasks_experimental", "isaaclab_tasks", 1) + try: + stable_module = importlib.import_module(stable_module_name) + except Exception as exc: + logger.warning( + "Failed to import stable task cfg module '%s' for manager_call_config stable mode: %s", + stable_module_name, + exc, + ) + return None + + stable_cfg_cls = getattr(stable_module, cfg_cls.__name__, None) + if stable_cfg_cls is None: + logger.warning( + "Stable task cfg class '%s' not found in module '%s'.", + cfg_cls.__name__, + stable_module_name, + ) + return None + + try: + return stable_cfg_cls() + except Exception as exc: + logger.warning( + "Failed to instantiate stable task cfg '%s.%s': %s", + stable_module_name, + cfg_cls.__name__, + exc, + ) + return None + + def _apply_manager_term_cfg_profile(self) -> None: + """Align term configs with manager modes for stable manager selections. + + When a manager is configured as STABLE (0), swap its corresponding config subtree + from the stable task counterpart to keep manager-term type/signature compatibility. + """ + manager_to_cfg_attr = { + "ActionManager": "actions", + "ObservationManager": "observations", + "EventManager": "events", + "RecorderManager": "recorders", + "CommandManager": "commands", + "TerminationManager": "terminations", + "RewardManager": "rewards", + "CurriculumManager": "curriculum", + } + + stable_manager_names = [ + manager_name + for manager_name in manager_to_cfg_attr + if self._manager_call_switch.get_mode_for_manager(manager_name) == ManagerCallMode.STABLE + ] + if not stable_manager_names: + return + + stable_cfg = self._resolve_stable_cfg_counterpart() + if stable_cfg is None: + logger.warning( + "Stable managers requested (%s), but no stable cfg counterpart could be resolved." + " Keeping experimental term configs.", + ", ".join(stable_manager_names), + ) + return + + replaced_items: list[str] = [] + for manager_name, cfg_attr in manager_to_cfg_attr.items(): + if self._manager_call_switch.get_mode_for_manager(manager_name) != ManagerCallMode.STABLE: + continue + if not hasattr(self.cfg, cfg_attr) or not hasattr(stable_cfg, cfg_attr): + continue + setattr(self.cfg, cfg_attr, deepcopy(getattr(stable_cfg, cfg_attr))) + replaced_items.append(f"{manager_name} -> cfg.{cfg_attr}") + + if replaced_items: + print("[INFO] Applied stable term config profile for managers:") + for item in replaced_items: + print(f" - {item}") + + def _reset_idx(self, env_ids: Sequence[int]): + """Reset environments based on specified indices. + + Args: + env_ids: List of environment ids which must be reset + """ + # reset the internal buffers of the scene elements + self.scene.reset(env_ids) + + # apply events such as randomization for environments that need a reset + if "reset" in self.event_manager.available_modes: + env_step_count = self._sim_step_counter // self.cfg.decimation + self._global_env_step_count_wp.fill_(env_step_count) + self.event_manager.apply( + mode="reset", env_ids=env_ids, global_env_step_count=self._global_env_step_count_wp + ) + + # iterate over all managers and reset them + # this returns a dictionary of information which is stored in the extras + # note: This is order-sensitive! Certain things need be reset before others. + self.extras["log"] = dict() + env_mask = self.resolve_env_mask(env_ids=env_ids) + # -- observation manager + info = self.observation_manager.reset(env_mask=env_mask) + self.extras["log"].update(info) + # -- action manager + info = self.action_manager.reset(env_mask=env_mask) + self.extras["log"].update(info) + # -- event manager + info = self.event_manager.reset(env_mask=env_mask) + self.extras["log"].update(info) + # -- recorder manager + info = self.recorder_manager.reset(env_ids) + self.extras["log"].update(info) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_rl_env_warp.py b/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_rl_env_warp.py new file mode 100644 index 000000000000..2e1894e54003 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/manager_based_rl_env_warp.py @@ -0,0 +1,616 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental manager-based RL environment (Warp entry point). + +This module provides an experimental fork of the stable manager-based RL environment +so it can diverge (Warp-first / graph-friendly) without inheriting from the stable +`isaaclab.envs.ManagerBasedRLEnv` implementation. +""" + +# needed to import for allowing type-hinting: np.ndarray | None +from __future__ import annotations + +import math +import os +from collections.abc import Sequence +from typing import Any, ClassVar + +import gymnasium as gym +import numpy as np +import torch +import warp as wp + +from isaaclab.envs.common import VecEnvStepReturn +from isaaclab.envs.manager_based_rl_env_cfg import ManagerBasedRLEnvCfg +from isaaclab.managers import CommandManager +from isaaclab.ui.widgets import ManagerLiveVisualizer +from isaaclab.utils.timer import Timer + +from isaaclab_experimental.utils.manager_call_switch import ManagerCallMode +from isaaclab_experimental.utils.torch_utils import clone_obs_buffer + +from .manager_based_env_warp import ManagerBasedEnvWarp + +DEBUG_TIMERS = os.environ.get("DEBUG_TIMERS", "0") == "1" +"""Enable outer step() timer. Set DEBUG_TIMERS=1 env var to enable.""" + +DEBUG_TIMER_STEP = os.environ.get("DEBUG_TIMER_STEP", "0") == "1" +"""Enable step sub-phase timers. Set DEBUG_TIMER_STEP=1 env var to enable.""" + +DEBUG_TIMER_RESET = os.environ.get("DEBUG_TIMER_RESET", "0") == "1" +"""Enable reset sub-phase timers. Set DEBUG_TIMER_RESET=1 env var to enable.""" + + +class ManagerBasedRLEnvWarp(ManagerBasedEnvWarp, gym.Env): + """The superclass for the manager-based workflow reinforcement learning-based environments. + + This class inherits from :class:`ManagerBasedEnv` and implements the core functionality for + reinforcement learning-based environments. It is designed to be used with any RL + library. The class is designed to be used with vectorized environments, i.e., the + environment is expected to be run in parallel with multiple sub-environments. The + number of sub-environments is specified using the ``num_envs``. + + Each observation from the environment is a batch of observations for each sub- + environments. The method :meth:`step` is also expected to receive a batch of actions + for each sub-environment. + + While the environment itself is implemented as a vectorized environment, we do not + inherit from :class:`gym.vector.VectorEnv`. This is mainly because the class adds + various methods (for wait and asynchronous updates) which are not required. + Additionally, each RL library typically has its own definition for a vectorized + environment. Thus, to reduce complexity, we directly use the :class:`gym.Env` over + here and leave it up to library-defined wrappers to take care of wrapping this + environment for their agents. + + Note: + For vectorized environments, it is recommended to **only** call the :meth:`reset` + method once before the first call to :meth:`step`, i.e. after the environment is created. + After that, the :meth:`step` function handles the reset of terminated sub-environments. + This is because the simulator does not support resetting individual sub-environments + in a vectorized environment. + + """ + + is_vector_env: ClassVar[bool] = True + """Whether the environment is a vectorized environment.""" + metadata: ClassVar[dict[str, Any]] = { + "render_modes": [None, "human", "rgb_array"], + # "isaac_sim_version": get_version(), + } + """Metadata for the environment.""" + + cfg: ManagerBasedRLEnvCfg + """Configuration for the environment.""" + + def __init__(self, cfg: ManagerBasedRLEnvCfg, render_mode: str | None = None, **kwargs): + """Initialize the environment. + + Args: + cfg: The configuration for the environment. + render_mode: The render mode for the environment. Defaults to None, which + is similar to ``"human"``. + """ + # -- counter for curriculum + self.common_step_counter = 0 + + # initialize the episode length buffer BEFORE loading the managers to use it in mdp functions. + # Warp array is the source of truth; torch view is zero-copy for += and indexed assignment. + self._episode_length_buf_wp = wp.zeros(cfg.scene.num_envs, dtype=wp.int64, device=cfg.sim.device) + self._episode_length_buf = wp.to_torch(self._episode_length_buf_wp) + + # initialize the base class to setup the scene. + super().__init__(cfg=cfg) + # store the render mode + self.render_mode = render_mode + + # The persistent reset mask needed for warp capture + # The intended use is to copy into this mask whenever capture is needed + # TODO: termination manager provides the same mask, investigate whether this can be replaced. + self.reset_mask_wp = wp.zeros(cfg.scene.num_envs, dtype=wp.bool, device=cfg.sim.device) + + # Persistent action input buffer to keep pointer stable for captured graphs. + self._action_in_wp: wp.array = wp.zeros( + (self.num_envs, self.action_manager.total_action_dim), dtype=wp.float32, device=self.device + ) + + # initialize data and constants + # -- set the framerate of the gym video recorder wrapper so that the playback speed + # of the produced video matches the simulation + self.metadata["render_fps"] = 1 / self.step_dt + self.has_rtx_sensors = self.sim.get_setting("/isaaclab/render/rtx_sensors") + + print("[INFO]: Completed setting up the environment...") + + """ + Properties. + """ + + @property + def episode_length_buf(self) -> torch.Tensor: + """Episode length buffer (torch view of the underlying warp array).""" + return self._episode_length_buf + + @episode_length_buf.setter + def episode_length_buf(self, value: torch.Tensor): + """Copy into the existing buffer to preserve the warp array linkage.""" + self._episode_length_buf[:] = value + + @property + def max_episode_length_s(self) -> float: + """Maximum episode length in seconds.""" + return self.cfg.episode_length_s + + @property + def max_episode_length(self) -> int: + """Maximum episode length in environment steps.""" + return math.ceil(self.max_episode_length_s / self.step_dt) + + """ + Operations - Setup. + """ + + def load_managers(self): + # note: this order is important since observation manager needs to know the command and action managers + # and the reward manager needs to know the termination manager + # -- command manager (stable impl — not routed through ManagerCallSwitch) + self.command_manager = CommandManager(self.cfg.commands, self) + print("[INFO] Command Manager: ", self.command_manager) + + # call the parent class to load the managers for observations and actions. + super().load_managers() + + # prepare the managers + # -- termination manager + self.termination_manager = self._manager_call_switch.resolve_manager_class("TerminationManager")( + self.cfg.terminations, self + ) + print("[INFO] Termination Manager: ", self.termination_manager) + # -- reward manager (experimental fork; Warp-compatible rewards) + self.reward_manager = self._manager_call_switch.resolve_manager_class("RewardManager")(self.cfg.rewards, self) + print("[INFO] Reward Manager: ", self.reward_manager) + # -- curriculum manager + self.curriculum_manager = self._manager_call_switch.resolve_manager_class("CurriculumManager")( + self.cfg.curriculum, self + ) + print("[INFO] Curriculum Manager: ", self.curriculum_manager) + + # setup the action and observation spaces for Gym + self._configure_gym_env_spaces() + + # perform events at the start of the simulation + if "startup" in self.event_manager.available_modes: + self.event_manager.apply(mode="startup") + + def setup_manager_visualizers(self): + """Creates live visualizers for manager terms.""" + + self.manager_visualizers = { + "action_manager": ManagerLiveVisualizer(manager=self.action_manager), + "observation_manager": ManagerLiveVisualizer(manager=self.observation_manager), + "command_manager": ManagerLiveVisualizer(manager=self.command_manager), + "termination_manager": ManagerLiveVisualizer(manager=self.termination_manager), + "reward_manager": ManagerLiveVisualizer(manager=self.reward_manager), + "curriculum_manager": ManagerLiveVisualizer(manager=self.curriculum_manager), + } + + """ + Operations - MDP + """ + + def invalidate_wp_graphs(self) -> None: + """Invalidate all cached Warp graphs. + + Call this if the captured launch topology changes (e.g. different term list, shapes, etc.). + """ + self._manager_call_switch.invalidate_graphs() + + def step_warp_termination_compute(self) -> None: + """Captured stage: compute terminations (env-step frequency).""" + self.reset_buf = self.termination_manager.compute() + self.reset_terminated = self.termination_manager.terminated + self.reset_time_outs = self.termination_manager.time_outs + + @Timer(name="env_step", msg="Step took:", enable=DEBUG_TIMER_STEP, time_unit="us") + def step(self, action: torch.Tensor) -> VecEnvStepReturn: + """Execute one time-step of the environment's dynamics and reset terminated environments. + + Unlike the :class:`ManagerBasedEnv.step` class, the function performs the following operations: + + 1. Process the actions. + 2. Perform physics stepping. + 3. Perform rendering if gui is enabled. + 4. Update the environment counters and compute the rewards and terminations. + 5. Reset the environments that terminated. + 6. Compute the observations. + 7. Return the observations, rewards, resets and extras. + + Args: + action: The actions to apply on the environment. Shape is (num_envs, action_dim). + + Returns: + A tuple containing the observations, rewards, resets (terminated and truncated) and extras. + """ + # process actions + # NOTE: keep a persistent action input buffer for graph pointer stability. + # IMPORTANT: Do NOT re-wrap/replace the `wp.array` used by captured graphs each step. + # Instead, copy the latest actions into the persistent buffer. + with Timer(name="action_preprocess", msg="Action preprocessing took:", enable=DEBUG_TIMER_STEP, time_unit="us"): + if self._action_in_wp is None: + raise RuntimeError("Action buffer not initialized. Call reset() before step().") + action_device = action.to(self.device) + wp.copy(self._action_in_wp, wp.from_torch(action_device, dtype=wp.float32)) + + self._manager_call_switch.call_stage( + stage="ActionManager_process_action", + warp_call={"fn": self.action_manager.process_action, "kwargs": {"action": self._action_in_wp}}, + timer=DEBUG_TIMER_STEP, + ) + + self.recorder_manager.record_pre_step() + + # check if we need to do rendering within the physics loop + # note: checked here once to avoid multiple checks within the loop + is_rendering = self.sim.is_rendering + + # perform physics stepping + for _ in range(self.cfg.decimation): + self._sim_step_counter += 1 + # set actions into buffers + self._manager_call_switch.call_stage( + stage="ActionManager_apply_action", + warp_call={"fn": self.action_manager.apply_action}, + timer=DEBUG_TIMER_STEP, + ) + self._manager_call_switch.call_stage( + stage="Scene_write_data_to_sim", + warp_call={"fn": self.scene.write_data_to_sim}, + timer=DEBUG_TIMER_STEP, + ) + + # simulate + with Timer(name="simulate", msg="Newton simulation step took:", enable=DEBUG_TIMER_STEP, time_unit="us"): + self.sim.step(render=False) + self.recorder_manager.record_post_physics_decimation_step() + # render between steps only if the GUI or an RTX sensor needs it + # note: we assume the render interval to be the shortest accepted rendering interval. + # If a camera needs rendering at a faster frequency, this will lead to unexpected behavior. + if self._sim_step_counter % self.cfg.sim.render_interval == 0 and is_rendering: + self.sim.render() + # update buffers at sim dt + with Timer( + name="scene.update", + msg="Scene.update took:", + enable=DEBUG_TIMER_STEP, + time_unit="us", + ): + self.scene.update(dt=self.physics_dt) + + # post-step: + # -- update env counters (used for curriculum generation) + self.episode_length_buf += 1 # step in current episode (per env) + self.common_step_counter += 1 # total step (common for all envs) + + # -- post-processing (termination + reward) as independently configurable stages + self._manager_call_switch.call_stage( + stage="TerminationManager_compute", + warp_call={"fn": self.step_warp_termination_compute}, + timer=DEBUG_TIMER_STEP, + ) + self.reward_buf = self._manager_call_switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": self.reward_manager.compute, "kwargs": {"dt": float(self.step_dt)}}, + timer=DEBUG_TIMER_STEP, + ) + + if len(self.recorder_manager.active_terms) > 0: + # update observations for recording if needed + self._manager_call_switch.call_stage( + stage="ObservationManager_compute_no_history", + warp_call={"fn": self.observation_manager.compute, "kwargs": {"return_cloned_output": False}}, + timer=DEBUG_TIMER_STEP, + ) + self.recorder_manager.record_post_step() + + # -- reset envs that terminated/timed-out and log the episode information + # NOTE: Interim path (intentional). + # We still compact `reset_buf` into `env_ids` here because several reset-time managers/recorders + # are still `env_ids`-based. Do NOT remove/replace this until mask-based reset is end-to-end. + with Timer( + name="reset_selection", + msg="Reset selection took:", + enable=DEBUG_TIMER_STEP, + time_unit="us", + ): + # Keep the reset-mask handoff fully in Warp when experimental termination buffers exist. + # Stable termination manager path exposes torch-only dones/reset buffers. + termination_manager_mode = self._manager_call_switch.get_mode_for_manager("TerminationManager") + if termination_manager_mode == ManagerCallMode.STABLE: + # copy still needed as mask will be used if manager is set to mode > 0 + wp.copy(self.reset_mask_wp, wp.from_torch(self.reset_buf, dtype=wp.bool)) + else: + wp.copy(self.reset_mask_wp, self.termination_manager.dones_wp) + reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) + if len(reset_env_ids) > 0: + # trigger recorder terms for pre-reset calls + self.recorder_manager.record_pre_reset(reset_env_ids) + + with Timer( + name="reset_idx", + msg="Reset idx took:", + enable=DEBUG_TIMER_STEP, + time_unit="us", + ): + self._reset_idx(env_ids=reset_env_ids, env_mask=self.reset_mask_wp) + + # if sensors are added to the scene, make sure we render to reflect changes in reset + if self.has_rtx_sensors and self.cfg.num_rerenders_on_reset > 0: + for _ in range(self.cfg.num_rerenders_on_reset): + self.sim.render() + + # trigger recorder terms for post-reset calls + self.recorder_manager.record_post_reset(reset_env_ids) + + # -- update command + self.command_manager.compute(dt=float(self.step_dt)) + + # -- step interval events + if "interval" in self.event_manager.available_modes: + self._manager_call_switch.call_stage( + stage="EventManager_apply_interval", + warp_call={"fn": self.event_manager.apply, "kwargs": {"mode": "interval", "dt": float(self.step_dt)}}, + timer=DEBUG_TIMER_STEP, + ) + + # -- compute observations + # note: done after reset to get the correct observations for reset envs + self.obs_buf = self._manager_call_switch.call_stage( + stage="ObservationManager_compute_update_history", + warp_call={ + "fn": self.observation_manager.compute, + "kwargs": {"update_history": True, "return_cloned_output": False}, + "output": lambda r: clone_obs_buffer(r), + }, + timer=DEBUG_TIMER_STEP, + ) + # return observations, rewards, resets and extras + return self.obs_buf, self.reward_buf, self.reset_terminated, self.reset_time_outs, self.extras + + def render(self, recompute: bool = False) -> np.ndarray | None: + """Run rendering without stepping through the physics. + + By convention, if mode is: + + - **human**: Render to the current display and return nothing. Usually for human consumption. + - **rgb_array**: Return a numpy.ndarray with shape (x, y, 3), representing RGB values for an + x-by-y pixel image, suitable for turning into a video. + + Args: + recompute: Whether to force a render even if the simulator has already rendered the scene. + Defaults to False. + + Returns: + The rendered image as a numpy array if mode is "rgb_array". Otherwise, returns None. + + Raises: + RuntimeError: If mode is set to "rgb_data" and simulation render mode does not support it. + In this case, the simulation render mode must be set to ``RenderMode.PARTIAL_RENDERING`` + or ``RenderMode.FULL_RENDERING``. + NotImplementedError: If an unsupported rendering mode is specified. + """ + # run a rendering step of the simulator + # if we have rtx sensors, we do not need to render again sin + if not self.has_rtx_sensors and not recompute: + self.sim.render() + # decide the rendering mode + if self.render_mode == "human" or self.render_mode is None: + return None + elif self.render_mode == "rgb_array": + # check that if any render could have happened + has_gui = bool(self.sim.get_setting("/isaaclab/has_gui")) + offscreen_render = bool(self.sim.get_setting("/isaaclab/render/offscreen")) + if not (has_gui or offscreen_render): + raise RuntimeError( + f"Cannot render '{self.render_mode}' when the simulation render mode does not support" + " rendering. Please set the simulation render mode to 'PARTIAL_RENDERING' or" + " 'FULL_RENDERING'. If running headless, make sure --enable_cameras is set." + ) + # create the annotator if it does not exist + if not hasattr(self, "_rgb_annotator"): + import omni.replicator.core as rep + + # create render product + self._render_product = rep.create.render_product( + self.cfg.viewer.cam_prim_path, self.cfg.viewer.resolution + ) + # create rgb annotator -- used to read data from the render product + self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu") + self._rgb_annotator.attach([self._render_product]) + # obtain the rgb data + rgb_data = self._rgb_annotator.get_data() + # convert to numpy array + rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape) + # return the rgb data + # note: initially the renerer is warming up and returns empty data + if rgb_data.size == 0: + return np.zeros((self.cfg.viewer.resolution[1], self.cfg.viewer.resolution[0], 3), dtype=np.uint8) + else: + return rgb_data[:, :, :3] + else: + raise NotImplementedError( + f"Render mode '{self.render_mode}' is not supported. Please use: {self.metadata['render_modes']}." + ) + + def close(self): + if not self._is_closed: + # destructor is order-sensitive + del self.command_manager + del self.reward_manager + del self.termination_manager + del self.curriculum_manager + # call the parent class to close the environment + super().close() + + """ + Helper functions. + """ + + def _configure_gym_env_spaces(self): + """Configure the action and observation spaces for the Gym environment.""" + # observation space (unbounded since we don't impose any limits) + self.single_observation_space = gym.spaces.Dict() + for group_name, group_term_names in self.observation_manager.active_terms.items(): + # extract quantities about the group + has_concatenated_obs = self.observation_manager.group_obs_concatenate[group_name] + group_dim = self.observation_manager.group_obs_dim[group_name] + # check if group is concatenated or not + # if not concatenated, then we need to add each term separately as a dictionary + if has_concatenated_obs: + self.single_observation_space[group_name] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=group_dim) + else: + group_term_cfgs = self.observation_manager._group_obs_term_cfgs[group_name] + term_dict = {} + for term_name, term_dim, term_cfg in zip(group_term_names, group_dim, group_term_cfgs): + low = -np.inf if term_cfg.clip is None else term_cfg.clip[0] + high = np.inf if term_cfg.clip is None else term_cfg.clip[1] + term_dict[term_name] = gym.spaces.Box(low=low, high=high, shape=term_dim) + self.single_observation_space[group_name] = gym.spaces.Dict(term_dict) + # action space (unbounded since we don't impose any limits) + action_dim = sum(self.action_manager.action_term_dim) + self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(action_dim,)) + + # batch the spaces for vectorized environments + self.observation_space = gym.vector.utils.batch_space(self.single_observation_space, self.num_envs) + self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs) + + def _reset_idx( + self, + env_ids: Sequence[int] | slice | torch.Tensor, + *, + env_mask: wp.array | None = None, + ): + """Reset environments based on specified indices. + + IMPORTANT: + This function always uses the **TerminationManager-produced Warp env mask** (`self.reset_buf`) to select + which envs to reset. The ids/mask conversion is performed in `step()` before calling this function. + + In other words: + - If `env_mask` is provided, it **must** be `self.reset_buf` (Warp bool mask) + - If `env_mask` is not provided, this function will populate `self.reset_buf` from `env_ids` + - When `env_mask` is provided, `env_ids` **must** correspond to the same mask + + Args: + env_ids: Environment indices to reset. + env_mask: Warp boolean env mask selecting envs to reset. Must be `self.reset_buf`. + If None, uses and populates `self.reset_buf` from `env_ids`. + """ + if env_mask is None: + # Base `reset()` / `reset_to()` call-path provides only `env_ids`. + # Populate the stable TerminationManager-owned mask (`self.reset_buf`) from ids. + env_mask = self.reset_mask_wp + # Use the centralized env-id/mask resolution from the base Warp env, then copy into the + # stable TerminationManager-owned buffer (`self.reset_buf`) used by captured graphs. + resolved_mask = self.resolve_env_mask(env_ids=env_ids) + wp.copy(env_mask, resolved_mask) + + if not isinstance(env_mask, wp.array): + raise TypeError(f"env_mask must be a wp.array (got {type(env_mask)}).") + + # update the curriculum for environments that need a reset + with Timer( + name="curriculum_manager.compute_reset", + msg="CurriculumManager.compute (reset) took:", + enable=DEBUG_TIMER_RESET, + time_unit="us", + ): + self.curriculum_manager.compute(env_ids=env_ids) + + # reset the internal buffers of the scene elements + self._manager_call_switch.call_stage( + stage="Scene_reset", + warp_call={"fn": self.scene.reset, "kwargs": {"env_ids": env_ids, "env_mask": env_mask}}, + timer=DEBUG_TIMER_RESET, + ) + + if "reset" in self.event_manager.available_modes: + self._global_env_step_count_wp.fill_(self._sim_step_counter // self.cfg.decimation) + self._manager_call_switch.call_stage( + stage="EventManager_apply_reset", + warp_call={ + "fn": self.event_manager.apply, + "kwargs": { + "mode": "reset", + "env_mask_wp": env_mask, + "global_env_step_count": self._global_env_step_count_wp, + }, + }, + timer=DEBUG_TIMER_RESET, + ) + + # iterate over all managers and reset them + # this returns a dictionary of information which is stored in the extras + # note: This is order-sensitive! Certain things need be reset before others. + # -- observation manager + action + reward managers + obs_info = self._manager_call_switch.call_stage( + stage="ObservationManager_reset", + warp_call={"fn": self.observation_manager.reset, "kwargs": {"env_mask": env_mask}}, + timer=DEBUG_TIMER_RESET, + ) + action_info = self._manager_call_switch.call_stage( + stage="ActionManager_reset", + warp_call={"fn": self.action_manager.reset, "kwargs": {"env_mask": env_mask}}, + timer=DEBUG_TIMER_RESET, + ) + reward_info = self._manager_call_switch.call_stage( + stage="RewardManager_reset", + warp_call={"fn": self.reward_manager.reset, "kwargs": {"env_mask": env_mask}}, + timer=DEBUG_TIMER_RESET, + ) + + # -- curriculum manager + with Timer( + name="curriculum_manager.reset", + msg="CurriculumManager.reset took:", + enable=DEBUG_TIMER_RESET, + time_unit="us", + ): + curriculum_info = self.curriculum_manager.reset(env_ids=env_ids) + + # -- command + event + termination managers + command_info = self.command_manager.reset(env_ids=env_ids) + event_info = self._manager_call_switch.call_stage( + stage="EventManager_reset", + warp_call={"fn": self.event_manager.reset, "kwargs": {"env_mask": env_mask}}, + stable_call={"fn": self.event_manager.reset, "kwargs": {"env_ids": env_ids}}, + timer=DEBUG_TIMER_RESET, + ) + termination_info = self._manager_call_switch.call_stage( + stage="TerminationManager_reset", + warp_call={"fn": self.termination_manager.reset, "kwargs": {"env_mask": env_mask}}, + stable_call={"fn": self.termination_manager.reset, "kwargs": {"env_ids": env_ids}}, + timer=DEBUG_TIMER_RESET, + ) + + # -- recorder manager + recorder_info = self.recorder_manager.reset(env_ids=env_ids) + + # reset the episode length buffer + self.episode_length_buf[env_ids] = 0 + + # aggregate logging info + log: dict[str, Any] = {} + for info in ( + obs_info, + action_info, + reward_info, + curriculum_info, + command_info, + event_info, + termination_info, + recorder_info, + ): + log.update(info) + self.extras["log"] = log diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/__init__.py new file mode 100644 index 000000000000..1476dc828798 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental MDP terms. + +This package forwards all stable MDP terms from :mod:`isaaclab.envs.mdp`, but overrides reward +functions with Warp-first implementations from :mod:`isaaclab_experimental.envs.mdp.rewards`. +""" + +# Forward stable MDP terms (commands/observations/terminations/etc.) but *exclude* rewards and actions. +# Rewards and actions are provided by this experimental package to keep Warp-first execution. +from isaaclab.envs.mdp.commands import * # noqa: F401, F403 +from isaaclab.envs.mdp.curriculums import * # noqa: F401, F403 +from isaaclab.envs.mdp.events import * # noqa: F401, F403 +from isaaclab.envs.mdp.observations import * # noqa: F401, F403 +from isaaclab.envs.mdp.recorders import * # noqa: F401, F403 +from isaaclab.envs.mdp.terminations import * # noqa: F401, F403 + +# Override terms with experimental implementations. +from .actions import * # noqa: F401, F403 +from .events import * # noqa: F401, F403 +from .observations import * # noqa: F401, F403 +from .rewards import * # noqa: F401, F403 +from .terminations import * # noqa: F401, F403 diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/__init__.py new file mode 100644 index 000000000000..d295384149d4 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental action terms (Warp-first). + +Provides Warp-first action term implementations overriding the stable +:mod:`isaaclab.envs.mdp.actions` module. +""" + +from .actions_cfg import * # noqa: F401, F403 +from .joint_actions import * # noqa: F401, F403 diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/actions_cfg.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/actions_cfg.py new file mode 100644 index 000000000000..39d5b29c6fdb --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/actions_cfg.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.utils import configclass + +from isaaclab_experimental.managers.action_manager import ActionTerm, ActionTermCfg + +from . import joint_actions + +## +# Joint actions. +## + + +@configclass +class JointActionCfg(ActionTermCfg): + """Configuration for the base joint action term. + + See :class:`JointAction` for more details. + """ + + joint_names: list[str] = MISSING + """List of joint names or regex expressions that the action will be mapped to.""" + scale: float | dict[str, float] = 1.0 + """Scale factor for the action (float or dict of regex expressions). Defaults to 1.0.""" + offset: float | dict[str, float] = 0.0 + """Offset factor for the action (float or dict of regex expressions). Defaults to 0.0.""" + preserve_order: bool = False + """Whether to preserve the order of the joint names in the action output. Defaults to False.""" + + +@configclass +class JointPositionActionCfg(JointActionCfg): + """Configuration for the joint position action term. + + See :class:`JointPositionAction` for more details. + """ + + class_type: type[ActionTerm] = joint_actions.JointPositionAction + + use_default_offset: bool = True + """Whether to use default joint positions configured in the articulation asset as offset. + Defaults to True. + + If True, this flag results in overwriting the values of :attr:`offset` to the default joint positions + from the articulation asset. + """ + + +@configclass +class JointEffortActionCfg(JointActionCfg): + """Configuration for the joint effort action term. + + See :class:`JointEffortAction` for more details. + """ + + class_type: type[ActionTerm] = joint_actions.JointEffortAction diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/joint_actions.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/joint_actions.py new file mode 100644 index 000000000000..f379f66c1900 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/actions/joint_actions.py @@ -0,0 +1,299 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +import numpy as np +import warp as wp + +import isaaclab.utils.string as string_utils +from isaaclab.assets.articulation import Articulation + +from isaaclab_experimental.managers.action_manager import ActionTerm +from isaaclab_experimental.utils.warp import resolve_1d_mask, zero_masked_2d + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + from isaaclab.envs.utils.io_descriptors import GenericActionIODescriptor + + from . import actions_cfg + +# import logger +logger = logging.getLogger(__name__) + + +@wp.kernel +def _process_joint_actions_kernel( + # input + actions: wp.array(dtype=wp.float32, ndim=2), + action_offset: int, + # params + scale: wp.array(dtype=wp.float32), + offset: wp.array(dtype=wp.float32), + clip: wp.array(dtype=wp.float32, ndim=2), + # output + raw_out: wp.array(dtype=wp.float32, ndim=2), + processed_out: wp.array(dtype=wp.float32, ndim=2), +): + env_id, j = wp.tid() + col = action_offset + j + + a = actions[env_id, col] + raw_out[env_id, j] = a + + x = a * scale[j] + offset[j] + low = clip[j, 0] + high = clip[j, 1] + if x < low: + x = low + if x > high: + x = high + processed_out[env_id, j] = x + + +class JointAction(ActionTerm): + r"""Base class for joint actions. + + This action term performs pre-processing of the raw actions using affine transformations (scale and offset). + These transformations can be configured to be applied to a subset of the articulation's joints. + + Mathematically, the action term is defined as: + + .. math:: + + \text{action} = \text{offset} + \text{scaling} \times \text{input action} + + where :math:`\text{action}` is the action that is sent to the articulation's actuated joints, :math:`\text{offset}` + is the offset applied to the input action, :math:`\text{scaling}` is the scaling applied to the input + action, and :math:`\text{input action}` is the input action from the user. + + Based on above, this kind of action transformation ensures that the input and output actions are in the same + units and dimensions. The child classes of this action term can then map the output action to a specific + desired command of the articulation's joints (e.g. position, velocity, etc.). + """ + + cfg: actions_cfg.JointActionCfg + """The configuration of the action term.""" + _asset: Articulation + """The articulation asset on which the action term is applied.""" + _scale: wp.array + """The scaling factor applied to the input action.""" + _offset: wp.array + """The offset applied to the input action.""" + _clip: wp.array + """The clip applied to the input action.""" + _joint_mask: wp.array + """A persistent joint mask for capturable action application.""" + + def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> None: + # initialize the action term + super().__init__(cfg, env) + + # resolve the joints over which the action term is applied + self._joint_ids, self._joint_names = self._asset.find_joints( + self.cfg.joint_names, preserve_order=self.cfg.preserve_order + ) + self._num_joints = len(self._joint_ids) + # log the resolved joint names for debugging + logger.info( + f"Resolved joint names for the action term {self.__class__.__name__}:" + f" {self._joint_names} [{self._joint_ids}]" + ) + + # Avoid indexing across all joints for efficiency + if self._num_joints == self._asset.num_joints and not self.cfg.preserve_order: + self._joint_ids = slice(None) + + # FIXME: ArticulationData.resolve_joint_mask is not available on this branch. + # Port resolve_*_mask methods from dev/newton when articulation_data is aligned. + _all_joint_mask = wp.ones((self._asset.num_joints,), dtype=wp.bool, device=self.device) + _scratch_joint_mask = wp.zeros((self._asset.num_joints,), dtype=wp.bool, device=self.device) + self._joint_mask = wp.clone( + resolve_1d_mask( + ids=self._joint_ids, + mask=None, + all_mask=_all_joint_mask, + scratch_mask=_scratch_joint_mask, + device=self.device, + ) + ) + + # create tensors for raw and processed actions (Warp) + self._raw_actions = wp.zeros((self.num_envs, self.action_dim), dtype=wp.float32, device=self.device) + self._processed_actions = wp.zeros_like(self.raw_actions) + # FIXME: dev/newton set_joint_effort_target accepts partial data + joint_mask. Our branch + # has separate _index (partial data) and _mask (full data) variants. Pre-compute joint_ids + # as warp array for the _index variant. + if self._joint_ids == slice(None): + self._joint_ids_wp = None # None means all joints + else: + self._joint_ids_wp = wp.array(list(self._joint_ids), dtype=wp.int32, device=self.device) + + # parse scale + if isinstance(cfg.scale, (float, int)): + self._scale = wp.array([float(cfg.scale)] * self.action_dim, dtype=wp.float32, device=self.device) + elif isinstance(cfg.scale, dict): + scale_per_joint = [1.0] * self.action_dim + # resolve the dictionary config + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.scale, self._joint_names) + for idx, value in zip(index_list, value_list): + scale_per_joint[idx] = float(value) + self._scale = wp.array(scale_per_joint, dtype=wp.float32, device=self.device) + else: + raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. Supported types are float and dict.") + + # parse offset + if isinstance(cfg.offset, (float, int)): + self._offset = wp.array([float(cfg.offset)] * self.action_dim, dtype=wp.float32, device=self.device) + elif isinstance(cfg.offset, dict): + offset_per_joint = [0.0] * self.action_dim + # resolve the dictionary config + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.offset, self._joint_names) + for idx, value in zip(index_list, value_list): + offset_per_joint[idx] = float(value) + self._offset = wp.array(offset_per_joint, dtype=wp.float32, device=self.device) + else: + raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.") + + # parse clip + clip_low = [-float("inf")] * self.action_dim + clip_high = [float("inf")] * self.action_dim + if self.cfg.clip is not None: + if isinstance(cfg.clip, dict): + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names) + for idx, value in zip(index_list, value_list): + clip_low[idx] = float(value[0]) + clip_high[idx] = float(value[1]) + else: + raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.") + + clip_np = np.column_stack([clip_low, clip_high]).astype(np.float32) + self._clip = wp.array(clip_np, dtype=wp.float32, device=self.device) + + """ + Properties. + """ + + @property + def action_dim(self) -> int: + return self._num_joints + + @property + def raw_actions(self) -> wp.array: + return self._raw_actions + + @property + def processed_actions(self) -> wp.array: + return self._processed_actions + + @property + def IO_descriptor(self) -> GenericActionIODescriptor: + """The IO descriptor of the action term. + + This descriptor is used to describe the action term of the joint action. + It adds the following information to the base descriptor: + - joint_names: The names of the joints. + - scale: The scale of the action term. + - offset: The offset of the action term. + - clip: The clip of the action term. + + Returns: + The IO descriptor of the action term. + """ + super().IO_descriptor + self._IO_descriptor.shape = (self.action_dim,) + self._IO_descriptor.dtype = str(self.raw_actions.dtype) + self._IO_descriptor.action_type = "JointAction" + self._IO_descriptor.joint_names = self._joint_names + self._IO_descriptor.scale = self._scale + # This seems to be always [4xNum_joints] IDK why. Need to check. + if isinstance(self._offset, wp.array): + self._IO_descriptor.offset = self._offset.numpy().tolist() + else: + self._IO_descriptor.offset = None + # FIXME: This is not correct. Add list support. + if self.cfg.clip is not None: + if isinstance(self._clip, wp.array): + self._IO_descriptor.clip = self._clip.numpy().tolist() + else: + self._IO_descriptor.clip = None + else: + self._IO_descriptor.clip = None + return self._IO_descriptor + + """ + Operations. + """ + + def process_actions(self, actions: wp.array, action_offset: int = 0): + wp.launch( + kernel=_process_joint_actions_kernel, + dim=(self.num_envs, self.action_dim), + inputs=[ + actions, + int(action_offset), + self._scale, + self._offset, + self._clip, + self._raw_actions, + self._processed_actions, + ], + device=self.device, + ) + + def reset(self, env_mask: wp.array | None = None) -> None: + """Resets the action term (mask-based).""" + if env_mask is None: + self._raw_actions.fill_(0.0) + return + wp.launch( + kernel=zero_masked_2d, + dim=(self.num_envs, self.action_dim), + inputs=[env_mask, self._raw_actions], + device=self.device, + ) + + +class JointPositionAction(JointAction): + """Joint action term that applies the processed actions to the articulation's joints as position commands. + + Warp-first override of :class:`isaaclab.envs.mdp.actions.JointPositionAction`. + """ + + cfg: actions_cfg.JointPositionActionCfg + """The configuration of the action term.""" + + def __init__(self, cfg: actions_cfg.JointPositionActionCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + # use default joint positions as offset + if cfg.use_default_offset: + defaults_np = self._asset.data.default_joint_pos.warp.numpy() + if isinstance(self._joint_ids, slice): + offset_vals = defaults_np[0, :].tolist() + else: + offset_vals = [float(defaults_np[0, jid]) for jid in self._joint_ids] + self._offset = wp.array(offset_vals, dtype=wp.float32, device=self.device) + + def apply_actions(self): + self._asset.set_joint_position_target_index(target=self.processed_actions, joint_ids=self._joint_ids_wp) + + +class JointEffortAction(JointAction): + """Joint action term that applies the processed actions to the articulation's joints as effort commands.""" + + cfg: actions_cfg.JointEffortActionCfg + """The configuration of the action term.""" + + def __init__(self, cfg: actions_cfg.JointEffortActionCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + + def apply_actions(self): + # set joint effort targets + # FIXME: dev/newton uses set_joint_effort_target(data, joint_mask=) which accepts + # partial data. Our branch uses the separate _index variant for partial data. + self._asset.set_joint_effort_target_index(target=self.processed_actions, joint_ids=self._joint_ids_wp) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/events.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/events.py new file mode 100644 index 000000000000..e1732df0c026 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/events.py @@ -0,0 +1,584 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first overrides for common event terms. + +These functions are intended to be used with the experimental Warp-first +:class:`isaaclab_experimental.managers.EventManager` (mask-based interval/reset). + +Why this exists: +- Stable event terms (e.g. `isaaclab.envs.mdp.events.reset_joints_by_offset`) often build torch tensors and then + call into Newton articulation writers with partial indices (env_ids/joint_ids). +- On the Newton backend, passing torch tensors triggers expensive torch->warp conversions that currently allocate + full `(num_envs, num_joints)` buffers. + +These Warp-first implementations avoid that by writing directly into the sim-bound Warp state buffers +(`asset.data.joint_pos` / `asset.data.joint_vel`) for the selected envs/joints. + +Notes: +- These terms assume the Newton/Warp backend (Warp arrays are available for joint state and defaults). +- For best performance, pass :class:`isaaclab_experimental.managers.SceneEntityCfg` so `joint_ids_wp` is cached. +""" + +from __future__ import annotations + +import warp as wp + +from isaaclab.assets import Articulation + +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.utils.warp import WarpCapturable + +# --------------------------------------------------------------------------- +# Randomize rigid body center of mass +# --------------------------------------------------------------------------- + + +@wp.kernel +def _randomize_com_kernel( + env_mask: wp.array(dtype=wp.bool), + rng_state: wp.array(dtype=wp.uint32), + body_com_pos_b: wp.array(dtype=wp.vec3f, ndim=2), + body_ids: wp.array(dtype=wp.int32), + com_lo: wp.vec3f, + com_hi: wp.vec3f, +): + """Add random offset to center of mass positions for selected bodies.""" + env_id = wp.tid() + if not env_mask[env_id]: + return + + state = rng_state[env_id] + for k in range(body_ids.shape[0]): + b = body_ids[k] + v = body_com_pos_b[env_id, b] + dx = wp.randf(state, com_lo[0], com_hi[0]) + dy = wp.randf(state, com_lo[1], com_hi[1]) + dz = wp.randf(state, com_lo[2], com_hi[2]) + body_com_pos_b[env_id, b] = wp.vec3f(v[0] + dx, v[1] + dy, v[2] + dz) + rng_state[env_id] = state + + +@WarpCapturable(False, reason="set_coms_mask calls SimulationManager.add_model_change") +def randomize_rigid_body_com( + env, + env_mask: wp.array, + com_range: dict[str, tuple[float, float]], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Randomize the center of mass (CoM) of rigid bodies by adding random offsets. + + Warp-first override of :func:`isaaclab.envs.mdp.events.randomize_rigid_body_com`. + Writes directly into the sim-bound ``body_com_pos_b`` buffer, then notifies the solver + via :meth:`set_coms_mask` so it recomputes inertial properties. + """ + asset: Articulation = env.scene[asset_cfg.name] + + fn = randomize_rigid_body_com + if not getattr(fn, "_is_warmed_up", False) or fn._asset_name != asset_cfg.name: + fn._asset_name = asset_cfg.name + r = [com_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] + fn._com_lo = wp.vec3f(r[0][0], r[1][0], r[2][0]) + fn._com_hi = wp.vec3f(r[0][1], r[1][1], r[2][1]) + fn._is_warmed_up = True + + wp.launch( + kernel=_randomize_com_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + env.rng_state_wp, + asset.data.body_com_pos_b.warp, + asset_cfg.body_ids_wp, + fn._com_lo, + fn._com_hi, + ], + device=env.device, + ) + + # Notify the solver that inertial properties changed (COM position affects inertia). + asset.set_coms_mask(coms=asset.data.body_com_pos_b.warp, env_mask=env_mask) + + +# --------------------------------------------------------------------------- +# Apply external force and torque +# --------------------------------------------------------------------------- + + +@wp.kernel +def _apply_external_force_torque_kernel( + env_mask: wp.array(dtype=wp.bool), + rng_state: wp.array(dtype=wp.uint32), + force_out: wp.array(dtype=wp.vec3f, ndim=2), + torque_out: wp.array(dtype=wp.vec3f, ndim=2), + force_lo: float, + force_hi: float, + torque_lo: float, + torque_hi: float, +): + env_id = wp.tid() + if not env_mask[env_id]: + # zero out unmasked envs so they don't accumulate stale forces + for b in range(force_out.shape[1]): + force_out[env_id, b] = wp.vec3f(0.0, 0.0, 0.0) + torque_out[env_id, b] = wp.vec3f(0.0, 0.0, 0.0) + return + + state = rng_state[env_id] + for b in range(force_out.shape[1]): + force_out[env_id, b] = wp.vec3f( + wp.randf(state, force_lo, force_hi), + wp.randf(state, force_lo, force_hi), + wp.randf(state, force_lo, force_hi), + ) + torque_out[env_id, b] = wp.vec3f( + wp.randf(state, torque_lo, torque_hi), + wp.randf(state, torque_lo, torque_hi), + wp.randf(state, torque_lo, torque_hi), + ) + rng_state[env_id] = state + + +def apply_external_force_torque( + env, + env_mask: wp.array, + force_range: tuple[float, float], + torque_range: tuple[float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Randomize external forces and torques applied to the asset's bodies. + + Warp-first override of :func:`isaaclab.envs.mdp.events.apply_external_force_torque`. + """ + asset: Articulation = env.scene[asset_cfg.name] + + # First-call: allocate scratch and pre-convert constant arguments. + if not getattr(apply_external_force_torque, "_is_warmed_up", False): + apply_external_force_torque._scratch_forces = wp.zeros( + (env.num_envs, asset.num_bodies), dtype=wp.vec3f, device=env.device + ) + apply_external_force_torque._scratch_torques = wp.zeros( + (env.num_envs, asset.num_bodies), dtype=wp.vec3f, device=env.device + ) + apply_external_force_torque._is_warmed_up = True + + wp.launch( + kernel=_apply_external_force_torque_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + env.rng_state_wp, + apply_external_force_torque._scratch_forces, + apply_external_force_torque._scratch_torques, + force_range[0], + force_range[1], + torque_range[0], + torque_range[1], + ], + device=env.device, + ) + + asset.permanent_wrench_composer.set_forces_and_torques_mask( + forces=apply_external_force_torque._scratch_forces, + torques=apply_external_force_torque._scratch_torques, + env_mask=env_mask, + ) + + +# --------------------------------------------------------------------------- +# Push by velocity +# --------------------------------------------------------------------------- + + +@wp.kernel +def _push_by_setting_velocity_kernel( + env_mask: wp.array(dtype=wp.bool), + rng_state: wp.array(dtype=wp.uint32), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + vel_out: wp.array(dtype=wp.spatial_vectorf), + lin_lo: wp.vec3f, + lin_hi: wp.vec3f, + ang_lo: wp.vec3f, + ang_hi: wp.vec3f, +): + env_id = wp.tid() + if not env_mask[env_id]: + return + + vel = root_vel_w[env_id] + state = rng_state[env_id] + + vel_out[env_id] = wp.spatial_vectorf( + vel[0] + wp.randf(state, lin_lo[0], lin_hi[0]), + vel[1] + wp.randf(state, lin_lo[1], lin_hi[1]), + vel[2] + wp.randf(state, lin_lo[2], lin_hi[2]), + vel[3] + wp.randf(state, ang_lo[0], ang_hi[0]), + vel[4] + wp.randf(state, ang_lo[1], ang_hi[1]), + vel[5] + wp.randf(state, ang_lo[2], ang_hi[2]), + ) + + rng_state[env_id] = state + + +def push_by_setting_velocity( + env, + env_mask: wp.array, + velocity_range: dict[str, tuple[float, float]], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Push the asset by setting the root velocity to a random value within the given ranges. + + Warp-first override of :func:`isaaclab.envs.mdp.events.push_by_setting_velocity`. + """ + asset: Articulation = env.scene[asset_cfg.name] + + # First-call: allocate scratch and pre-parse constant range arguments. + if not getattr(push_by_setting_velocity, "_is_warmed_up", False): + push_by_setting_velocity._scratch_vel = wp.zeros((env.num_envs,), dtype=wp.spatial_vectorf, device=env.device) + r = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] + push_by_setting_velocity._lin_lo = wp.vec3f(r[0][0], r[1][0], r[2][0]) + push_by_setting_velocity._lin_hi = wp.vec3f(r[0][1], r[1][1], r[2][1]) + push_by_setting_velocity._ang_lo = wp.vec3f(r[3][0], r[4][0], r[5][0]) + push_by_setting_velocity._ang_hi = wp.vec3f(r[3][1], r[4][1], r[5][1]) + push_by_setting_velocity._is_warmed_up = True + + wp.launch( + kernel=_push_by_setting_velocity_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + env.rng_state_wp, + asset.data.root_vel_w.warp, + push_by_setting_velocity._scratch_vel, + push_by_setting_velocity._lin_lo, + push_by_setting_velocity._lin_hi, + push_by_setting_velocity._ang_lo, + push_by_setting_velocity._ang_hi, + ], + device=env.device, + ) + + asset.write_root_velocity_to_sim_mask(root_velocity=push_by_setting_velocity._scratch_vel, env_mask=env_mask) + + +# --------------------------------------------------------------------------- +# Reset root state uniform +# --------------------------------------------------------------------------- + + +@wp.kernel +def _reset_root_state_uniform_kernel( + env_mask: wp.array(dtype=wp.bool), + rng_state: wp.array(dtype=wp.uint32), + default_root_pose: wp.array(dtype=wp.transformf), + default_root_vel: wp.array(dtype=wp.spatial_vectorf), + env_origins: wp.array(dtype=wp.vec3f), + pose_out: wp.array(dtype=wp.transformf), + vel_out: wp.array(dtype=wp.spatial_vectorf), + pos_lo: wp.vec3f, + pos_hi: wp.vec3f, + rot_lo: wp.vec3f, + rot_hi: wp.vec3f, + vel_lin_lo: wp.vec3f, + vel_lin_hi: wp.vec3f, + vel_ang_lo: wp.vec3f, + vel_ang_hi: wp.vec3f, +): + env_id = wp.tid() + if not env_mask[env_id]: + return + + state = rng_state[env_id] + + # --- Pose --- + default_pose = default_root_pose[env_id] + default_pos = wp.transform_get_translation(default_pose) + default_q = wp.transform_get_rotation(default_pose) + origin = env_origins[env_id] + + # position = default + env_origin + random offset + pos = wp.vec3f( + default_pos[0] + origin[0] + wp.randf(state, pos_lo[0], pos_hi[0]), + default_pos[1] + origin[1] + wp.randf(state, pos_lo[1], pos_hi[1]), + default_pos[2] + origin[2] + wp.randf(state, pos_lo[2], pos_hi[2]), + ) + + # orientation = default * delta(euler_xyz) + roll = wp.randf(state, rot_lo[0], rot_hi[0]) + pitch = wp.randf(state, rot_lo[1], rot_hi[1]) + yaw = wp.randf(state, rot_lo[2], rot_hi[2]) + qx = wp.quat_from_axis_angle(wp.vec3f(1.0, 0.0, 0.0), roll) + qy = wp.quat_from_axis_angle(wp.vec3f(0.0, 1.0, 0.0), pitch) + qz = wp.quat_from_axis_angle(wp.vec3f(0.0, 0.0, 1.0), yaw) + # ZYX extrinsic = XYZ intrinsic: delta = qz * qy * qx + delta_q = wp.mul(wp.mul(qz, qy), qx) + final_q = wp.mul(default_q, delta_q) + + pose_out[env_id] = wp.transformf(pos, final_q) + + # --- Velocity --- + default_vel = default_root_vel[env_id] + vel_out[env_id] = wp.spatial_vectorf( + default_vel[0] + wp.randf(state, vel_lin_lo[0], vel_lin_hi[0]), + default_vel[1] + wp.randf(state, vel_lin_lo[1], vel_lin_hi[1]), + default_vel[2] + wp.randf(state, vel_lin_lo[2], vel_lin_hi[2]), + default_vel[3] + wp.randf(state, vel_ang_lo[0], vel_ang_hi[0]), + default_vel[4] + wp.randf(state, vel_ang_lo[1], vel_ang_hi[1]), + default_vel[5] + wp.randf(state, vel_ang_lo[2], vel_ang_hi[2]), + ) + + rng_state[env_id] = state + + +def reset_root_state_uniform( + env, + env_mask: wp.array, + pose_range: dict[str, tuple[float, float]], + velocity_range: dict[str, tuple[float, float]], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Reset the asset root state to a random position and velocity uniformly within the given ranges. + + Warp-first override of :func:`isaaclab.envs.mdp.events.reset_root_state_uniform`. + """ + asset: Articulation = env.scene[asset_cfg.name] + + # First-call: allocate scratch and pre-parse range dicts. + if not getattr(reset_root_state_uniform, "_is_warmed_up", False): + reset_root_state_uniform._scratch_pose = wp.zeros((env.num_envs,), dtype=wp.transformf, device=env.device) + reset_root_state_uniform._scratch_vel = wp.zeros((env.num_envs,), dtype=wp.spatial_vectorf, device=env.device) + # Pre-parse pose_range dict + p = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] + reset_root_state_uniform._pos_lo = wp.vec3f(p[0][0], p[1][0], p[2][0]) + reset_root_state_uniform._pos_hi = wp.vec3f(p[0][1], p[1][1], p[2][1]) + reset_root_state_uniform._rot_lo = wp.vec3f(p[3][0], p[4][0], p[5][0]) + reset_root_state_uniform._rot_hi = wp.vec3f(p[3][1], p[4][1], p[5][1]) + # Pre-parse velocity_range dict + v = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] + reset_root_state_uniform._vel_lin_lo = wp.vec3f(v[0][0], v[1][0], v[2][0]) + reset_root_state_uniform._vel_lin_hi = wp.vec3f(v[0][1], v[1][1], v[2][1]) + reset_root_state_uniform._vel_ang_lo = wp.vec3f(v[3][0], v[4][0], v[5][0]) + reset_root_state_uniform._vel_ang_hi = wp.vec3f(v[3][1], v[4][1], v[5][1]) + reset_root_state_uniform._is_warmed_up = True + + wp.launch( + kernel=_reset_root_state_uniform_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + env.rng_state_wp, + asset.data.default_root_pose.warp, + asset.data.default_root_vel.warp, + env.env_origins_wp, + reset_root_state_uniform._scratch_pose, + reset_root_state_uniform._scratch_vel, + reset_root_state_uniform._pos_lo, + reset_root_state_uniform._pos_hi, + reset_root_state_uniform._rot_lo, + reset_root_state_uniform._rot_hi, + reset_root_state_uniform._vel_lin_lo, + reset_root_state_uniform._vel_lin_hi, + reset_root_state_uniform._vel_ang_lo, + reset_root_state_uniform._vel_ang_hi, + ], + device=env.device, + ) + + asset.write_root_pose_to_sim_mask(root_pose=reset_root_state_uniform._scratch_pose, env_mask=env_mask) + asset.write_root_velocity_to_sim_mask(root_velocity=reset_root_state_uniform._scratch_vel, env_mask=env_mask) + + +# --------------------------------------------------------------------------- +# Reset joints by offset +# --------------------------------------------------------------------------- + + +@wp.kernel +def _reset_joints_by_offset_kernel( + env_mask: wp.array(dtype=wp.bool), + joint_ids: wp.array(dtype=wp.int32), + rng_state: wp.array(dtype=wp.uint32), + default_joint_pos: wp.array(dtype=wp.float32, ndim=2), + default_joint_vel: wp.array(dtype=wp.float32, ndim=2), + joint_pos: wp.array(dtype=wp.float32, ndim=2), + joint_vel: wp.array(dtype=wp.float32, ndim=2), + soft_joint_pos_limits: wp.array(dtype=wp.vec2f, ndim=2), + soft_joint_vel_limits: wp.array(dtype=wp.float32, ndim=2), + pos_lo: float, + pos_hi: float, + vel_lo: float, + vel_hi: float, +): + env_id = wp.tid() + if not env_mask[env_id]: + return + + # 1 thread per env so per-env RNG state updates are race-free. + state = rng_state[env_id] + for joint_i in range(joint_ids.shape[0]): + joint_id = joint_ids[joint_i] + + # offset samples in the provided ranges (Warp RNG state pattern) + pos_off = wp.randf(state, pos_lo, pos_hi) + vel_off = wp.randf(state, vel_lo, vel_hi) + + pos = default_joint_pos[env_id, joint_id] + pos_off + vel = default_joint_vel[env_id, joint_id] + vel_off + + # clamp to soft limits + lim = soft_joint_pos_limits[env_id, joint_id] + pos = wp.clamp(pos, lim.x, lim.y) + vmax = soft_joint_vel_limits[env_id, joint_id] + vel = wp.clamp(vel, -vmax, vmax) + + # write into sim-bound state buffers + joint_pos[env_id, joint_id] = pos + joint_vel[env_id, joint_id] = vel + + rng_state[env_id] = state + + +def reset_joints_by_offset( + env, + env_mask: wp.array, + position_range: tuple[float, float], + velocity_range: tuple[float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Warp-first reset of joint state by random offsets around defaults. + + This overrides the stable `isaaclab.envs.mdp.events.reset_joints_by_offset` when importing + via `isaaclab_experimental.envs.mdp`. + """ + asset: Articulation = env.scene[asset_cfg.name] + + # Assume cfg params are already resolved by the manager stack (Warp-first workflow). + if asset_cfg.joint_ids_wp is None: + raise ValueError( + f"reset_joints_by_offset requires an experimental SceneEntityCfg with resolved joint_ids_wp, " + f"but got None for asset '{asset_cfg.name}'. " + "Use isaaclab_experimental.managers.SceneEntityCfg and ensure joint_names are set." + ) + if not hasattr(env, "rng_state_wp") or env.rng_state_wp is None: + raise AttributeError( + "reset_joints_by_offset requires env.rng_state_wp to be initialized. " + "Use ManagerBasedEnvWarp or ManagerBasedRLEnvWarp as the base environment." + ) + + wp.launch( + kernel=_reset_joints_by_offset_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + asset_cfg.joint_ids_wp, + env.rng_state_wp, + asset.data.default_joint_pos.warp, + asset.data.default_joint_vel.warp, + asset.data.joint_pos.warp, + asset.data.joint_vel.warp, + asset.data.soft_joint_pos_limits.warp, + asset.data.soft_joint_vel_limits.warp, + float(position_range[0]), + float(position_range[1]), + float(velocity_range[0]), + float(velocity_range[1]), + ], + device=env.device, + ) + + # Sync derived buffers (_previous_joint_vel, joint_acc) for reset envs. + asset.write_joint_position_to_sim_mask(position=asset.data.joint_pos.warp, env_mask=env_mask) + asset.write_joint_velocity_to_sim_mask(velocity=asset.data.joint_vel.warp, env_mask=env_mask) + + +@wp.kernel +def _reset_joints_by_scale_kernel( + env_mask: wp.array(dtype=wp.bool), + joint_ids: wp.array(dtype=wp.int32), + rng_state: wp.array(dtype=wp.uint32), + default_joint_pos: wp.array(dtype=wp.float32, ndim=2), + default_joint_vel: wp.array(dtype=wp.float32, ndim=2), + joint_pos: wp.array(dtype=wp.float32, ndim=2), + joint_vel: wp.array(dtype=wp.float32, ndim=2), + soft_joint_pos_limits: wp.array(dtype=wp.vec2f, ndim=2), + soft_joint_vel_limits: wp.array(dtype=wp.float32, ndim=2), + pos_lo: float, + pos_hi: float, + vel_lo: float, + vel_hi: float, +): + env_id = wp.tid() + if not env_mask[env_id]: + return + + state = rng_state[env_id] + for joint_i in range(joint_ids.shape[0]): + joint_id = joint_ids[joint_i] + + # scale samples in the provided ranges + pos_scale = wp.randf(state, pos_lo, pos_hi) + vel_scale = wp.randf(state, vel_lo, vel_hi) + + pos = default_joint_pos[env_id, joint_id] * pos_scale + vel = default_joint_vel[env_id, joint_id] * vel_scale + + lim = soft_joint_pos_limits[env_id, joint_id] + pos = wp.clamp(pos, lim.x, lim.y) + vmax = soft_joint_vel_limits[env_id, joint_id] + vel = wp.clamp(vel, -vmax, vmax) + + # write into sim + joint_pos[env_id, joint_id] = pos + joint_vel[env_id, joint_id] = vel + + rng_state[env_id] = state + + +def reset_joints_by_scale( + env, + env_mask: wp.array, + position_range: tuple[float, float], + velocity_range: tuple[float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +): + """Warp-first reset of joint state by scaling defaults with random factors.""" + asset: Articulation = env.scene[asset_cfg.name] + + if asset_cfg.joint_ids_wp is None: + raise ValueError( + f"reset_joints_by_scale requires an experimental SceneEntityCfg with resolved joint_ids_wp, " + f"but got None for asset '{asset_cfg.name}'. " + "Use isaaclab_experimental.managers.SceneEntityCfg and ensure joint_names are set." + ) + if not hasattr(env, "rng_state_wp") or env.rng_state_wp is None: + raise AttributeError( + "reset_joints_by_scale requires env.rng_state_wp to be initialized. " + "Use ManagerBasedEnvWarp or ManagerBasedRLEnvWarp as the base environment." + ) + + wp.launch( + kernel=_reset_joints_by_scale_kernel, + dim=env.num_envs, + inputs=[ + env_mask, + asset_cfg.joint_ids_wp, + env.rng_state_wp, + asset.data.default_joint_pos.warp, + asset.data.default_joint_vel.warp, + asset.data.joint_pos.warp, + asset.data.joint_vel.warp, + asset.data.soft_joint_pos_limits.warp, + asset.data.soft_joint_vel_limits.warp, + float(position_range[0]), + float(position_range[1]), + float(velocity_range[0]), + float(velocity_range[1]), + ], + device=env.device, + ) + + # Sync derived buffers (_previous_joint_vel, joint_acc) for reset envs. + asset.write_joint_position_to_sim_mask(position=asset.data.joint_pos.warp, env_mask=env_mask) + asset.write_joint_velocity_to_sim_mask(velocity=asset.data.joint_vel.warp, env_mask=env_mask) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/observations.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/observations.py new file mode 100644 index 000000000000..5760d1f03e8c --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/observations.py @@ -0,0 +1,370 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first observation terms (experimental). + +All functions in this file follow the Warp-compatible observation signature expected by the +experimental Warp-first observation manager: + +- ``func(env, out, **params) -> None`` + +where ``out`` is a pre-allocated Warp array with float32 dtype and shape ``(num_envs, D)``. +Output dimension ``D`` is inferred from decorator metadata: ``axes`` for root-state terms, +``out_dim`` for body/command/action/time terms, or ``joint_ids`` count for joint terms. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch +import warp as wp +from isaaclab_newton.kernels.state_kernels import ( + body_ang_vel_from_root, + body_lin_vel_from_root, + rotate_vec_to_body_frame, +) + +from isaaclab.assets import Articulation + +from isaaclab_experimental.envs.utils.io_descriptors import ( + generic_io_descriptor_warp, + record_dtype, + record_joint_names, + record_joint_pos_offsets, + record_joint_vel_offsets, + record_shape, +) +from isaaclab_experimental.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +# --------------------------------------------------------------------------- +# Shared kernels +# --------------------------------------------------------------------------- + + +@wp.kernel +def _vec3_to_out3_kernel( + src: wp.array(dtype=wp.vec3f), + out: wp.array(dtype=wp.float32, ndim=2), +): + env_id = wp.tid() + v = src[env_id] + out[env_id, 0] = v[0] + out[env_id, 1] = v[1] + out[env_id, 2] = v[2] + + +@wp.kernel +def _joint_gather_kernel( + src: wp.array(dtype=wp.float32, ndim=2), + joint_ids: wp.array(dtype=wp.int32), + out: wp.array(dtype=wp.float32, ndim=2), +): + env_id, k = wp.tid() + j = joint_ids[k] + out[env_id, k] = src[env_id, j] + + +""" +Root state. +""" + + +@wp.kernel +def _base_pos_z_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + out: wp.array(dtype=wp.float32, ndim=2), +): + env_id = wp.tid() + out[env_id, 0] = root_pos_w[env_id][2] + + +@generic_io_descriptor_warp( + units="m", axes=["Z"], observation_type="RootState", on_inspect=[record_shape, record_dtype] +) +def base_pos_z(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Root height in the simulation world frame.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_pos_z_kernel, + dim=env.num_envs, + inputs=[asset.data.root_pos_w.warp, out], + device=env.device, + ) + + +# Inline Tier 1 access: these observations derive body-frame quantities directly from +# root_link_pose_w (transformf) and root_com_vel_w (spatial_vectorf), avoiding the lazy +# TimestampedWarpBuffer properties which are not CUDA-graph-capturable. +# See GRAPH_CAPTURE_MIGRATION.md in isaaclab_newton for background. +# If ArticulationData Tier 2 lazy update is made graph-safe in the future, these can +# revert to reading the pre-computed .data buffers (simpler, avoids redundant rotations). + + +@wp.kernel +def _base_lin_vel_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.float32, ndim=2), +): + i = wp.tid() + v = body_lin_vel_from_root(root_pose_w[i], root_vel_w[i]) + out[i, 0] = v[0] + out[i, 1] = v[1] + out[i, 2] = v[2] + + +@generic_io_descriptor_warp( + units="m/s", axes=["X", "Y", "Z"], observation_type="RootState", on_inspect=[record_shape, record_dtype] +) +def base_lin_vel(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Root linear velocity in the asset's root frame.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_lin_vel_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.root_com_vel_w.warp, out], + device=env.device, + ) + + +@wp.kernel +def _base_ang_vel_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.float32, ndim=2), +): + i = wp.tid() + v = body_ang_vel_from_root(root_pose_w[i], root_vel_w[i]) + out[i, 0] = v[0] + out[i, 1] = v[1] + out[i, 2] = v[2] + + +@generic_io_descriptor_warp( + units="rad/s", axes=["X", "Y", "Z"], observation_type="RootState", on_inspect=[record_shape, record_dtype] +) +def base_ang_vel(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Root angular velocity in the asset's root frame.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_ang_vel_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.root_com_vel_w.warp, out], + device=env.device, + ) + + +@wp.kernel +def _projected_gravity_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + gravity_w: wp.array(dtype=wp.vec3f), + out: wp.array(dtype=wp.float32, ndim=2), +): + i = wp.tid() + g = rotate_vec_to_body_frame(gravity_w[0], root_pose_w[i]) + out[i, 0] = g[0] + out[i, 1] = g[1] + out[i, 2] = g[2] + + +@generic_io_descriptor_warp( + units="m/s^2", axes=["X", "Y", "Z"], observation_type="RootState", on_inspect=[record_shape, record_dtype] +) +def projected_gravity(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Gravity projection on the asset's root frame.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_projected_gravity_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.GRAVITY_VEC_W.warp, out], + device=env.device, + ) + + +""" +Joint state. +""" + + +@generic_io_descriptor_warp( + observation_type="JointState", on_inspect=[record_joint_names, record_dtype, record_shape], units="rad" +) +def joint_pos(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """The joint positions of the asset.""" + asset: Articulation = env.scene[asset_cfg.name] + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is None: + raise RuntimeError( + "SceneEntityCfg.joint_ids_wp is required for subset joint observations in Warp-first observations. " + "Pass `asset_cfg` via term cfg params so it is resolved at manager init." + ) + wp.launch( + kernel=_joint_gather_kernel, + dim=(env.num_envs, out.shape[1]), + inputs=[asset.data.joint_pos.warp, joint_ids_wp, out], + device=env.device, + ) + + +@wp.kernel +def _joint_rel_gather_kernel( + values: wp.array(dtype=wp.float32, ndim=2), + defaults: wp.array(dtype=wp.float32, ndim=2), + joint_ids: wp.array(dtype=wp.int32), + out: wp.array(dtype=wp.float32, ndim=2), +): + env_id, k = wp.tid() + j = joint_ids[k] + out[env_id, k] = values[env_id, j] - defaults[env_id, j] + + +@generic_io_descriptor_warp( + observation_type="JointState", + on_inspect=[record_joint_names, record_dtype, record_shape, record_joint_pos_offsets], + units="rad", +) +def joint_pos_rel(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Joint positions relative to defaults. Writes into ``out``.""" + asset: Articulation = env.scene[asset_cfg.name] + + # Subset selection (requires a pre-resolved Warp joint-id list). + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is None: + raise RuntimeError( + "SceneEntityCfg.joint_ids_wp is required for subset joint observations in Warp-first observations. " + "Pass `asset_cfg` via term cfg params so it is resolved at manager init." + ) + wp.launch( + kernel=_joint_rel_gather_kernel, + dim=(env.num_envs, out.shape[1]), + inputs=[asset.data.joint_pos.warp, asset.data.default_joint_pos.warp, joint_ids_wp, out], + device=env.device, + ) + + +@wp.kernel +def _joint_pos_limit_normalized_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + soft_joint_pos_limits: wp.array(dtype=wp.vec2f, ndim=2), + joint_ids: wp.array(dtype=wp.int32), + out: wp.array(dtype=wp.float32, ndim=2), +): + env_id, k = wp.tid() + j = joint_ids[k] + pos = joint_pos[env_id, j] + lim = soft_joint_pos_limits[env_id, j] + lower = lim.x + upper = lim.y + out[env_id, k] = 2.0 * (pos - (lower + upper) * 0.5) / (upper - lower) + + +@generic_io_descriptor_warp(observation_type="JointState", on_inspect=[record_joint_names, record_dtype, record_shape]) +def joint_pos_limit_normalized(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """The joint positions of the asset normalized with the asset's joint limits.""" + asset: Articulation = env.scene[asset_cfg.name] + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is None: + raise RuntimeError( + "SceneEntityCfg.joint_ids_wp is required for subset joint observations in Warp-first observations. " + "Pass `asset_cfg` via term cfg params so it is resolved at manager init." + ) + wp.launch( + kernel=_joint_pos_limit_normalized_kernel, + dim=(env.num_envs, out.shape[1]), + inputs=[asset.data.joint_pos.warp, asset.data.soft_joint_pos_limits.warp, joint_ids_wp, out], + device=env.device, + ) + + +@generic_io_descriptor_warp( + observation_type="JointState", on_inspect=[record_joint_names, record_dtype, record_shape], units="rad/s" +) +def joint_vel(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """The joint velocities of the asset.""" + asset: Articulation = env.scene[asset_cfg.name] + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is None: + raise RuntimeError( + "SceneEntityCfg.joint_ids_wp is required for subset joint observations in Warp-first observations. " + "Pass `asset_cfg` via term cfg params so it is resolved at manager init." + ) + wp.launch( + kernel=_joint_gather_kernel, + dim=(env.num_envs, out.shape[1]), + inputs=[asset.data.joint_vel.warp, joint_ids_wp, out], + device=env.device, + ) + + +@generic_io_descriptor_warp( + observation_type="JointState", + on_inspect=[record_joint_names, record_dtype, record_shape, record_joint_vel_offsets], + units="rad/s", +) +def joint_vel_rel(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Joint velocities relative to defaults. Writes into ``out``.""" + asset: Articulation = env.scene[asset_cfg.name] + + # Subset selection (requires a pre-resolved Warp joint-id list). + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is None: + raise RuntimeError( + "SceneEntityCfg.joint_ids_wp is required for subset joint observations in Warp-first observations. " + "Pass `asset_cfg` via term cfg params so it is resolved at manager init." + ) + wp.launch( + kernel=_joint_rel_gather_kernel, + dim=(env.num_envs, out.shape[1]), + inputs=[asset.data.joint_vel.warp, asset.data.default_joint_vel.warp, joint_ids_wp, out], + device=env.device, + ) + + +""" +Actions. +""" + + +@generic_io_descriptor_warp(out_dim="action", dtype=torch.float32, observation_type="Action", on_inspect=[record_shape]) +def last_action(env: ManagerBasedEnv, out, action_name: str | None = None) -> None: + """The last input action to the environment.""" + # TODO(warp-migration): Cross-manager access (observation → action). Currently works + # because experimental ActionManager.action is already a warp array. No from_torch needed. + if action_name is not None: + raise NotImplementedError("Named action support is not yet implemented for Warp-first last_action observation.") + wp.copy(out, env.action_manager.action) + + +""" +Commands. +""" + + +@generic_io_descriptor_warp( + out_dim="command", dtype=torch.float32, observation_type="Command", on_inspect=[record_shape] +) +def generated_commands(env: ManagerBasedEnv, out, command_name: str) -> None: + """The generated command from the command manager. Writes into ``out``. + + Warp-first override of :func:`isaaclab.envs.mdp.observations.generated_commands`. + Uses ``wp.from_torch`` to create a zero-copy warp view of the command tensor on first call. + """ + # TODO(warp-migration): Cross-manager access (observation → command). Replace with direct + # warp getter once all managers are guaranteed to be warp-native. + fn = generated_commands + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + if isinstance(cmd, wp.array): + fn._cmd_wp = cmd + else: + fn._cmd_wp = wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + wp.copy(out, fn._cmd_wp) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/rewards.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/rewards.py new file mode 100644 index 000000000000..2b98f1f9cc09 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/rewards.py @@ -0,0 +1,490 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Common functions that can be used to enable reward functions (experimental). + +All functions in this file follow the Warp-compatible reward signature expected by +`isaaclab_experimental.managers.RewardManager`: + +- ``func(env, out, **params) -> None`` + +where ``out`` is a pre-allocated Warp array of shape ``(num_envs,)`` with ``float32`` dtype. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp +from isaaclab_newton.kernels.state_kernels import ( + body_ang_vel_from_root, + body_lin_vel_from_root, + rotate_vec_to_body_frame, +) + +from isaaclab.assets import Articulation + +from isaaclab_experimental.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +""" +General. +""" + + +@wp.kernel +def _is_alive_kernel(terminated: wp.array(dtype=wp.bool), out: wp.array(dtype=wp.float32)): + i = wp.tid() + out[i] = wp.where(terminated[i], 0.0, 1.0) + + +def is_alive(env: ManagerBasedRLEnv, out: wp.array(dtype=wp.float32)) -> None: + """Reward for being alive. Writes into ``out`` (shape: (num_envs,)).""" + wp.launch( + kernel=_is_alive_kernel, + dim=env.num_envs, + inputs=[env.termination_manager.terminated_wp, out], + device=env.device, + ) + + +@wp.kernel +def _is_terminated_kernel(terminated: wp.array(dtype=wp.bool), out: wp.array(dtype=wp.float32)): + i = wp.tid() + out[i] = wp.where(terminated[i], 1.0, 0.0) + + +def is_terminated(env: ManagerBasedRLEnv, out) -> None: + """Penalize terminated episodes. Writes into ``out``.""" + wp.launch( + kernel=_is_terminated_kernel, + dim=env.num_envs, + inputs=[env.termination_manager.terminated_wp, out], + device=env.device, + ) + + +""" +Root penalties. +""" + + +# Inline Tier 1 access: these rewards derive body-frame quantities directly from +# root_link_pose_w (transformf) and root_com_vel_w (spatial_vectorf), avoiding the lazy +# TimestampedWarpBuffer properties which are not CUDA-graph-capturable. +# See GRAPH_CAPTURE_MIGRATION.md in isaaclab_newton for background. +# If ArticulationData Tier 2 lazy update is made graph-safe in the future, these can +# revert to reading the pre-computed .data buffers (simpler, avoids redundant rotations). + + +@wp.kernel +def _lin_vel_z_l2_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + vz = body_lin_vel_from_root(root_pose_w[i], root_vel_w[i])[2] + out[i] = vz * vz + + +def lin_vel_z_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize z-axis base linear velocity using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_lin_vel_z_l2_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.root_com_vel_w.warp, out], + device=env.device, + ) + + +@wp.kernel +def _ang_vel_xy_l2_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + v = body_ang_vel_from_root(root_pose_w[i], root_vel_w[i]) + out[i] = v[0] * v[0] + v[1] * v[1] + + +def ang_vel_xy_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize xy-axis base angular velocity using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_ang_vel_xy_l2_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.root_com_vel_w.warp, out], + device=env.device, + ) + + +@wp.kernel +def _flat_orientation_l2_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + gravity_w: wp.array(dtype=wp.vec3f), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + g = rotate_vec_to_body_frame(gravity_w[0], root_pose_w[i]) + out[i] = g[0] * g[0] + g[1] * g[1] + + +def flat_orientation_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize non-flat base orientation using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_flat_orientation_l2_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.GRAVITY_VEC_W.warp, out], + device=env.device, + ) + + +""" +Joint penalties. +""" + + +# TODO(warp-migration): Revisit whether 2D kernel + wp.atomic_add is faster than 1D with inner loop +# for the following masked reduction kernels. Profile with typical joint counts (12-30). +@wp.kernel +def _sum_sq_masked_kernel( + x: wp.array(dtype=wp.float32, ndim=2), joint_mask: wp.array(dtype=wp.bool), out: wp.array(dtype=wp.float32) +): + i = wp.tid() + s = float(0.0) + for j in range(x.shape[1]): + if joint_mask[j]: + s += x[i, j] * x[i, j] + out[i] = s + + +def joint_torques_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize joint torques applied on the articulation using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_sum_sq_masked_kernel, + dim=env.num_envs, + inputs=[asset.data.applied_torque.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +# TODO(warp-migration): Revisit 2D kernel + wp.atomic_add vs 1D inner loop. +@wp.kernel +def _sum_abs_masked_kernel( + x: wp.array(dtype=wp.float32, ndim=2), joint_mask: wp.array(dtype=wp.bool), out: wp.array(dtype=wp.float32) +): + i = wp.tid() + s = float(0.0) + for j in range(x.shape[1]): + if joint_mask[j]: + s += wp.abs(x[i, j]) + out[i] = s + + +def joint_vel_l1(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg) -> None: + """Penalize joint velocities on the articulation using an L1-kernel. Writes into ``out``.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_sum_abs_masked_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_vel.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +def joint_vel_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize joint velocities on the articulation using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_sum_sq_masked_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_vel.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +def joint_acc_l2(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize joint accelerations on the articulation using L2 squared kernel.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_sum_sq_masked_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_acc.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +# TODO(warp-migration): Revisit 2D kernel + wp.atomic_add vs 1D inner loop. +@wp.kernel +def _sum_abs_diff_masked_kernel( + a: wp.array(dtype=wp.float32, ndim=2), + b: wp.array(dtype=wp.float32, ndim=2), + joint_mask: wp.array(dtype=wp.bool), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + s = float(0.0) + for j in range(a.shape[1]): + if joint_mask[j]: + s += wp.abs(a[i, j] - b[i, j]) + out[i] = s + + +def joint_deviation_l1(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize joint positions that deviate from the default one.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_sum_abs_diff_masked_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_pos.warp, asset.data.default_joint_pos.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +# TODO(warp-migration): Revisit 2D kernel + wp.atomic_add vs 1D inner loop. +@wp.kernel +def _joint_pos_limits_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + soft_joint_pos_limits: wp.array(dtype=wp.vec2f, ndim=2), + joint_mask: wp.array(dtype=wp.bool), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + s = float(0.0) + for j in range(joint_pos.shape[1]): + if joint_mask[j]: + pos = joint_pos[i, j] + lim = soft_joint_pos_limits[i, j] + lower = lim.x + upper = lim.y + # penalty for exceeding lower limit + below = lower - pos + if below > 0.0: + s += below + # penalty for exceeding upper limit + above = pos - upper + if above > 0.0: + s += above + out[i] = s + + +def joint_pos_limits(env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize joint positions if they cross the soft limits.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_joint_pos_limits_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_pos.warp, asset.data.soft_joint_pos_limits.warp, asset_cfg.joint_mask, out], + device=env.device, + ) + + +""" +Action penalties. +""" + + +# TODO(warp-migration): Revisit 2D kernel + wp.atomic_add vs 1D inner loop. +@wp.kernel +def _sum_sq_diff_2d_kernel( + a: wp.array(dtype=wp.float32, ndim=2), + b: wp.array(dtype=wp.float32, ndim=2), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + s = float(0.0) + for j in range(a.shape[1]): + d = a[i, j] - b[i, j] + s += d * d + out[i] = s + + +def action_rate_l2(env: ManagerBasedRLEnv, out) -> None: + """Penalize the rate of change of the actions using L2 squared kernel.""" + wp.launch( + kernel=_sum_sq_diff_2d_kernel, + dim=env.num_envs, + inputs=[env.action_manager.action, env.action_manager.prev_action, out], + device=env.device, + ) + + +# TODO(warp-migration): Revisit 2D kernel + wp.atomic_add vs 1D inner loop. +@wp.kernel +def _sum_sq_2d_kernel(x: wp.array(dtype=wp.float32, ndim=2), out: wp.array(dtype=wp.float32)): + i = wp.tid() + s = float(0.0) + for j in range(x.shape[1]): + s += x[i, j] * x[i, j] + out[i] = s + + +def action_l2(env: ManagerBasedRLEnv, out) -> None: + """Penalize the actions using L2 squared kernel.""" + wp.launch( + kernel=_sum_sq_2d_kernel, + dim=env.num_envs, + inputs=[env.action_manager.action, out], + device=env.device, + ) + + +""" +Contact sensor. +""" + + +@wp.kernel +def _undesired_contacts_kernel( + forces: wp.array(dtype=wp.vec3f, ndim=3), + body_ids: wp.array(dtype=wp.int32), + threshold: float, + out: wp.array(dtype=wp.float32), +): + """Count bodies where max-over-history contact force norm exceeds threshold.""" + i = wp.tid() + count = float(0.0) + for k in range(body_ids.shape[0]): + b = body_ids[k] + max_force = float(0.0) + for h in range(forces.shape[1]): + f = forces[i, h, b] + norm = wp.sqrt(f[0] * f[0] + f[1] * f[1] + f[2] * f[2]) + if norm > max_force: + max_force = norm + if max_force > threshold: + count += 1.0 + out[i] = count + + +def undesired_contacts(env: ManagerBasedRLEnv, out, threshold: float, sensor_cfg: SceneEntityCfg) -> None: + """Penalize undesired contacts as the number of violations above a threshold. Writes into ``out``. + + Warp-first override of :func:`isaaclab.envs.mdp.rewards.undesired_contacts`. + """ + contact_sensor = env.scene.sensors[sensor_cfg.name] + wp.launch( + kernel=_undesired_contacts_kernel, + dim=env.num_envs, + inputs=[contact_sensor.data.net_forces_w_history.warp, sensor_cfg.body_ids_wp, threshold, out], + device=env.device, + ) + + +""" +Velocity-tracking rewards. +""" + + +@wp.kernel +def _track_lin_vel_xy_exp_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + command: wp.array(dtype=wp.float32, ndim=2), + std_sq_inv: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + v = body_lin_vel_from_root(root_pose_w[i], root_vel_w[i]) + dx = command[i, 0] - v[0] + dy = command[i, 1] - v[1] + error = dx * dx + dy * dy + out[i] = wp.exp(-error * std_sq_inv) + + +def track_lin_vel_xy_exp( + env: ManagerBasedRLEnv, + out, + std: float, + command_name: str, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> None: + """Reward tracking of linear velocity commands (xy axes) using exponential kernel. Writes into ``out``. + + Warp-first override of :func:`isaaclab.envs.mdp.rewards.track_lin_vel_xy_exp`. + """ + asset: Articulation = env.scene[asset_cfg.name] + # cache the warp view of the command tensor on first call (zero-copy) + # TODO(warp-migration): Cross-manager access (reward → command). Replace with direct + # warp getter once all managers are guaranteed to be warp-native. + if not getattr(track_lin_vel_xy_exp, "_is_warmed_up", False) or track_lin_vel_xy_exp._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + if isinstance(cmd, wp.array): + track_lin_vel_xy_exp._cmd_wp = cmd + else: + track_lin_vel_xy_exp._cmd_wp = wp.from_torch(cmd) + track_lin_vel_xy_exp._cmd_name = command_name + track_lin_vel_xy_exp._is_warmed_up = True + wp.launch( + kernel=_track_lin_vel_xy_exp_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_link_pose_w.warp, + asset.data.root_com_vel_w.warp, + track_lin_vel_xy_exp._cmd_wp, + 1.0 / (std * std), + out, + ], + device=env.device, + ) + + +@wp.kernel +def _track_ang_vel_z_exp_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), + command: wp.array(dtype=wp.float32, ndim=2), + cmd_col: int, + std_sq_inv: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + dz = command[i, cmd_col] - body_ang_vel_from_root(root_pose_w[i], root_vel_w[i])[2] + out[i] = wp.exp(-dz * dz * std_sq_inv) + + +def track_ang_vel_z_exp( + env: ManagerBasedRLEnv, + out, + std: float, + command_name: str, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> None: + """Reward tracking of angular velocity commands (yaw) using exponential kernel. Writes into ``out``. + + Warp-first override of :func:`isaaclab.envs.mdp.rewards.track_ang_vel_z_exp`. + """ + asset: Articulation = env.scene[asset_cfg.name] + # TODO(warp-migration): Cross-manager access (reward → command). Replace with direct + # warp getter once all managers are guaranteed to be warp-native. + if not getattr(track_ang_vel_z_exp, "_is_warmed_up", False) or track_ang_vel_z_exp._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + if isinstance(cmd, wp.array): + track_ang_vel_z_exp._cmd_wp = cmd + else: + track_ang_vel_z_exp._cmd_wp = wp.from_torch(cmd) + track_ang_vel_z_exp._cmd_name = command_name + track_ang_vel_z_exp._is_warmed_up = True + wp.launch( + kernel=_track_ang_vel_z_exp_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_link_pose_w.warp, + asset.data.root_com_vel_w.warp, + track_ang_vel_z_exp._cmd_wp, + 2, + 1.0 / (std * std), + out, + ], + device=env.device, + ) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/terminations.py b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/terminations.py new file mode 100644 index 000000000000..7721469e96a4 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/mdp/terminations.py @@ -0,0 +1,161 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Common functions that can be used to activate terminations (experimental). + +All functions in this file follow the Warp-compatible termination signature expected by +`isaaclab_experimental.managers.TerminationManager`: + +- ``func(env, out, **params) -> None`` + +where ``out`` is a pre-allocated Warp array of shape ``(num_envs,)`` with boolean dtype. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.assets import Articulation + +from isaaclab_experimental.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +""" +MDP terminations. +""" + + +@wp.kernel +def _time_out_kernel( + episode_length: wp.array(dtype=wp.int64), max_episode_length: wp.int64, out: wp.array(dtype=wp.bool) +): + i = wp.tid() + out[i] = episode_length[i] >= max_episode_length + + +def time_out(env: ManagerBasedRLEnv, out) -> None: + """Terminate the episode when episode length exceeds the maximum episode length.""" + wp.launch( + kernel=_time_out_kernel, + dim=env.num_envs, + inputs=[env._episode_length_buf_wp, env.max_episode_length, out], + device=env.device, + ) + + +""" +Root terminations. +""" + + +@wp.kernel +def _root_height_below_min_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + minimum_height: float, + out: wp.array(dtype=wp.bool), +): + i = wp.tid() + out[i] = root_pos_w[i][2] < minimum_height + + +def root_height_below_minimum( + env: ManagerBasedRLEnv, out, minimum_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> None: + """Terminate when the asset's root height is below the minimum height.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_root_height_below_min_kernel, + dim=env.num_envs, + inputs=[asset.data.root_pos_w.warp, minimum_height, out], + device=env.device, + ) + + +""" +Joint terminations. +""" + + +@wp.kernel +def _joint_pos_out_of_manual_limit_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + joint_mask: wp.array(dtype=wp.bool), + lower: float, + upper: float, + out: wp.array(dtype=wp.bool), +): + """2D kernel (num_envs, num_joints). ``out`` is pre-zeroed; only writes True.""" + i, j = wp.tid() + if joint_mask[j]: + v = joint_pos[i, j] + if v < lower or v > upper: + out[i] = True + + +def joint_pos_out_of_manual_limit( + env: ManagerBasedRLEnv, out, bounds: tuple[float, float], asset_cfg: SceneEntityCfg +) -> None: + """Terminate when joint positions are outside configured bounds. Writes into ``out``.""" + asset: Articulation = env.scene[asset_cfg.name] + if asset_cfg.joint_mask is None: + raise ValueError( + f"joint_pos_out_of_manual_limit requires SceneEntityCfg with resolved joint_mask, " + f"but got None for asset '{asset_cfg.name}'." + ) + if asset.data.joint_pos.warp.shape[1] != asset_cfg.joint_mask.shape[0]: + raise ValueError( + f"joint_mask length ({asset_cfg.joint_mask.shape[0]}) does not match " + f"joint_pos dim ({asset.data.joint_pos.warp.shape[1]}) for asset '{asset_cfg.name}'." + ) + wp.launch( + kernel=_joint_pos_out_of_manual_limit_kernel, + dim=(env.num_envs, asset.data.joint_pos.warp.shape[1]), + inputs=[asset.data.joint_pos.warp, asset_cfg.joint_mask, bounds[0], bounds[1], out], + device=env.device, + ) + + +""" +Contact sensor. +""" + + +@wp.kernel +def _illegal_contact_kernel( + forces: wp.array(dtype=wp.vec3f, ndim=3), + body_ids: wp.array(dtype=wp.int32), + threshold: float, + out: wp.array(dtype=wp.bool), +): + """Terminate when any selected body's max-over-history contact force norm exceeds threshold.""" + i = wp.tid() + violated = bool(False) + for k in range(body_ids.shape[0]): + b = body_ids[k] + for h in range(forces.shape[1]): + f = forces[i, h, b] + norm = wp.sqrt(f[0] * f[0] + f[1] * f[1] + f[2] * f[2]) + if norm > threshold: + violated = True + out[i] = violated + + +def illegal_contact(env: ManagerBasedRLEnv, out, threshold: float, sensor_cfg: SceneEntityCfg) -> None: + """Terminate when the contact force on the sensor exceeds the force threshold. Writes into ``out``. + + Warp-first override of :func:`isaaclab.envs.mdp.terminations.illegal_contact`. + """ + contact_sensor = env.scene.sensors[sensor_cfg.name] + wp.launch( + kernel=_illegal_contact_kernel, + dim=env.num_envs, + inputs=[contact_sensor.data.net_forces_w_history.warp, sensor_cfg.body_ids_wp, threshold, out], + device=env.device, + ) diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/utils/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/envs/utils/__init__.py new file mode 100644 index 000000000000..d28381b15b76 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/utils/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for environment utils.""" diff --git a/source/isaaclab_experimental/isaaclab_experimental/envs/utils/io_descriptors.py b/source/isaaclab_experimental/isaaclab_experimental/envs/utils/io_descriptors.py new file mode 100644 index 000000000000..2b72dae02aa7 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/envs/utils/io_descriptors.py @@ -0,0 +1,323 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first IO descriptor decorator and inspection hooks (experimental). + +This module mirrors the stable :mod:`isaaclab.envs.utils.io_descriptors` but is +designed for Warp-first observation terms whose signature is:: + + func(env, out, **params) -> None + +Key difference from the stable decorator: + During inspection (``inspect=True``), the underlying function is **not called**. + Hooks derive metadata from ``env`` / scene / config objects instead of from a + returned output tensor. ``output`` is passed as ``None`` so that hooks share the + same ``(output, descriptor, **kwargs)`` signature as the stable hooks. + +The :class:`GenericObservationIODescriptor` dataclass is reused from the stable +package so that the resulting descriptor dicts are fully compatible with the +existing export / YAML pipeline. +""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar + +import warp as wp + +# Reuse the descriptor dataclass from the stable package. +from isaaclab.envs.utils.io_descriptors import GenericObservationIODescriptor + +if TYPE_CHECKING: + from isaaclab.assets.articulation import Articulation + from isaaclab.envs import ManagerBasedEnv + +import dataclasses +import functools +import inspect + +# These are defined to help with type hinting +P = ParamSpec("P") +R = TypeVar("R") + + +# --------------------------------------------------------------------------- +# Decorator +# --------------------------------------------------------------------------- + + +# Automatically builds a descriptor from the kwargs +def _make_descriptor(**kwargs: Any) -> GenericObservationIODescriptor: + """Split *kwargs* into (known dataclass fields) and (extras).""" + field_names = {f.name for f in dataclasses.fields(GenericObservationIODescriptor)} + known = {k: v for k, v in kwargs.items() if k in field_names} + extras = {k: v for k, v in kwargs.items() if k not in field_names} + + desc = GenericObservationIODescriptor(**known) + # User defined extras are stored in the descriptor under the `extras` field + desc.extras = extras + # ``out_dim`` is kept as a top-level attribute (not in extras) so the + # observation manager can read it without inspecting extras. + desc.out_dim = extras.pop("out_dim", None) + return desc + + +# Decorator factory for Warp-first IO descriptors. +def generic_io_descriptor_warp( + _func: Callable[Concatenate[ManagerBasedEnv, P], R] | None = None, + *, + on_inspect: Callable[..., Any] | list[Callable[..., Any]] | None = None, + **descriptor_kwargs: Any, +) -> Callable[[Callable[Concatenate[ManagerBasedEnv, P], R]], Callable[Concatenate[ManagerBasedEnv, P], R]]: + """IO descriptor decorator for Warp-first observation terms. + + Works like the stable :func:`generic_io_descriptor` but adapted to the + ``func(env, out, **params) -> None`` signature: + + * On **normal calls** the decorator passes through to the wrapped function. + * On **inspection** (``inspect=True`` keyword argument) the wrapped function + is *not* called. Instead, the registered hooks are invoked with the same + ``(output, descriptor, **kwargs)`` contract as the stable hooks, except + ``output`` is always ``None``. + + This decorator can be used in the same ways as the stable decorator: + + 1. With keyword arguments:: + + @generic_io_descriptor_warp(observation_type="JointState", units="rad") + def my_func(env, out, asset_cfg=SceneEntityCfg("robot")) -> None: ... + + 2. With a pre-built descriptor:: + + @generic_io_descriptor_warp(GenericObservationIODescriptor(description="..")) + def my_func(env, out, asset_cfg=SceneEntityCfg("robot")) -> None: ... + + 3. With inspection hooks:: + + @generic_io_descriptor_warp( + observation_type="JointState", + on_inspect=[record_joint_names, record_joint_shape, record_joint_pos_offsets], + units="rad", + ) + def joint_pos_rel(env, out, asset_cfg=SceneEntityCfg("robot")) -> None: ... + + Args: + _func: The function to decorate (or a pre-built descriptor). + on_inspect: Hook(s) called during inspection. + **descriptor_kwargs: Keyword arguments to pass to the descriptor. + + Returns: + A decorator that can be used to decorate a function. + """ + # If the decorator is used with a descriptor, use it as the descriptor. + if _func is not None and isinstance(_func, GenericObservationIODescriptor): + descriptor = _func + _func = None + else: + descriptor = _make_descriptor(**descriptor_kwargs) + + # Ensures the hook is a list + if callable(on_inspect): + inspect_hooks: list[Callable[..., Any]] = [on_inspect] + else: + inspect_hooks: list[Callable[..., Any]] = list(on_inspect or []) # handles None + + def _apply(func: Callable[Concatenate[ManagerBasedEnv, P], R]) -> Callable[Concatenate[ManagerBasedEnv, P], R]: + # Capture the signature of the function + sig = inspect.signature(func) + + @functools.wraps(func) + def wrapper(env: ManagerBasedEnv, *args: P.args, **kwargs: P.kwargs) -> R: + inspect_flag: bool = kwargs.pop("inspect", False) + if inspect_flag: + # Warp-first: do NOT call the function (it requires a pre-allocated + # ``out`` buffer that does not exist at inspection time). + # Use bind_partial (tolerates missing ``out``) and apply_defaults so + # that hooks see resolved default values (e.g. ``asset_cfg``). + bound = sig.bind_partial(env, **kwargs) + bound.apply_defaults() + call_kwargs = { + "output": None, + "descriptor": descriptor, + **bound.arguments, + } + for hook in inspect_hooks: + hook(**call_kwargs) + return # noqa: R502 + return func(env, *args, **kwargs) + + # --- Descriptor bookkeeping --- + descriptor.name = func.__name__ + descriptor.full_path = f"{func.__module__}.{func.__name__}" + # Warp-first terms always operate in float32. + descriptor.dtype = str(descriptor.dtype) if descriptor.dtype is not None else "float32" + # Check if description is set in the descriptor + if descriptor.description is None and func.__doc__: + descriptor.description = " ".join(func.__doc__.split()) + + # Adds the descriptor to the wrapped function as an attribute + wrapper._descriptor = descriptor + wrapper._has_descriptor = True + # Alters the signature of the wrapped function to make it match the original function. + # This allows the wrapped functions to pass the checks in the managers. + wrapper.__signature__ = sig + return wrapper + + # If the decorator is used without parentheses, _func will be the function itself. + if callable(_func): + return _apply(_func) + return _apply + + +# --------------------------------------------------------------------------- +# Inspection hooks +# +# All hooks follow the stable convention: (output, descriptor, **kwargs). +# For Warp-first terms ``output`` is always ``None``; hooks that need shape +# or dtype information must derive it from the scene / config objects in +# **kwargs rather than from the output tensor. +# --------------------------------------------------------------------------- + + +def record_shape(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs) -> None: + """Record the shape of the output buffer. + + When ``output`` is not ``None`` (eager path), shape is read directly. + When ``output`` is ``None`` (Warp-first inspection), shape is derived from: + - ``descriptor.extras["axes"]`` for RootState observations, or + - ``asset_cfg.joint_ids`` for JointState observations. + + BodyState shape cannot be derived without calling the function (the per-body + feature size varies). In that case shape is left unset. + + Args: + output: The pre-allocated output buffer, or ``None`` during inspection. + descriptor: The descriptor to record the shape to. + **kwargs: Additional keyword arguments. + """ + if output is not None: + descriptor.shape = (output.shape[-1],) + return + # --- Warp-first fallback: derive shape without output --- + # 1) From axes metadata (RootState) + axes = descriptor.extras.get("axes") if descriptor.extras else None + if axes: + descriptor.shape = (len(axes),) + return + # 2) From asset_cfg for JointState + if descriptor.observation_type == "JointState": + asset_cfg = kwargs.get("asset_cfg") + if asset_cfg is not None: + asset: Articulation = kwargs["env"].scene[asset_cfg.name] + joint_ids = asset_cfg.joint_ids + if joint_ids == slice(None): + descriptor.shape = (len(asset.joint_names),) + else: + descriptor.shape = (len(joint_ids),) + + +def record_dtype(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs) -> None: + """Record the dtype of the output buffer. + + No-op when ``output`` is ``None`` (the typical case during Warp-first + inspection — dtype is already set to ``"float32"`` by the decorator). + + Args: + output: The pre-allocated output buffer, or ``None`` during inspection. + descriptor: The descriptor to record the dtype to. + **kwargs: Additional keyword arguments. + """ + if output is None: + return + descriptor.dtype = str(output.dtype) + + +def record_joint_shape(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs) -> None: + """Derive the observation shape from the resolved ``joint_ids`` count. + + This is the Warp-first alternative to :func:`record_shape` for joint-based + observations. It ignores ``output`` and reads the shape from the asset + configuration instead. + + Args: + output: Ignored — kept for hook signature compatibility. + descriptor: The descriptor to update. + **kwargs: Must contain ``env`` and ``asset_cfg``. + """ + asset: Articulation = kwargs["env"].scene[kwargs["asset_cfg"].name] + joint_ids = kwargs["asset_cfg"].joint_ids + if joint_ids == slice(None, None, None): + descriptor.shape = (len(asset.joint_names),) + else: + descriptor.shape = (len(joint_ids),) + + +def record_joint_names(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs) -> None: + """Record the joint names selected by ``asset_cfg.joint_ids``. + + Expects the ``asset_cfg`` keyword argument to be set. + + Args: + output: Ignored — kept for hook signature compatibility. + descriptor: The descriptor to record the joint names to. + **kwargs: Additional keyword arguments. + """ + asset: Articulation = kwargs["env"].scene[kwargs["asset_cfg"].name] + joint_ids = kwargs["asset_cfg"].joint_ids + if joint_ids == slice(None, None, None): + joint_ids = list(range(len(asset.joint_names))) + descriptor.joint_names = [asset.joint_names[i] for i in joint_ids] + + +def record_body_names(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs) -> None: + """Record the body names selected by ``asset_cfg.body_ids``. + + Expects the ``asset_cfg`` keyword argument to be set. + + Args: + output: Ignored — kept for hook signature compatibility. + descriptor: The descriptor to record the body names to. + **kwargs: Additional keyword arguments. + """ + asset: Articulation = kwargs["env"].scene[kwargs["asset_cfg"].name] + body_ids = kwargs["asset_cfg"].body_ids + if body_ids == slice(None, None, None): + body_ids = list(range(len(asset.body_names))) + descriptor.body_names = [asset.body_names[i] for i in body_ids] + + +def record_joint_pos_offsets(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs): + """Record the default joint-position offsets (first env instance). + + Expects the ``asset_cfg`` keyword argument to be set. + + Args: + output: Ignored — kept for hook signature compatibility. + descriptor: The descriptor to record the joint position offsets to. + **kwargs: Additional keyword arguments. + """ + asset: Articulation = kwargs["env"].scene[kwargs["asset_cfg"].name] + ids = kwargs["asset_cfg"].joint_ids + # Get the offsets of the joints for the first robot in the scene. + # This assumes that all robots have the same joint offsets. + descriptor.joint_pos_offsets = asset.data.default_joint_pos.torch.clone()[:, ids][0] + + +def record_joint_vel_offsets(output: wp.array | None, descriptor: GenericObservationIODescriptor, **kwargs): + """Record the default joint-velocity offsets (first env instance). + + Expects the ``asset_cfg`` keyword argument to be set. + + Args: + output: Ignored — kept for hook signature compatibility. + descriptor: The descriptor to record the joint velocity offsets to. + **kwargs: Additional keyword arguments. + """ + asset: Articulation = kwargs["env"].scene[kwargs["asset_cfg"].name] + ids = kwargs["asset_cfg"].joint_ids + # Get the offsets of the joints for the first robot in the scene. + # This assumes that all robots have the same joint offsets. + descriptor.joint_vel_offsets = asset.data.default_joint_vel.torch.clone()[:, ids][0] diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/managers/__init__.py new file mode 100644 index 000000000000..b4521b98434a --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental manager implementations. + +This package is intended for experimental forks of manager implementations while +keeping stable task configs and the stable `isaaclab.managers` package intact. +""" + +from isaaclab.managers import * # noqa: F401,F403 + +from .action_manager import ActionManager # noqa: F401 +from .command_manager import CommandManager # noqa: F401 +from .event_manager import EventManager # noqa: F401 + +# Override the stable implementation with the experimental fork. +from .manager_base import ManagerTermBase # noqa: F401 +from .manager_term_cfg import ObservationTermCfg, RewardTermCfg, TerminationTermCfg # noqa: F401 +from .observation_manager import ObservationManager # noqa: F401 +from .reward_manager import RewardManager # noqa: F401 +from .scene_entity_cfg import SceneEntityCfg # noqa: F401 +from .termination_manager import TerminationManager # noqa: F401 diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/action_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/action_manager.py new file mode 100644 index 000000000000..672af82bc8ab --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/action_manager.py @@ -0,0 +1,492 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Action manager for processing actions sent to the environment.""" + +from __future__ import annotations + +import inspect +import re +import weakref +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import torch +import warp as wp +from prettytable import PrettyTable + +from isaaclab.assets import AssetBase +from isaaclab.envs.utils.io_descriptors import GenericActionIODescriptor + +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import ActionTermCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + +# Shared kernel – imported from utils to avoid duplication. +from isaaclab_experimental.utils.warp.utils import zero_masked_2d as _zero_masked_2d + + +class ActionTerm(ManagerTermBase): + """Base class for action terms. + + The action term is responsible for processing the raw actions sent to the environment + and applying them to the asset managed by the term. The action term is comprised of two + operations: + + * Processing of actions: This operation is performed once per **environment step** and + is responsible for pre-processing the raw actions sent to the environment. + * Applying actions: This operation is performed once per **simulation step** and is + responsible for applying the processed actions to the asset managed by the term. + """ + + def __init__(self, cfg: ActionTermCfg, env: ManagerBasedEnv): + """Initialize the action term. + + Args: + cfg: The configuration object. + env: The environment instance. + """ + # call the base class constructor + super().__init__(cfg, env) + # parse config to obtain asset to which the term is applied + self._asset: AssetBase = self._env.scene[self.cfg.asset_name] + self._IO_descriptor = GenericActionIODescriptor() + self._export_IO_descriptor = True + + # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) + self._debug_vis_handle = None + # set initial state of debug visualization + self.set_debug_vis(self.cfg.debug_vis) + + def __del__(self): + """Unsubscribe from the callbacks.""" + if self._debug_vis_handle: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + + """ + Properties. + """ + + @property + @abstractmethod + def action_dim(self) -> int: + """Dimension of the action term.""" + raise NotImplementedError + + @property + @abstractmethod + def raw_actions(self) -> wp.array: + """The input/raw actions sent to the term.""" + raise NotImplementedError + + @property + @abstractmethod + def processed_actions(self) -> wp.array: + """The actions computed by the term after applying any processing.""" + raise NotImplementedError + + @property + def has_debug_vis_implementation(self) -> bool: + """Whether the action term has a debug visualization implemented.""" + # check if function raises NotImplementedError + source_code = inspect.getsource(self._set_debug_vis_impl) + return "NotImplementedError" not in source_code + + @property + def IO_descriptor(self) -> GenericActionIODescriptor: + """The IO descriptor for the action term.""" + self._IO_descriptor.name = re.sub(r"([a-z])([A-Z])", r"\1_\2", self.__class__.__name__).lower() + self._IO_descriptor.full_path = f"{self.__class__.__module__}.{self.__class__.__name__}" + self._IO_descriptor.description = " ".join((self.__class__.__doc__ or "").split()) + self._IO_descriptor.export = self.export_IO_descriptor + return self._IO_descriptor + + @property + def export_IO_descriptor(self) -> bool: + """Whether to export the IO descriptor for the action term.""" + return self._export_IO_descriptor + + """ + Operations. + """ + + def set_debug_vis(self, debug_vis: bool) -> bool: + """Sets whether to visualize the action term data. + Args: + debug_vis: Whether to visualize the action term data. + Returns: + Whether the debug visualization was successfully set. False if the action term does + not support debug visualization. + """ + # check if debug visualization is supported + if not self.has_debug_vis_implementation: + return False + + import omni.kit.app + + # toggle debug visualization objects + self._set_debug_vis_impl(debug_vis) + # toggle debug visualization handles + if debug_vis: + # create a subscriber for the post update event if it doesn't exist + if self._debug_vis_handle is None: + app_interface = omni.kit.app.get_app_interface() + self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( + lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) + ) + else: + # remove the subscriber if it exists + if self._debug_vis_handle is not None: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + # return success + return True + + @abstractmethod + def process_actions(self, actions: wp.array, action_offset: int = 0): + """Processes the actions sent to the environment. + + Note: + This function is called once per environment step by the manager. + + Args: + actions: The full action buffer of shape (num_envs, total_action_dim). + action_offset: Column offset into the action buffer for this term. + """ + raise NotImplementedError + + @abstractmethod + def apply_actions(self): + """Applies the actions to the asset managed by the term. + + Note: + This is called at every simulation step by the manager. + """ + raise NotImplementedError + + def _set_debug_vis_impl(self, debug_vis: bool): + """Set debug visualization into visualization objects. + This function is responsible for creating the visualization objects if they don't exist + and input ``debug_vis`` is True. If the visualization objects exist, the function should + set their visibility into the stage. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + def _debug_vis_callback(self, event): + """Callback for debug visualization. + This function calls the visualization objects and sets the data to visualize into them. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + +class ActionManager(ManagerBase): + """Manager for processing and applying actions for a given world. + + The action manager handles the interpretation and application of user-defined + actions on a given world. It is comprised of different action terms that decide + the dimension of the expected actions. + + The action manager performs operations at two stages: + + * processing of actions: It splits the input actions to each term and performs any + pre-processing needed. This should be called once at every environment step. + * apply actions: This operation typically sets the processed actions into the assets in the + scene (such as robots). It should be called before every simulation step. + """ + + def __init__(self, cfg: object, env: ManagerBasedEnv): + """Initialize the action manager. + + Args: + cfg: The configuration object or dictionary (``dict[str, ActionTermCfg]``). + env: The environment instance. + + Raises: + ValueError: If the configuration is None. + """ + # check if config is None + if cfg is None: + raise ValueError("Action manager configuration is None. Please provide a valid configuration.") + + # call the base class constructor (this prepares the terms) + super().__init__(cfg, env) + # create buffers to store actions (Warp) + self._action = wp.zeros((self.num_envs, self.total_action_dim), dtype=wp.float32, device=self.device) + self._prev_action = wp.zeros((self.num_envs, self.total_action_dim), dtype=wp.float32, device=self.device) + + # torch views + self._action_torch = wp.to_torch(self._action) + self._prev_action_torch = wp.to_torch(self._prev_action) + + # check if any term has debug visualization implemented + self.cfg.debug_vis = False + for term in self._terms.values(): + self.cfg.debug_vis |= term.cfg.debug_vis + + def __str__(self) -> str: + """Returns: A string representation for action manager.""" + msg = f" contains {len(self._term_names)} active terms.\n" + + # create table for term information + table = PrettyTable() + table.title = f"Active Action Terms (shape: {self.total_action_dim})" + table.field_names = ["Index", "Name", "Dimension"] + # set alignment of table columns + table.align["Name"] = "l" + table.align["Dimension"] = "r" + # add info on each term + for index, (name, term) in enumerate(self._terms.items()): + table.add_row([index, name, term.action_dim]) + # convert table to string + msg += table.get_string() + msg += "\n" + + return msg + + """ + Properties. + """ + + @property + def total_action_dim(self) -> int: + """Total dimension of actions.""" + return sum(self.action_term_dim) + + @property + def active_terms(self) -> list[str]: + """Name of active action terms.""" + return self._term_names + + @property + def action_term_dim(self) -> list[int]: + """Shape of each action term.""" + return [term.action_dim for term in self._terms.values()] + + @property + def action(self) -> wp.array: + """The actions sent to the environment. Shape is (num_envs, total_action_dim).""" + return self._action + + @property + def prev_action(self) -> wp.array: + """The previous actions sent to the environment. Shape is (num_envs, total_action_dim).""" + return self._prev_action + + @property + def has_debug_vis_implementation(self) -> bool: + """Whether the command terms have debug visualization implemented.""" + # check if function raises NotImplementedError + has_debug_vis = False + for term in self._terms.values(): + has_debug_vis |= term.has_debug_vis_implementation + return has_debug_vis + + @property + def get_IO_descriptors(self) -> list[dict[str, Any]]: + """Get the IO descriptors for the action manager. + + Returns: + A dictionary with keys as the term names and values as the IO descriptors. + """ + + data = [] + + for term_name, term in self._terms.items(): + try: + data.append(term.IO_descriptor.__dict__.copy()) + except Exception as e: + print(f"Error getting IO descriptor for term '{term_name}': {e}") + + formatted_data = [] + for item in data: + name = item.pop("name") + formatted_item = {"name": name, "extras": item.pop("extras")} + if not item.pop("export"): + continue + for k, v in item.items(): + # Check if v is a tuple and convert to list + if isinstance(v, tuple): + v = list(v) + if k in ["description", "units"]: + formatted_item["extras"][k] = v + else: + formatted_item[k] = v + formatted_data.append(formatted_item) + + return formatted_data + + """ + Operations. + """ + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples. + + The first element of the tuple is the name of the term and the second element is the raw value(s) of the term. + + Args: + env_idx: The specific environment to pull the active terms from. + + Returns: + The active terms. + """ + terms = [] + idx = 0 + # Copy to host for debug/inspection (not on hot path). + for name, term in self._terms.items(): + term_actions = self._action_torch[env_idx, idx : idx + term.action_dim] + terms.append((name, term_actions.tolist())) + idx += term.action_dim + return terms + + def set_debug_vis(self, debug_vis: bool): + """Sets whether to visualize the action data. + Args: + debug_vis: Whether to visualize the action data. + Returns: + Whether the debug visualization was successfully set. False if the action + does not support debug visualization. + """ + for term in self._terms.values(): + term.set_debug_vis(debug_vis) + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, Any]: + """Resets the action history. + + Args: + env_ids: The specific environment indices to reset. + If None, all environments are considered. + env_mask: Boolean Warp mask of shape (num_envs,) indicating which envs to reset. + If provided, takes precedence over ``env_ids``. + + Returns: + An empty dictionary. + """ + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + if wp.get_device().is_capturing: + raise RuntimeError( + "ActionManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + # reset the action history + if env_mask is None: + self._prev_action.fill_(0.0) + self._action.fill_(0.0) + else: + wp.launch( + kernel=_zero_masked_2d, + dim=(self.num_envs, self.total_action_dim), + inputs=[env_mask, self._prev_action], + device=self.device, + ) + wp.launch( + kernel=_zero_masked_2d, + dim=(self.num_envs, self.total_action_dim), + inputs=[env_mask, self._action], + device=self.device, + ) + + # reset all action terms + for term in self._terms.values(): + term.reset(env_mask=env_mask) + # nothing to log here + return {} + + def process_action(self, action: wp.array): + """Processes the actions sent to the environment. + + Note: + This function should be called once per environment step. + + Args: + action: The actions to process. Shape is (num_envs, total_action_dim). + """ + # check if action dimension is valid + if self.total_action_dim != action.shape[1]: + raise ValueError(f"Invalid action shape, expected: {self.total_action_dim}, received: {action.shape[1]}.") + + # store the input actions + wp.copy(self._prev_action, self._action) + wp.copy(self._action, action) + + # split the actions and apply to each term + idx = 0 + for term in self._terms.values(): + term.process_actions(self._action, idx) + idx += term.action_dim + + def apply_action(self) -> None: + """Applies the actions to the environment/simulation. + + Note: + This should be called at every simulation step. + """ + for term in self._terms.values(): + term.apply_actions() + + def get_term(self, name: str) -> ActionTerm: + """Returns the action term with the specified name. + + Args: + name: The name of the action term. + + Returns: + The action term with the specified name. + """ + return self._terms[name] + + def serialize(self) -> dict: + """Serialize the action manager configuration. + + Returns: + A dictionary of serialized action term configurations. + """ + return {term_name: term.serialize() for term_name, term in self._terms.items()} + + """ + Helper functions. + """ + + def _prepare_terms(self): + # create buffers to parse and store terms + self._term_names: list[str] = list() + self._terms: dict[str, ActionTerm] = dict() + + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + # parse action terms from the config + for term_name, term_cfg in cfg_items: + # check if term config is None + if term_cfg is None: + continue + # check valid type + if not isinstance(term_cfg, ActionTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type ActionTermCfg." + f" Received: '{type(term_cfg)}'." + ) + # create the action term + term = term_cfg.class_type(term_cfg, self._env) + # sanity check if term is valid type + if not isinstance(term, ActionTerm): + raise TypeError(f"Returned object for the term '{term_name}' is not of type ActionType.") + # add term name and parameters + self._term_names.append(term_name) + self._terms[term_name] = term diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/command_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/command_manager.py new file mode 100644 index 000000000000..8b4e8f83dbe7 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/command_manager.py @@ -0,0 +1,599 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Command manager for generating and updating commands.""" + +from __future__ import annotations + +import inspect +import weakref +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from prettytable import PrettyTable + +from isaaclab_experimental.utils.warp.kernels import compute_reset_scale, count_masked + +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import CommandTermCfg + +# import omni.kit.app + + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +@wp.kernel +def _sum_and_zero_masked( + mask: wp.array(dtype=wp.bool), + scale: wp.array(dtype=wp.float32), + metric: wp.array(dtype=wp.float32), + out_mean: wp.array(dtype=wp.float32), +): + env_id = wp.tid() + if mask[env_id]: + wp.atomic_add(out_mean, 0, metric[env_id] * scale[0]) + metric[env_id] = 0.0 + + +@wp.kernel +def _zero_counter_masked(mask: wp.array(dtype=wp.bool), counter: wp.array(dtype=wp.int32)): + env_id = wp.tid() + if mask[env_id]: + counter[env_id] = 0 + + +@wp.kernel +def _step_time_left_and_build_resample_mask( + time_left: wp.array(dtype=wp.float32), + dt: wp.float32, + out_mask: wp.array(dtype=wp.bool), +): + env_id = wp.tid() + t = time_left[env_id] - dt + time_left[env_id] = t + out_mask[env_id] = t <= wp.float32(0.0) + + +@wp.kernel +def _resample_time_left_and_increment_counter( + mask: wp.array(dtype=wp.bool), + time_left: wp.array(dtype=wp.float32), + counter: wp.array(dtype=wp.int32), + rng_state: wp.array(dtype=wp.uint32), + lower: wp.float32, + upper: wp.float32, +): + env_id = wp.tid() + if mask[env_id]: + s = rng_state[env_id] + time_left[env_id] = wp.randf(s, lower, upper) + rng_state[env_id] = s + counter[env_id] = counter[env_id] + 1 + + +class CommandTerm(ManagerTermBase): + """The base class for implementing a command term. + + A command term is used to generate commands for goal-conditioned tasks. For example, + in the case of a goal-conditioned navigation task, the command term can be used to + generate a target position for the robot to navigate to. + + It implements a resampling mechanism that allows the command to be resampled at a fixed + frequency. The resampling frequency can be specified in the configuration object. + Additionally, it is possible to assign a visualization function to the command term + that can be used to visualize the command in the simulator. + """ + + def __init__(self, cfg: CommandTermCfg, env: ManagerBasedRLEnv): + """Initialize the command generator class. + + Args: + cfg: The configuration parameters for the command generator. + env: The environment object. + """ + super().__init__(cfg, env) + + # create buffers to store the command + # -- metrics that can be used for logging (metric_name -> wp.array(num_envs,)) + self.metrics = dict() + # -- time left before resampling + self.time_left_wp = wp.zeros((self.num_envs,), dtype=wp.float32, device=self.device) + # -- counter for the number of times the command has been resampled within the current episode + self.command_counter_wp = wp.zeros((self.num_envs,), dtype=wp.int32, device=self.device) + + # reset/compute scratch buffers (Warp) + self._reset_count_wp = wp.zeros((1,), dtype=wp.int32, device=self.device) + self._reset_scale_wp = wp.zeros((1,), dtype=wp.float32, device=self.device) + self._resample_mask_wp = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + + # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) + self._debug_vis_handle = None + # set initial state of debug visualization + self.set_debug_vis(self.cfg.debug_vis) + + # pre-allocated reset logging extras (filled during reset) + self._reset_metric_mean_wp: dict[str, wp.array] = {} + self._reset_extras: dict[str, torch.Tensor] = {} + + def __del__(self): + """Unsubscribe from the callbacks.""" + if self._debug_vis_handle: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + + """ + Properties + """ + + @property + @abstractmethod + def command(self) -> torch.Tensor | wp.array: + """The command tensor. Shape is (num_envs, command_dim).""" + raise NotImplementedError + + @property + def has_debug_vis_implementation(self) -> bool: + """Whether the command generator has a debug visualization implemented.""" + # check if function raises NotImplementedError + source_code = inspect.getsource(self._set_debug_vis_impl) + return "NotImplementedError" not in source_code + + @property + def reset_extras(self) -> dict[str, torch.Tensor]: + """Pre-allocated reset logging extras for this command term.""" + return self._reset_extras + + """ + Operations. + """ + + def set_debug_vis(self, debug_vis: bool) -> bool: + """Sets whether to visualize the command data. + + Args: + debug_vis: Whether to visualize the command data. + + Returns: + Whether the debug visualization was successfully set. False if the command + generator does not support debug visualization. + """ + # check if debug visualization is supported + if not self.has_debug_vis_implementation: + return False + # toggle debug visualization objects + self._set_debug_vis_impl(debug_vis) + # toggle debug visualization handles + if debug_vis: + # only enable debug_vis if omniverse is available + from isaaclab.sim.simulation_context import SimulationContext + + sim_context = SimulationContext.instance() + if not sim_context.has_omniverse_visualizer(): + return False + # create a subscriber for the post update event if it doesn't exist + if self._debug_vis_handle is None: + import omni.kit.app + + app_interface = omni.kit.app.get_app_interface() + self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( + lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) + ) + else: + # remove the subscriber if it exists + if self._debug_vis_handle is not None: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + # return success + return True + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, torch.Tensor]: + """Reset the command generator and log metrics. + + This function resets the command counter and resamples the command. It should be called + at the beginning of each episode. + + Args: + env_ids: The specific environment indices to reset. + If None, all environments are considered. + env_mask: Boolean Warp mask of shape (num_envs,) selecting reset environments. + If provided, takes precedence over ``env_ids``. + + Returns: + A dictionary containing the information to log under the "{name}" key. + """ + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + if wp.get_device().is_capturing: + raise RuntimeError( + "CommandTerm.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + # compute selected count and reset scale + self._reset_count_wp.zero_() + self._reset_scale_wp.zero_() + wp.launch(kernel=count_masked, dim=self.num_envs, inputs=[env_mask, self._reset_count_wp], device=self.device) + wp.launch( + kernel=compute_reset_scale, + dim=1, + inputs=[self._reset_count_wp, 1.0, self._reset_scale_wp], + device=self.device, + ) + + # update pre-allocated reset extras and clear selected metric rows + for metric_name, metric_value_wp in self.metrics.items(): + out_mean_wp = self._reset_metric_mean_wp[metric_name] + out_mean_wp.zero_() + wp.launch( + kernel=_sum_and_zero_masked, + dim=self.num_envs, + inputs=[env_mask, self._reset_scale_wp, metric_value_wp, out_mean_wp], + device=self.device, + ) + + # set the command counter to zero + wp.launch( + kernel=_zero_counter_masked, + dim=self.num_envs, + inputs=[env_mask, self.command_counter_wp], + device=self.device, + ) + # resample the command + self._resample(env_mask=env_mask) + + return self._reset_extras + + def _prepare_reset_extras(self): + """Pre-allocate reset logging extras from metric definitions.""" + self._reset_metric_mean_wp = {} + self._reset_extras = {} + for metric_name, metric_value in self.metrics.items(): + if not isinstance(metric_value, wp.array): + raise TypeError( + f"Metric '{metric_name}' must be a wp.array(dtype=wp.float32, shape=(num_envs,)). " + f"Received: {type(metric_value)}" + ) + if metric_value.dtype != wp.float32 or metric_value.ndim != 1: + raise TypeError( + f"Metric '{metric_name}' must be wp.float32 1D. " + f"Received dtype={metric_value.dtype}, ndim={metric_value.ndim}." + ) + if metric_value.shape[0] != self.num_envs: + raise ValueError( + f"Metric '{metric_name}' must have shape ({self.num_envs},), received {metric_value.shape}." + ) + out_mean_wp = wp.zeros((1,), dtype=wp.float32, device=self.device) + self._reset_metric_mean_wp[metric_name] = out_mean_wp + self._reset_extras[metric_name] = wp.to_torch(out_mean_wp)[0] + + def compute(self, dt: float): + """Compute the command. + + Args: + dt: The time step passed since the last call to compute. + """ + # update the metrics based on current state + self._update_metrics() + # reduce the time left before resampling and build resample mask + wp.launch( + kernel=_step_time_left_and_build_resample_mask, + dim=self.num_envs, + inputs=[self.time_left_wp, float(dt), self._resample_mask_wp], + device=self.device, + ) + # resample masked envs + self._resample(env_mask=self._resample_mask_wp) + # update the command + self._update_command() + + """ + Helper functions. + """ + + def _resample(self, env_mask: wp.array): + """Resample the command. + + This function resamples the command and time for which the command is applied for the + specified environment mask. + + Args: + env_mask: The boolean environment mask to resample. + """ + if not isinstance(env_mask, wp.array): + raise TypeError(f"env_mask must be a wp.array (got {type(env_mask)}).") + if env_mask.dtype != wp.bool or env_mask.ndim != 1: + raise TypeError(f"env_mask must be wp.bool 1D (got dtype={env_mask.dtype}, ndim={env_mask.ndim}).") + if self._env.rng_state_wp is None: + raise RuntimeError("Environment rng_state_wp is not initialized.") + + # resample time-left and increment command-counter for masked envs + wp.launch( + kernel=_resample_time_left_and_increment_counter, + dim=self.num_envs, + inputs=[ + env_mask, + self.time_left_wp, + self.command_counter_wp, + self._env.rng_state_wp, + float(self.cfg.resampling_time_range[0]), + float(self.cfg.resampling_time_range[1]), + ], + device=self.device, + ) + # resample command values for masked envs + self._resample_command(env_mask) + + """ + Implementation specific functions. + """ + + @abstractmethod + def _update_metrics(self): + """Update the metrics based on the current state.""" + raise NotImplementedError + + @abstractmethod + def _resample_command(self, env_mask: wp.array): + """Resample the command for the specified masked environments.""" + raise NotImplementedError + + @abstractmethod + def _update_command(self): + """Update the command based on the current state.""" + raise NotImplementedError + + def _set_debug_vis_impl(self, debug_vis: bool): + """Set debug visualization into visualization objects. + + This function is responsible for creating the visualization objects if they don't exist + and input ``debug_vis`` is True. If the visualization objects exist, the function should + set their visibility into the stage. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + def _debug_vis_callback(self, event): + """Callback for debug visualization. + + This function calls the visualization objects and sets the data to visualize into them. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + +class CommandManager(ManagerBase): + """Manager for generating commands. + + The command manager is used to generate commands for an agent to execute. It makes it convenient to switch + between different command generation strategies within the same environment. For instance, in an environment + consisting of a quadrupedal robot, the command to it could be a velocity command or position command. + By keeping the command generation logic separate from the environment, it is easy to switch between different + command generation strategies. + + The command terms are implemented as classes that inherit from the :class:`CommandTerm` class. + Each command generator term should also have a corresponding configuration class that inherits from the + :class:`CommandTermCfg` class. + """ + + _env: ManagerBasedRLEnv + """The environment instance.""" + + def __init__(self, cfg: object, env: ManagerBasedRLEnv): + """Initialize the command manager. + + Args: + cfg: The configuration object or dictionary (``dict[str, CommandTermCfg]``). + env: The environment instance. + """ + # create buffers to parse and store terms + self._terms: dict[str, CommandTerm] = dict() + + # call the base class constructor (this prepares the terms) + super().__init__(cfg, env) + # store the commands + self._commands = dict() + if self.cfg: + self.cfg.debug_vis = False + for term in self._terms.values(): + self.cfg.debug_vis |= term.cfg.debug_vis + + # reset logging extras (persistent holder for orchestrator aggregation) + self._reset_extras: dict[str, torch.Tensor] = {} + for term_name, term in self._terms.items(): + for metric_name, metric_value in term.reset_extras.items(): + self._reset_extras[f"Metrics/{term_name}/{metric_name}"] = metric_value + + def __str__(self) -> str: + """Returns: A string representation for the command manager.""" + msg = f" contains {len(self._terms.values())} active terms.\n" + + # create table for term information + table = PrettyTable() + table.title = "Active Command Terms" + table.field_names = ["Index", "Name", "Type"] + # set alignment of table columns + table.align["Name"] = "l" + # add info on each term + for index, (name, term) in enumerate(self._terms.items()): + table.add_row([index, name, term.__class__.__name__]) + # convert table to string + msg += table.get_string() + msg += "\n" + + return msg + + """ + Properties. + """ + + @property + def active_terms(self) -> list[str]: + """Name of active command terms.""" + return list(self._terms.keys()) + + @property + def has_debug_vis_implementation(self) -> bool: + """Whether the command terms have debug visualization implemented.""" + # check if function raises NotImplementedError + has_debug_vis = False + for term in self._terms.values(): + has_debug_vis |= term.has_debug_vis_implementation + return has_debug_vis + + @property + def reset_extras(self) -> dict[str, torch.Tensor]: + """Persistent reset logging extras for command terms.""" + return self._reset_extras + + """ + Operations. + """ + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples. + + The first element of the tuple is the name of the term and the second element is the raw value(s) of the term. + + Args: + env_idx: The specific environment to pull the active terms from. + + Returns: + The active terms. + """ + + terms = [] + for name, term in self._terms.items(): + command = term.command + if isinstance(command, wp.array): + command = wp.to_torch(command) + terms.append((name, command[env_idx].cpu().tolist())) + return terms + + def set_debug_vis(self, debug_vis: bool): + """Sets whether to visualize the command data. + + Args: + debug_vis: Whether to visualize the command data. + + Returns: + Whether the debug visualization was successfully set. False if the command + generator does not support debug visualization. + """ + for term in self._terms.values(): + term.set_debug_vis(debug_vis) + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, torch.Tensor]: + """Reset the command terms and log their metrics. + + This function resets the command counter and resamples the command for each term. It should be called + at the beginning of each episode. + + Args: + env_ids: The specific environment indices to reset. + If None, all environments are considered. + env_mask: Boolean Warp mask of shape (num_envs,) selecting reset environments. + If provided, takes precedence over ``env_ids``. + + Returns: + A dictionary containing the information to log under the "Metrics/{term_name}/{metric_name}" key. + """ + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + if wp.get_device().is_capturing: + raise RuntimeError( + "CommandManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + for term in self._terms.values(): + # reset the command term + term.reset(env_mask=env_mask) + + return self._reset_extras + + def compute(self, dt: float): + """Updates the commands. + + This function calls each command term managed by the class. + + Args: + dt: The time-step interval of the environment. + + """ + # iterate over all the command terms + for term in self._terms.values(): + # compute term's value + term.compute(dt) + + def get_command(self, name: str) -> torch.Tensor: + """Returns the command for the specified command term. + + Args: + name: The name of the command term. + + Returns: + The command tensor of the specified command term. + """ + command = self._terms[name].command + if isinstance(command, wp.array): + return wp.to_torch(command) + return command + + def get_term(self, name: str) -> CommandTerm: + """Returns the command term with the specified name. + + Args: + name: The name of the command term. + + Returns: + The command term with the specified name. + """ + return self._terms[name] + + """ + Helper functions. + """ + + def _prepare_terms(self): + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + # iterate over all the terms + for term_name, term_cfg in cfg_items: + # check for non config + if term_cfg is None: + continue + # check for valid config type + if not isinstance(term_cfg, CommandTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type CommandTermCfg." + f" Received: '{type(term_cfg)}'." + ) + # create the action term + term = term_cfg.class_type(term_cfg, self._env) + # sanity check if term is valid type + if not isinstance(term, CommandTerm): + raise TypeError(f"Returned object for the term '{term_name}' is not of type CommandType.") + # pre-build reset extras once for capture-friendly reset logging + term._prepare_reset_extras() + # add class to dict + self._terms[term_name] = term diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/event_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/event_manager.py new file mode 100644 index 000000000000..3b36613a2ac9 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/event_manager.py @@ -0,0 +1,499 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Event manager for orchestrating operations based on different simulation events (experimental, Warp-first). + +This module mirrors :mod:`isaaclab.managers.event_manager` but removes torch ops from hot paths to enable +CUDA-graph-friendly execution for modes that can be captured (notably ``interval``). + +Key differences from the stable manager: +- ``interval`` and ``reset`` modes are **mask-based** internally and implemented using Warp kernels. +- No ``torch.rand`` / ``nonzero`` / tensor allocations in the ``interval`` apply path. + +Event term signature for Warp-first interval/reset modes: + ``func(env, env_mask_wp, **params) -> None`` + +Other modes (e.g. ``prestartup``, ``startup``) are called using the stable convention: + ``func(env, env_ids, **params) -> None`` +""" + +from __future__ import annotations + +import inspect +import logging +from collections.abc import Sequence + +import torch +import warp as wp +from prettytable import PrettyTable + +from .manager_base import ManagerBase +from .manager_term_cfg import EventTermCfg + +logger = logging.getLogger(__name__) + + +@wp.kernel +def _interval_init_per_env( + time_left: wp.array(dtype=wp.float32), + rng_state: wp.array(dtype=wp.uint32), + lower: wp.float32, + upper: wp.float32, +): + env_id = wp.tid() + s = rng_state[env_id] + time_left[env_id] = wp.randf(s, lower, upper) + rng_state[env_id] = s + + +@wp.kernel +def _interval_init_global( + time_left: wp.array(dtype=wp.float32), + rng_state: wp.array(dtype=wp.uint32), + lower: wp.float32, + upper: wp.float32, +): + # single element + s = rng_state[0] + time_left[0] = wp.randf(s, lower, upper) + rng_state[0] = s + + +@wp.kernel +def _interval_step_per_env( + time_left: wp.array(dtype=wp.float32), + rng_state: wp.array(dtype=wp.uint32), + trigger_mask: wp.array(dtype=wp.bool), + dt: wp.float32, + lower: wp.float32, + upper: wp.float32, +): + env_id = wp.tid() + t = time_left[env_id] - dt + if t < wp.float32(1.0e-6): + trigger_mask[env_id] = True + s = rng_state[env_id] + time_left[env_id] = wp.randf(s, lower, upper) + rng_state[env_id] = s + else: + trigger_mask[env_id] = False + time_left[env_id] = t + + +@wp.kernel +def _interval_step_global( + time_left: wp.array(dtype=wp.float32), + rng_state: wp.array(dtype=wp.uint32), + trigger_flag: wp.array(dtype=wp.bool), + dt: wp.float32, + lower: wp.float32, + upper: wp.float32, +): + t = time_left[0] - dt + if t < wp.float32(1.0e-6): + trigger_flag[0] = True + s = rng_state[0] + time_left[0] = wp.randf(s, lower, upper) + rng_state[0] = s + else: + trigger_flag[0] = False + time_left[0] = t + + +@wp.kernel +def _interval_reset_selected( + env_mask: wp.array(dtype=wp.bool), + time_left: wp.array(dtype=wp.float32), + rng_state: wp.array(dtype=wp.uint32), + lower: wp.float32, + upper: wp.float32, +): + env_id = wp.tid() + if env_mask[env_id]: + s = rng_state[env_id] + time_left[env_id] = wp.randf(s, lower, upper) + rng_state[env_id] = s + + +@wp.kernel +def _seed_global_rng_from_env_rng( + env_rng_state: wp.array(dtype=wp.uint32), + global_rng_state: wp.array(dtype=wp.uint32), +): + global_rng_state[0] = wp.rand_init(wp.int32(env_rng_state[0]), wp.int32(0)) + + +@wp.kernel +def _reset_compute_valid_mask( + in_mask: wp.array(dtype=wp.bool), + last_triggered_step: wp.array(dtype=wp.int32), + triggered_once: wp.array(dtype=wp.bool), + out_mask: wp.array(dtype=wp.bool), + global_step_count_buf: wp.array(dtype=wp.int32), + min_step_count: wp.int32, +): + env_id = wp.tid() + if not in_mask[env_id]: + out_mask[env_id] = False + return + + global_step_count = global_step_count_buf[0] + if min_step_count == wp.int32(0): + out_mask[env_id] = True + last_triggered_step[env_id] = global_step_count + triggered_once[env_id] = True + return + + last = last_triggered_step[env_id] + once = triggered_once[env_id] + steps_since = global_step_count - last + valid = steps_since >= min_step_count + # Trigger at least once at the start (matching stable behavior). + valid = valid or ((last == wp.int32(0)) and (not once)) + out_mask[env_id] = valid + if valid: + last_triggered_step[env_id] = global_step_count + triggered_once[env_id] = True + + +class EventManager(ManagerBase): + """Manager for orchestrating operations based on different simulation events (Warp-first for interval/reset).""" + + def __init__(self, cfg: object, env): + # create buffers to parse and store terms + self._mode_term_names: dict[str, list[str]] = {} + self._mode_term_cfgs: dict[str, list[EventTermCfg]] = {} + self._mode_class_term_cfgs: dict[str, list[EventTermCfg]] = {} + + # Warp buffers for interval/reset modes (populated in _prepare_terms) + self._interval_term_time_left_wp: list[wp.array] = [] + self._interval_term_ranges: list[tuple[float, float]] = [] + self._interval_term_is_global: list[bool] = [] + # Scalar RNG state for global interval timers (allocated lazily if needed). + self._interval_global_rng_state_wp: wp.array | None = None + + self._reset_term_last_triggered_step_wp: list[wp.array] = [] + self._reset_term_triggered_once_wp: list[wp.array] = [] + + super().__init__(cfg, env) + + # persistent scratch mask for per-term interval/reset triggering (must be stable pointer for capture) + self._scratch_term_mask_wp = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + + # scratch scalar flag & broadcast view for global interval triggering (no per-term masks) + self._scratch_interval_trigger_flag_wp = wp.zeros((1,), dtype=wp.bool, device=self.device) + self._scratch_interval_trigger_mask_view_wp = wp.array( + ptr=self._scratch_interval_trigger_flag_wp.ptr, + dtype=wp.bool, + shape=(self.num_envs,), + strides=(0,), + capacity=self._scratch_interval_trigger_flag_wp.capacity, + device=self._scratch_interval_trigger_flag_wp.device, + copy=False, + ) + + def __str__(self) -> str: + msg = f" contains {len(self._mode_term_names)} active terms.\n" + for mode in self._mode_term_names: + table = PrettyTable() + table.title = f"Active Event Terms in Mode: '{mode}'" + if mode == "interval": + table.field_names = ["Index", "Name", "Interval time range (s)"] + table.align["Name"] = "l" + for index, (name, cfg) in enumerate(zip(self._mode_term_names[mode], self._mode_term_cfgs[mode])): + table.add_row([index, name, cfg.interval_range_s]) + else: + table.field_names = ["Index", "Name"] + table.align["Name"] = "l" + for index, name in enumerate(self._mode_term_names[mode]): + table.add_row([index, name]) + msg += table.get_string() + msg += "\n" + return msg + + @property + def active_terms(self) -> dict[str, list[str]]: + return self._mode_term_names + + @property + def available_modes(self) -> list[str]: + return list(self._mode_term_names.keys()) + + def set_term_cfg(self, term_name: str, cfg: EventTermCfg): + term_found = False + for mode, terms in self._mode_term_names.items(): + if term_name in terms: + self._mode_term_cfgs[mode][terms.index(term_name)] = cfg + term_found = True + break + if not term_found: + raise ValueError(f"Event term '{term_name}' not found.") + + def get_term_cfg(self, term_name: str) -> EventTermCfg: + for mode, terms in self._mode_term_names.items(): + if term_name in terms: + return self._mode_term_cfgs[mode][terms.index(term_name)] + raise ValueError(f"Event term '{term_name}' not found.") + + def reset( + self, + env_ids: Sequence[int] | slice | torch.Tensor | wp.array | None = None, + *, + env_mask: wp.array | torch.Tensor | None = None, + ) -> dict[str, float]: + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + # Keep all id->mask resolution strictly outside capture. + if wp.get_device().is_capturing: + raise RuntimeError( + "EventManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + # reset class terms (mask-based) + for mode_cfg in self._mode_class_term_cfgs.values(): + for term_cfg in mode_cfg: + term_cfg.func.reset(env_mask=env_mask) + + # reset interval timers for non-global interval events + if "interval" in self._mode_term_cfgs: + for i, term_cfg in enumerate(self._mode_term_cfgs["interval"]): + if term_cfg.is_global_time: + continue + lower, upper = self._interval_term_ranges[i] + wp.launch( + kernel=_interval_reset_selected, + dim=self.num_envs, + inputs=[ + env_mask, + self._interval_term_time_left_wp[i], + self._env.rng_state_wp, + float(lower), + float(upper), + ], + device=self.device, + ) + return {} + + def apply( + self, + mode: str, + env_ids: Sequence[int] | slice | torch.Tensor | wp.array | None = None, + dt: float | None = None, + global_env_step_count: wp.array | None = None, + *, + env_mask_wp: wp.array | None = None, + ): + if mode not in self._mode_term_names: + logger.warning(f"Event mode '{mode}' is not defined. Skipping event.") + return + + # SceneEntityCfg-dependent term params should be resolved before entering captured event paths. + if not self._is_scene_entities_resolved: + if wp.get_device().is_capturing: + raise RuntimeError( + "EventManager terms are unresolved during CUDA graph capture. " + "Resolve terms before entering captured event paths." + ) + if self._env.sim.is_playing(): + self._resolve_terms_callback(None) + + if mode == "interval": + if dt is None: + raise ValueError(f"Event mode '{mode}' requires the time-step of the environment.") + if env_ids is not None: + raise ValueError( + f"Event mode '{mode}' does not require environment indices. This is an undefined behavior." + ) + self._apply_interval(float(dt)) + return + + # resolve the environment mask + if env_mask_wp is None: + if wp.get_device().is_capturing: + raise ValueError(f"Event mode '{mode}' requires the environment mask to be provided when capturing.") + env_mask_wp = self._env.resolve_env_mask(env_ids=env_ids) + + if mode == "reset": + if global_env_step_count is None: + raise ValueError(f"Event mode '{mode}' requires the total number of environment steps to be provided.") + self._apply_reset(env_mask_wp, global_env_step_count) + return + + # other modes (startup, prestartup, custom) — env_mask forwarded + for term_cfg in self._mode_term_cfgs[mode]: + term_cfg.func(self._env, env_mask_wp, **term_cfg.params) + + def _apply_interval(self, dt: float) -> None: + if self._env.rng_state_wp is None: + raise RuntimeError("EventManager._apply_interval: env.rng_state_wp is not initialized.") + + # iterate over all the interval terms (fixed list; captured graph-friendly) + for i, term_cfg in enumerate(self._mode_term_cfgs["interval"]): + lower, upper = self._interval_term_ranges[i] + if self._interval_term_is_global[i]: + if self._interval_global_rng_state_wp is None: + raise RuntimeError( + "EventManager._apply_interval: _interval_global_rng_state_wp is not initialized." + ) + # update scalar time_left and scalar flag (mask is a broadcast view of the flag) + wp.launch( + kernel=_interval_step_global, + dim=1, + inputs=[ + self._interval_term_time_left_wp[i], + self._interval_global_rng_state_wp, + self._scratch_interval_trigger_flag_wp, + float(dt), + float(lower), + float(upper), + ], + device=self.device, + ) + term_cfg.func(self._env, self._scratch_interval_trigger_mask_view_wp, **term_cfg.params) + else: + wp.launch( + kernel=_interval_step_per_env, + dim=self.num_envs, + inputs=[ + self._interval_term_time_left_wp[i], + self._env.rng_state_wp, + self._scratch_term_mask_wp, + float(dt), + float(lower), + float(upper), + ], + device=self.device, + ) + term_cfg.func(self._env, self._scratch_term_mask_wp, **term_cfg.params) + + def _apply_reset(self, env_mask_wp: wp.array, global_env_step_count_wp: wp.array) -> None: + if self._scratch_term_mask_wp is None: + raise RuntimeError("EventManager._apply_reset: _scratch_term_mask_wp is not initialized.") + + # iterate over all the reset terms + for index, term_cfg in enumerate(self._mode_term_cfgs["reset"]): + min_step_count = int(term_cfg.min_step_count_between_reset) + wp.launch( + kernel=_reset_compute_valid_mask, + dim=self.num_envs, + inputs=[ + env_mask_wp, + self._reset_term_last_triggered_step_wp[index], + self._reset_term_triggered_once_wp[index], + self._scratch_term_mask_wp, + global_env_step_count_wp, + int(min_step_count), + ], + device=self.device, + ) + term_cfg.func(self._env, self._scratch_term_mask_wp, **term_cfg.params) + + def _prepare_terms(self): + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + + # iterate over all the terms + for term_name, term_cfg in cfg_items: + if term_cfg is None: + continue + if not isinstance(term_cfg, EventTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type EventTermCfg. Received:" + f" '{type(term_cfg)}'." + ) + + if term_cfg.mode != "reset" and term_cfg.min_step_count_between_reset != 0: + logger.warning( + f"Event term '{term_name}' has 'min_step_count_between_reset' set to a non-zero value" + " but the mode is not 'reset'. Ignoring the 'min_step_count_between_reset' value." + ) + + # resolve common parameters + self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) + + # check if mode is pre-startup and scene replication is enabled + if term_cfg.mode == "prestartup" and self._env.scene.cfg.replicate_physics: + raise RuntimeError( + "Scene replication is enabled, which may affect USD-level randomization." + " When assets are replicated, their properties are shared across instances," + " potentially leading to unintended behavior." + " For stable USD-level randomization, please disable scene replication" + " by setting 'replicate_physics' to False in 'InteractiveSceneCfg'." + ) + + # for prestartup callable class terms, initialize early (stable behavior) + if inspect.isclass(term_cfg.func) and term_cfg.mode == "prestartup": + logger.info(f"Initializing term '{term_name}' with class '{term_cfg.func.__name__}'.") + term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env) + + # ensure mode buckets exist + if term_cfg.mode not in self._mode_term_names: + self._mode_term_names[term_cfg.mode] = [] + self._mode_term_cfgs[term_cfg.mode] = [] + self._mode_class_term_cfgs[term_cfg.mode] = [] + # add term name and parameters + self._mode_term_names[term_cfg.mode].append(term_name) + self._mode_term_cfgs[term_cfg.mode].append(term_cfg) + + if inspect.isclass(term_cfg.func): + self._mode_class_term_cfgs[term_cfg.mode].append(term_cfg) + + # per-mode Warp buffers + if term_cfg.mode == "interval": + if term_cfg.interval_range_s is None: + raise ValueError( + f"Event term '{term_name}' has mode 'interval' but 'interval_range_s' is not specified." + ) + lower, upper = term_cfg.interval_range_s + self._interval_term_ranges.append((float(lower), float(upper))) + + if term_cfg.is_global_time: + # allocate and seed scalar global RNG state if needed (avoid consuming env0 RNG stream) + if self._interval_global_rng_state_wp is None: + if self._env.rng_state_wp is None: + raise RuntimeError("EventManager._prepare_terms: env.rng_state_wp is not initialized.") + self._interval_global_rng_state_wp = wp.zeros((1,), dtype=wp.uint32, device=self.device) + wp.launch( + kernel=_seed_global_rng_from_env_rng, + dim=1, + inputs=[self._env.rng_state_wp, self._interval_global_rng_state_wp], + device=self.device, + ) + time_left = wp.zeros((1,), dtype=wp.float32, device=self.device) + wp.launch( + kernel=_interval_init_global, + dim=1, + inputs=[time_left, self._interval_global_rng_state_wp, float(lower), float(upper)], + device=self.device, + ) + self._interval_term_time_left_wp.append(time_left) + self._interval_term_is_global.append(True) + else: + time_left = wp.zeros((self.num_envs,), dtype=wp.float32, device=self.device) + wp.launch( + kernel=_interval_init_per_env, + dim=self.num_envs, + inputs=[time_left, self._env.rng_state_wp, float(lower), float(upper)], + device=self.device, + ) + self._interval_term_time_left_wp.append(time_left) + self._interval_term_is_global.append(False) + + elif term_cfg.mode == "reset": + if term_cfg.min_step_count_between_reset < 0: + raise ValueError( + f"Event term '{term_name}' has mode 'reset' but 'min_step_count_between_reset' is" + f" negative: {term_cfg.min_step_count_between_reset}. Please provide a non-negative value." + ) + # per-env last-trigger bookkeeping (Warp) + self._reset_term_last_triggered_step_wp.append( + wp.zeros((self.num_envs,), dtype=wp.int32, device=self.device) + ) + self._reset_term_triggered_once_wp.append(wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device)) diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/manager_base.py b/source/isaaclab_experimental/isaaclab_experimental/managers/manager_base.py new file mode 100644 index 000000000000..d40920b23fe9 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/manager_base.py @@ -0,0 +1,454 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base classes for managers (experimental). + +This file is a local copy of :mod:`isaaclab.managers.manager_base` placed under +``isaaclab_experimental`` so it can evolve independently for Warp-first / graph-friendly +pipelines. + +Key differences from the stable version: +- :meth:`ManagerTermBase.reset` is **mask-based** (preferred for capture-friendly subset operations). +""" + +from __future__ import annotations + +import contextlib +import copy +import inspect +import logging +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import warp as wp + +import isaaclab.utils.string as string_utils +from isaaclab.utils import class_to_dict, string_to_callable + +from isaaclab_experimental.utils.warp import is_warp_capturable + +from .manager_term_cfg import ManagerTermBaseCfg +from .scene_entity_cfg import SceneEntityCfg + +# import omni.timeline + + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + +# import logger +logger = logging.getLogger(__name__) + + +class ManagerTermBase(ABC): + """Base class for manager terms. + + Manager term implementations can be functions or classes. If the term is a class, it should + inherit from this base class and implement the required methods. + + Each manager is implemented as a class that inherits from the :class:`ManagerBase` class. Each manager + class should also have a corresponding configuration class that defines the configuration terms for the + manager. Each term should the :class:`ManagerTermBaseCfg` class or its subclass. + + Example pseudo-code for creating a manager: + + .. code-block:: python + + from isaaclab.utils import configclass + from isaaclab.utils.mdp import ManagerBase, ManagerTermBaseCfg + + + @configclass + class MyManagerCfg: + my_term_1: ManagerTermBaseCfg = ManagerTermBaseCfg(...) + my_term_2: ManagerTermBaseCfg = ManagerTermBaseCfg(...) + my_term_3: ManagerTermBaseCfg = ManagerTermBaseCfg(...) + + + # define manager instance + my_manager = ManagerBase(cfg=ManagerCfg(), env=env) + + """ + + def __init__(self, cfg: ManagerTermBaseCfg, env: ManagerBasedEnv): + """Initialize the manager term. + + Args: + cfg: The configuration object. + env: The environment instance. + """ + # store the inputs + self.cfg = cfg + self._env = env + + """ + Properties. + """ + + @property + def num_envs(self) -> int: + """Number of environments.""" + return self._env.num_envs + + @property + def device(self) -> str: + """Device on which to perform computations.""" + return self._env.device + + @property + def __name__(self) -> str: + """Return the name of the class or subclass.""" + return self.__class__.__name__ + + """ + Operations. + """ + + def reset(self, env_mask: wp.array | None = None) -> None: + """Resets the manager term (mask-based). + + Args: + env_mask: Boolean mask of shape (num_envs,) indicating which envs to reset. + If None, all envs are considered. + """ + pass + + def serialize(self) -> dict: + """General serialization call. Includes the configuration dict.""" + return {"cfg": class_to_dict(self.cfg)} + + def __call__(self, *args) -> Any: + """Returns the value of the term required by the manager. + + In case of a class implementation, this function is called by the manager + to get the value of the term. The arguments passed to this function are + the ones specified in the term configuration (see :attr:`ManagerTermBaseCfg.params`). + + .. attention:: + To be consistent with memory-less implementation of terms with functions, it is + recommended to ensure that the returned mutable quantities are cloned before + returning them. For instance, if the term returns a tensor, it is recommended + to ensure that the returned tensor is a clone of the original tensor. This prevents + the manager from storing references to the tensors and altering the original tensors. + + Args: + *args: Variable length argument list. + + Returns: + The value of the term. + """ + raise NotImplementedError("The method '__call__' should be implemented by the subclass.") + + +class ManagerBase(ABC): + """Base class for all managers.""" + + def __init__(self, cfg: object, env: ManagerBasedEnv): + """Initialize the manager. + + This function is responsible for parsing the configuration object and creating the terms. + + If the simulation is not playing, the scene entities are not resolved immediately. + Instead, the resolution is deferred until the simulation starts. This is done to ensure + that the scene entities are resolved even if the manager is created after the simulation + has already started. + + Args: + cfg: The configuration object. If None, the manager is initialized without any terms. + env: The environment instance. + """ + # store the inputs + self.cfg = copy.deepcopy(cfg) + self._env = env + + # flag for whether the scene entities have been resolved + # if sim is playing, we resolve the scene entities directly while preparing the terms + self._is_scene_entities_resolved = self._env.sim.is_playing() + + # if the simulation is not playing, we use callbacks to trigger the resolution of the scene + # entities configuration. this is needed for cases where the manager is created after the simulation + # but before the simulation is playing. + # FIXME: Once Isaac Sim supports storing this information as USD schema, we can remove this + # callback and resolve the scene entities directly inside `_prepare_terms`. + # if not self._env.sim.is_playing(): + # # note: Use weakref on all callbacks to ensure that this object can be deleted when its destructor + # # is called + # # The order is set to 20 to allow asset/sensor initialization to complete before the scene entities + # # are resolved. Those have the order 10. + # timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + # self._resolve_terms_handle = timeline_event_stream.create_subscription_to_pop_by_type( + # int(omni.timeline.TimelineEventType.PLAY), + # lambda event, obj=weakref.proxy(self): obj._resolve_terms_callback(event), + # order=20, + # ) + # else: + # self._resolve_terms_handle = None + self._resolve_terms_handle = None + + # parse config to create terms information + if self.cfg: + self._prepare_terms() + + def __del__(self): + """Delete the manager.""" + # Suppress errors during Python shutdown + # Note: contextlib may be None during interpreter shutdown + if contextlib is not None: + with contextlib.suppress(ImportError, AttributeError, TypeError): + if getattr(self, "_resolve_terms_handle", None): + self._resolve_terms_handle.unsubscribe() + self._resolve_terms_handle = None + + """ + Properties. + """ + + @property + def num_envs(self) -> int: + """Number of environments.""" + return self._env.num_envs + + @property + def device(self) -> str: + """Device on which to perform computations.""" + return self._env.device + + @property + @abstractmethod + def active_terms(self) -> list[str] | dict[str, list[str]]: + """Name of active terms.""" + raise NotImplementedError + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> dict[str, float]: + """Resets the manager and returns logging information for the current time-step. + + Args: + env_ids: The environment ids for which to log data. + Defaults None, which logs data for all environments. + + Returns: + Dictionary containing the logging information. + """ + return {} + + def find_terms(self, name_keys: str | Sequence[str]) -> list[str]: + """Find terms in the manager based on the names. + + This function searches the manager for terms based on the names. The names can be + specified as regular expressions or a list of regular expressions. The search is + performed on the active terms in the manager. + + Please check the :meth:`~isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the term names. + + Returns: + A list of term names that match the input keys. + """ + # resolve search keys + if isinstance(self.active_terms, dict): + list_of_strings = [] + for names in self.active_terms.values(): + list_of_strings.extend(names) + else: + list_of_strings = self.active_terms + + # return the matching names + return string_utils.resolve_matching_names(name_keys, list_of_strings)[1] + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples. + + The first element of the tuple is the name of the term and the second element is the raw value(s) of the term. + + Returns: + The active terms. + """ + raise NotImplementedError + + """ + Implementation specific. + """ + + @abstractmethod + def _prepare_terms(self): + """Prepare terms information from the configuration object.""" + raise NotImplementedError + + """ + Internal callbacks. + """ + + def _resolve_terms_callback(self, event): + """Resolve configurations of terms once the simulation starts. + + Please check the :meth:`_process_term_cfg_at_play` method for more information. + """ + # check if scene entities have been resolved + if self._is_scene_entities_resolved: + return + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + + # iterate over all the terms + for term_name, term_cfg in cfg_items: + # check for non config + if term_cfg is None: + continue + # process attributes at runtime + # these properties are only resolvable once the simulation starts playing + self._process_term_cfg_at_play(term_name, term_cfg) + + # set the flag + self._is_scene_entities_resolved = True + + """ + Internal functions. + """ + + def _resolve_common_term_cfg(self, term_name: str, term_cfg: ManagerTermBaseCfg, min_argc: int = 1): + """Resolve common attributes of the term configuration. + + Usually, called by the :meth:`_prepare_terms` method to resolve common attributes of the term + configuration. These include: + + * Resolving the term function and checking if it is callable. + * Checking if the term function's arguments are matched by the parameters. + * Resolving special attributes of the term configuration like ``asset_cfg``, ``sensor_cfg``, etc. + * Initializing the term if it is a class. + + The last two steps are only possible once the simulation starts playing. + + By default, all term functions are expected to have at least one argument, which is the + environment object. Some other managers may expect functions to take more arguments, for + instance, the environment indices as the second argument. In such cases, the + ``min_argc`` argument can be used to specify the minimum number of arguments + required by the term function to be called correctly by the manager. + + Args: + term_name: The name of the term. + term_cfg: The term configuration. + min_argc: The minimum number of arguments required by the term function to be called correctly + by the manager. + + Raises: + TypeError: If the term configuration is not of type :class:`ManagerTermBaseCfg`. + ValueError: If the scene entity defined in the term configuration does not exist. + AttributeError: If the term function is not callable. + ValueError: If the term function's arguments are not matched by the parameters. + """ + # check if the term is a valid term config + if not isinstance(term_cfg, ManagerTermBaseCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type ManagerTermBaseCfg." + f" Received: '{type(term_cfg)}'." + ) + + # get the corresponding function or functional class + if isinstance(term_cfg.func, str): + term_cfg.func = string_to_callable(term_cfg.func) + # check if function is callable + if not callable(term_cfg.func): + raise AttributeError(f"The term '{term_name}' is not callable. Received: {term_cfg.func}") + + # check if the term is a class of valid type + if inspect.isclass(term_cfg.func): + if not issubclass(term_cfg.func, ManagerTermBase): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type ManagerTermBase." + f" Received: '{type(term_cfg.func)}'." + ) + func_static = term_cfg.func.__call__ + min_argc += 1 # forward by 1 to account for 'self' argument + else: + func_static = term_cfg.func + # check if function is callable + if not callable(func_static): + raise AttributeError(f"The term '{term_name}' is not callable. Received: {term_cfg.func}") + + # Materialize configclass defaults from the function signature into params. + # Without this, defaults live only in the callable signature and never get + # resolved/cached by the manager (e.g. SceneEntityCfg.resolve() is never called). + signature = inspect.signature(func_static) + for param in list(signature.parameters.values())[min_argc:]: + if param.default is inspect.Parameter.empty: + continue + if param.name not in term_cfg.params and hasattr(param.default, "__dataclass_fields__"): + term_cfg.params[param.name] = param.default.copy() + + # check statically if the term's arguments are matched by params + term_params = list(term_cfg.params.keys()) + args = signature.parameters + args_with_defaults = [arg for arg in args if args[arg].default is not inspect.Parameter.empty] + args_without_defaults = [arg for arg in args if args[arg].default is inspect.Parameter.empty] + args = args_without_defaults + args_with_defaults + # ignore first two arguments for env and env_ids + # Think: Check for cases when kwargs are set inside the function? + if len(args) > min_argc: + if set(args[min_argc:]) != set(term_params + args_with_defaults): + raise ValueError( + f"The term '{term_name}' expects mandatory parameters: {args_without_defaults[min_argc:]}" + f" and optional parameters: {args_with_defaults}, but received: {term_params}." + ) + + # register non-capturable terms with the call switch for mode=2 fallback + if not is_warp_capturable(term_cfg.func): + switch = getattr(self._env, "_manager_call_switch", None) + if switch is not None: + switch.register_manager_capturability(type(self).__name__, False) + + # process attributes at runtime + # these properties are only resolvable once the simulation starts playing + if self._env.sim.is_playing(): + self._process_term_cfg_at_play(term_name, term_cfg) + + def _process_term_cfg_at_play(self, term_name: str, term_cfg: ManagerTermBaseCfg): + """Process the term configuration at runtime. + + This function is called when the simulation starts playing. It is used to process the term + configuration at runtime. This includes: + + * Resolving the scene entity configuration for the term. + * Initializing the term if it is a class. + + Since the above steps rely on PhysX to parse over the simulation scene, they are deferred + until the simulation starts playing. + + Args: + term_name: The name of the term. + term_cfg: The term configuration. + """ + for key, value in term_cfg.params.items(): + if isinstance(value, SceneEntityCfg): + # load the entity + try: + value.resolve(self._env.scene) + except ValueError as e: + raise ValueError(f"Error while parsing '{term_name}:{key}'. {e}") + # log the entity for checking later + msg = f"[{term_cfg.__class__.__name__}:{term_name}] Found entity '{value.name}'." + if value.joint_ids is not None: + msg += f"\n\tJoint names: {value.joint_names} [{value.joint_ids}]" + if value.body_ids is not None: + msg += f"\n\tBody names: {value.body_names} [{value.body_ids}]" + # print the information + logger.info(msg) + # store the entity + term_cfg.params[key] = value + + # initialize the term if it is a class + if inspect.isclass(term_cfg.func): + logger.info(f"Initializing term '{term_name}' with class '{term_cfg.func.__name__}'.") + term_cfg.func = term_cfg.func(cfg=term_cfg, env=self._env) diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/manager_term_cfg.py b/source/isaaclab_experimental/isaaclab_experimental/managers/manager_term_cfg.py new file mode 100644 index 000000000000..3fab3bfc5ffc --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/manager_term_cfg.py @@ -0,0 +1,94 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration terms for different managers (experimental, Warp-first). + +This module is a passthrough to :mod:`isaaclab.managers.manager_term_cfg` except for +the following term configs which are overridden for Warp-first execution: + +- :class:`ObservationTermCfg` +- :class:`RewardTermCfg` +- :class:`TerminationTermCfg` +""" + +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import MISSING + +from isaaclab.managers.manager_term_cfg import * # noqa: F401,F403 +from isaaclab.managers.manager_term_cfg import ManagerTermBaseCfg as _ManagerTermBaseCfg +from isaaclab.utils import configclass + + +@configclass +class RewardTermCfg(_ManagerTermBaseCfg): + """Configuration for a reward term. + + The function is expected to write the (unweighted) reward values into a + pre-allocated Warp buffer provided by the manager. + + Expected signature: + + - ``func(env, out, **params) -> None`` + + where ``out`` is a Warp array of shape ``(num_envs,)`` with float32 dtype. + """ + + func: Callable[..., None] = MISSING + """The function to be called to fill the pre-allocated reward buffer.""" + + weight: float = MISSING + """The weight of the reward term.""" + + +@configclass +class TerminationTermCfg(_ManagerTermBaseCfg): + """Configuration for a termination term (experimental, Warp-first). + + The function is expected to write termination flags into a pre-allocated Warp buffer. + + Expected signature: + + - ``func(env, out, **params) -> None`` + + where ``out`` is a Warp array of shape ``(num_envs,)`` with boolean dtype. + """ + + func: Callable[..., None] = MISSING + """The function to be called to fill the pre-allocated termination buffer.""" + + time_out: bool = False + """Whether the termination term contributes towards episodic timeouts. Defaults to False.""" + + +@configclass +class ObservationTermCfg(_ManagerTermBaseCfg): + """Configuration for an observation term (experimental, Warp-first). + + The function is expected to write observation values into a pre-allocated Warp buffer provided + by the observation manager. + + Expected signature: + + - ``func(env, out, **params) -> None`` + + where ``out`` is a Warp array of shape ``(num_envs, obs_term_dim)`` with float32 dtype. + + Notes: + - The stable fields (noise/modifiers/history) are kept for config compatibility, but the + experimental Warp-first observation manager may not support all of them initially. + """ + + func: Callable[..., None] = MISSING + """The function to be called to fill the pre-allocated observation buffer.""" + + # Keep stable configuration fields for compatibility with existing task configs. + modifiers: list[ModifierCfg] | None = None # noqa: F405 + noise: NoiseCfg | NoiseModelCfg | None = None # noqa: F405 + clip: tuple[float, float] | None = None + scale: tuple[float, ...] | float | None = None + history_length: int = 0 + flatten_history_dim: bool = True diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/observation_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/observation_manager.py new file mode 100644 index 000000000000..74d0a9a955be --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/observation_manager.py @@ -0,0 +1,941 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Observation manager for computing observation signals for a given world. + +Observations are organized into groups based on their intended usage. This allows having different observation +groups for different types of learning such as asymmetric actor-critic and student-teacher training. Each +group contains observation terms which contain information about the observation function to call, the noise +corruption model to use, and the sensor to retrieve data from. + +Each observation group should inherit from the :class:`ObservationGroupCfg` class. Within each group, each +observation term should instantiate the :class:`ObservationTermCfg` class. Based on the configuration, the +observations in a group can be concatenated into a single tensor or returned as a dictionary with keys +corresponding to the term's name. + +If the observations in a group are concatenated, the shape of the concatenated tensor is computed based on the +shapes of the individual observation terms. This information is stored in the :attr:`group_obs_dim` dictionary +with keys as the group names and values as the shape of the observation tensor. When the terms in a group are not +concatenated, the attribute stores a list of shapes for each term in the group. + +.. note:: + When the observation terms in a group do not have the same shape, the observation terms cannot be + concatenated. In this case, please set the :attr:`ObservationGroupCfg.concatenate_terms` attribute in the + group configuration to False. + +Observations can also have history. This means a running history is updated per sim step. History can be controlled +per :class:`ObservationTermCfg` (See the :attr:`ObservationTermCfg.history_length` and +:attr:`ObservationTermCfg.flatten_history_dim`). History can also be controlled via :class:`ObservationGroupCfg` +where group configuration overwrites per term configuration if set. History follows an oldest to newest ordering. + +The observation manager can be used to compute observations for all the groups or for a specific group. The +observations are computed by calling the registered functions for each term in the group. The functions are +called in the order of the terms in the group. The functions are expected to return a tensor with shape +(num_envs, ...). + +If a noise model or custom modifier is registered for a term, the function is called to corrupt +the observation. The corruption function is expected to return a tensor with the same shape as the observation. +The observations are clipped and scaled as per the configuration settings. + +Experimental (Warp-first) note: + Observation term functions follow a Warp-first signature and **write** into pre-allocated Warp buffers: + ``func(env, out, **params) -> None``. Post-processing may be implemented via Warp kernels where possible. +""" + +from __future__ import annotations + +import inspect +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import numpy as np +import torch +import warp as wp +from prettytable import PrettyTable + +from isaaclab.utils import class_to_dict + +from isaaclab_experimental.utils import modifiers, noise +from isaaclab_experimental.utils.buffers import CircularBuffer +from isaaclab_experimental.utils.torch_utils import clone_obs_buffer + +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import ObservationGroupCfg, ObservationTermCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +@wp.kernel +def _apply_clip(out: wp.array(dtype=wp.float32, ndim=2), clip_lo: wp.float32, clip_hi: wp.float32): + env_id = wp.tid() + for j in range(out.shape[1]): + out[env_id, j] = wp.clamp(out[env_id, j], clip_lo, clip_hi) + + +@wp.kernel +def _apply_scale(out: wp.array(dtype=wp.float32, ndim=2), scale: wp.array(dtype=wp.float32)): + env_id = wp.tid() + for j in range(out.shape[1]): + out[env_id, j] = out[env_id, j] * scale[j] + + +def _resolve_scale_vector(value: Any, dim: int, device: str) -> torch.Tensor: + """Resolve scale into a (dim,) float32 tensor (defaults to ones).""" + if value is None: + return torch.ones((dim,), device=device, dtype=torch.float32) + if isinstance(value, torch.Tensor): + t = value.to(device=device, dtype=torch.float32) + if t.numel() == 1: + return t.reshape(1).repeat(dim) + if t.numel() == dim: + return t.reshape(dim) + raise ValueError(f"Expected scale tensor with numel=1 or numel={dim}, got {t.numel()}.") + if isinstance(value, (float, int)): + return torch.full((dim,), float(value), device=device, dtype=torch.float32) + if isinstance(value, (tuple, list)): + if len(value) != dim: + raise ValueError(f"Expected scale length {dim}, got {len(value)}.") + return torch.tensor(value, device=device, dtype=torch.float32) + raise TypeError(f"Unsupported scale type: {type(value)}") + + +class ObservationManager(ManagerBase): + """Manager for computing observation signals for a given world. + + Observations are organized into groups based on their intended usage. This allows having different observation + groups for different types of learning such as asymmetric actor-critic and student-teacher training. Each + group contains observation terms which contain information about the observation function to call, the noise + corruption model to use, and the sensor to retrieve data from. + + Each observation group should inherit from the :class:`ObservationGroupCfg` class. Within each group, each + observation term should instantiate the :class:`ObservationTermCfg` class. Based on the configuration, the + observations in a group can be concatenated into a single tensor or returned as a dictionary with keys + corresponding to the term's name. + + If the observations in a group are concatenated, the shape of the concatenated tensor is computed based on the + shapes of the individual observation terms. This information is stored in the :attr:`group_obs_dim` dictionary + with keys as the group names and values as the shape of the observation tensor. When the terms in a group are not + concatenated, the attribute stores a list of shapes for each term in the group. + + .. note:: + When the observation terms in a group do not have the same shape, the observation terms cannot be + concatenated. In this case, please set the :attr:`ObservationGroupCfg.concatenate_terms` attribute in the + group configuration to False. + + Observations can also have history. This means a running history is updated per sim step. History can be controlled + per :class:`ObservationTermCfg` (See the :attr:`ObservationTermCfg.history_length` and + :attr:`ObservationTermCfg.flatten_history_dim`). History can also be controlled via :class:`ObservationGroupCfg` + where group configuration overwrites per term configuration if set. History follows an oldest to newest ordering. + + The observation manager can be used to compute observations for all the groups or for a specific group. The + observations are computed by calling the registered functions for each term in the group. The functions are + called in the order of the terms in the group. The functions are expected to return a tensor with shape + (num_envs, ...). + + If a noise model or custom modifier is registered for a term, the function is called to corrupt + the observation. The corruption function is expected to return a tensor with the same shape as the observation. + The observations are clipped and scaled as per the configuration settings. + + Experimental (Warp-first) note: + Observation term functions follow a Warp-first signature and **write** into pre-allocated Warp buffers: + ``func(env, out, **params) -> None``. + """ + + def __init__(self, cfg: object, env: ManagerBasedEnv): + """Initialize observation manager. + + Args: + cfg: The configuration object or dictionary (``dict[str, ObservationGroupCfg]``). + env: The environment instance. + + Raises: + ValueError: If the configuration is None. + RuntimeError: If the shapes of the observation terms in a group are not compatible for concatenation + and the :attr:`~ObservationGroupCfg.concatenate_terms` attribute is set to True. + """ + if cfg is None: + raise ValueError("Observation manager configuration is None. Please provide a valid configuration.") + + # call the base class constructor (this will parse the terms config) + super().__init__(cfg, env) + + # compute combined vector for obs group (matches stable semantics) + self._group_obs_dim: dict[str, tuple[int, ...] | list[tuple[int, ...]]] = dict() + for group_name, group_term_dims in self._group_obs_term_dim.items(): + # if terms are concatenated, compute the combined shape into a single tuple + # otherwise, keep the list of shapes as is + if self._group_obs_concatenate[group_name]: + try: + term_dims = torch.stack([torch.tensor(dims, device="cpu") for dims in group_term_dims], dim=0) + if len(term_dims.shape) > 1: + if self._group_obs_concatenate_dim[group_name] >= 0: + dim = self._group_obs_concatenate_dim[group_name] - 1 # account for the batch offset + else: + dim = self._group_obs_concatenate_dim[group_name] + dim_sum = torch.sum(term_dims[:, dim], dim=0) + term_dims[0, dim] = dim_sum + term_dims = term_dims[0] + else: + term_dims = torch.sum(term_dims, dim=0) + self._group_obs_dim[group_name] = tuple(term_dims.tolist()) + except RuntimeError: + raise RuntimeError( + f"Unable to concatenate observation terms in group '{group_name}'." + f" The shapes of the terms are: {group_term_dims}." + " Please ensure that the shapes are compatible for concatenation." + " Otherwise, set 'concatenate_terms' to False in the group configuration." + ) + else: + self._group_obs_dim[group_name] = group_term_dims + + # Stores the latest observations. + self._obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]] | None = None + # Note: Persistent Warp output buffers (`_group_out_wp` / `_group_out_torch`) and per-term post-processing + # buffers are allocated during `_prepare_terms()` since they are per-term/per-group setup. + + def __str__(self) -> str: + """Returns: A string representation for the observation manager.""" + msg = f" contains {len(self._group_obs_term_names)} groups.\n" + + # add info for each group + for group_name, group_dim in self._group_obs_dim.items(): + # create table for term information + table = PrettyTable() + table.title = f"Active Observation Terms in Group: '{group_name}'" + if self._group_obs_concatenate[group_name]: + table.title += f" (shape: {group_dim})" + table.field_names = ["Index", "Name", "Shape"] + # set alignment of table columns + table.align["Name"] = "l" + # add info for each term + obs_terms = zip( + self._group_obs_term_names[group_name], + self._group_obs_term_dim[group_name], + ) + for index, (name, dims) in enumerate(obs_terms): + # resolve inputs to simplify prints + tab_dims = tuple(dims) + # add row + table.add_row([index, name, tab_dims]) + # convert table to string + msg += table.get_string() + msg += "\n" + + return msg + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples. + + The first element of the tuple is the name of the term and the second element is the raw value(s) of the term. + + Args: + env_idx: The specific environment to pull the active terms from. + + Returns: + The active terms. + """ + terms = [] + + if self._obs_buffer is None: + self.compute() + obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]] = self._obs_buffer + + for group_name, _ in self._group_obs_dim.items(): + if not self.group_obs_concatenate[group_name]: + for name, term in obs_buffer[group_name].items(): + terms.append((group_name + "-" + name, term[env_idx].cpu().tolist())) + continue + + idx = 0 + # add info for each term + data = obs_buffer[group_name] + for name, shape in zip( + self._group_obs_term_names[group_name], + self._group_obs_term_dim[group_name], + ): + data_length = np.prod(shape) + term = data[env_idx, idx : idx + data_length] + terms.append((group_name + "-" + name, term.cpu().tolist())) + idx += data_length + + return terms + + """ + Properties. + """ + + @property + def active_terms(self) -> dict[str, list[str]]: + """Name of active observation terms in each group. + + The keys are the group names and the values are the list of observation term names in the group. + """ + return self._group_obs_term_names + + @property + def group_obs_dim(self) -> dict[str, tuple[int, ...] | list[tuple[int, ...]]]: + """Shape of computed observations in each group. + + The key is the group name and the value is the shape of the observation tensor. + If the terms in the group are concatenated, the value is a single tuple representing the + shape of the concatenated observation tensor. Otherwise, the value is a list of tuples, + where each tuple represents the shape of the observation tensor for a term in the group. + """ + return self._group_obs_dim + + @property + def group_obs_term_dim(self) -> dict[str, list[tuple[int, ...]]]: + """Shape of individual observation terms in each group. + + The key is the group name and the value is a list of tuples representing the shape of the observation terms + in the group. The order of the tuples corresponds to the order of the terms in the group. + This matches the order of the terms in the :attr:`active_terms`. + """ + return self._group_obs_term_dim + + @property + def group_obs_concatenate(self) -> dict[str, bool]: + """Whether the observation terms are concatenated in each group or not. + + The key is the group name and the value is a boolean specifying whether the observation terms in the group + are concatenated into a single tensor. If True, the observations are concatenated along the last dimension. + + The values are set based on the :attr:`~ObservationGroupCfg.concatenate_terms` attribute in the group + configuration. + """ + return self._group_obs_concatenate + + @property + def get_IO_descriptors(self, group_names_to_export: list[str] = ["policy"]): + """Get the IO descriptors for the observation manager. + + Returns: + A dictionary with keys as the group names and values as the IO descriptors. + """ + group_data: dict[str, list[dict[str, Any]]] = {} + + # Collect raw descriptor dicts (plus overloads). + for group_name in self._group_obs_term_names: + group_data[group_name] = [] + # check if group name is valid + if group_name not in self._group_obs_term_names: + raise ValueError( + f"Unable to find the group '{group_name}' in the observation manager." + f" Available groups are: {list(self._group_obs_term_names.keys())}" + ) + for term_name, term_cfg in zip( + self._group_obs_term_names[group_name], self._group_obs_term_cfgs[group_name] + ): + func = term_cfg.func + if not getattr(func, "_has_descriptor", False): + continue + try: + # Both stable-style and Warp-first decorated terms support + # the ``inspect=True`` keyword. Warp-first terms (decorated + # with ``generic_io_descriptor_warp``) will NOT execute the + # underlying function; their hooks derive metadata from + # env/config objects instead. + func(self._env, **term_cfg.params, inspect=True) + desc = func._descriptor.__dict__.copy() + overloads = {} + for k in ["modifiers", "clip", "scale", "history_length", "flatten_history_dim"]: + if hasattr(term_cfg, k): + overloads[k] = getattr(term_cfg, k) + desc.update(overloads) + group_data[group_name].append(desc) + except Exception as e: + print(f"Error getting IO descriptor for term '{term_name}' in group '{group_name}': {e}") + + formatted_data: dict[str, list[dict[str, Any]]] = {} + for group_name, data in group_data.items(): + if group_name not in group_names_to_export: + continue + formatted_data[group_name] = [] + for item in data: + name = item.pop("name") + extras = item.pop("extras", {}) + formatted_item = {"name": name, "overloads": {}, "extras": extras} + for k, v in item.items(): + if isinstance(v, tuple): + v = list(v) + if isinstance(v, torch.Tensor): + v = v.detach().cpu().numpy().tolist() + if k in ["scale", "clip", "history_length", "flatten_history_dim"]: + formatted_item["overloads"][k] = v + elif k in ["modifiers", "description", "units"]: + formatted_item["extras"][k] = v + else: + formatted_item[k] = v + formatted_data[group_name].append(formatted_item) + return formatted_data + + """ + Operations. + """ + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, float]: + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + # Keep all id->mask resolution strictly outside capture. + if wp.get_device().is_capturing: + raise RuntimeError( + "ObservationManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + # call all terms that are classes + for group_name, group_cfg in self._group_obs_class_term_cfgs.items(): + for term_cfg in group_cfg: + term_cfg.func.reset(env_mask=env_mask) + # reset terms with history + for term_name in self._group_obs_term_names[group_name]: + if term_name in self._group_obs_term_history_buffer[group_name]: + self._group_obs_term_history_buffer[group_name][term_name].reset(env_mask=env_mask) + # call all modifiers/noise models that are classes + for mod in self._group_obs_class_instances: + mod.reset(env_mask=env_mask) + + # nothing to log here + return {} + + def compute( + self, update_history: bool = False, return_cloned_output: bool = True + ) -> dict[str, torch.Tensor | dict[str, torch.Tensor]]: + """Compute the observations per group for all groups. + + The method computes the observations for all the groups handled by the observation manager. + Please check the :meth:`compute_group` on the processing of observations per group. + + Args: + update_history: The boolean indicator without return obs should be appended to observation history. + Default to False, in which case calling compute_group does not modify history. This input is no-ops + if the group's history_length == 0. + return_cloned_output: Whether to return a cloned snapshot of the observation buffer. + Set to False to return the persistent internal buffer by reference. + + Returns: + A dictionary with keys as the group names and values as the computed observations. + The observations are either concatenated into a single tensor or returned as a dictionary + with keys corresponding to the term's name. + """ + # Launch kernels for every group (writes into persistent buffers in-place). + for group_name in self._group_obs_term_names: + self.compute_group(group_name, update_history=update_history) + # Build the obs buffer once (persistent refs to in-place-updated tensors/dicts). + if self._obs_buffer is None: + self._obs_buffer = { + group_name: ( + self._group_out_torch[group_name] + if self._group_use_warp_concat[group_name] + else self._group_obs_dict[group_name] + ) + for group_name in self._group_obs_term_names + } + if return_cloned_output: + return clone_obs_buffer(self._obs_buffer) + return self._obs_buffer + + def compute_group(self, group_name: str, update_history: bool = False) -> torch.Tensor | dict[str, torch.Tensor]: + """Computes the observations for a given group. + + The observations for a given group are computed by calling the registered functions for each + term in the group. The functions are called in the order of the terms in the group. The functions + are expected to return a tensor with shape (num_envs, ...). + + The following steps are performed for each observation term: + + 1. Compute observation term by calling the function + 2. Apply custom modifiers in the order specified in :attr:`ObservationTermCfg.modifiers` + 3. Apply corruption/noise model based on :attr:`ObservationTermCfg.noise` + 4. Apply clipping based on :attr:`ObservationTermCfg.clip` + 5. Apply scaling based on :attr:`ObservationTermCfg.scale` + + We apply noise to the computed term first to maintain the integrity of how noise affects the data + as it truly exists in the real world. If the noise is applied after clipping or scaling, the noise + could be artificially constrained or amplified, which might misrepresent how noise naturally occurs + in the data. + + Args: + group_name: The name of the group for which to compute the observations. Defaults to None, + in which case observations for all the groups are computed and returned. + update_history: The boolean indicator without return obs should be appended to observation group's history. + Default to False, in which case calling compute_group does not modify history. This input is no-ops + if the group's history_length == 0. + + Returns: + Depending on the group's configuration, the tensors for individual observation terms are + concatenated along the last dimension into a single tensor. Otherwise, they are returned as + a dictionary with keys corresponding to the term's name. + + Raises: + ValueError: If input ``group_name`` is not a valid group handled by the manager. + """ + # check if group name is valid + if group_name not in self._group_obs_term_names: + raise ValueError( + f"Unable to find the group '{group_name}' in the observation manager." + f" Available groups are: {list(self._group_obs_term_names.keys())}" + ) + # iterate over all the terms in each group + group_term_names = self._group_obs_term_names[group_name] + + # Persistent per-term obs dict (pre-allocated in _prepare_terms). + group_obs = self._group_obs_dict[group_name] + + # evaluate terms: compute, add noise, clip, scale, custom modifiers + for term_name, term_cfg in zip(group_term_names, self._group_obs_term_cfgs[group_name]): + # compute term's value into pre-allocated Warp output + term_cfg.func(self._env, term_cfg.out_wp, **term_cfg.params) + + # apply custom modifiers (in-place on out_wp) + if term_cfg.modifiers is not None: + for modifier in term_cfg.modifiers: + modifier.func(term_cfg.out_wp, **modifier.params) + + # apply noise (Warp in-place on out_wp) + if isinstance(term_cfg.noise, noise.NoiseCfg): + term_cfg.noise.func(term_cfg.out_wp, term_cfg.noise) + elif isinstance(term_cfg.noise, noise.NoiseModelCfg) and term_cfg.noise.func is not None: + term_cfg.noise.func(term_cfg.out_wp) + + # clip then scale (stable semantics); implementation may use Warp kernels + if term_cfg.clip is not None: + wp.launch( + kernel=_apply_clip, + dim=self.num_envs, + inputs=[term_cfg.out_wp, float(term_cfg.clip[0]), float(term_cfg.clip[1])], + device=self.device, + ) + if term_cfg.scale is not None: + wp.launch( + kernel=_apply_scale, + dim=self.num_envs, + inputs=[term_cfg.out_wp, term_cfg.scale_wp], + device=self.device, + ) + + # TODO(jichuanh): This is not migrated yet. Need revisit. + # Update the history buffer if observation term has history enabled + if term_cfg.history_length > 0: + # circular buffer is not capture safe + if wp.get_device().is_capturing: + raise RuntimeError( + "Observation terms with history (circular buffer) are not CUDA-graph-capture-safe yet. " + "Disable history for observation terms used inside a captured graph, or restructure " + "the graph to exclude history-buffered terms." + ) + circular_buffer = self._group_obs_term_history_buffer[group_name][term_name] + if update_history: + circular_buffer.append(wp.to_torch(term_cfg.out_wp)) + elif circular_buffer._buffer is None: + # because circular buffer only exits after the simulation steps, + # this guards history buffer from corruption by external calls before simulation start + circular_buffer = CircularBuffer( + max_len=circular_buffer.max_length, + batch_size=circular_buffer.batch_size, + device=circular_buffer.device, + ) + self._group_obs_term_history_buffer[group_name][term_name] = circular_buffer + circular_buffer.append(wp.to_torch(term_cfg.out_wp)) + + if term_cfg.flatten_history_dim: + group_obs[term_name] = circular_buffer.buffer.reshape(self._env.num_envs, -1) + else: + group_obs[term_name] = circular_buffer.buffer + + # return persistent output (updated in-place by kernels above) + if self._group_use_warp_concat[group_name]: + return self._group_out_torch[group_name] + return group_obs + + def serialize(self) -> dict: + """Serialize the observation term configurations for all active groups. + + Returns: + A dictionary where each group name maps to its serialized observation term configurations. + """ + output = { + group_name: { + term_name: ( + term_cfg.func.serialize() + if isinstance(term_cfg.func, ManagerTermBase) + else {"cfg": class_to_dict(term_cfg)} + ) + for term_name, term_cfg in zip( + self._group_obs_term_names[group_name], + self._group_obs_term_cfgs[group_name], + ) + } + for group_name in self.active_terms.keys() + } + + return output + + """ + Helper functions. + """ + + def _prepare_terms(self): # noqa: C901 + """Prepares a list of observation terms functions.""" + # create buffers to store information for each observation group + # TODO: Make this more convenient by using data structures. + self._group_obs_term_names: dict[str, list[str]] = dict() + self._group_obs_term_dim: dict[str, list[tuple[int, ...]]] = dict() + self._group_obs_term_cfgs: dict[str, list[ObservationTermCfg]] = dict() + self._group_obs_class_term_cfgs: dict[str, list[ObservationTermCfg]] = dict() + self._group_obs_concatenate: dict[str, bool] = dict() + self._group_obs_concatenate_dim: dict[str, int] = dict() + + self._group_obs_term_history_buffer: dict[str, dict] = dict() + # create a list to store classes instances, e.g., for modifiers and noise models + # we store it as a separate list to only call reset on them and prevent unnecessary calls + self._group_obs_class_instances: list[modifiers.ModifierBase | noise.NoiseModel] = list() + + # Persistent Warp output buffers for concatenated 2D groups (optional fast-path). + # For other cases (non-concat groups, history outputs, non-2D concat dims), we allocate per-term outputs. + self._group_out_wp: dict[str, wp.array] = {} + self._group_out_torch: dict[str, torch.Tensor] = {} + self._group_use_warp_concat: dict[str, bool] = {} + self._group_obs_dict: dict[str, dict[str, torch.Tensor]] = {} + + # make sure the simulation is playing since we compute obs dims which needs asset quantities + if not self._env.sim.is_playing(): + raise RuntimeError( + "Simulation is not playing. Observation manager requires the simulation to be playing" + " to compute observation dimensions. Please start the simulation before using the" + " observation manager." + ) + + # check if config is dict already + if isinstance(self.cfg, dict): + group_cfg_items = self.cfg.items() + else: + group_cfg_items = self.cfg.__dict__.items() + # iterate over all the groups + for group_name, group_cfg in group_cfg_items: + # check for non config + if group_cfg is None: + continue + # check if the term is a curriculum term + if not isinstance(group_cfg, ObservationGroupCfg): + raise TypeError( + f"Observation group '{group_name}' is not of type 'ObservationGroupCfg'." + f" Received: '{type(group_cfg)}'." + ) + # initialize list for the group settings + # group name to list of group term names + self._group_obs_term_names[group_name] = list() + + self._group_obs_term_dim[group_name] = list() + self._group_obs_term_cfgs[group_name] = list() + self._group_obs_class_term_cfgs[group_name] = list() + group_entry_history_buffer: dict[str, CircularBuffer] = dict() + # read common config for the group + self._group_obs_concatenate[group_name] = group_cfg.concatenate_terms + # to account for the batch dimension + self._group_obs_concatenate_dim[group_name] = ( + group_cfg.concatenate_dim + 1 if group_cfg.concatenate_dim >= 0 else group_cfg.concatenate_dim + ) + # check if config is dict already + if isinstance(group_cfg, dict): + term_cfg_items = group_cfg.items() + else: + term_cfg_items = group_cfg.__dict__.items() + # iterate over all the terms in each group + # (we also track raw term dims for Warp output allocation) + group_term_cfgs: list[ObservationTermCfg] = [] + group_term_raw_dims: list[int] = [] + for term_name, term_cfg in term_cfg_items: + # skip non-obs settings + if term_name in [ + "enable_corruption", + "concatenate_terms", + "history_length", + "flatten_history_dim", + "concatenate_dim", + ]: + continue + # check for non config + if term_cfg is None: + continue + if not isinstance(term_cfg, ObservationTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type ObservationTermCfg." + f" Received: '{type(term_cfg)}'." + ) + # resolve common terms in the config + # Warp-first signature is (env, out, **params) + self._resolve_common_term_cfg(f"{group_name}/{term_name}", term_cfg, min_argc=2) + + # check noise settings + if not group_cfg.enable_corruption: + term_cfg.noise = None + # check group history params and override terms + if group_cfg.history_length is not None: + term_cfg.history_length = group_cfg.history_length + term_cfg.flatten_history_dim = group_cfg.flatten_history_dim + # add term config to list + self._group_obs_term_names[group_name].append(term_name) + self._group_obs_term_cfgs[group_name].append(term_cfg) + + # infer dimensions (Warp-first: terms write to out; we infer dim from resolved scene info) + term_dim = self._infer_term_dim_scalar(term_cfg) + # Cache the "raw" term output dimension (before history reshaping) for Warp buffer allocation. + # This matches the tensor shape produced directly by the term into `out`: (num_envs, term_dim). + term_cfg._term_dim = int(term_dim) + group_term_cfgs.append(term_cfg) + group_term_raw_dims.append(int(term_dim)) + obs_dims = (self._env.num_envs, term_dim) + + # if scale is set, check if single float or tuple + if term_cfg.scale is not None: + if not isinstance(term_cfg.scale, (float, int, tuple)): + raise TypeError( + f"Scale for observation term '{term_name}' in group '{group_name}'" + f" is not of type float, int or tuple. Received: '{type(term_cfg.scale)}'." + ) + if isinstance(term_cfg.scale, tuple) and len(term_cfg.scale) != obs_dims[1]: + raise ValueError( + f"Scale for observation term '{term_name}' in group '{group_name}'" + f" does not match the dimensions of the observation. Expected: {obs_dims[1]}" + f" but received: {len(term_cfg.scale)}." + ) + + scale_vals = ( + term_cfg.scale if isinstance(term_cfg.scale, tuple) else [float(term_cfg.scale)] * obs_dims[1] + ) + term_cfg.scale_wp = wp.array(scale_vals, dtype=wp.float32, device=self._env.device) + + # prepare modifiers for each observation + if term_cfg.modifiers is not None: + # initialize list of modifiers for term + for mod_cfg in term_cfg.modifiers: + # check if class modifier and initialize with observation size when adding + if isinstance(mod_cfg, modifiers.ModifierCfg): + # to list of modifiers + if inspect.isclass(mod_cfg.func): + if not issubclass(mod_cfg.func, modifiers.ModifierBase): + raise TypeError( + f"Modifier function '{mod_cfg.func}' for observation term '{term_name}'" + f" is not a subclass of 'ModifierBase'. Received: '{type(mod_cfg.func)}'." + ) + mod_cfg.func = mod_cfg.func(cfg=mod_cfg, data_dim=obs_dims, device=self._env.device) + + # add to list of class modifiers + self._group_obs_class_instances.append(mod_cfg.func) + else: + raise TypeError( + f"Modifier configuration '{mod_cfg}' of observation term '{term_name}' is not of" + f" required type ModifierCfg, Received: '{type(mod_cfg)}'" + ) + + # check if function is callable + if not callable(mod_cfg.func): + raise AttributeError( + f"Modifier '{mod_cfg}' of observation term '{term_name}' is not callable." + f" Received: {mod_cfg.func}" + ) + + # check if term's arguments are matched by params + term_params = list(mod_cfg.params.keys()) + args = inspect.signature(mod_cfg.func).parameters + args_with_defaults = [arg for arg in args if args[arg].default is not inspect.Parameter.empty] + args_without_defaults = [arg for arg in args if args[arg].default is inspect.Parameter.empty] + args = args_without_defaults + args_with_defaults + # ignore first argument for data + if len(args) > 1: + if set(args[1:]) != set(term_params + args_with_defaults): + raise ValueError( + f"Modifier '{mod_cfg}' of observation term '{term_name}' expects" + f" mandatory parameters: {args_without_defaults[1:]}" + f" and optional parameters: {args_with_defaults}, but received: {term_params}." + ) + + # prepare noise model classes + if term_cfg.noise is not None and isinstance(term_cfg.noise, noise.NoiseModelCfg): + # plumb the shared per-env RNG state so Warp noise kernels can consume it + term_cfg.noise.rng_state_wp = self._env.rng_state_wp + noise_model_cls = term_cfg.noise.class_type + if not issubclass(noise_model_cls, noise.NoiseModel): + raise TypeError( + f"Class type for observation term '{term_name}' NoiseModelCfg" + f" is not a subclass of 'NoiseModel'. Received: '{type(noise_model_cls)}'." + ) + # initialize func to be the noise model class instance + term_cfg.noise.func = noise_model_cls( + term_cfg.noise, num_envs=self._env.num_envs, device=self._env.device + ) + self._group_obs_class_instances.append(term_cfg.noise.func) + + # create history buffers and calculate history term dimensions + if term_cfg.history_length > 0: + group_entry_history_buffer[term_name] = CircularBuffer( + max_len=term_cfg.history_length, batch_size=self._env.num_envs, device=self._env.device + ) + old_dims = list(obs_dims) + old_dims.insert(1, term_cfg.history_length) + obs_dims = tuple(old_dims) + if term_cfg.flatten_history_dim: + obs_dims = (obs_dims[0], np.prod(obs_dims[1:])) + raise NotImplementedError("History reshaping is not implemented yet for warp.") + + self._group_obs_term_dim[group_name].append(obs_dims[1:]) + + # add term in a separate list if term is a class + if isinstance(term_cfg.func, ManagerTermBase): + self._group_obs_class_term_cfgs[group_name].append(term_cfg) + # call reset (in-case internal state should be reset at init) + term_cfg.func.reset() + + # Allocate persistent outputs for this group. + # - If group is concatenated into a flat 2D vector (N, D) with no history terms, allocate a single group + # buffer and map term outputs to contiguous slices (fast-path). + # - Otherwise allocate per-term outputs. + can_use_group_buffer = ( + self._group_obs_concatenate[group_name] + and self._group_obs_concatenate_dim[group_name] in (1, -1) + and all(cfg.history_length == 0 for cfg in group_term_cfgs) + ) + + if can_use_group_buffer: + total = int(sum(group_term_raw_dims)) + out_wp = wp.zeros((self.num_envs, total), dtype=wp.float32, device=self.device) + self._group_out_wp[group_name] = out_wp + self._group_out_torch[group_name] = wp.to_torch(out_wp) + + base_ptr = out_wp.ptr + row_stride = out_wp.strides[0] + col_stride = out_wp.strides[1] + start = 0 + for term_cfg, d in zip(group_term_cfgs, group_term_raw_dims): + out_view = wp.array( + ptr=base_ptr + start * col_stride, + dtype=wp.float32, + shape=(self.num_envs, int(d)), + strides=(row_stride, col_stride), + device=self.device, + ) + term_cfg.out_wp = out_view + term_cfg.out_torch = wp.to_torch(term_cfg.out_wp) + start += int(d) + else: + for term_cfg, d in zip(group_term_cfgs, group_term_raw_dims): + term_cfg.out_wp = wp.zeros((self.num_envs, int(d)), dtype=wp.float32, device=self.device) + term_cfg.out_torch = wp.to_torch(term_cfg.out_wp) + + # Guard: concat groups must use the Warp fast-path (standard concat dim, no history). + if self._group_obs_concatenate[group_name] and not can_use_group_buffer: + raise ValueError( + f"Observation group '{group_name}' is concatenated but cannot use the Warp" + " fast-path (requires concatenate_dim 0 or -1, and all terms history_length == 0)." + ) + + # Precompute fast-path flag and persistent per-term obs dict. + self._group_use_warp_concat[group_name] = can_use_group_buffer + self._group_obs_dict[group_name] = { + term_name: cfg.out_torch + for term_name, cfg in zip(self._group_obs_term_names[group_name], group_term_cfgs) + } + + # add history buffers for each group + self._group_obs_term_history_buffer[group_name] = group_entry_history_buffer + + def _infer_term_dim_scalar(self, term_cfg: ObservationTermCfg) -> int: + """Infer observation output dimension (D,) using decorator metadata, scene info, or manager state. + + Resolution order: + 1. ``out_dim`` on the function's ``@generic_io_descriptor_warp`` decorator. + 2. ``axes`` on the decorator (e.g. ``axes=["X","Y","Z"]`` → dim 3). + 3. Explicit ``term_dim`` / ``out_dim`` / ``obs_dim`` in ``term_cfg.params`` (legacy). + 4. ``asset_cfg.joint_ids`` count (joint-based observations). + """ + # --- 1-2. Decorator metadata (preferred) --- + func = term_cfg.func + # Check for descriptor on the (possibly wrapped) function first, + # then fall back to unwrapping for class-based terms. + descriptor = getattr(func, "_descriptor", None) + if descriptor is None and hasattr(func, "__wrapped__"): + descriptor = getattr(func.__wrapped__, "_descriptor", None) + if descriptor is not None: + # 1. Explicit out_dim on decorator + out_dim = getattr(descriptor, "out_dim", None) + if out_dim is not None: + return self._resolve_out_dim(out_dim, term_cfg) + # 2. Derive from axes metadata + axes = descriptor.extras.get("axes") if descriptor.extras else None + if axes is not None: + return len(axes) + + # --- 3. Legacy explicit override in params --- + for k in ("term_dim", "out_dim", "obs_dim"): + if k in term_cfg.params: + return int(term_cfg.params[k]) + + # --- 3. Joint-based fallback via asset_cfg --- + asset_cfg = term_cfg.params.get("asset_cfg") + if asset_cfg is None: + raise ValueError( + f"Cannot infer output dimension for observation term '{getattr(func, '__name__', func)}'. " + "Add `out_dim=` to its @generic_io_descriptor_warp decorator." + ) + asset = self._env.scene[asset_cfg.name] + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is not None: + return int(joint_ids_wp.shape[0]) + joint_ids = getattr(asset_cfg, "joint_ids", slice(None)) + if isinstance(joint_ids, slice): + return int(getattr(asset, "num_joints", asset.data.joint_pos.torch.shape[1])) + return int(len(joint_ids)) + + def _resolve_out_dim(self, out_dim: int | str, term_cfg: ObservationTermCfg) -> int: + """Resolve an ``out_dim`` value from a decorator into a concrete integer. + + Supports: + - ``int``: returned as-is (fixed dimension). + - ``"joint"``: number of selected joints from ``asset_cfg``. + - ``"body:N"``: ``N`` components per selected body from ``asset_cfg``. + - ``"command"``: query ``command_manager.get_command(name).shape[-1]``. + - ``"action"``: query ``action_manager.action.shape[-1]``. + """ + if isinstance(out_dim, int): + return out_dim + + if out_dim == "joint": + asset_cfg = term_cfg.params.get("asset_cfg") + asset = self._env.scene[asset_cfg.name] + joint_ids_wp = getattr(asset_cfg, "joint_ids_wp", None) + if joint_ids_wp is not None: + return int(joint_ids_wp.shape[0]) + joint_ids = getattr(asset_cfg, "joint_ids", slice(None)) + if isinstance(joint_ids, slice): + return int(getattr(asset, "num_joints", asset.data.joint_pos.warp.shape[1])) + return int(len(joint_ids)) + + if isinstance(out_dim, str) and out_dim.startswith("body:"): + per_body = int(out_dim.split(":")[1]) + asset_cfg = term_cfg.params.get("asset_cfg") + body_ids = getattr(asset_cfg, "body_ids", None) + if body_ids is None or body_ids == slice(None): + asset = self._env.scene[asset_cfg.name] + return per_body * len(asset.body_names) + return per_body * len(body_ids) + + if out_dim == "command": + command_name = term_cfg.params.get("command_name") + cmd = self._env.command_manager.get_command(command_name) + return int(cmd.shape[-1]) + + if out_dim == "action": + action = self._env.action_manager.action + return int(action.shape[-1]) + + raise ValueError(f"Unknown out_dim sentinel: {out_dim!r}") diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/reward_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/reward_manager.py new file mode 100644 index 000000000000..67dcbc055c2f --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/reward_manager.py @@ -0,0 +1,419 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Reward manager for computing reward signals for a given world. + +This file is a copy of `isaaclab.managers.reward_manager` placed under +`isaaclab_experimental` so it can evolve independently. +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from prettytable import PrettyTable + +from isaaclab_experimental.utils.warp.kernels import compute_reset_scale, count_masked + +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import RewardTermCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +@wp.kernel +def _sum_and_zero_masked( + # input + mask: wp.array(dtype=wp.bool), + scale: wp.array(dtype=wp.float32), + # input/output + episode_sums: wp.array(dtype=wp.float32, ndim=2), + # output + out_avg: wp.array(dtype=wp.float32), +): + term_idx, env_id = wp.tid() + if mask[env_id]: + wp.atomic_add(out_avg, term_idx, episode_sums[term_idx, env_id] * scale[0]) + episode_sums[term_idx, env_id] = 0.0 + + +@wp.kernel +def _reward_finalize( + # input + term_outs: wp.array(dtype=wp.float32, ndim=2), + term_weights: wp.array(dtype=wp.float32), + dt: float, + # input/output + reward_buf: wp.array(dtype=wp.float32), + episode_sums: wp.array(dtype=wp.float32, ndim=2), + step_reward: wp.array(dtype=wp.float32, ndim=2), +): + env_id = wp.tid() + + total = wp.float32(0.0) + for term_idx in range(term_outs.shape[0]): + weight = term_weights[term_idx] + if weight != 0.0: + raw = term_outs[term_idx, env_id] + weighted = raw * weight + # store weighted reward rate (matches old: value/dt) + step_reward[env_id, term_idx] = weighted + val = weighted * dt + total += val + episode_sums[term_idx, env_id] += val + + reward_buf[env_id] = total + + +@wp.kernel +def _reward_pre_compute_reset( + # output + reward_buf: wp.array(dtype=wp.float32), + step_reward: wp.array(dtype=wp.float32, ndim=2), + term_outs: wp.array(dtype=wp.float32, ndim=2), +): + """Reset per-step reward buffers. + + Launched with dim = (num_envs,) to reset `reward_buf` and clear the corresponding row in `step_reward`. + This works even when `step_reward.shape[1] == 0` (no terms). + """ + env_id = wp.tid() + reward_buf[env_id] = 0.0 + for term_idx in range(term_outs.shape[0]): + step_reward[env_id, term_idx] = 0.0 + term_outs[term_idx, env_id] = 0.0 + + +class RewardManager(ManagerBase): + """Manager for computing reward signals for a given world. + + The reward manager computes the total reward as a sum of the weighted reward terms. The reward + terms are parsed from a nested config class containing the reward manger's settings and reward + terms configuration. + + The reward terms are parsed from a config class containing the manager's settings and each term's + parameters. Each reward term should instantiate the :class:`RewardTermCfg` class. + + .. note:: + + The reward manager multiplies the reward term's ``weight`` with the time-step interval ``dt`` + of the environment. This is done to ensure that the computed reward terms are balanced with + respect to the chosen time-step interval in the environment. + + """ + + _env: ManagerBasedRLEnv + """The environment instance.""" + + def __init__(self, cfg: object, env: ManagerBasedRLEnv): + """Initialize the reward manager. + + Args: + cfg: The configuration object or dictionary (``dict[str, RewardTermCfg]``). + env: The environment instance. + """ + + # create buffers to parse and store terms + self._term_names: list[str] = list() + self._term_cfgs: list[RewardTermCfg] = list() + self._class_term_cfgs: list[RewardTermCfg] = list() + + # call the base class constructor (this will parse the terms config) + super().__init__(cfg, env) + self._term_name_to_term_idx = {name: i for i, name in enumerate(self._term_names)} + + num_terms = len(self._term_names) + self._num_terms = num_terms + + # persistent term output buffer (raw, unweighted) laid out as (term, env) for contiguous per-term ops + self._term_outs_wp = wp.zeros((num_terms, self.num_envs), dtype=wp.float32, device=self.device) + # per-term output buffers are views into rows of `_term_outs_wp` (Warp) + self._term_out_views_wp: list[wp.array] = [] + if num_terms > 0: + row_stride = self._term_outs_wp.strides[0] + col_stride = self._term_outs_wp.strides[1] + base_ptr = self._term_outs_wp.ptr + for term_idx, term_cfg in enumerate(self._term_cfgs): + out_view = wp.array( + ptr=base_ptr + term_idx * row_stride, + dtype=wp.float32, + shape=(self.num_envs,), + strides=(col_stride,), + device=self.device, + ) + self._term_out_views_wp.append(out_view) + term_cfg.out = out_view + + # prepare extra info to store individual reward term information (warp buffers) + self._episode_sums_wp = wp.zeros((num_terms, self.num_envs), dtype=wp.float32, device=self.device) + self._episode_sum_views_wp: dict[str, wp.array] = {} + if num_terms > 0: + row_stride = self._episode_sums_wp.strides[0] + col_stride = self._episode_sums_wp.strides[1] + base_ptr = self._episode_sums_wp.ptr + for term_idx, term_name in enumerate(self._term_names): + sum_view = wp.array( + ptr=base_ptr + term_idx * row_stride, + dtype=wp.float32, + shape=(self.num_envs,), + strides=(col_stride,), + device=self.device, + ) + self._episode_sum_views_wp[term_name] = sum_view + # per-env reward buffer (Warp) + self._reward_wp = wp.zeros((self.num_envs,), dtype=wp.float32, device=self.device) + + # buffer which stores the current step reward rate for each term for each environment (warp buffer) + self._step_reward_wp = wp.zeros((self.num_envs, num_terms), dtype=wp.float32, device=self.device) + + # per-term weights stored on-device for single-kernel accumulation + self._term_weights_wp = wp.array( + [float(term_cfg.weight) for term_cfg in self._term_cfgs], dtype=wp.float32, device=self.device + ) + + # persistent reset-time logging buffers (warp buffers) + self._episode_sum_avg_wp = wp.zeros((num_terms,), dtype=wp.float32, device=self.device) + self._reset_count_wp = wp.zeros((1,), dtype=wp.int32, device=self.device) + self._reset_scale_wp = wp.zeros((1,), dtype=wp.float32, device=self.device) + + # persistent torch tensor views (helpful for CUDA graph capture) + self._reward_tensor_view = wp.to_torch(self._reward_wp) + self._step_reward_tensor_view = wp.to_torch(self._step_reward_wp) + self._term_weights_tensor_view = wp.to_torch(self._term_weights_wp) + self._episode_sum_avg_tensor_view = wp.to_torch(self._episode_sum_avg_wp) + self._reset_extras = { + "Episode_Reward/" + term_name: self._episode_sum_avg_tensor_view[term_idx] + for term_idx, term_name in enumerate(self._term_names) + } + + def __str__(self) -> str: + """Returns: A string representation for reward manager.""" + msg = f" contains {len(self._term_names)} active terms.\n" + + # create table for term information + table = PrettyTable() + table.title = "Active Reward Terms" + table.field_names = ["Index", "Name", "Weight"] + # set alignment of table columns + table.align["Name"] = "l" + table.align["Weight"] = "r" + # add info on each term + for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)): + table.add_row([index, name, term_cfg.weight]) + # convert table to string + msg += table.get_string() + msg += "\n" + + return msg + + """ + Properties. + """ + + @property + def active_terms(self) -> list[str]: + """Name of active reward terms.""" + return self._term_names + + """ + Operations. + """ + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, torch.Tensor]: + """Computes/reset episodic reward sums for masked envs (capturable core). + + Args: + env_ids: The specific environment indices to reset. + If None, all environments are considered. + env_mask: Boolean Warp mask of shape (num_envs,) selecting reset environments. + If provided, takes precedence over ``env_ids``. + + Returns: + A dictionary containing the information to log under the "Reward/{term_name}" key. + """ + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + if wp.get_device().is_capturing: + raise RuntimeError( + "RewardManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + + self._episode_sum_avg_wp.zero_() + self._reset_count_wp.zero_() + self._reset_scale_wp.zero_() + + wp.launch(kernel=count_masked, dim=self.num_envs, inputs=[env_mask, self._reset_count_wp], device=self.device) + wp.launch( + kernel=compute_reset_scale, + dim=1, + inputs=[self._reset_count_wp, float(self._env.max_episode_length_s), self._reset_scale_wp], + device=self.device, + ) + + if self._num_terms > 0: + wp.launch( + kernel=_sum_and_zero_masked, + dim=(self._num_terms, self.num_envs), + inputs=[env_mask, self._reset_scale_wp, self._episode_sums_wp, self._episode_sum_avg_wp], + device=self.device, + ) + + # reset all the reward terms + for term_cfg in self._class_term_cfgs: + term_cfg.func.reset(env_mask=env_mask) + + return self._reset_extras + + def compute(self, dt: float) -> torch.Tensor: + """Computes the reward signal as a weighted sum of individual terms. + + This function calls each reward term managed by the class and adds them to compute the net + reward signal. It also updates the episodic sums corresponding to individual reward terms. + + Args: + dt: The time-step interval of the environment. + + Returns: + The net reward signal of shape (num_envs,). + """ + # TODO: Investigate performance diff between two .fill_ and kernel launch + # reset computation (Warp buffers) in a single kernel launch + wp.launch( + kernel=_reward_pre_compute_reset, + dim=self.num_envs, + inputs=[self._reward_wp, self._step_reward_wp, self._term_outs_wp], + device=self.device, + ) + # iterate over all the reward terms (Python loop; per-term math is warp) + for term_cfg in self._term_cfgs: + # skip if weight is zero (kind of a micro-optimization) + if term_cfg.weight == 0.0: + continue + # compute term into the persistent warp buffer (raw, unweighted) + # NOTE: `out` is pre-zeroed every step by `_reward_pre_compute_reset`. + term_cfg.func(self._env, term_cfg.out, **term_cfg.params) + + # update total reward, episodic sums and step rewards in a single kernel launch + wp.launch( + kernel=_reward_finalize, + dim=self.num_envs, + inputs=[ + self._term_outs_wp, + self._term_weights_wp, + float(dt), + self._reward_wp, + self._episode_sums_wp, + self._step_reward_wp, + ], + device=self.device, + ) + + return self._reward_tensor_view + + """ + Operations - Term settings. + """ + + def set_term_cfg(self, term_name: str, cfg: RewardTermCfg): + """Sets the configuration of the specified term into the manager. + + Args: + term_name: The name of the reward term. + cfg: The configuration for the reward term. + + Raises: + ValueError: If the term name is not found. + """ + if term_name not in self._term_names: + raise ValueError(f"Reward term '{term_name}' not found.") + # TODO(jichuanh): it's not guaranteed that the pre-allocated output view is still valid. + # Review this in curriculum manager migration. + # set the configuration (preserve the pre-allocated output view) + term_idx = self._term_names.index(term_name) + cfg.out = self._term_out_views_wp[term_idx] + self._term_cfgs[term_idx] = cfg + # keep on-device weights in sync (call this to update weights used in compute) + self._term_weights_tensor_view[term_idx] = float(cfg.weight) + + def get_term_cfg(self, term_name: str) -> RewardTermCfg: + """Gets the configuration for the specified term. + + Args: + term_name: The name of the reward term. + + Returns: + The configuration of the reward term. + + Raises: + ValueError: If the term name is not found. + """ + if term_name not in self._term_names: + raise ValueError(f"Reward term '{term_name}' not found.") + # return the configuration + return self._term_cfgs[self._term_names.index(term_name)] + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples. + + The first element of the tuple is the name of the term and the second element is the raw value(s) of the term. + + Args: + env_idx: The specific environment to pull the active terms from. + + Returns: + The active terms. + """ + terms = [] + step_reward_torch = self._step_reward_tensor_view + for idx, name in enumerate(self._term_names): + terms.append((name, [step_reward_torch[env_idx, idx].cpu().item()])) + return terms + + """ + Helper functions. + """ + + def _prepare_terms(self): + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + # iterate over all the terms + for term_name, term_cfg in cfg_items: + # check for non config + if term_cfg is None: + continue + # check for valid config type + if not isinstance(term_cfg, RewardTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type RewardTermCfg." + f" Received: '{type(term_cfg)}'." + ) + # check for valid weight type + if not isinstance(term_cfg.weight, (float, int)): + raise TypeError( + f"Weight for the term '{term_name}' is not of type float or int." + f" Received: '{type(term_cfg.weight)}'." + ) + # resolve common parameters + self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) + # add function to list + self._term_names.append(term_name) + self._term_cfgs.append(term_cfg) + # check if the term is a class + if isinstance(term_cfg.func, ManagerTermBase): + self._class_term_cfgs.append(term_cfg) diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/scene_entity_cfg.py b/source/isaaclab_experimental/isaaclab_experimental/managers/scene_entity_cfg.py new file mode 100644 index 000000000000..1930f6ce1cb7 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/scene_entity_cfg.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental fork of :class:`isaaclab.managers.SceneEntityCfg`. + +This adds Warp-only cached selections (e.g. a joint mask) while keeping compatibility +with the stable manager stack (which type-checks against the stable SceneEntityCfg). +""" + +from __future__ import annotations + +import warp as wp + +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.managers.scene_entity_cfg import SceneEntityCfg as _SceneEntityCfg +from isaaclab.scene import InteractiveScene + + +class SceneEntityCfg(_SceneEntityCfg): + """Scene entity configuration with an optional Warp joint mask. + + Notes: + - `joint_mask` is intended for Warp kernels only. + """ + + joint_mask: wp.array | None = None + + """Integer indices of selected joints — used for subset-sized gathers where a boolean mask + cannot provide the mapping from output index k to joint index.""" + joint_ids_wp: wp.array | None = None + + """Integer indices of selected bodies — used for subset-sized body gathers.""" + body_ids_wp: wp.array | None = None + + def resolve(self, scene: InteractiveScene): + # run the stable resolution first (fills joint_ids/body_ids from names/regex) + super().resolve(scene) + + entity = scene[self.name] + + # -- Warp joint mask / ids for articulations + if isinstance(entity, BaseArticulation): + if self.joint_ids == slice(None): + joint_ids_list = list(range(entity.num_joints)) + mask_list = [True] * entity.num_joints + else: + joint_ids_list = list(self.joint_ids) + mask_list = [False] * entity.num_joints + for idx in joint_ids_list: + mask_list[idx] = True + self.joint_mask = wp.array(mask_list, dtype=wp.bool, device=scene.device) + self.joint_ids_wp = wp.array(joint_ids_list, dtype=wp.int32, device=scene.device) + + # -- Warp body ids + if self.body_ids is not None and self.body_ids != slice(None): + self.body_ids_wp = wp.array(list(self.body_ids), dtype=wp.int32, device=scene.device) + elif hasattr(entity, "num_bodies"): + self.body_ids_wp = wp.array(list(range(entity.num_bodies)), dtype=wp.int32, device=scene.device) diff --git a/source/isaaclab_experimental/isaaclab_experimental/managers/termination_manager.py b/source/isaaclab_experimental/isaaclab_experimental/managers/termination_manager.py new file mode 100644 index 000000000000..8a768651b292 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/managers/termination_manager.py @@ -0,0 +1,355 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Termination manager for computing done signals for a given world (experimental, Warp-first). + +This file mirrors `isaaclab.managers.termination_manager` but switches to a Warp-first, +CUDA-graph-friendly implementation: + +- Term functions write into pre-allocated Warp buffers (no per-step torch returns). +- All per-env termination buffers are persistent Warp arrays with torch views at the boundary. +- No data-dependent indexing (e.g. `nonzero`) inside `compute()`; subset updates use masks/kernels. +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from prettytable import PrettyTable + +from .manager_base import ManagerBase, ManagerTermBase +from .manager_term_cfg import TerminationTermCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +@wp.kernel +def _termination_pre_compute_reset( + # output + term_dones: wp.array(dtype=wp.bool, ndim=2), + truncated: wp.array(dtype=wp.bool), + terminated: wp.array(dtype=wp.bool), + dones: wp.array(dtype=wp.bool), +): + """Reset per-step termination buffers. + + Launched with dim = (num_envs,) to reset per-env flags and clear the corresponding row in `term_dones`. + This works even when `term_dones.shape[1] == 0` (no terms). + """ + env_id = wp.tid() + truncated[env_id] = False + terminated[env_id] = False + dones[env_id] = False + for term_idx in range(term_dones.shape[1]): + term_dones[env_id, term_idx] = False + + +@wp.kernel +def _termination_finalize( + # input + term_dones: wp.array(dtype=wp.bool, ndim=2), + term_is_time_out: wp.array(dtype=wp.bool), + # output + truncated: wp.array(dtype=wp.bool), + terminated: wp.array(dtype=wp.bool), + dones: wp.array(dtype=wp.bool), + last_episode_dones: wp.array(dtype=wp.bool, ndim=2), +): + """Finalize termination flags and update last-episode term flags (single kernel). + + This kernel: + - reduces `term_dones` into `truncated`, `terminated`, and `dones` + - for envs where `dones=True`, copies the current `term_dones` row into `last_episode_dones` + (matching the stable manager's behavior). + """ + env_id = wp.tid() + + trunc = bool(False) + term = bool(False) + for term_idx in range(term_dones.shape[1]): + v = term_dones[env_id, term_idx] + if v: + if term_is_time_out[term_idx]: + trunc = True + else: + term = True + + done = trunc or term + truncated[env_id] = trunc + terminated[env_id] = term + dones[env_id] = done + + if done: + for term_idx in range(term_dones.shape[1]): + last_episode_dones[env_id, term_idx] = term_dones[env_id, term_idx] + + +# TODO(jichuanh): Look into wp.tile for better performance +@wp.kernel +def _termination_reset_mean_all_2d( + last_episode_dones: wp.array(dtype=wp.bool, ndim=2), + term_done_avg: wp.array(dtype=wp.float32), +): + """Compute mean(done) per term with 2D parallel accumulation.""" + env_id, term_idx = wp.tid() + num_envs = last_episode_dones.shape[0] + if num_envs > 0 and last_episode_dones[env_id, term_idx]: + wp.atomic_add(term_done_avg, term_idx, 1.0 / float(num_envs)) + + +class TerminationManager(ManagerBase): + """Manager for computing done signals for a given world (Warp-first). + + The termination manager computes the termination signal (also called dones) as a combination + of termination terms. Each termination term is a function which takes the environment and a + pre-allocated Warp boolean output buffer and fills it with per-env termination flags. + """ + + _env: ManagerBasedRLEnv + """The environment instance.""" + + def __init__(self, cfg: object, env: ManagerBasedRLEnv): + # create buffers to parse and store terms + self._term_names: list[str] = list() + self._term_cfgs: list[TerminationTermCfg] = list() + self._class_term_cfgs: list[TerminationTermCfg] = list() + + # call the base class constructor (this will parse the terms config) + super().__init__(cfg, env) + + self._term_name_to_term_idx = {name: i for i, name in enumerate(self._term_names)} + + # persistent buffers (Warp) + num_terms = len(self._term_names) + self._term_dones_wp = wp.zeros((self.num_envs, num_terms), dtype=wp.bool, device=self.device) + self._term_done_avg_wp = wp.zeros((num_terms,), dtype=wp.float32, device=self.device) + self._last_episode_dones_wp = wp.zeros((self.num_envs, num_terms), dtype=wp.bool, device=self.device) + self._truncated_wp = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + self._terminated_wp = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + self._dones_wp = wp.zeros((self.num_envs,), dtype=wp.bool, device=self.device) + + # per-term flags indicating if a term is a timeout (Warp) + self._term_is_time_out_wp = wp.array( + [bool(term_cfg.time_out) for term_cfg in self._term_cfgs], dtype=wp.bool, device=self.device + ) + + # per-term output buffers are views into the columns of `_term_dones_wp` (Warp). + # This avoids per-term temporary outputs and a per-term "store" kernel. + # TODO: Investigate performance diff whether it should using row as per env or per term + self._term_out_views_wp: list[wp.array] = [] + if num_terms > 0: + row_stride = self._term_dones_wp.strides[0] + col_stride = self._term_dones_wp.strides[1] + base_ptr = self._term_dones_wp.ptr + for term_idx, term_cfg in enumerate(self._term_cfgs): + out_view = wp.array( + ptr=base_ptr + term_idx * col_stride, + dtype=wp.bool, + shape=(self.num_envs,), + strides=(row_stride,), + device=self.device, + ) + self._term_out_views_wp.append(out_view) + term_cfg.out = out_view + + # torch tensor views (persistent) + self._term_dones_tensor_view = wp.to_torch(self._term_dones_wp) + self._last_episode_dones_tensor_view = wp.to_torch(self._last_episode_dones_wp) + self._truncated_tensor_view = wp.to_torch(self._truncated_wp) + self._terminated_tensor_view = wp.to_torch(self._terminated_wp) + self._dones_tensor_view = wp.to_torch(self._dones_wp) + self._term_done_avg_tensor_view = wp.to_torch(self._term_done_avg_wp) + self._reset_extras = { + "Episode_Termination/" + term_name: self._term_done_avg_tensor_view[term_idx] + for term_idx, term_name in enumerate(self._term_names) + } + + def __str__(self) -> str: + """Returns: A string representation for termination manager.""" + msg = f" contains {len(self._term_names)} active terms.\n" + + # create table for term information + table = PrettyTable() + table.title = "Active Termination Terms" + table.field_names = ["Index", "Name", "Time Out"] + # set alignment of table columns + table.align["Name"] = "l" + # add info on each term + for index, (name, term_cfg) in enumerate(zip(self._term_names, self._term_cfgs)): + table.add_row([index, name, term_cfg.time_out]) + # convert table to string + msg += table.get_string() + msg += "\n" + + return msg + + """ + Properties. + """ + + @property + def active_terms(self) -> list[str]: + """Name of active termination terms.""" + return self._term_names + + @property + def dones(self) -> torch.Tensor: + """The net termination signal. Shape is (num_envs,).""" + return self._dones_tensor_view + + @property + def dones_wp(self) -> wp.array: + """The net termination signal. Shape is (num_envs,).""" + return self._dones_wp + + @property + def time_outs(self) -> torch.Tensor: + """The timeout signal (reaching max episode length). Shape is (num_envs,).""" + return self._truncated_tensor_view + + @property + def time_outs_wp(self) -> wp.array: + """The timeout signal (reaching max episode length). Shape is (num_envs,).""" + return self._truncated_wp + + @property + def terminated(self) -> torch.Tensor: + """The terminated signal (reaching a terminal state). Shape is (num_envs,).""" + return self._terminated_tensor_view + + @property + def terminated_wp(self) -> wp.array: + """The terminated signal (reaching a terminal state). Shape is (num_envs,).""" + return self._terminated_wp + + """ + Operations. + """ + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | None = None, + *, + env_mask: wp.array | None = None, + ) -> dict[str, torch.Tensor]: + """Reset termination stats and class terms; return pre-allocated extras. + + Args: + env_ids: The specific environment indices to reset. + If None, all environments are considered. + env_mask: Boolean Warp mask of shape (num_envs,) selecting reset environments. + If provided, takes precedence over ``env_ids``. + + Returns: + A dictionary containing the information to log under the "Termination/{term_name}" key. + """ + # Mask-first path: captured callers must provide env_mask. + if env_mask is None or not isinstance(env_mask, wp.array): + if wp.get_device().is_capturing: + raise RuntimeError( + "TerminationManager.reset requires env_mask(wp.array[bool]) during capture. " + "Do not pass env_ids on captured paths." + ) + env_mask = self._env.resolve_env_mask(env_ids=env_ids, env_mask=env_mask) + if len(self._term_names) > 0: + self._term_done_avg_wp.zero_() + wp.launch( + kernel=_termination_reset_mean_all_2d, + dim=(self.num_envs, len(self._term_names)), + inputs=[self._last_episode_dones_wp, self._term_done_avg_wp], + device=self.device, + ) + for term_cfg in self._class_term_cfgs: + term_cfg.func.reset(env_mask=env_mask) + return self._reset_extras + + @property + def episode_termination_extras(self) -> dict[str, torch.Tensor]: + """Pre-allocated reset logging extras for termination terms.""" + return self._reset_extras + + def compute(self) -> torch.Tensor: + """Computes the termination signal as union of individual terms. + + Returns: + The combined termination signal of shape (num_envs,). + """ + # reset computation (Warp buffers) in a single kernel launch + wp.launch( + kernel=_termination_pre_compute_reset, + dim=self.num_envs, + inputs=[self._term_dones_wp, self._truncated_wp, self._terminated_wp, self._dones_wp], + device=self.device, + ) + + # iterate over all the termination terms (fixed list; per-term math is Warp) + for term_cfg in self._term_cfgs: + term_cfg.func(self._env, term_cfg.out, **term_cfg.params) + + # finalize dones and update last-episode term flags (single kernel launch) + wp.launch( + kernel=_termination_finalize, + dim=self.num_envs, + inputs=[ + self._term_dones_wp, + self._term_is_time_out_wp, + self._truncated_wp, + self._terminated_wp, + self._dones_wp, + self._last_episode_dones_wp, + ], + device=self.device, + ) + + return self._dones_tensor_view + + def get_term(self, name: str) -> torch.Tensor: + """Returns the termination term value at current step with the specified name. + + Returns: + The corresponding termination term value. Shape is (num_envs,). + """ + return self._term_dones_tensor_view[:, self._term_name_to_term_idx[name]] + + def get_active_iterable_terms(self, env_idx: int) -> Sequence[tuple[str, Sequence[float]]]: + """Returns the active terms as iterable sequence of tuples for debug/inspection.""" + terms = [] + for i, key in enumerate(self._term_names): + terms.append((key, [self._term_dones_tensor_view[env_idx, i].float().cpu().item()])) + return terms + + """ + Helper functions. + """ + + def _prepare_terms(self): + # check if config is dict already + if isinstance(self.cfg, dict): + cfg_items = self.cfg.items() + else: + cfg_items = self.cfg.__dict__.items() + # iterate over all the terms + for term_name, term_cfg in cfg_items: + # check for non config + if term_cfg is None: + continue + # check for valid config type + if not isinstance(term_cfg, TerminationTermCfg): + raise TypeError( + f"Configuration for the term '{term_name}' is not of type TerminationTermCfg." + f" Received: '{type(term_cfg)}'." + ) + # resolve common parameters (env, out) + self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2) + # add function to list + self._term_names.append(term_name) + self._term_cfgs.append(term_cfg) + # check if the term is a class + if isinstance(term_cfg.func, ManagerTermBase): + self._class_term_cfgs.append(term_cfg) diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/__init__.py new file mode 100644 index 000000000000..b26627f06d48 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/__init__.py @@ -0,0 +1,12 @@ +# ########## New ########## +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module containing experimental buffer overrides.""" + +from isaaclab.utils.buffers import * # noqa: F401,F403 + +# Override with experimental implementation +from .circular_buffer import CircularBuffer # noqa: F401 diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/circular_buffer.py b/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/circular_buffer.py new file mode 100644 index 000000000000..b25f91becbca --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/buffers/circular_buffer.py @@ -0,0 +1,194 @@ +# ########## New ########## +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from collections.abc import Sequence + +import torch +import warp as wp + + +class CircularBuffer: + """Circular buffer for storing a history of batched tensor data. + + This class implements a circular buffer for storing a history of batched tensor data. The buffer is + initialized with a maximum length and a batch size. The data is stored in a circular fashion, and the + data can be retrieved in a LIFO (Last-In-First-Out) fashion. The buffer is designed to be used in + multi-environment settings, where each environment has its own data. + + The shape of the appended data is expected to be (batch_size, ...), where the first dimension is the + batch dimension. Correspondingly, the shape of the ring buffer is (max_len, batch_size, ...). + """ + + def __init__(self, max_len: int, batch_size: int, device: str): + """Initialize the circular buffer. + + Args: + max_len: The maximum length of the circular buffer. The minimum allowed value is 1. + batch_size: The batch dimension of the data. + device: The device used for processing. + + Raises: + ValueError: If the buffer size is less than one. + """ + if max_len < 1: + raise ValueError(f"The buffer size should be greater than zero. However, it is set to {max_len}!") + # set the parameters + self._batch_size = batch_size + self._device = device + self._ALL_INDICES = torch.arange(batch_size, device=device) + + # max length tensor for comparisons + self._max_len = torch.full((batch_size,), max_len, dtype=torch.int, device=device) + # number of data pushes passed since the last call to :meth:`reset` + self._num_pushes = torch.zeros(batch_size, dtype=torch.long, device=device) + # the pointer to the current head of the circular buffer (-1 means not initialized) + self._pointer: int = -1 + # the actual buffer for data storage + # note: this is initialized on the first call to :meth:`append` + self._buffer: torch.Tensor = None # type: ignore + + """ + Properties. + """ + + @property + def batch_size(self) -> int: + """The batch size of the ring buffer.""" + return self._batch_size + + @property + def device(self) -> str: + """The device used for processing.""" + return self._device + + @property + def max_length(self) -> int: + """The maximum length of the ring buffer.""" + return int(self._max_len[0].item()) + + @property + def current_length(self) -> torch.Tensor: + """The current length of the buffer. Shape is (batch_size,). + + Since the buffer is circular, the current length is the minimum of the number of pushes + and the maximum length. + """ + return torch.minimum(self._num_pushes, self._max_len) + + @property + def buffer(self) -> torch.Tensor: + """Complete circular buffer with most recent entry at the end and oldest entry at the beginning. + Returns: + Complete circular buffer with most recent entry at the end and oldest entry at the beginning + of dimension 1. The shape is [batch_size, max_length, data.shape[1:]]. + """ + buf = self._buffer.clone() + buf = torch.roll(buf, shifts=self.max_length - self._pointer - 1, dims=0) + return torch.transpose(buf, dim0=0, dim1=1) + + """ + Operations. + """ + + def reset( + self, + batch_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + *, + env_mask: torch.Tensor | wp.array | None = None, + ): + """Reset the circular buffer at the specified batch indices. + + Args: + batch_ids: Elements to reset in the batch dimension. Default is None, which resets all the batch indices. + env_mask: Boolean mask of shape (batch_size,) selecting elements to reset. + If provided, it takes precedence over ``batch_ids``. + """ + if env_mask is not None: + if isinstance(env_mask, wp.array): + env_mask = wp.to_torch(env_mask) + elif not isinstance(env_mask, torch.Tensor): + raise TypeError(f"Unsupported env_mask type: {type(env_mask)}") + if env_mask.dtype != torch.bool: + env_mask = env_mask.to(dtype=torch.bool) + if str(env_mask.device) != self._device: + env_mask = env_mask.to(self._device) + if env_mask.ndim != 1 or env_mask.shape[0] != self._batch_size: + raise ValueError(f"Expected env_mask shape ({self._batch_size},), received {tuple(env_mask.shape)}.") + batch_ids = env_mask + elif isinstance(batch_ids, wp.array): + batch_ids = wp.to_torch(batch_ids) + + # resolve all indices + if batch_ids is None: + batch_ids = slice(None) + # reset the number of pushes for the specified batch indices + self._num_pushes[batch_ids] = 0 + if self._buffer is not None: + # set buffer at batch_id reset indices to 0.0 so that the buffer() getter + # returns the cleared circular buffer after reset. + self._buffer[:, batch_ids, :] = 0.0 + + def append(self, data: torch.Tensor): + """Append the data to the circular buffer. + + Args: + data: The data to append to the circular buffer. The first dimension should be the batch dimension. + Shape is (batch_size, ...). + + Raises: + ValueError: If the input data has a different batch size than the buffer. + """ + # check the batch size + if data.shape[0] != self.batch_size: + raise ValueError(f"The input data has '{data.shape[0]}' batch size while expecting '{self.batch_size}'") + + # move the data to the device + data = data.to(self._device) + # at the first call, initialize the buffer size + if self._buffer is None: + self._pointer = -1 + self._buffer = torch.empty((self.max_length, *data.shape), dtype=data.dtype, device=self._device) + # move the head to the next slot + self._pointer = (self._pointer + 1) % self.max_length + # add the new data to the last layer + self._buffer[self._pointer] = data + # Check for batches with zero pushes and initialize all values in batch to first append + is_first_push = self._num_pushes == 0 + if torch.any(is_first_push): + self._buffer[:, is_first_push] = data[is_first_push] + # increment number of number of pushes for all batches + self._num_pushes += 1 + + def __getitem__(self, key: torch.Tensor) -> torch.Tensor: + """Retrieve the data from the circular buffer in last-in-first-out (LIFO) fashion. + + If the requested index is larger than the number of pushes since the last call to :meth:`reset`, + the oldest stored data is returned. + + Args: + key: The index to retrieve from the circular buffer. The index should be less than the number of pushes + since the last call to :meth:`reset`. Shape is (batch_size,). + + Returns: + The data from the circular buffer. Shape is (batch_size, ...). + + Raises: + ValueError: If the input key has a different batch size than the buffer. + RuntimeError: If the buffer is empty. + """ + # check the batch size + if len(key) != self.batch_size: + raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}") + # check if the buffer is empty + if torch.any(self._num_pushes == 0) or self._buffer is None: + raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.") + + # admissible lag + valid_keys = torch.minimum(key, self._num_pushes - 1) + # the index in the circular buffer (pointer points to the last+1 index) + index_in_buffer = torch.remainder(self._pointer - valid_keys, self.max_length) + # return output + return self._buffer[index_in_buffer, self._ALL_INDICES] diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/manager_call_switch.py b/source/isaaclab_experimental/isaaclab_experimental/utils/manager_call_switch.py new file mode 100644 index 000000000000..ad149900af2c --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/manager_call_switch.py @@ -0,0 +1,262 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Manager call switch for routing manager stage calls through stable/warp/captured paths.""" + +from __future__ import annotations + +import importlib +import json +import os +from enum import IntEnum +from typing import Any + +from isaaclab_experimental.utils.warp_graph_cache import WarpGraphCache + +from isaaclab.utils.timer import Timer + + +class ManagerCallMode(IntEnum): + """Execution mode for manager stage calls. + + * ``STABLE`` (0): Call stable Python manager implementations from :mod:`isaaclab.managers`. + * ``WARP_NOT_CAPTURED`` (1): Call Warp-compatible implementations without CUDA graph capture. + * ``WARP_CAPTURED`` (2): Call Warp implementations with CUDA graph capture/replay. + """ + + STABLE = 0 + WARP_NOT_CAPTURED = 1 + WARP_CAPTURED = 2 + + +class ManagerCallSwitch: + """Per-manager call switch for stable/warp/captured execution. + + Routes each manager stage call through the configured execution path: + stable Python, Warp (eager), or Warp (captured CUDA graph). Optionally + wraps each call in a :class:`Timer` context for profiling. + """ + + DEFAULT_CONFIG: dict[str, int] = {"default": 2} + DEFAULT_KEY = "default" + MANAGER_NAMES: tuple[str, ...] = ( + "ActionManager", + "ObservationManager", + "EventManager", + "RecorderManager", + "CommandManager", + "TerminationManager", + "RewardManager", + "CurriculumManager", + "Scene", + ) + # FIXME: Scene_write_data_to_sim calls articulation._apply_actuator_model which + # uses wp.to_torch + torch indexing -- not capture-safe on this branch. + # Cap Scene stages to WARP_NOT_CAPTURED until the articulation layer is capture-ready. + MAX_MODE_OVERRIDES: dict[str, int] = {"Scene": ManagerCallMode.WARP_NOT_CAPTURED} + + ENV_VAR = "MANAGER_CALL_CONFIG" + """Environment variable name for the JSON config string. + + Example usage:: + + MANAGER_CALL_CONFIG='{"RewardManager": 0, "default": 2}' python train.py ... + """ + + def __init__( + self, + cfg_source: dict | str | None = None, + *, + max_modes: dict[str, int] | None = None, + ): + self._graph_cache = WarpGraphCache() + # Merge caller-supplied max_modes with the class-level MAX_MODE_OVERRIDES. + self._max_modes = dict(self.MAX_MODE_OVERRIDES) + if max_modes is not None: + self._max_modes.update(max_modes) + # Resolve config: prefer explicit cfg_source, fall back to env var. + if cfg_source is None: + cfg_source = os.environ.get(self.ENV_VAR) + self._cfg = self._load_cfg(cfg_source) + print("[INFO] ManagerCallSwitch configuration:") + print(f" - {self.DEFAULT_KEY}: {self._cfg[self.DEFAULT_KEY]}") + for manager_name in self.MANAGER_NAMES: + mode = int(self.get_mode_for_manager(manager_name)) + cap = self._max_modes.get(manager_name) + cap_str = f" (cap={cap})" if cap is not None else "" + print(f" - {manager_name}: {mode}{cap_str}") + + # ------------------------------------------------------------------ + # Graph management + # ------------------------------------------------------------------ + + def invalidate_graphs(self) -> None: + """Invalidate cached capture graphs and their cached return values.""" + self._graph_cache.invalidate() + + # ------------------------------------------------------------------ + # Stage dispatch + # ------------------------------------------------------------------ + + def call_stage( + self, + *, + stage: str, + warp_call: dict[str, Any], + stable_call: dict[str, Any] | None = None, + timer: bool = False, + ) -> Any: + """Run the stage according to configured mode, optionally wrapped in a :class:`Timer`. + + A call spec dict supports the following keys: + + * ``fn`` (required): The callable to invoke. + * ``args`` (optional): Positional arguments tuple. + * ``kwargs`` (optional): Keyword arguments dict. + * ``output`` (optional): A ``Callable[[Any], Any]`` that transforms the raw + return value into the final output. For captured stages the raw value is + ``None``. When omitted, the raw return value is used as-is. + + Args: + stage: Stage identifier in the form ``"ManagerName_function_name"``. + warp_call: Call spec for the warp path (eager or captured). + stable_call: Call spec for the stable (torch) path. Defaults to ``None``. + timer: Whether to wrap execution in a :class:`Timer`. Defaults to ``True`` + (controlled by the global :attr:`Timer.enable` class-level toggle). + Pass a module-level flag like ``TIMER_ENABLED_STEP`` to make timing + conditional on that flag. + + Returns: + The (possibly transformed) return value of the stage. + """ + with Timer(name=stage, msg=f"{stage} took:", enable=timer, time_unit="us"): + return self._dispatch(stage, stable_call, warp_call) + + def _dispatch( + self, + stage: str, + stable_call: dict[str, Any] | None, + warp_call: dict[str, Any], + ) -> Any: + """Select call path based on mode, execute, and apply output.""" + mode = self.get_mode_for_manager(self._manager_name_from_stage(stage)) + if mode == ManagerCallMode.STABLE: + if stable_call is None: + raise ValueError(f"Stage '{stage}' is configured as STABLE (mode=0) but no stable_call was provided.") + call, result = stable_call, self._run_call(stable_call) + elif mode == ManagerCallMode.WARP_CAPTURED: + call, result = warp_call, self._wp_capture_or_launch(stage, warp_call) + else: + call, result = warp_call, self._run_call(warp_call) + + output_fn = call.get("output") + return output_fn(result) if output_fn is not None else result + + # ------------------------------------------------------------------ + # Manager resolution + # ------------------------------------------------------------------ + + def _manager_name_from_stage(self, stage: str) -> str: + if "_" not in stage: + raise ValueError(f"Invalid stage '{stage}'. Expected '{{manager_name}}_{{function_name}}'.") + return stage.split("_", 1)[0] + + def get_mode_for_manager(self, manager_name: str) -> ManagerCallMode: + """Return the resolved execution mode for the given manager. + + Looks up the manager in the config dict, falls back to the default, + then caps by :attr:`_max_modes` (static overrides + dynamic registrations). + """ + default_key = next(iter(self.DEFAULT_CONFIG)) + mode_value = self._cfg.get(manager_name, self._cfg[default_key]) + cap = self._max_modes.get(manager_name) + if cap is not None: + mode_value = min(mode_value, cap) + return ManagerCallMode(mode_value) + + def resolve_manager_class(self, manager_name: str, mode_override: ManagerCallMode | int | None = None) -> type: + """Import and return the manager class for the configured mode.""" + mode = self.get_mode_for_manager(manager_name) if mode_override is None else ManagerCallMode(mode_override) + module_name = "isaaclab.managers" if mode == ManagerCallMode.STABLE else "isaaclab_experimental.managers" + module = importlib.import_module(module_name) + if not hasattr(module, manager_name): + raise AttributeError(f"Manager '{manager_name}' not found in module '{module_name}'.") + return getattr(module, manager_name) + + def register_manager_capturability(self, manager_name: str, capturable: bool) -> None: + """Register that a manager has non-capturable terms, capping its mode. + + Called by :class:`ManagerBase` during term preparation when a term + is decorated with ``@warp_capturable(False)``. + """ + if not capturable: + self._max_modes[manager_name] = min( + self._max_modes.get(manager_name, ManagerCallMode.WARP_CAPTURED), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + + # ------------------------------------------------------------------ + # Internal helpers + # ------------------------------------------------------------------ + + def _run_call(self, call: dict[str, Any]) -> Any: + """Execute a single call spec eagerly.""" + return call["fn"](*call.get("args", ()), **call.get("kwargs", {})) + + def _wp_capture_or_launch(self, stage: str, call: dict[str, Any]) -> Any: + """Capture Warp CUDA graph on first call, then replay. + + Delegates to :class:`WarpGraphCache` which handles warm-up, capture, + caching the return value, and replay. + """ + return self._graph_cache.capture_or_replay( + stage, + call["fn"], + args=call.get("args", ()), + kwargs=call.get("kwargs", {}), + ) + + def _load_cfg(self, cfg_source: dict | str | None) -> dict[str, int]: + if cfg_source is None: + cfg = dict(self.DEFAULT_CONFIG) + elif isinstance(cfg_source, dict): + cfg = dict(cfg_source) + if self.DEFAULT_KEY not in cfg: + cfg[self.DEFAULT_KEY] = self.DEFAULT_CONFIG[self.DEFAULT_KEY] + elif isinstance(cfg_source, str): + if cfg_source.strip() == "": + cfg = dict(self.DEFAULT_CONFIG) + else: + parsed = json.loads(cfg_source) + if not isinstance(parsed, dict): + raise TypeError("manager_call_config must decode to a dict.") + cfg = dict(parsed) + if self.DEFAULT_KEY not in cfg: + cfg[self.DEFAULT_KEY] = self.DEFAULT_CONFIG[self.DEFAULT_KEY] + else: + raise TypeError(f"cfg_source must be a dict, string, or None, got: {type(cfg_source)}") + + # validation + for manager_name, mode_value in cfg.items(): + if not isinstance(mode_value, int): + raise TypeError( + f"manager_call_config value for '{manager_name}' must be int (0/1/2), got: {type(mode_value)}" + ) + try: + ManagerCallMode(mode_value) + except ValueError as exc: + raise ValueError( + f"Invalid manager_call_config value for '{manager_name}': {mode_value}. Expected 0/1/2." + ) from exc + + # Apply max mode caps: bake caps into the resolved config so + # get_mode_for_manager never needs per-call branching. + default_mode = cfg[self.DEFAULT_KEY] + for name, max_mode in self._max_modes.items(): + resolved = cfg.get(name, default_mode) + if resolved > max_mode: + cfg[name] = max_mode + + return cfg diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/__init__.py new file mode 100644 index 000000000000..3563b166642a --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native modifier implementations (experimental). + +Re-exports stable configs and base classes, then overrides the function-based +modifiers (``scale``, ``bias``, ``clip``) with Warp-native versions that +operate in-place on ``wp.array``. + +Calling convention (matches Warp MDP terms):: + + modifier.func(data_wp, **params) -> None # in-place on wp.array +""" + +from .modifier import bias, clip, scale # noqa: F401 +from .modifier_base import ModifierBase # noqa: F401 + +# Override with Warp-native implementations +from .modifier_cfg import ModifierCfg # noqa: F401 diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier.py b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier.py new file mode 100644 index 000000000000..887db5e4ee06 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier.py @@ -0,0 +1,80 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native function-based modifiers (experimental). + +Each modifier takes a ``wp.array`` as its first argument and operates **in-place** +via ``wp.launch``. The calling convention mirrors Warp MDP terms:: + + modifier.func(data_wp, **params) -> None +""" + +from __future__ import annotations + +import warp as wp + +# -- scale -------------------------------------------------------------------- + + +@wp.kernel +def _scale_kernel(data: wp.array(dtype=wp.float32, ndim=2), multiplier: wp.float32): + i, j = wp.tid() + data[i, j] = data[i, j] * multiplier + + +def scale(data: wp.array, multiplier: float) -> None: + """Scale all elements of *data* by *multiplier* in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.modifiers.scale`. + + Args: + data: The observation buffer to modify. Shape ``(num_envs, D)``. + multiplier: Scalar multiplier. + """ + wp.launch(_scale_kernel, dim=data.shape, inputs=[data, float(multiplier)], device=data.device) + + +# -- bias --------------------------------------------------------------------- + + +@wp.kernel +def _bias_kernel(data: wp.array(dtype=wp.float32, ndim=2), value: wp.float32): + i, j = wp.tid() + data[i, j] = data[i, j] + value + + +def bias(data: wp.array, value: float) -> None: + """Add a uniform *value* to all elements of *data* in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.modifiers.bias`. + + Args: + data: The observation buffer to modify. Shape ``(num_envs, D)``. + value: Scalar bias to add. + """ + wp.launch(_bias_kernel, dim=data.shape, inputs=[data, float(value)], device=data.device) + + +# -- clip --------------------------------------------------------------------- + + +@wp.kernel +def _clip_kernel(data: wp.array(dtype=wp.float32, ndim=2), lo: wp.float32, hi: wp.float32): + i, j = wp.tid() + data[i, j] = wp.clamp(data[i, j], lo, hi) + + +def clip(data: wp.array, bounds: tuple[float | None, float | None]) -> None: + """Clamp all elements of *data* to [lo, hi] in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.modifiers.clip`. + + Args: + data: The observation buffer to modify. Shape ``(num_envs, D)``. + bounds: ``(min, max)`` tuple. ``None`` means no bound on that side. + """ + lo = float(bounds[0]) if bounds[0] is not None else float(-1e38) + hi = float(bounds[1]) if bounds[1] is not None else float(1e38) + wp.launch(_clip_kernel, dim=data.shape, inputs=[data, lo, hi], device=data.device) diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_base.py b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_base.py new file mode 100644 index 000000000000..083e1576e891 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_base.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native modifier base class (experimental).""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +import warp as wp + +if TYPE_CHECKING: + from .modifier_cfg import ModifierCfg + + +class ModifierBase(ABC): + """Base class for Warp-native class-based modifiers. + + Experimental fork of :class:`isaaclab.utils.modifiers.ModifierBase` adapted for the + Warp-first calling convention. Subclasses operate **in-place** on ``wp.array`` + buffers and return ``None``. + + A class implementation of a modifier can be used to store state information between + calls. This is useful for modifiers that require stateful operations, such as + rolling averages, delays, or decaying filters. + """ + + def __init__(self, cfg: ModifierCfg, data_dim: tuple[int, ...], device: str) -> None: + """Initializes the modifier class. + + Args: + cfg: Configuration parameters. + data_dim: The dimensions of the data to be modified. First element is the + batch size (number of environments). + device: The device to run the modifier on. + """ + self._cfg = cfg + self._data_dim = data_dim + self._device = device + + @abstractmethod + def reset(self, env_mask: wp.array | None = None): + """Resets the modifier. + + Args: + env_mask: Boolean env mask of shape ``(num_envs,)`` selecting environments + to reset. Defaults to None, in which case all environments are + considered. + """ + raise NotImplementedError + + @abstractmethod + def __call__(self, data: wp.array) -> None: + """Apply the modification in-place. + + Args: + data: The ``wp.array`` buffer to modify. Shape should match the + *data_dim* passed during initialization. + """ + raise NotImplementedError diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_cfg.py b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_cfg.py new file mode 100644 index 000000000000..7cc22eaee6a9 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/modifiers/modifier_cfg.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native modifier configuration (experimental).""" + +from collections.abc import Callable +from dataclasses import MISSING +from typing import Any + +from isaaclab.utils import configclass + + +@configclass +class ModifierCfg: + """Configuration parameters for Warp-native modifiers. + + Experimental fork of :class:`isaaclab.utils.modifiers.ModifierCfg` adapted for the + Warp-first calling convention where modifier functions operate **in-place** on a + ``wp.array`` buffer and return ``None``. + """ + + func: Callable[..., None] = MISSING + """Function or callable class used by modifier. + + The function must take a ``wp.array`` as the first argument and operate on it + **in-place** (no return value). The remaining arguments are specified in the + :attr:`params` attribute. + + It also supports callable classes that implement ``__call__()``. In this case the + class should inherit from :class:`ModifierBase` and implement the required methods. + """ + + params: dict[str, Any] = dict() + """The parameters to be passed to the function or callable class as keyword arguments. + + Defaults to an empty dictionary. + """ diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/noise/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/__init__.py new file mode 100644 index 000000000000..16aa5ab0733c --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native noise implementations (experimental). + +Re-exports stable configs and classes, then overrides the function-based +noise models (``constant_noise``, ``uniform_noise``, ``gaussian_noise``) +and their configs with Warp-native versions that operate **in-place** on +``wp.array``. + +Calling convention (matches Warp MDP terms):: + + noise_cfg.func(data_wp, noise_cfg) -> None # in-place on wp.array +""" + +from isaaclab.utils.noise import * # noqa: F401,F403 + +# Override with Warp-native implementations +from .noise_cfg import ConstantNoiseCfg, GaussianNoiseCfg, NoiseCfg, UniformNoiseCfg # noqa: F401 +from .noise_model import NoiseModel, constant_noise, gaussian_noise, uniform_noise # noqa: F401 diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_cfg.py b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_cfg.py new file mode 100644 index 000000000000..5c7045e65a69 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_cfg.py @@ -0,0 +1,72 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native noise configuration (experimental).""" + +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import MISSING +from typing import Literal + +import warp as wp + +from isaaclab.utils import configclass + +from . import noise_model + + +@configclass +class NoiseCfg: + """Configuration for a Warp-native noise term. + + Experimental fork of :class:`isaaclab.utils.noise.NoiseCfg` adapted for the + Warp-first calling convention where noise functions operate **in-place** on a + ``wp.array`` buffer and return ``None``. + """ + + func: Callable[[wp.array, NoiseCfg], None] = MISSING + """The function to be called for applying the noise. + + The function must take a ``wp.array`` as the first argument and the noise + configuration as the second argument. It operates **in-place** (no return value). + """ + + operation: Literal["add", "scale", "abs"] = "add" + """The operation to apply the noise on the data. Defaults to ``"add"``.""" + + +@configclass +class ConstantNoiseCfg(NoiseCfg): + """Configuration for a constant noise term (Warp-native).""" + + func = noise_model.constant_noise + + bias: float = 0.0 + """The bias to add. Defaults to 0.0.""" + + +@configclass +class UniformNoiseCfg(NoiseCfg): + """Configuration for a uniform noise term (Warp-native).""" + + func = noise_model.uniform_noise + + n_min: float = -1.0 + """The minimum value of the noise. Defaults to -1.0.""" + n_max: float = 1.0 + """The maximum value of the noise. Defaults to 1.0.""" + + +@configclass +class GaussianNoiseCfg(NoiseCfg): + """Configuration for a gaussian noise term (Warp-native).""" + + func = noise_model.gaussian_noise + + mean: float = 0.0 + """The mean of the noise. Defaults to 0.0.""" + std: float = 1.0 + """The standard deviation of the noise. Defaults to 1.0.""" diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_model.py b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_model.py new file mode 100644 index 000000000000..acba926ea488 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/noise/noise_model.py @@ -0,0 +1,200 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-native noise functions and models (experimental). + +Each noise function takes a ``wp.array`` as its first argument and operates **in-place** +via ``wp.launch``. The calling convention mirrors the stable noise interface:: + + noise_cfg.func(data_wp, noise_cfg) -> None + +Random noise kernels (gaussian, uniform) consume the shared per-env Warp RNG state +(``rng_state_wp``) that is set on the config at manager prep time from +``env.rng_state_wp``. See :func:`initialize_rng_state` in +``isaaclab_experimental.envs.manager_based_env_warp`` for the initialization pattern. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp + +if TYPE_CHECKING: + from . import noise_cfg + +## +# Operation mode mapping. +## + +_OPERATION_MAP: dict[str, int] = {"add": 0, "scale": 1, "abs": 2} + +## +# Noise as functions. +## + + +# -- constant ----------------------------------------------------------------- + + +@wp.kernel +def _apply_constant_noise( + out: wp.array(dtype=wp.float32, ndim=2), + bias: wp.float32, + operation: wp.int32, +): + env_id = wp.tid() + for j in range(out.shape[1]): + if operation == 0: + out[env_id, j] = out[env_id, j] + bias + elif operation == 1: + out[env_id, j] = out[env_id, j] * bias + else: + out[env_id, j] = bias + + +def constant_noise(data: wp.array, cfg: noise_cfg.ConstantNoiseCfg) -> None: + """Applies a constant noise bias to a given data set in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.noise.constant_noise`. + + Args: + data: The data buffer to modify. Shape ``(num_envs, D)``. + cfg: The configuration parameters for constant noise. + """ + wp.launch( + _apply_constant_noise, + dim=data.shape[0], + inputs=[data, float(cfg.bias), _OPERATION_MAP[cfg.operation]], + device=data.device, + ) + + +# -- uniform ------------------------------------------------------------------ + + +@wp.kernel +def _apply_uniform_noise( + out: wp.array(dtype=wp.float32, ndim=2), + rng_state: wp.array(dtype=wp.uint32), + n_min: wp.float32, + n_max: wp.float32, + operation: wp.int32, +): + env_id = wp.tid() + state = rng_state[env_id] + for j in range(out.shape[1]): + n = wp.randf(state, n_min, n_max) + if operation == 0: + out[env_id, j] = out[env_id, j] + n + elif operation == 1: + out[env_id, j] = out[env_id, j] * n + else: + out[env_id, j] = n + rng_state[env_id] = state + + +def uniform_noise(data: wp.array, cfg: noise_cfg.UniformNoiseCfg) -> None: + """Applies uniform noise to a given data set in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.noise.uniform_noise`. + + Args: + data: The data buffer to modify. Shape ``(num_envs, D)``. + cfg: The configuration parameters for uniform noise. + """ + wp.launch( + _apply_uniform_noise, + dim=data.shape[0], + inputs=[data, cfg.rng_state_wp, float(cfg.n_min), float(cfg.n_max), _OPERATION_MAP[cfg.operation]], + device=data.device, + ) + + +# -- gaussian ----------------------------------------------------------------- + + +@wp.kernel +def _apply_gaussian_noise( + out: wp.array(dtype=wp.float32, ndim=2), + rng_state: wp.array(dtype=wp.uint32), + mean: wp.float32, + std: wp.float32, + operation: wp.int32, +): + env_id = wp.tid() + state = rng_state[env_id] + for j in range(out.shape[1]): + n = mean + std * wp.randn(state) + if operation == 0: + out[env_id, j] = out[env_id, j] + n + elif operation == 1: + out[env_id, j] = out[env_id, j] * n + else: + out[env_id, j] = n + rng_state[env_id] = state + + +def gaussian_noise(data: wp.array, cfg: noise_cfg.GaussianNoiseCfg) -> None: + """Applies gaussian noise to a given data set in-place. + + Warp-native drop-in replacement for :func:`isaaclab.utils.noise.gaussian_noise`. + + Args: + data: The data buffer to modify. Shape ``(num_envs, D)``. + cfg: The configuration parameters for gaussian noise. + """ + wp.launch( + _apply_gaussian_noise, + dim=data.shape[0], + inputs=[data, cfg.rng_state_wp, float(cfg.mean), float(cfg.std), _OPERATION_MAP[cfg.operation]], + device=data.device, + ) + + +## +# Noise models as classes. +## + + +class NoiseModel: + """Warp-native base class for noise models. + + Experimental fork of :class:`isaaclab.utils.noise.NoiseModel` adapted for the + Warp-first calling convention where noise is applied **in-place** on ``wp.array``. + """ + + def __init__(self, noise_model_cfg: noise_cfg.NoiseModelCfg, num_envs: int, device: str): + """Initialize the noise model. + + Args: + noise_model_cfg: The noise configuration to use. + num_envs: The number of environments. + device: The device to use for the noise model. + """ + self._noise_model_cfg = noise_model_cfg + self._num_envs = num_envs + self._device = device + + def reset(self, env_mask: wp.array | None = None): + """Reset the noise model. + + This method can be implemented by derived classes to reset the noise model. + This is useful when implementing temporal noise models such as random walk. + + Args: + env_mask: Boolean env mask of shape ``(num_envs,)`` selecting environments + to reset. Defaults to None, in which case all environments are + considered. + """ + pass + + def __call__(self, data: wp.array) -> None: + """Apply the noise to the data in-place. + + Args: + data: The data to apply the noise to. Shape is ``(num_envs, ...)``. + """ + self._noise_model_cfg.noise_cfg.func(data, self._noise_model_cfg.noise_cfg) diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/torch_utils.py b/source/isaaclab_experimental/isaaclab_experimental/utils/torch_utils.py new file mode 100644 index 000000000000..1e611ec3fef1 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/torch_utils.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + + +def clone_obs_buffer( + obs_buffer: dict[str, torch.Tensor | dict[str, torch.Tensor]], +) -> dict[str, torch.Tensor | dict[str, torch.Tensor]]: + """Clone a nested observation buffer, using :meth:`torch.Tensor.clone` for every leaf tensor. + + This avoids the overhead of :func:`copy.deepcopy` while still producing an independent + snapshot of the buffer (new dict objects + cloned tensor storage). + + Args: + obs_buffer: Observation buffer mapping group names to either a single concatenated + tensor or a dict of per-term tensors. + + Returns: + A new dictionary with the same structure whose tensors are clones of the originals. + """ + result: dict[str, torch.Tensor | dict[str, torch.Tensor]] = {} + for key, value in obs_buffer.items(): + if isinstance(value, torch.Tensor): + result[key] = value.clone() + else: # dict[str, torch.Tensor] + result[key] = {k: v.clone() for k, v in value.items()} + return result diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/warp/__init__.py b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/__init__.py new file mode 100644 index 000000000000..2d071b823a46 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp utility functions and shared kernels for isaaclab_experimental.""" + +from .kernels import compute_reset_scale, count_masked +from .utils import ( + WarpCapturable, + is_warp_capturable, + resolve_1d_mask, + warp_capturable, + wrap_to_pi, + zero_masked_2d, +) diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/warp/kernels.py b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/kernels.py new file mode 100644 index 000000000000..8d3f9e65d49a --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/kernels.py @@ -0,0 +1,45 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared Warp kernels used across multiple managers.""" + +from __future__ import annotations + +import warp as wp + + +@wp.kernel +def count_masked( + mask: wp.array(dtype=wp.bool), + out_count: wp.array(dtype=wp.int32), +): + """Count the number of True entries in a boolean mask. + + ``out_count`` must be zeroed before launch. Result is stored in ``out_count[0]``. + Launched with ``dim = num_envs``. + """ + env_id = wp.tid() + if mask[env_id]: + wp.atomic_add(out_count, 0, 1) + + +@wp.kernel +def compute_reset_scale( + reset_count: wp.array(dtype=wp.int32), + divisor: wp.float32, + out_scale: wp.array(dtype=wp.float32), +): + """Compute ``1 / (count * divisor)`` scaling factor from a reset count. + + Pass ``divisor = 1.0`` for plain ``1 / count`` (e.g. command manager). + Pass ``divisor = max_episode_length_s`` for reward-style normalization. + + Launched with ``dim = 1``. + """ + count = reset_count[0] + if count > 0: + out_scale[0] = 1.0 / (wp.float32(count) * divisor) + else: + out_scale[0] = 0.0 diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/warp/utils.py b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/utils.py new file mode 100644 index 000000000000..a4beb8627ef9 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/warp/utils.py @@ -0,0 +1,204 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence + +import torch + +import warp as wp + + +@wp.kernel +def _set_mask_from_ids( + mask: wp.array(dtype=wp.bool), + ids: wp.array(dtype=wp.int32), +): + """Set ``mask[ids[i]] = True`` for each thread *i*.""" + i = wp.tid() + mask[ids[i]] = True + + +def resolve_1d_mask( + *, + ids: Sequence[int] | slice | wp.array | torch.Tensor | None, + mask: wp.array | torch.Tensor | None, + all_mask: wp.array, + scratch_mask: wp.array, + device: str, +) -> wp.array: + """Resolve ids/mask into a warp boolean mask. + + Matches the contract of ``ArticulationData._resolve_1d_mask`` on dev/newton. + Callers must provide pre-allocated ``all_mask`` (all-True) and ``scratch_mask`` + (reusable working buffer). No allocations happen inside this function. + + Args: + ids: Indices to set to ``True``. ``None`` or ``slice(None)`` means all. + mask: Explicit boolean mask. If provided, returned directly (after + torch->warp normalization if needed). Takes precedence over *ids*. + all_mask: Pre-allocated all-True mask of shape ``(size,)``, returned + when both *ids* and *mask* are ``None``. + scratch_mask: Pre-allocated scratch mask of shape ``(size,)``, filled + in-place when *ids* are provided. + device: Warp device string. + + Returns: + A ``wp.array(dtype=wp.bool)`` -- ``mask``, ``all_mask``, or ``scratch_mask``. + """ + # Fast path: explicit mask provided. + if mask is not None: + if isinstance(mask, torch.Tensor): + if mask.dtype != torch.bool: + mask = mask.to(dtype=torch.bool) + if str(mask.device) != device: + mask = mask.to(device) + return wp.from_torch(mask, dtype=wp.bool) + return mask + + # Fast path: all ids. + if ids is None or (isinstance(ids, slice) and ids == slice(None)): + return all_mask + + # Normalize slice into explicit indices. + if isinstance(ids, slice): + start, stop, step = ids.indices(scratch_mask.shape[0]) + ids = list(range(start, stop, step)) + elif not isinstance(ids, (torch.Tensor, wp.array)): + ids = list(ids) + + # Prepare output mask. + scratch_mask.fill_(False) + + # Normalize ids to wp.int32 array and launch kernel. + if isinstance(ids, torch.Tensor): + if ids.numel() == 0: + return scratch_mask + if str(ids.device) != device: + ids = ids.to(device) + if ids.dtype != torch.int32: + ids = ids.to(dtype=torch.int32) + if not ids.is_contiguous(): + ids = ids.contiguous() + ids_wp = wp.from_torch(ids, dtype=wp.int32) + elif isinstance(ids, wp.array): + if ids.shape[0] == 0: + return scratch_mask + ids_wp = ids + else: + if len(ids) == 0: + return scratch_mask + ids_wp = wp.array(ids, dtype=wp.int32, device=device) + + wp.launch(kernel=_set_mask_from_ids, dim=ids_wp.shape[0], inputs=[scratch_mask, ids_wp], device=device) + return scratch_mask + + +def warp_capturable(capturable: bool): + """Annotate an MDP term's CUDA-graph capturability. + + No-wrapper decorator: sets ``_warp_capturable`` directly on the function + and returns it unchanged. Safe to stack with any other decorator in any order. + + By default all MDP terms are assumed capturable (True). Use + ``@warp_capturable(False)`` on terms that call non-capturable external APIs. + """ + + def decorator(func): + func._warp_capturable = capturable + return func + + return decorator + + +def is_warp_capturable(func) -> bool: + """Check if a term function is CUDA-graph-capturable. + + Checks ``_warp_capturable`` on the function and its ``__wrapped__`` target. + Returns True (capturable) by default if no annotation is found. + """ + for f in (func, getattr(func, "__wrapped__", None)): + if f is not None: + val = getattr(f, "_warp_capturable", None) + if val is not None: + return val + return True + + +@wp.func +def wrap_to_pi(angle: float) -> float: + """Wrap input angle (in radians) to the range [-pi, pi].""" + two_pi = 2.0 * wp.pi + wrapped_angle = angle + wp.pi + # NOTE: Use floor-based remainder semantics to match torch's `%` for negative inputs. + wrapped_angle = wrapped_angle - wp.floor(wrapped_angle / two_pi) * two_pi + return wp.where((wrapped_angle == 0) and (angle > 0), wp.pi, wrapped_angle - wp.pi) + + +class WarpCapturable: + """CUDA graph capture safety: decorator, annotation checker, and runtime guard. + + Decorator usage:: + + @WarpCapturable(False) + def reset_root_state_uniform(env, env_mask, ...): + ... + + @WarpCapturable(False, reason="calls write_root_pose_to_sim") + def push_by_setting_velocity(env, env_mask, ...): + ... + + - ``@WarpCapturable(True)`` or no decorator: capturable, returned unwrapped. + - ``@WarpCapturable(False)``: sets ``func._warp_capturable = False``, wraps with + runtime guard that raises if ``wp.get_device().is_capturing`` is ``True``. + """ + + def __init__(self, capturable: bool, *, reason: str | None = None): + self._capturable = capturable + self._reason = reason + + def __call__(self, func): + """Decorate *func* with capture safety annotation and optional runtime guard.""" + import functools + + func._warp_capturable = self._capturable + if self._capturable: + return func + + reason = self._reason + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if wp.get_device().is_capturing: + msg = f"'{func.__qualname__}' is marked @WarpCapturable(False) but called during CUDA graph capture." + if reason: + msg = f"{msg} {reason}" + raise RuntimeError(msg) + return func(*args, **kwargs) + + wrapper._warp_capturable = False + return wrapper + + @staticmethod + def is_capturable(func) -> bool: + """Check capturability annotation. Default: ``True``. + + Checks ``__wrapped__`` for decorated functions to handle stacked decorators. + """ + for f in (func, getattr(func, "__wrapped__", None)): + if f is not None: + val = getattr(f, "_warp_capturable", None) + if val is not None: + return val + return True + + +@wp.kernel +def zero_masked_2d(mask: wp.array(dtype=wp.bool), values: wp.array(dtype=wp.float32, ndim=2)): + """Zero out rows of a 2D float32 array where mask is True.""" + env_id, j = wp.tid() + if mask[env_id]: + values[env_id, j] = 0.0 diff --git a/source/isaaclab_experimental/isaaclab_experimental/utils/warp_graph_cache.py b/source/isaaclab_experimental/isaaclab_experimental/utils/warp_graph_cache.py new file mode 100644 index 000000000000..d446be28d099 --- /dev/null +++ b/source/isaaclab_experimental/isaaclab_experimental/utils/warp_graph_cache.py @@ -0,0 +1,81 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp CUDA graph capture-or-replay utility.""" + +from collections.abc import Callable +from typing import Any + +import warp as wp + + +class WarpGraphCache: + """Caches Warp CUDA graphs by stage name: captures on first call, replays after. + + On the very first call for a given stage, an **eager warm-up** run + executes *before* graph capture. This lets one-time initialisation + code (memory allocations, torch dtype casts, ``hasattr`` guards, etc.) + run outside the capture context. Only the steady-state kernel + launches are then recorded into the graph. + + The return value from the capture run is cached and returned on every + subsequent replay, ensuring captured stages return the same references + (e.g. tensor views) as eager stages. + + Usage:: + + cache = WarpGraphCache() + result = cache.capture_or_replay("my_stage", my_warp_function) + # uncaptured work here ... + result2 = cache.capture_or_replay("my_stage_post", my_other_function) + """ + + def __init__(self): + self._graphs: dict[str, Any] = {} + self._results: dict[str, Any] = {} + + def capture_or_replay( + self, + stage: str, + fn: Callable[..., Any], + args: tuple = (), + kwargs: dict[str, Any] | None = None, + ) -> Any: + """Capture *fn* into a CUDA graph on the first call, then replay. + + Args: + stage: Unique name identifying this captured scope. + fn: The callable to capture. Must contain only CUDA-graph-safe + operations (pure warp kernels, no Python-level branching on + GPU data). + args: Positional arguments forwarded to *fn*. Defaults to ``()``. + kwargs: Keyword arguments forwarded to *fn*. Defaults to ``None``. + + Returns: + The cached return value from the first (capture) invocation. + """ + if kwargs is None: + kwargs = {} + graph = self._graphs.get(stage) + if graph is not None: + wp.capture_launch(graph) + return self._results[stage] + # Warm-up: run eagerly to flush first-call allocations / hasattr guards. + fn(*args, **kwargs) + # Capture: allocations already done, only wp.launch calls are recorded. + with wp.ScopedCapture() as capture: + result = fn(*args, **kwargs) + self._graphs[stage] = capture.graph + self._results[stage] = result + return result + + def invalidate(self, stage: str | None = None) -> None: + """Drop cached graph(s). If *stage* is ``None``, drop all.""" + if stage is None: + self._graphs.clear() + self._results.clear() + else: + self._graphs.pop(stage, None) + self._results.pop(stage, None) diff --git a/source/isaaclab_experimental/pyproject.toml b/source/isaaclab_experimental/pyproject.toml new file mode 100644 index 000000000000..d90ac3536f16 --- /dev/null +++ b/source/isaaclab_experimental/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_experimental/setup.py b/source/isaaclab_experimental/setup.py new file mode 100644 index 000000000000..81e8ad2c3c2b --- /dev/null +++ b/source/isaaclab_experimental/setup.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_experimental' python package.""" + +import os + +import toml +from setuptools import find_packages, setup + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +# Minimum dependencies required prior to installation +INSTALL_REQUIRES = [ + "toml", +] + +# Installation operation +setup( + name="isaaclab_experimental", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + license="BSD-3-Clause", + include_package_data=True, + python_requires=">=3.12", + install_requires=INSTALL_REQUIRES, + packages=find_packages(), + classifiers=[ + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_experimental/test/envs/mdp/parity_helpers.py b/source/isaaclab_experimental/test/envs/mdp/parity_helpers.py new file mode 100644 index 000000000000..9aa219ee4a41 --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/parity_helpers.py @@ -0,0 +1,628 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared test utilities for MDP warp-vs-stable parity tests. + +Contains constants, assertion helpers, warp kernel runners, mock classes, +numpy math utilities, and mutation helpers used by the observation, reward, +termination, event, and action parity test files. +""" + +from __future__ import annotations + +import numpy as np +import torch +import warp as wp + +from isaaclab.utils.warp import ProxyArray + +# --------------------------------------------------------------------------- +# Constants (shared across all MDP parity test files) +# --------------------------------------------------------------------------- +NUM_ENVS = 64 +NUM_JOINTS = 12 +NUM_ACTIONS = 6 +DEVICE = "cuda:0" +ATOL = 1e-5 +RTOL = 1e-5 + +# Body/sensor-level defaults shared by observation, reward, and termination tests +NUM_BODIES = 4 +NUM_HISTORY = 3 +CMD_DIM = 3 +BODY_IDS = [0, 2] + +# Gravity direction constant (normalized, same as ArticulationData.GRAVITY_VEC_W) +GRAVITY_DIR_NP = np.array([[0.0, 0.0, -1.0]], dtype=np.float32) + + +# --------------------------------------------------------------------------- +# Numpy math utilities +# --------------------------------------------------------------------------- + + +def quat_rotate_inv_np(q_xyzw: np.ndarray, v: np.ndarray) -> np.ndarray: + """Apply inverse quaternion rotation to vectors (numpy, batch). + + Equivalent to ``wp.quat_rotate_inv`` — rotates *v* by the conjugate of *q*. + + Args: + q_xyzw: (N, 4) quaternion array in [x, y, z, w] order (warp convention). + v: (N, 3) vector array. + + Returns: + (N, 3) rotated vectors in float32. + """ + qv = -q_xyzw[..., :3] # conjugate xyz + qw = q_xyzw[..., 3:4] + t = 2.0 * np.cross(qv, v) + return (v + qw * t + np.cross(qv, t)).astype(np.float32) + + +# --------------------------------------------------------------------------- +# Warp / numpy utilities +# --------------------------------------------------------------------------- + + +def proxy_array(*args, **kwargs) -> ProxyArray: + """Build a :class:`ProxyArray` directly from ``wp.array`` constructor arguments. + + Lets the parity-test mocks read as ``self.joint_pos = proxy_array(np_data, device=...)`` + instead of ``ProxyArray(wp.array(np_data, device=...))``. + """ + return ProxyArray(wp.array(*args, **kwargs)) + + +def copy_np_to_wp(dest: wp.array | ProxyArray, src_np: np.ndarray): + """In-place overwrite of a warp array's contents from numpy (preserves pointer).""" + if isinstance(dest, ProxyArray): + dest = dest.warp + tmp = wp.array(src_np, dtype=dest.dtype, device=str(dest.device)) + wp.copy(dest, tmp) + + +# --------------------------------------------------------------------------- +# Test runner helpers +# --------------------------------------------------------------------------- + + +def run_warp_obs(func, env, shape, device=DEVICE, **kwargs): + """Run a warp observation function and return the result as a torch tensor.""" + out = wp.zeros(shape, dtype=wp.float32, device=device) + func(env, out, **kwargs) + return wp.to_torch(out).clone() + + +def run_warp_obs_captured(func, env, shape, device=DEVICE, **kwargs): + """Run a warp observation function under CUDA graph capture and return the result.""" + out = wp.zeros(shape, dtype=wp.float32, device=device) + func(env, out, **kwargs) # warm-up + with wp.ScopedCapture() as capture: + func(env, out, **kwargs) + wp.capture_launch(capture.graph) + return wp.to_torch(out).clone() + + +def run_warp_rew(func, env, device=DEVICE, **kwargs): + """Run a warp reward function and return the result as a torch tensor.""" + out = wp.zeros((NUM_ENVS,), dtype=wp.float32, device=device) + func(env, out, **kwargs) + return wp.to_torch(out).clone() + + +def run_warp_rew_captured(func, env, device=DEVICE, **kwargs): + """Run a warp reward function under CUDA graph capture.""" + out = wp.zeros((NUM_ENVS,), dtype=wp.float32, device=device) + func(env, out, **kwargs) # warm-up + with wp.ScopedCapture() as capture: + func(env, out, **kwargs) + wp.capture_launch(capture.graph) + return wp.to_torch(out).clone() + + +def run_warp_term(func, env, device=DEVICE, **kwargs): + """Run a warp termination function and return the result as a torch tensor.""" + out = wp.zeros((NUM_ENVS,), dtype=wp.bool, device=device) + func(env, out, **kwargs) + return wp.to_torch(out).clone() + + +def run_warp_term_captured(func, env, device=DEVICE, **kwargs): + """Run a warp termination function under CUDA graph capture.""" + out = wp.zeros((NUM_ENVS,), dtype=wp.bool, device=device) + func(env, out, **kwargs) # warm-up + with wp.ScopedCapture() as capture: + func(env, out, **kwargs) + wp.capture_launch(capture.graph) + return wp.to_torch(out).clone() + + +# --------------------------------------------------------------------------- +# Assertion helpers +# --------------------------------------------------------------------------- + + +def assert_close(actual: torch.Tensor, expected: torch.Tensor, atol: float = ATOL, rtol: float = RTOL): + torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol) + + +def assert_equal(actual: torch.Tensor, expected: torch.Tensor): + assert torch.equal(actual, expected), f"Mismatch:\n actual: {actual}\n expected: {expected}" + + +# --------------------------------------------------------------------------- +# Mock classes (shared across parity test files) +# --------------------------------------------------------------------------- + + +class MockArticulationData: + """Mock articulation data backed by Warp arrays (same storage Newton uses). + + Args: + num_envs: Number of environments. + num_joints: Number of joints. + device: Warp device string. + seed: Random seed for reproducibility. + num_bodies: Number of bodies. When > 0, generates body-level arrays + (body_pose_w, body_lin_acc_w, body_com_pos_b) and multi-body + projected_gravity_b. When 0, projected_gravity_b is root-level + (derived from root quaternion). + """ + + def __init__(self, num_envs=NUM_ENVS, num_joints=NUM_JOINTS, device=DEVICE, seed=42, num_bodies=0): + rng = np.random.RandomState(seed) + + # --- Joint state (float32 2D) --- + self.joint_pos = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32), device=device) + self.joint_vel = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32) * 2.0, device=device) + self.joint_acc = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32) * 0.5, device=device) + self.default_joint_pos = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32) * 0.01, device=device) + self.default_joint_vel = proxy_array(np.zeros((num_envs, num_joints), dtype=np.float32), device=device) + self.applied_torque = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32) * 10.0, device=device) + self.computed_torque = proxy_array(rng.randn(num_envs, num_joints).astype(np.float32) * 10.0, device=device) + + # --- Soft joint limits --- + limits_np = np.zeros((num_envs, num_joints, 2), dtype=np.float32) + limits_np[:, :, 0] = -3.14 + limits_np[:, :, 1] = 3.14 + self.soft_joint_pos_limits = proxy_array(limits_np, dtype=wp.vec2f, device=device) + self.soft_joint_vel_limits = proxy_array(np.full((num_envs, num_joints), 10.0, dtype=np.float32), device=device) + + # --- Root state --- + root_pos_np = rng.randn(num_envs, 3).astype(np.float32) + root_pos_np[:, 2] = np.abs(root_pos_np[:, 2]) + 0.1 # positive heights + self.root_pos_w = proxy_array(root_pos_np, dtype=wp.vec3f, device=device) + + # Unit quaternions + quat_np = rng.randn(num_envs, 4).astype(np.float32) + quat_np /= np.linalg.norm(quat_np, axis=1, keepdims=True) + self.root_quat_w = proxy_array(quat_np, dtype=wp.quatf, device=device) + + # Tier 1 compound: root_link_pose_w (transformf = pos + quat) + pose_np = np.zeros((num_envs, 7), dtype=np.float32) + pose_np[:, :3] = root_pos_np + pose_np[:, 3:] = quat_np + self.root_link_pose_w = proxy_array(pose_np, dtype=wp.transformf, device=device) + + # World-frame velocities + lin_vel_w_np = rng.randn(num_envs, 3).astype(np.float32) + ang_vel_w_np = rng.randn(num_envs, 3).astype(np.float32) + self.root_lin_vel_w = proxy_array(lin_vel_w_np, dtype=wp.vec3f, device=device) + self.root_ang_vel_w = proxy_array(ang_vel_w_np, dtype=wp.vec3f, device=device) + + # Tier 1 compound: root_com_vel_w (spatial_vectorf: top=linear, bottom=angular) + vel_np = np.zeros((num_envs, 6), dtype=np.float32) + vel_np[:, :3] = lin_vel_w_np + vel_np[:, 3:] = ang_vel_w_np + self.root_com_vel_w = proxy_array(vel_np, dtype=wp.spatial_vectorf, device=device) + + # Gravity direction constant (1D array to match kernel signatures) + self.GRAVITY_VEC_W = proxy_array([0.0, 0.0, -1.0], dtype=wp.vec3f, device=device) + + # Derived body-frame quantities (consistent with Tier 1 compounds) + self.root_lin_vel_b = proxy_array(quat_rotate_inv_np(quat_np, lin_vel_w_np), dtype=wp.vec3f, device=device) + self.root_ang_vel_b = proxy_array(quat_rotate_inv_np(quat_np, ang_vel_w_np), dtype=wp.vec3f, device=device) + + # --- projected_gravity_b and body-level data --- + if num_bodies > 0: + # Multi-body projected_gravity_b: (num_envs, num_bodies) vec3f + grav_np = rng.randn(num_envs, num_bodies, 3).astype(np.float32) + grav_np[:, :, 2] = -1.0 + grav_np /= np.linalg.norm(grav_np, axis=2, keepdims=True) + self.projected_gravity_b = proxy_array(grav_np, dtype=wp.vec3f, device=device) + + # body_pose_w: (num_envs, num_bodies) transformf + bpose_np = np.zeros((num_envs, num_bodies, 7), dtype=np.float32) + bpose_np[:, :, :3] = rng.randn(num_envs, num_bodies, 3).astype(np.float32) + bpose_np[:, :, 3:7] = [0.0, 0.0, 0.0, 1.0] + self.body_pose_w = proxy_array(bpose_np, dtype=wp.transformf, device=device) + + # body_lin_acc_w: (num_envs, num_bodies) vec3f + self.body_lin_acc_w = proxy_array( + rng.randn(num_envs, num_bodies, 3).astype(np.float32), dtype=wp.vec3f, device=device + ) + + # body_com_pos_b: (num_envs, num_bodies) vec3f + self.body_com_pos_b = proxy_array( + rng.randn(num_envs, num_bodies, 3).astype(np.float32) * 0.01, dtype=wp.vec3f, device=device + ) + else: + # Root-level projected_gravity_b: (num_envs,) vec3f — derived from root quat + self.projected_gravity_b = proxy_array( + quat_rotate_inv_np(quat_np, np.tile(GRAVITY_DIR_NP, (num_envs, 1))), + dtype=wp.vec3f, + device=device, + ) + + # --- Event-specific data --- + self.root_vel_w = proxy_array( + rng.randn(num_envs, 6).astype(np.float32), dtype=wp.spatial_vectorf, device=device + ) + + default_pose_np = np.zeros((num_envs, 7), dtype=np.float32) + default_pose_np[:, 0:3] = rng.randn(num_envs, 3).astype(np.float32) * 0.1 + default_pose_np[:, 3:7] = [0.0, 0.0, 0.0, 1.0] + self.default_root_pose = proxy_array(default_pose_np, dtype=wp.transformf, device=device) + + self.default_root_vel = proxy_array( + np.zeros((num_envs, 6), dtype=np.float32), dtype=wp.spatial_vectorf, device=device + ) + + def resolve_joint_mask(self, joint_ids=None): + n = self.joint_pos.shape[1] + mask = [False] * n + if joint_ids is None or isinstance(joint_ids, slice): + mask = [True] * n + else: + for j in joint_ids: + mask[j] = True + return wp.array(mask, dtype=wp.bool, device=str(self.joint_pos.device)) + + +class MockWrenchComposer: + """Mock wrench composer with no-op methods.""" + + def set_forces_and_torques_mask(self, *a, **kw): + pass + + +class MockArticulation: + """Mock articulation asset with simulation write stubs. + + Provides both no-op write stubs (for event tests) and tracking write stubs + (for action tests). The ``last_*_target`` attributes record the most recent + values passed to ``set_joint_*_target``, enabling verification in action tests. + """ + + def __init__(self, data: MockArticulationData, num_bodies: int = 1, num_joints: int = NUM_JOINTS): + self.data = data + self.num_bodies = num_bodies + self.num_joints = num_joints + self.device = DEVICE + self._joint_names = [f"joint_{i}" for i in range(num_joints)] + self.permanent_wrench_composer = MockWrenchComposer() + # Tracking attributes for action tests + self.last_pos_target = None + self.last_vel_target = None + self.last_effort_target = None + self.last_joint_mask = None + + # -- Simulation write stubs (no-op, for event tests) -------------------- + + def write_root_velocity_to_sim(self, *a, **kw): + pass + + def write_root_velocity_to_sim_mask(self, *a, **kw): + pass + + def write_root_pose_to_sim(self, *a, **kw): + pass + + def write_root_pose_to_sim_mask(self, *a, **kw): + pass + + def write_joint_state_to_sim(self, *a, **kw): + pass + + def write_joint_position_to_sim_mask(self, *a, **kw): + pass + + def write_joint_velocity_to_sim_mask(self, *a, **kw): + pass + + def set_external_force_and_torque(self, *a, **kw): + pass + + # -- Action write stubs (tracking, for action tests) -------------------- + + def set_joint_position_target(self, target, joint_ids=None, joint_mask=None): + self.last_pos_target = target + self.last_joint_mask = joint_mask + + def set_joint_velocity_target(self, target, joint_ids=None, joint_mask=None): + self.last_vel_target = target + self.last_joint_mask = joint_mask + + def set_joint_effort_target(self, target, joint_ids=None, joint_mask=None): + self.last_effort_target = target + self.last_joint_mask = joint_mask + + def set_joint_position_target_index(self, target, joint_ids=None): + self.last_pos_target = target + + def set_joint_effort_target_index(self, target, joint_ids=None): + self.last_effort_target = target + + # -- Query stubs -------------------------------------------------------- + + def find_joints(self, names, preserve_order=False): + if isinstance(names, list) and names == [".*"]: + return list(range(self.num_joints)), list(self._joint_names) + ids = [] + resolved = [] + for name in names if isinstance(names, list) else [names]: + for i, jn in enumerate(self._joint_names): + if (name in jn or name == jn or name == ".*") and i not in ids: + ids.append(i) + resolved.append(jn) + if not ids: + ids = list(range(self.num_joints)) + resolved = list(self._joint_names) + return ids, resolved + + def find_bodies(self, name): + return [0], [name] + + +class MockScene: + """Mock scene with asset lookup, env origins, and optional sensors.""" + + def __init__(self, assets: dict, env_origins, sensors=None): + self._assets = assets + self.env_origins = env_origins + self.sensors = sensors or {} + self.articulations = dict(assets) + self.rigid_objects = {} + self.num_envs = NUM_ENVS + + def __getitem__(self, name: str): + return self._assets[name] + + +# --------------------------------------------------------------------------- +# Root-state mutation helper +# --------------------------------------------------------------------------- + + +def mutate_root_state(rng: np.random.RandomState, art_data: MockArticulationData, num_envs: int = NUM_ENVS): + """Mutate root-level state arrays in-place (preserves buffer pointers). + + Updates root_pos_w, root_quat_w, root_link_pose_w, root_com_vel_w, + root_lin_vel_w, root_ang_vel_w, root_lin_vel_b, root_ang_vel_b, and + (when 1D) projected_gravity_b — all consistently derived from a fresh + random quaternion and world-frame velocities. + """ + root_pos_np = rng.randn(num_envs, 3).astype(np.float32) + root_pos_np[:, 2] = np.abs(root_pos_np[:, 2]) + 0.05 + copy_np_to_wp(art_data.root_pos_w, root_pos_np) + + quat_np = rng.randn(num_envs, 4).astype(np.float32) + quat_np /= np.linalg.norm(quat_np, axis=1, keepdims=True) + copy_np_to_wp(art_data.root_quat_w, quat_np) + + pose_np = np.zeros((num_envs, 7), dtype=np.float32) + pose_np[:, :3] = root_pos_np + pose_np[:, 3:] = quat_np + copy_np_to_wp(art_data.root_link_pose_w, pose_np) + + lin_vel_w_np = rng.randn(num_envs, 3).astype(np.float32) + ang_vel_w_np = rng.randn(num_envs, 3).astype(np.float32) + copy_np_to_wp(art_data.root_lin_vel_w, lin_vel_w_np) + copy_np_to_wp(art_data.root_ang_vel_w, ang_vel_w_np) + + vel_np = np.zeros((num_envs, 6), dtype=np.float32) + vel_np[:, :3] = lin_vel_w_np + vel_np[:, 3:] = ang_vel_w_np + copy_np_to_wp(art_data.root_com_vel_w, vel_np) + + copy_np_to_wp(art_data.root_lin_vel_b, quat_rotate_inv_np(quat_np, lin_vel_w_np)) + copy_np_to_wp(art_data.root_ang_vel_b, quat_rotate_inv_np(quat_np, ang_vel_w_np)) + + # Root-level projected_gravity_b (1D) is derived from quat. + # Multi-body (2D) is mutated separately by callers. + if art_data.projected_gravity_b.warp.ndim == 1: + copy_np_to_wp( + art_data.projected_gravity_b, + quat_rotate_inv_np(quat_np, np.tile(GRAVITY_DIR_NP, (num_envs, 1))), + ) + + +class MockActionManagerWarp: + """Returns warp arrays (for experimental functions).""" + + def __init__(self, action_wp: wp.array, prev_action_wp: wp.array): + self._action = action_wp + self._prev_action = prev_action_wp + + @property + def action(self) -> wp.array: + return self._action + + @property + def prev_action(self) -> wp.array: + return self._prev_action + + +class MockActionManagerTorch: + """Returns torch tensors (for stable functions).""" + + def __init__(self, action_wp: wp.array, prev_action_wp: wp.array): + self._action = wp.to_torch(action_wp) + self._prev_action = wp.to_torch(prev_action_wp) + + @property + def action(self) -> torch.Tensor: + return self._action + + @property + def prev_action(self) -> torch.Tensor: + return self._prev_action + + +# --------------------------------------------------------------------------- +# Shared mock classes (previously duplicated across test files) +# --------------------------------------------------------------------------- + + +class MockSceneEntityCfg: + """Unified cfg that works for both stable (joint_ids) and experimental (joint_mask / joint_ids_wp).""" + + def __init__(self, name: str, joint_ids: list[int], num_joints: int, device: str): + self.name = name + self.joint_ids = joint_ids + + # Experimental extras + mask = [False] * num_joints + for idx in joint_ids: + mask[idx] = True + self.joint_mask = wp.array(mask, dtype=wp.bool, device=device) + self.joint_ids_wp = wp.array(joint_ids, dtype=wp.int32, device=device) + + +class MockContactSensorData: + """Mock contact sensor data with random force history. + + Stores ``net_forces_w_history`` as a warp ``vec3f`` 3D array of shape + ``(num_envs, num_history, num_bodies)``. Both warp kernels (which read + the warp array directly) and stable functions (which call + ``wp.to_torch``) work with this representation. + """ + + def __init__(self, num_envs=NUM_ENVS, num_history=NUM_HISTORY, num_bodies=NUM_BODIES, device=DEVICE, seed=77): + rng = np.random.RandomState(seed) + self.net_forces_w_history = proxy_array( + rng.randn(num_envs, num_history, num_bodies, 3).astype(np.float32), + dtype=wp.vec3f, + device=device, + ) + + +class MockContactSensor: + """Mock contact sensor wrapping :class:`MockContactSensorData`.""" + + def __init__(self, data: MockContactSensorData, num_bodies: int = NUM_BODIES): + self.data = data + self.num_bodies = num_bodies + + +class MockCommandTerm: + """Mock command term with time_left and command_counter.""" + + def __init__(self, num_envs=NUM_ENVS, device=DEVICE, seed=88): + rng = np.random.RandomState(seed) + self.time_left = torch.tensor(rng.rand(num_envs).astype(np.float32) * 0.05, device=device) + self.command_counter = torch.tensor(rng.randint(0, 3, (num_envs,)), dtype=torch.float32, device=device) + + +class MockCommandManager: + """Mock command manager returning a fixed command tensor and term.""" + + def __init__(self, command_tensor: torch.Tensor, cmd_term: MockCommandTerm): + self._cmd = command_tensor + self._term = cmd_term + + def get_command(self, name: str) -> torch.Tensor: + return self._cmd + + def get_term(self, name: str): + return self._term + + +class MockBodyCfg: + """SceneEntityCfg-like object for body-level reward/termination terms.""" + + def __init__(self, name="robot", body_ids=None): + self.name = name + self.body_ids = body_ids if body_ids is not None else list(BODY_IDS) + + +class MockSensorCfg: + """SceneEntityCfg-like object for contact sensor terms. + + Provides both ``body_ids`` (for stable functions) and ``body_ids_wp`` + (for experimental warp functions). + """ + + def __init__(self, name="contact_sensor", body_ids=None, device=DEVICE): + self.name = name + self.body_ids = body_ids if body_ids is not None else list(BODY_IDS) + self.body_ids_wp = wp.array(self.body_ids, dtype=wp.int32, device=device) + + +class MockTerminationManager: + """Mock termination manager providing both torch and warp terminated buffers.""" + + def __init__(self, num_envs=NUM_ENVS, device=DEVICE): + self.terminated = torch.zeros(num_envs, dtype=torch.bool, device=device) + self.terminated_wp = wp.from_torch(self.terminated) + + +# --------------------------------------------------------------------------- +# Art-data mutation helpers (previously duplicated in obs/rew/term test files) +# --------------------------------------------------------------------------- + + +def mutate_art_data( + art_data: MockArticulationData, + warp_env, + num_envs: int = NUM_ENVS, + num_joints: int = NUM_JOINTS, + num_actions: int = NUM_ACTIONS, + rng_seed: int = 200, +): + """Mutate every data array in-place so captured graphs see fresh values.""" + rng = np.random.RandomState(rng_seed) + + copy_np_to_wp(art_data.joint_pos, rng.randn(num_envs, num_joints).astype(np.float32) * 1.5) + copy_np_to_wp(art_data.joint_vel, rng.randn(num_envs, num_joints).astype(np.float32) * 3.0) + copy_np_to_wp(art_data.joint_acc, rng.randn(num_envs, num_joints).astype(np.float32) * 0.8) + copy_np_to_wp(art_data.default_joint_pos, rng.randn(num_envs, num_joints).astype(np.float32) * 0.02) + copy_np_to_wp(art_data.applied_torque, rng.randn(num_envs, num_joints).astype(np.float32) * 12.0) + copy_np_to_wp(art_data.computed_torque, rng.randn(num_envs, num_joints).astype(np.float32) * 12.0) + + mutate_root_state(rng, art_data, num_envs) + + copy_np_to_wp(warp_env.action_manager._action, rng.randn(num_envs, num_actions).astype(np.float32)) + copy_np_to_wp(warp_env.action_manager._prev_action, rng.randn(num_envs, num_actions).astype(np.float32)) + + warp_env.episode_length_buf[:] = torch.randint(0, 500, (num_envs,), dtype=torch.int64, device=DEVICE) + + wp.synchronize() + + +def mutate_body_data( + art_data: MockArticulationData, + num_envs: int = NUM_ENVS, + num_bodies: int = NUM_BODIES, + rng_seed: int = 200, +): + """Mutate body-level and root-level data in-place so captured graphs see fresh values.""" + rng = np.random.RandomState(rng_seed) + + mutate_root_state(rng, art_data, num_envs) + + grav_np = rng.randn(num_envs, num_bodies, 3).astype(np.float32) + grav_np[:, :, 2] = -1.0 + grav_np /= np.linalg.norm(grav_np, axis=2, keepdims=True) + copy_np_to_wp(art_data.projected_gravity_b, grav_np) + + copy_np_to_wp(art_data.body_lin_acc_w, rng.randn(num_envs, num_bodies, 3).astype(np.float32)) + + pose_np = np.zeros((num_envs, num_bodies, 7), dtype=np.float32) + pose_np[:, :, :3] = rng.randn(num_envs, num_bodies, 3).astype(np.float32) + pose_np[:, :, 3:7] = [0.0, 0.0, 0.0, 1.0] + copy_np_to_wp(art_data.body_pose_w, pose_np) + + wp.synchronize() diff --git a/source/isaaclab_experimental/test/envs/mdp/test_actions_warp_parity.py b/source/isaaclab_experimental/test/envs/mdp/test_actions_warp_parity.py new file mode 100644 index 000000000000..f2db4f64fbe9 --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/test_actions_warp_parity.py @@ -0,0 +1,227 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Parity tests for warp-first action MDP terms.""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +import warp as wp + +wp.init() +pytestmark = pytest.mark.skipif(not wp.is_cuda_available(), reason="CUDA device required") + +from isaaclab_experimental.envs.mdp.actions import ( + JointEffortAction, + JointEffortActionCfg, + JointPositionAction, + JointPositionActionCfg, +) +from parity_helpers import MockArticulation, MockArticulationData, MockScene, copy_np_to_wp + +NUM_ENVS = 32 +NUM_JOINTS = 6 +NUM_BODIES = 3 +DEVICE = "cuda:0" +ATOL = 1e-5 +RTOL = 1e-5 +JOINT_NAMES = [f"joint_{i}" for i in range(NUM_JOINTS)] + + +# ============================================================================ +# Mock infrastructure +# ============================================================================ + + +class MockEnv: + def __init__(self, asset): + self.scene = MockScene({"robot": asset}, env_origins=None) + self.num_envs = NUM_ENVS + self.device = DEVICE + + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture() +def art_data(): + data = MockArticulationData(num_envs=NUM_ENVS, num_joints=NUM_JOINTS, num_bodies=NUM_BODIES) + # Override defaults with specific per-joint values for action tests + copy_np_to_wp( + data.default_joint_pos, + np.tile([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], (NUM_ENVS, 1)).astype(np.float32), + ) + # Body quaternion for NonHolonomicAction (identity = [0,0,0,1] in xyzw) + quat_np = np.zeros((NUM_ENVS, NUM_BODIES, 4), dtype=np.float32) + quat_np[:, :, 3] = 1.0 + data.body_quat_w = wp.array(quat_np, dtype=wp.quatf, device=DEVICE) + data._num_joints = NUM_JOINTS + return data + + +@pytest.fixture() +def asset(art_data): + return MockArticulation(art_data, num_bodies=NUM_BODIES, num_joints=NUM_JOINTS) + + +@pytest.fixture() +def env(asset): + return MockEnv(asset) + + +@pytest.fixture() +def actions_wp(): + rng = np.random.RandomState(99) + return wp.array(rng.randn(NUM_ENVS, NUM_JOINTS).astype(np.float32), device=DEVICE) + + +# ============================================================================ +# Helpers +# ============================================================================ + + +def assert_close(actual, expected, atol=ATOL, rtol=RTOL): + if isinstance(actual, wp.array): + actual = wp.to_torch(actual) + if isinstance(expected, wp.array): + expected = wp.to_torch(expected) + torch.testing.assert_close(actual.float(), expected.float(), atol=atol, rtol=rtol) + + +# ============================================================================ +# Joint action tests (JointPosition, JointEffort) +# ============================================================================ + + +class TestJointActions: + """Test JointAction subclasses: process, apply, reset.""" + + def test_joint_effort_process_apply(self, env, asset, actions_wp): + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"]) + term = JointEffortAction(cfg, env) + + term.process_actions(actions_wp, action_offset=0) + term.apply_actions() + + # Processed = raw * scale(1.0) + offset(0.0) = raw + assert_close(term.processed_actions, actions_wp) + assert asset.last_effort_target is not None + + def test_joint_position_default_offset(self, env, asset, art_data, actions_wp): + cfg = JointPositionActionCfg(asset_name="robot", joint_names=[".*"], use_default_offset=True) + term = JointPositionAction(cfg, env) + + term.process_actions(actions_wp, action_offset=0) + term.apply_actions() + + # Processed = raw * 1.0 + default_joint_pos[0] + defaults = art_data.default_joint_pos.torch[0] + raw = wp.to_torch(actions_wp) + expected = raw + defaults.unsqueeze(0) + assert_close(term.processed_actions, expected) + assert asset.last_pos_target is not None + + def test_joint_action_reset(self, env, asset, actions_wp): + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"]) + term = JointEffortAction(cfg, env) + + # Process some actions + term.process_actions(actions_wp, action_offset=0) + assert wp.to_torch(term.raw_actions).abs().sum() > 0 + + # Reset all + term.reset(env_mask=None) + assert_close(term.raw_actions, wp.zeros_like(term.raw_actions)) + + def test_joint_action_reset_masked(self, env, asset, actions_wp): + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"]) + term = JointEffortAction(cfg, env) + + term.process_actions(actions_wp, action_offset=0) + raw_before = wp.to_torch(term.raw_actions).clone() + + # Reset only first half + mask_np = [i < NUM_ENVS // 2 for i in range(NUM_ENVS)] + mask = wp.array(mask_np, dtype=wp.bool, device=DEVICE) + term.reset(env_mask=mask) + + raw_after = wp.to_torch(term.raw_actions) + # First half zeroed + assert_close(raw_after[: NUM_ENVS // 2], torch.zeros(NUM_ENVS // 2, NUM_JOINTS, device=DEVICE)) + # Second half unchanged + assert_close(raw_after[NUM_ENVS // 2 :], raw_before[NUM_ENVS // 2 :]) + + def test_joint_action_with_scale(self, env, asset, actions_wp): + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"], scale=2.5) + term = JointEffortAction(cfg, env) + + term.process_actions(actions_wp, action_offset=0) + + raw = wp.to_torch(actions_wp) + expected = raw * 2.5 + assert_close(term.processed_actions, expected) + + +# ============================================================================ +# Mathematical parity tests: warp processed_actions == raw * scale + offset +# (This is the same formula used by the stable JointAction.process_actions.) +# ============================================================================ + + +class TestJointActionMathParity: + """Verify warp processed_actions match the affine formula raw * scale + offset. + + The stable ``JointAction.process_actions`` computes + ``processed = raw * scale + offset``. These tests verify the warp + implementation produces identical results for various scale/offset + configurations, confirming mathematical parity without needing to + instantiate the stable classes (which require a full env). + """ + + def test_effort_identity(self, env, actions_wp): + """scale=1, offset=0 -> processed == raw.""" + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"]) + term = JointEffortAction(cfg, env) + term.process_actions(actions_wp, action_offset=0) + + raw = wp.to_torch(actions_wp) + expected = raw * 1.0 + 0.0 + assert_close(term.processed_actions, expected) + + def test_effort_with_scale(self, env, actions_wp): + """scale=3.0, offset=0 -> processed == raw * 3.""" + cfg = JointEffortActionCfg(asset_name="robot", joint_names=[".*"], scale=3.0) + term = JointEffortAction(cfg, env) + term.process_actions(actions_wp, action_offset=0) + + raw = wp.to_torch(actions_wp) + expected = raw * 3.0 + assert_close(term.processed_actions, expected) + + def test_position_with_default_offset(self, env, art_data, actions_wp): + """use_default_offset=True -> processed == raw + defaults[0].""" + cfg = JointPositionActionCfg(asset_name="robot", joint_names=[".*"], use_default_offset=True) + term = JointPositionAction(cfg, env) + term.process_actions(actions_wp, action_offset=0) + + raw = wp.to_torch(actions_wp) + defaults = art_data.default_joint_pos.torch[0] + expected = raw * 1.0 + defaults.unsqueeze(0) + assert_close(term.processed_actions, expected) + + def test_position_scale_and_offset(self, env, art_data, actions_wp): + """scale=2, use_default_offset=True -> processed == raw * 2 + defaults[0].""" + cfg = JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=2.0, use_default_offset=True) + term = JointPositionAction(cfg, env) + term.process_actions(actions_wp, action_offset=0) + + raw = wp.to_torch(actions_wp) + defaults = art_data.default_joint_pos.torch[0] + expected = raw * 2.0 + defaults.unsqueeze(0) + assert_close(term.processed_actions, expected) diff --git a/source/isaaclab_experimental/test/envs/mdp/test_events_warp_parity.py b/source/isaaclab_experimental/test/envs/mdp/test_events_warp_parity.py new file mode 100644 index 000000000000..24b9eb022665 --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/test_events_warp_parity.py @@ -0,0 +1,340 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Parity tests for warp-first event MDP terms.""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +import warp as wp + +# Skip entire module if no CUDA device available +wp.init() +pytestmark = pytest.mark.skipif(not wp.is_cuda_available(), reason="CUDA device required") + +import isaaclab_experimental.envs.mdp.events as warp_evt +from parity_helpers import ( + DEVICE, + NUM_ACTIONS, + NUM_ENVS, + NUM_JOINTS, + MockActionManagerTorch, + MockActionManagerWarp, + MockArticulation, + MockArticulationData, + MockScene, + MockSceneEntityCfg, + assert_close, + copy_np_to_wp, +) + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture(autouse=True) +def _clear_function_caches(): + """Clear first-call caches on warp MDP functions so each test starts fresh. + + Functions that cache warp views via the ``hasattr`` pattern need clearing + between tests to avoid stale references from prior fixtures. + """ + yield + for fn in ( + warp_evt.push_by_setting_velocity, + warp_evt.apply_external_force_torque, + warp_evt.reset_root_state_uniform, + warp_evt.randomize_rigid_body_com, + ): + for attr in list(vars(fn)): + if attr.startswith("_"): + delattr(fn, attr) + + +@pytest.fixture() +def art_data(): + return MockArticulationData(NUM_ENVS, NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def env_origins(): + rng = np.random.RandomState(77) + origins_np = rng.randn(NUM_ENVS, 3).astype(np.float32) + return wp.array(origins_np, dtype=wp.vec3f, device=DEVICE) + + +@pytest.fixture() +def scene(art_data, env_origins): + return MockScene({"robot": MockArticulation(art_data)}, env_origins) + + +@pytest.fixture() +def action_wp(): + rng = np.random.RandomState(99) + a = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + b = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + return a, b + + +@pytest.fixture() +def episode_length_buf(): + torch.manual_seed(55) + return torch.randint(0, 500, (NUM_ENVS,), dtype=torch.int64, device=DEVICE) + + +@pytest.fixture() +def warp_env(scene, action_wp, episode_length_buf): + """Env with warp action manager (for experimental functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + # RNG state for events (seeded deterministically) + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env(scene, action_wp, episode_length_buf): + """Env with torch action manager (for stable functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerTorch(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def all_joints_cfg(): + return MockSceneEntityCfg("robot", list(range(NUM_JOINTS)), NUM_JOINTS, DEVICE) + + +# ============================================================================ +# Event parity tests: deterministic (zero-width range) warp vs stable +# ============================================================================ + + +class TestEventParity: + """Verify warp event functions produce the same result as stable torch equivalents. + + Since warp and stable use different RNG implementations, parity is tested using + deterministic (zero-width) ranges where randomness has no effect. Both must + produce ``default + 0`` (offset) or ``default * 1`` (scale), clamped to limits. + """ + + def test_reset_joints_by_offset_parity(self, warp_env, stable_env, art_data, all_joints_cfg): + """Zero-offset: both warp and stable should produce clamped defaults.""" + cfg = all_joints_cfg + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + + # Set known defaults + new_defaults = np.full((NUM_ENVS, NUM_JOINTS), 0.5, dtype=np.float32) + copy_np_to_wp(art_data.default_joint_pos, new_defaults) + + # Run warp version + warp_evt.reset_joints_by_offset( + warp_env, mask, position_range=(0.0, 0.0), velocity_range=(0.0, 0.0), asset_cfg=cfg + ) + wp.synchronize() + warp_pos = art_data.joint_pos.torch.clone() + warp_vel = art_data.joint_vel.torch.clone() + + # Run stable version (writes via write_joint_position_to_sim_index — which our mock + # does not implement, so we compute the expected result directly) + defaults_t = art_data.default_joint_pos.torch.clone() + limits_t = art_data.soft_joint_pos_limits.torch + vel_limits_t = art_data.soft_joint_vel_limits.torch + expected_pos = defaults_t.clamp(limits_t[..., 0], limits_t[..., 1]) + expected_vel = art_data.default_joint_vel.torch.clone().clamp(-vel_limits_t, vel_limits_t) + + assert_close(warp_pos, expected_pos) + assert_close(warp_vel, expected_vel) + + def test_reset_joints_by_scale_parity(self, warp_env, stable_env, art_data, all_joints_cfg): + """Scale=1.0: both warp and stable should produce clamped defaults.""" + cfg = all_joints_cfg + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + + # Set known defaults + new_defaults = np.full((NUM_ENVS, NUM_JOINTS), 0.25, dtype=np.float32) + copy_np_to_wp(art_data.default_joint_pos, new_defaults) + + # Run warp version + warp_evt.reset_joints_by_scale( + warp_env, mask, position_range=(1.0, 1.0), velocity_range=(1.0, 1.0), asset_cfg=cfg + ) + wp.synchronize() + warp_pos = art_data.joint_pos.torch.clone() + warp_vel = art_data.joint_vel.torch.clone() + + # Expected: default * 1.0, clamped to limits + defaults_t = art_data.default_joint_pos.torch.clone() + limits_t = art_data.soft_joint_pos_limits.torch + vel_limits_t = art_data.soft_joint_vel_limits.torch + expected_pos = defaults_t.clamp(limits_t[..., 0], limits_t[..., 1]) + expected_vel = art_data.default_joint_vel.torch.clone().clamp(-vel_limits_t, vel_limits_t) + + assert_close(warp_pos, expected_pos) + assert_close(warp_vel, expected_vel) + + +# ============================================================================ +# Event capture-mutate-replay tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +class TestEventCapturedDataMutation: + """Verify event functions are capture-safe and react to mutated input data.""" + + # -- reset_joints_by_offset ------------------------------------------------- + + def test_reset_joints_by_offset(self, warp_env, art_data, all_joints_cfg): + """With zero-width offset, result == defaults. Mutate defaults -> result tracks.""" + cfg = all_joints_cfg + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + + # Warm-up + warp_evt.reset_joints_by_offset( + warp_env, mask, position_range=(0.0, 0.0), velocity_range=(0.0, 0.0), asset_cfg=cfg + ) + + # Capture + with wp.ScopedCapture() as cap: + warp_evt.reset_joints_by_offset( + warp_env, mask, position_range=(0.0, 0.0), velocity_range=(0.0, 0.0), asset_cfg=cfg + ) + + # Mutate defaults in-place + new_defaults = np.full((NUM_ENVS, NUM_JOINTS), 0.5, dtype=np.float32) + copy_np_to_wp(art_data.default_joint_pos, new_defaults) + + # Replay + wp.capture_launch(cap.graph) + wp.synchronize() + + # With zero offset, joint_pos should equal new defaults (clamped to limits [-3.14, 3.14]) + result = art_data.joint_pos.torch + expected = torch.full((NUM_ENVS, NUM_JOINTS), 0.5, device=DEVICE) + assert_close(result, expected) + + # -- reset_joints_by_scale -------------------------------------------------- + + def test_reset_joints_by_scale(self, warp_env, art_data, all_joints_cfg): + """With scale=1.0, result == defaults. Mutate defaults -> result tracks.""" + cfg = all_joints_cfg + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + + warp_evt.reset_joints_by_scale( + warp_env, mask, position_range=(1.0, 1.0), velocity_range=(1.0, 1.0), asset_cfg=cfg + ) + with wp.ScopedCapture() as cap: + warp_evt.reset_joints_by_scale( + warp_env, mask, position_range=(1.0, 1.0), velocity_range=(1.0, 1.0), asset_cfg=cfg + ) + + new_defaults = np.full((NUM_ENVS, NUM_JOINTS), 0.25, dtype=np.float32) + copy_np_to_wp(art_data.default_joint_pos, new_defaults) + + wp.capture_launch(cap.graph) + wp.synchronize() + + result = art_data.joint_pos.torch + expected = torch.full((NUM_ENVS, NUM_JOINTS), 0.25, device=DEVICE) + assert_close(result, expected) + + # -- push_by_setting_velocity ----------------------------------------------- + + def test_push_by_setting_velocity(self, warp_env, art_data, all_joints_cfg): + """With zero-width velocity range, scratch == root_vel_w. Mutate root_vel_w -> scratch tracks.""" + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + zero_range = { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + } + + warp_evt.push_by_setting_velocity(warp_env, mask, velocity_range=zero_range) + with wp.ScopedCapture() as cap: + warp_evt.push_by_setting_velocity(warp_env, mask, velocity_range=zero_range) + + # Mutate root_vel_w + new_vel = np.tile([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], (NUM_ENVS, 1)).astype(np.float32) + copy_np_to_wp(art_data.root_vel_w, new_vel) + + wp.capture_launch(cap.graph) + wp.synchronize() + + scratch = wp.to_torch(warp_evt.push_by_setting_velocity._scratch_vel) + expected = torch.tensor([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], device=DEVICE).expand(NUM_ENVS, -1) + assert_close(scratch, expected) + + # -- apply_external_force_torque -------------------------------------------- + + def test_apply_external_force_torque(self, warp_env, art_data, all_joints_cfg): + """With zero-width ranges, forces/torques are zero. Non-zero ranges produce non-zero output.""" + mask = wp.array([True] * NUM_ENVS, dtype=wp.bool, device=DEVICE) + + # Zero-range: forces and torques should be zero + warp_evt.apply_external_force_torque(warp_env, mask, force_range=(0.0, 0.0), torque_range=(0.0, 0.0)) + with wp.ScopedCapture() as cap: + warp_evt.apply_external_force_torque(warp_env, mask, force_range=(0.0, 0.0), torque_range=(0.0, 0.0)) + wp.capture_launch(cap.graph) + wp.synchronize() + + forces = wp.to_torch(warp_evt.apply_external_force_torque._scratch_forces) + torques = wp.to_torch(warp_evt.apply_external_force_torque._scratch_torques) + assert_close(forces, torch.zeros_like(forces)) + assert_close(torques, torch.zeros_like(torques)) + + # -- reset_root_state_uniform ----------------------------------------------- + + # -- env_mask selectivity --------------------------------------------------- + + def test_reset_joints_mask_selectivity(self, warp_env, art_data, all_joints_cfg): + """Only masked envs are modified; unmasked envs retain their state.""" + cfg = all_joints_cfg + # Mask: only first half of envs + mask_np = np.array([i < NUM_ENVS // 2 for i in range(NUM_ENVS)]) + mask = wp.array(mask_np, dtype=wp.bool, device=DEVICE) + + # Set joint_pos to a known value + sentinel = np.full((NUM_ENVS, NUM_JOINTS), 999.0, dtype=np.float32) + copy_np_to_wp(art_data.joint_pos, sentinel) + + # Set defaults to 0 + copy_np_to_wp(art_data.default_joint_pos, np.zeros((NUM_ENVS, NUM_JOINTS), dtype=np.float32)) + + warp_evt.reset_joints_by_offset( + warp_env, mask, position_range=(0.0, 0.0), velocity_range=(0.0, 0.0), asset_cfg=cfg + ) + wp.synchronize() + + result = art_data.joint_pos.torch + # Masked envs: reset to 0 (defaults + 0 offset) + assert_close(result[: NUM_ENVS // 2], torch.zeros(NUM_ENVS // 2, NUM_JOINTS, device=DEVICE)) + # Unmasked envs: still 999.0 + assert_close(result[NUM_ENVS // 2 :], torch.full((NUM_ENVS // 2, NUM_JOINTS), 999.0, device=DEVICE)) diff --git a/source/isaaclab_experimental/test/envs/mdp/test_observations_warp_parity.py b/source/isaaclab_experimental/test/envs/mdp/test_observations_warp_parity.py new file mode 100644 index 000000000000..9f1eea23bf8d --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/test_observations_warp_parity.py @@ -0,0 +1,458 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Parity tests for warp-first observation MDP terms.""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +import warp as wp + +# Skip entire module if no CUDA device available +wp.init() +pytestmark = pytest.mark.skipif(not wp.is_cuda_available(), reason="CUDA device required") + +import isaaclab_experimental.envs.mdp.observations as warp_obs +from parity_helpers import ( + CMD_DIM, + DEVICE, + NUM_ACTIONS, + NUM_BODIES, + NUM_ENVS, + NUM_JOINTS, + MockActionManagerTorch, + MockActionManagerWarp, + MockArticulation, + MockArticulationData, + MockCommandManager, + MockCommandTerm, + MockContactSensor, + MockContactSensorData, + MockScene, + MockSceneEntityCfg, + assert_close, + mutate_art_data, + run_warp_obs, + run_warp_obs_captured, +) + +import isaaclab.envs.mdp.observations as stable_obs + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture(autouse=True) +def _clear_caches(): + yield + for fn in [warp_obs.generated_commands]: + for attr in list(vars(fn)): + if attr.startswith("_"): + delattr(fn, attr) + + +@pytest.fixture() +def art_data(): + return MockArticulationData(NUM_ENVS, NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def art_data_bodies(): + return MockArticulationData(num_bodies=NUM_BODIES) + + +@pytest.fixture() +def env_origins(): + rng = np.random.RandomState(77) + origins_np = rng.randn(NUM_ENVS, 3).astype(np.float32) + return wp.array(origins_np, dtype=wp.vec3f, device=DEVICE) + + +@pytest.fixture() +def contact_data(): + return MockContactSensorData() + + +@pytest.fixture() +def cmd_tensor(): + rng = np.random.RandomState(99) + return torch.tensor(rng.randn(NUM_ENVS, CMD_DIM).astype(np.float32), device=DEVICE) + + +@pytest.fixture() +def cmd_term(): + return MockCommandTerm() + + +@pytest.fixture() +def scene(art_data, env_origins): + return MockScene({"robot": MockArticulation(art_data)}, env_origins) + + +@pytest.fixture() +def scene_bodies(art_data_bodies, env_origins, contact_data): + art = MockArticulation(art_data_bodies, num_bodies=NUM_BODIES) + sensor = MockContactSensor(contact_data) + return MockScene({"robot": art}, env_origins, sensors={"contact_sensor": sensor}) + + +@pytest.fixture() +def action_wp(): + rng = np.random.RandomState(99) + a = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + b = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + return a, b # (action, prev_action) + + +@pytest.fixture() +def episode_length_buf(): + torch.manual_seed(55) + return torch.randint(0, 500, (NUM_ENVS,), dtype=torch.int64, device=DEVICE) + + +@pytest.fixture() +def warp_env(scene, action_wp, episode_length_buf): + """Env with warp action manager (for experimental functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env(scene, action_wp, episode_length_buf): + """Env with torch action manager (for stable functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerTorch(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def warp_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for new-terms observation tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for stable new-terms observation tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def all_joints_cfg(): + return MockSceneEntityCfg("robot", list(range(NUM_JOINTS)), NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def subset_cfg(): + return MockSceneEntityCfg("robot", [0, 2, 5, 8], NUM_JOINTS, DEVICE) + + +# ============================================================================ +# Observation parity tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +class TestObservationParity: + """Verify experimental observation Warp kernels match stable torch implementations.""" + + # -- Root state observations ------------------------------------------------ + + def test_base_pos_z(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.base_pos_z(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.base_pos_z, warp_env, (NUM_ENVS, 1), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.base_pos_z, warp_env, (NUM_ENVS, 1), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_base_lin_vel(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.base_lin_vel(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.base_lin_vel, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.base_lin_vel, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_base_ang_vel(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.base_ang_vel(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.base_ang_vel, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.base_ang_vel, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_projected_gravity(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.projected_gravity(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.projected_gravity, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.projected_gravity, warp_env, (NUM_ENVS, 3), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Joint observations (all joints) ---------------------------------------- + + def test_joint_pos_all(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.joint_pos(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.joint_pos, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.joint_pos, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_joint_vel_all(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.joint_vel(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.joint_vel, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.joint_vel, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Joint observations (subset) ------------------------------------------- + + def test_joint_pos_subset(self, warp_env, stable_env, subset_cfg): + cfg = subset_cfg + n_selected = len(cfg.joint_ids) + expected = stable_obs.joint_pos(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.joint_pos, warp_env, (NUM_ENVS, n_selected), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.joint_pos, warp_env, (NUM_ENVS, n_selected), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_joint_vel_subset(self, warp_env, stable_env, subset_cfg): + cfg = subset_cfg + n_selected = len(cfg.joint_ids) + expected = stable_obs.joint_vel(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.joint_vel, warp_env, (NUM_ENVS, n_selected), asset_cfg=cfg) + actual_cap = run_warp_obs_captured(warp_obs.joint_vel, warp_env, (NUM_ENVS, n_selected), asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Normalized joint position ---------------------------------------------- + + def test_joint_pos_limit_normalized(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_obs.joint_pos_limit_normalized(stable_env, asset_cfg=cfg) + actual = run_warp_obs(warp_obs.joint_pos_limit_normalized, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg) + actual_cap = run_warp_obs_captured( + warp_obs.joint_pos_limit_normalized, warp_env, (NUM_ENVS, NUM_JOINTS), asset_cfg=cfg + ) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Action observation ----------------------------------------------------- + + def test_last_action(self, warp_env, stable_env, action_wp): + # Stable last_action returns env.action_manager.action (torch tensor) + expected = stable_obs.last_action(stable_env) + actual = run_warp_obs(warp_obs.last_action, warp_env, (NUM_ENVS, NUM_ACTIONS)) + actual_cap = run_warp_obs_captured(warp_obs.last_action, warp_env, (NUM_ENVS, NUM_ACTIONS)) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + +# ============================================================================ +# Observation parity tests (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestObservationParityNewTerms: + """Verify observation Warp kernels for newly migrated terms match stable torch implementations.""" + + def test_generated_commands(self, warp_env_bodies, stable_env_bodies): + expected = stable_obs.generated_commands(stable_env_bodies, command_name="vel") + actual = run_warp_obs(warp_obs.generated_commands, warp_env_bodies, (NUM_ENVS, CMD_DIM), command_name="vel") + actual_cap = run_warp_obs_captured( + warp_obs.generated_commands, warp_env_bodies, (NUM_ENVS, CMD_DIM), command_name="vel" + ) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + +# ============================================================================ +# Capture-then-mutate-then-replay observation tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +def _mutate_art_data(art_data: MockArticulationData, warp_env, rng_seed: int = 200): + """Mutate every data array in-place so captured graphs see fresh values.""" + mutate_art_data(art_data, warp_env, rng_seed=rng_seed) + + +class TestCapturedDataMutationObservations: + """Capture a graph, mutate buffer data in-place, replay -- results must match stable on the *new* data. + + This verifies observation MDP functions are truly capture-safe. + """ + + def _capture_mutate_check_obs(self, warp_fn, stable_fn, warp_env, stable_env, art_data, shape, **kwargs): + out = wp.zeros(shape, dtype=wp.float32, device=DEVICE) + warp_fn(warp_env, out, **kwargs) # warm-up + with wp.ScopedCapture() as cap: + warp_fn(warp_env, out, **kwargs) + _mutate_art_data(art_data, warp_env) + wp.capture_launch(cap.graph) + assert_close(wp.to_torch(out).clone(), stable_fn(stable_env, **kwargs)) + + def test_base_pos_z(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.base_pos_z, + stable_obs.base_pos_z, + warp_env, + stable_env, + art_data, + (NUM_ENVS, 1), + asset_cfg=all_joints_cfg, + ) + + def test_base_lin_vel(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.base_lin_vel, + stable_obs.base_lin_vel, + warp_env, + stable_env, + art_data, + (NUM_ENVS, 3), + asset_cfg=all_joints_cfg, + ) + + def test_base_ang_vel(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.base_ang_vel, + stable_obs.base_ang_vel, + warp_env, + stable_env, + art_data, + (NUM_ENVS, 3), + asset_cfg=all_joints_cfg, + ) + + def test_projected_gravity(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.projected_gravity, + stable_obs.projected_gravity, + warp_env, + stable_env, + art_data, + (NUM_ENVS, 3), + asset_cfg=all_joints_cfg, + ) + + def test_joint_pos(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.joint_pos, + stable_obs.joint_pos, + warp_env, + stable_env, + art_data, + (NUM_ENVS, NUM_JOINTS), + asset_cfg=all_joints_cfg, + ) + + def test_joint_vel(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.joint_vel, + stable_obs.joint_vel, + warp_env, + stable_env, + art_data, + (NUM_ENVS, NUM_JOINTS), + asset_cfg=all_joints_cfg, + ) + + def test_joint_pos_limit_normalized(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_obs( + warp_obs.joint_pos_limit_normalized, + stable_obs.joint_pos_limit_normalized, + warp_env, + stable_env, + art_data, + (NUM_ENVS, NUM_JOINTS), + asset_cfg=all_joints_cfg, + ) + + def test_last_action(self, warp_env, stable_env, art_data): + self._capture_mutate_check_obs( + warp_obs.last_action, + stable_obs.last_action, + warp_env, + stable_env, + art_data, + (NUM_ENVS, NUM_ACTIONS), + ) + + +# ============================================================================ +# Capture-mutate-replay observation tests (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestCapturedDataMutationObservationsNewTerms: + """Capture graph, mutate buffer data, replay -- verify new-terms observation results match stable.""" + + def test_generated_commands(self, warp_env_bodies, stable_env_bodies, art_data_bodies, cmd_tensor): + """Mutate command tensor, replay captured graph, verify new commands are read.""" + out = wp.zeros((NUM_ENVS, CMD_DIM), dtype=wp.float32, device=DEVICE) + warp_obs.generated_commands(warp_env_bodies, out, command_name="vel") + with wp.ScopedCapture() as cap: + warp_obs.generated_commands(warp_env_bodies, out, command_name="vel") + # Mutate the command tensor in-place (zero-copy view picks it up) + cmd_tensor[:] = torch.randn_like(cmd_tensor) + wp.capture_launch(cap.graph) + expected = stable_obs.generated_commands(stable_env_bodies, command_name="vel") + assert_close(wp.to_torch(out).clone(), expected) diff --git a/source/isaaclab_experimental/test/envs/mdp/test_rewards_warp_parity.py b/source/isaaclab_experimental/test/envs/mdp/test_rewards_warp_parity.py new file mode 100644 index 000000000000..69cabcd14a36 --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/test_rewards_warp_parity.py @@ -0,0 +1,567 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Parity tests for warp-first reward MDP terms.""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +import warp as wp + +# Skip entire module if no CUDA device available +wp.init() +pytestmark = pytest.mark.skipif(not wp.is_cuda_available(), reason="CUDA device required") + +import isaaclab_experimental.envs.mdp.rewards as warp_rew +from parity_helpers import ( + BODY_IDS, + CMD_DIM, + DEVICE, + NUM_ACTIONS, + NUM_BODIES, + NUM_ENVS, + NUM_JOINTS, + MockActionManagerTorch, + MockActionManagerWarp, + MockArticulation, + MockArticulationData, + MockBodyCfg, + MockCommandManager, + MockCommandTerm, + MockContactSensor, + MockContactSensorData, + MockScene, + MockSceneEntityCfg, + MockSensorCfg, + MockTerminationManager, + assert_close, + mutate_art_data, + mutate_body_data, + run_warp_rew, + run_warp_rew_captured, +) + +import isaaclab.envs.mdp.rewards as stable_rew + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture(autouse=True) +def _clear_caches(): + yield + for fn in [warp_rew.track_lin_vel_xy_exp, warp_rew.track_ang_vel_z_exp, warp_rew.undesired_contacts]: + for attr in list(vars(fn)): + if attr.startswith("_"): + delattr(fn, attr) + + +@pytest.fixture() +def art_data(): + return MockArticulationData(NUM_ENVS, NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def art_data_bodies(): + return MockArticulationData(num_bodies=NUM_BODIES) + + +@pytest.fixture() +def env_origins(): + rng = np.random.RandomState(77) + origins_np = rng.randn(NUM_ENVS, 3).astype(np.float32) + return wp.array(origins_np, dtype=wp.vec3f, device=DEVICE) + + +@pytest.fixture() +def contact_data(): + return MockContactSensorData() + + +@pytest.fixture() +def cmd_tensor(): + rng = np.random.RandomState(99) + return torch.tensor(rng.randn(NUM_ENVS, CMD_DIM).astype(np.float32), device=DEVICE) + + +@pytest.fixture() +def cmd_term(): + return MockCommandTerm() + + +@pytest.fixture() +def scene(art_data, env_origins): + return MockScene({"robot": MockArticulation(art_data)}, env_origins) + + +@pytest.fixture() +def scene_bodies(art_data_bodies, env_origins, contact_data): + art = MockArticulation(art_data_bodies, num_bodies=NUM_BODIES) + sensor = MockContactSensor(contact_data) + return MockScene({"robot": art}, env_origins, sensors={"contact_sensor": sensor}) + + +@pytest.fixture() +def action_wp(): + rng = np.random.RandomState(99) + a = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + b = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + return a, b + + +@pytest.fixture() +def episode_length_buf(): + torch.manual_seed(55) + return torch.randint(0, 500, (NUM_ENVS,), dtype=torch.int64, device=DEVICE) + + +@pytest.fixture() +def term_mgr(): + return MockTerminationManager() + + +@pytest.fixture() +def warp_env(scene, action_wp, episode_length_buf, term_mgr): + """Env with warp action manager (for experimental functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.termination_manager = term_mgr + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env(scene, action_wp, episode_length_buf, term_mgr): + """Env with torch action manager (for stable functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerTorch(action_wp[0], action_wp[1]) + env.termination_manager = term_mgr + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def warp_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for new-terms reward tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for stable new-terms reward tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def all_joints_cfg(): + return MockSceneEntityCfg("robot", list(range(NUM_JOINTS)), NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def body_cfg(): + return MockBodyCfg("robot", BODY_IDS) + + +@pytest.fixture() +def sensor_cfg(): + return MockSensorCfg("contact_sensor", BODY_IDS) + + +# ============================================================================ +# Reward parity tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +class TestRewardParity: + """Verify experimental reward Warp kernels match stable torch implementations.""" + + # -- General rewards -------------------------------------------------------- + + def test_is_alive(self, warp_env, stable_env, term_mgr): + # Set some envs as terminated so the reward is non-trivial + term_mgr.terminated[::2] = True + expected = stable_rew.is_alive(stable_env) + actual = run_warp_rew(warp_rew.is_alive, warp_env) + actual_cap = run_warp_rew_captured(warp_rew.is_alive, warp_env) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_is_terminated(self, warp_env, stable_env, term_mgr): + term_mgr.terminated[::3] = True + expected = stable_rew.is_terminated(stable_env) + actual = run_warp_rew(warp_rew.is_terminated, warp_env) + actual_cap = run_warp_rew_captured(warp_rew.is_terminated, warp_env) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Root penalties --------------------------------------------------------- + + def test_lin_vel_z_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.lin_vel_z_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.lin_vel_z_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.lin_vel_z_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_ang_vel_xy_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.ang_vel_xy_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.ang_vel_xy_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.ang_vel_xy_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_flat_orientation_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.flat_orientation_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.flat_orientation_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.flat_orientation_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Joint L2 penalties (masked) -------------------------------------------- + + def test_joint_vel_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_vel_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_vel_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_vel_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_joint_acc_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_acc_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_acc_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_acc_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_joint_torques_l2(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_torques_l2(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_torques_l2, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_torques_l2, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Joint L1 penalties (masked) -------------------------------------------- + + def test_joint_vel_l1(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_vel_l1(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_vel_l1, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_vel_l1, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Action penalties ------------------------------------------------------- + + def test_action_l2(self, warp_env, stable_env): + expected = stable_rew.action_l2(stable_env) + actual = run_warp_rew(warp_rew.action_l2, warp_env) + actual_cap = run_warp_rew_captured(warp_rew.action_l2, warp_env) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_action_rate_l2(self, warp_env, stable_env): + expected = stable_rew.action_rate_l2(stable_env) + actual = run_warp_rew(warp_rew.action_rate_l2, warp_env) + actual_cap = run_warp_rew_captured(warp_rew.action_rate_l2, warp_env) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Limit penalties -------------------------------------------------------- + + def test_joint_pos_limits(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_pos_limits(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_pos_limits, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_pos_limits, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + # -- Additional penalties --------------------------------------------------- + + def test_joint_deviation_l1(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + expected = stable_rew.joint_deviation_l1(stable_env, asset_cfg=cfg) + actual = run_warp_rew(warp_rew.joint_deviation_l1, warp_env, asset_cfg=cfg) + actual_cap = run_warp_rew_captured(warp_rew.joint_deviation_l1, warp_env, asset_cfg=cfg) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + +# ============================================================================ +# New reward parity tests (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestNewRewardParity: + """Verify newly migrated reward Warp kernels match stable torch implementations.""" + + def test_track_lin_vel_xy_exp(self, warp_env_bodies, stable_env_bodies, body_cfg): + cfg = MockBodyCfg("robot") + cfg.joint_ids = list(range(NUM_JOINTS)) # needed for stable + std = 0.25 + expected = stable_rew.track_lin_vel_xy_exp(stable_env_bodies, std=std, command_name="vel", asset_cfg=cfg) + actual = run_warp_rew( + warp_rew.track_lin_vel_xy_exp, warp_env_bodies, std=std, command_name="vel", asset_cfg=cfg + ) + actual_cap = run_warp_rew_captured( + warp_rew.track_lin_vel_xy_exp, warp_env_bodies, std=std, command_name="vel", asset_cfg=cfg + ) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_track_ang_vel_z_exp(self, warp_env_bodies, stable_env_bodies, body_cfg): + cfg = MockBodyCfg("robot") + cfg.joint_ids = list(range(NUM_JOINTS)) + std = 0.25 + expected = stable_rew.track_ang_vel_z_exp(stable_env_bodies, std=std, command_name="vel", asset_cfg=cfg) + actual = run_warp_rew(warp_rew.track_ang_vel_z_exp, warp_env_bodies, std=std, command_name="vel", asset_cfg=cfg) + actual_cap = run_warp_rew_captured( + warp_rew.track_ang_vel_z_exp, warp_env_bodies, std=std, command_name="vel", asset_cfg=cfg + ) + assert_close(actual, expected) + assert_close(actual_cap, expected) + + def test_undesired_contacts(self, warp_env_bodies, stable_env_bodies, sensor_cfg): + threshold = 1.0 + expected = stable_rew.undesired_contacts(stable_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg) + actual = run_warp_rew(warp_rew.undesired_contacts, warp_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg) + actual_cap = run_warp_rew_captured( + warp_rew.undesired_contacts, warp_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg + ) + assert_close(actual, expected.float()) + assert_close(actual_cap, expected.float()) + + +# ============================================================================ +# Capture-then-mutate-then-replay reward tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +def _mutate_art_data(art_data: MockArticulationData, warp_env, rng_seed: int = 200): + """Mutate every data array in-place so captured graphs see fresh values.""" + mutate_art_data(art_data, warp_env, rng_seed=rng_seed) + + +def _mutate_body_data(art_data: MockArticulationData, rng_seed=200): + """Mutate body-level and root-level data in-place so captured graphs see fresh values.""" + mutate_body_data(art_data, rng_seed=rng_seed) + + +class TestCapturedDataMutationRewards: + """Capture a graph, mutate buffer data in-place, replay -- results must match stable on the *new* data. + + This verifies reward MDP functions are truly capture-safe. + """ + + def _capture_mutate_check_rew(self, warp_fn, stable_fn, warp_env, stable_env, art_data, **kwargs): + out = wp.zeros((NUM_ENVS,), dtype=wp.float32, device=DEVICE) + warp_fn(warp_env, out, **kwargs) + with wp.ScopedCapture() as cap: + warp_fn(warp_env, out, **kwargs) + _mutate_art_data(art_data, warp_env) + wp.capture_launch(cap.graph) + assert_close(wp.to_torch(out).clone(), stable_fn(stable_env, **kwargs)) + + def test_lin_vel_z_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.lin_vel_z_l2, + stable_rew.lin_vel_z_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_ang_vel_xy_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.ang_vel_xy_l2, + stable_rew.ang_vel_xy_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_flat_orientation_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.flat_orientation_l2, + stable_rew.flat_orientation_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_joint_vel_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.joint_vel_l2, + stable_rew.joint_vel_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_joint_acc_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.joint_acc_l2, + stable_rew.joint_acc_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_joint_torques_l2(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.joint_torques_l2, + stable_rew.joint_torques_l2, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_action_l2(self, warp_env, stable_env, art_data): + self._capture_mutate_check_rew( + warp_rew.action_l2, + stable_rew.action_l2, + warp_env, + stable_env, + art_data, + ) + + def test_action_rate_l2(self, warp_env, stable_env, art_data): + self._capture_mutate_check_rew( + warp_rew.action_rate_l2, + stable_rew.action_rate_l2, + warp_env, + stable_env, + art_data, + ) + + def test_joint_pos_limits(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.joint_pos_limits, + stable_rew.joint_pos_limits, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + def test_joint_deviation_l1(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_rew( + warp_rew.joint_deviation_l1, + stable_rew.joint_deviation_l1, + warp_env, + stable_env, + art_data, + asset_cfg=all_joints_cfg, + ) + + +# ============================================================================ +# Capture-mutate-replay reward tests for new terms (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestCapturedDataMutationRewardsNewTerms: + """Capture graph, mutate buffer data, replay -- verify new-terms reward results match stable.""" + + def _capture_mutate_check_rew(self, warp_fn, stable_fn, warp_env, stable_env, art_data, **kwargs): + out = wp.zeros((NUM_ENVS,), dtype=wp.float32, device=DEVICE) + warp_fn(warp_env, out, **kwargs) + with wp.ScopedCapture() as cap: + warp_fn(warp_env, out, **kwargs) + _mutate_body_data(art_data) + wp.capture_launch(cap.graph) + expected = stable_fn(stable_env, **kwargs) + assert_close(wp.to_torch(out).clone(), expected) + + def test_track_lin_vel_xy_exp(self, warp_env_bodies, stable_env_bodies, art_data_bodies): + cfg = MockBodyCfg("robot") + cfg.joint_ids = list(range(NUM_JOINTS)) + self._capture_mutate_check_rew( + warp_rew.track_lin_vel_xy_exp, + stable_rew.track_lin_vel_xy_exp, + warp_env_bodies, + stable_env_bodies, + art_data_bodies, + std=0.25, + command_name="vel", + asset_cfg=cfg, + ) + + def test_track_ang_vel_z_exp(self, warp_env_bodies, stable_env_bodies, art_data_bodies): + cfg = MockBodyCfg("robot") + cfg.joint_ids = list(range(NUM_JOINTS)) + self._capture_mutate_check_rew( + warp_rew.track_ang_vel_z_exp, + stable_rew.track_ang_vel_z_exp, + warp_env_bodies, + stable_env_bodies, + art_data_bodies, + std=0.25, + command_name="vel", + asset_cfg=cfg, + ) diff --git a/source/isaaclab_experimental/test/envs/mdp/test_terminations_warp_parity.py b/source/isaaclab_experimental/test/envs/mdp/test_terminations_warp_parity.py new file mode 100644 index 000000000000..d82339e01228 --- /dev/null +++ b/source/isaaclab_experimental/test/envs/mdp/test_terminations_warp_parity.py @@ -0,0 +1,349 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Parity tests for warp-first termination MDP terms.""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +import warp as wp + +# Skip entire module if no CUDA device available +wp.init() +pytestmark = pytest.mark.skipif(not wp.is_cuda_available(), reason="CUDA device required") + +import isaaclab_experimental.envs.mdp.terminations as warp_term +from parity_helpers import ( + BODY_IDS, + CMD_DIM, + DEVICE, + NUM_ACTIONS, + NUM_BODIES, + NUM_ENVS, + NUM_JOINTS, + MockActionManagerTorch, + MockActionManagerWarp, + MockArticulation, + MockArticulationData, + MockCommandManager, + MockCommandTerm, + MockContactSensor, + MockContactSensorData, + MockScene, + MockSceneEntityCfg, + MockSensorCfg, + MockTerminationManager, + assert_equal, + mutate_art_data, + mutate_body_data, + run_warp_term, + run_warp_term_captured, +) + +import isaaclab.envs.mdp.terminations as stable_term + +# ============================================================================ +# Fixtures +# ============================================================================ + + +@pytest.fixture(autouse=True) +def _clear_caches(): + yield + for fn in [warp_term.illegal_contact]: + for attr in list(vars(fn)): + if attr.startswith("_"): + delattr(fn, attr) + + +@pytest.fixture() +def art_data(): + return MockArticulationData(NUM_ENVS, NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def art_data_bodies(): + return MockArticulationData(num_bodies=NUM_BODIES) + + +@pytest.fixture() +def env_origins(): + rng = np.random.RandomState(77) + origins_np = rng.randn(NUM_ENVS, 3).astype(np.float32) + return wp.array(origins_np, dtype=wp.vec3f, device=DEVICE) + + +@pytest.fixture() +def contact_data(): + return MockContactSensorData() + + +@pytest.fixture() +def cmd_tensor(): + rng = np.random.RandomState(99) + return torch.tensor(rng.randn(NUM_ENVS, CMD_DIM).astype(np.float32), device=DEVICE) + + +@pytest.fixture() +def cmd_term(): + return MockCommandTerm() + + +@pytest.fixture() +def scene(art_data, env_origins): + return MockScene({"robot": MockArticulation(art_data)}, env_origins) + + +@pytest.fixture() +def scene_bodies(art_data_bodies, env_origins, contact_data): + art = MockArticulation(art_data_bodies, num_bodies=NUM_BODIES) + sensor = MockContactSensor(contact_data) + return MockScene({"robot": art}, env_origins, sensors={"contact_sensor": sensor}) + + +@pytest.fixture() +def action_wp(): + rng = np.random.RandomState(99) + a = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + b = wp.array(rng.randn(NUM_ENVS, NUM_ACTIONS).astype(np.float32), device=DEVICE) + return a, b + + +@pytest.fixture() +def episode_length_buf(): + torch.manual_seed(55) + return torch.randint(0, 500, (NUM_ENVS,), dtype=torch.int64, device=DEVICE) + + +@pytest.fixture() +def warp_env(scene, action_wp, episode_length_buf): + """Env with warp action manager (for experimental functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env(scene, action_wp, episode_length_buf): + """Env with torch action manager (for stable functions).""" + + class _Env: + pass + + env = _Env() + env.scene = scene + env.action_manager = MockActionManagerTorch(action_wp[0], action_wp[1]) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env.step_dt = 0.02 + env.max_episode_length_s = 10.0 + return env + + +@pytest.fixture() +def warp_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for new-terms termination tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env._episode_length_buf_wp = wp.from_torch(episode_length_buf) + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + env.rng_state_wp = wp.array(np.arange(NUM_ENVS, dtype=np.uint32) + 42, device=DEVICE) + return env + + +@pytest.fixture() +def stable_env_bodies(scene_bodies, action_wp, episode_length_buf, cmd_tensor, cmd_term): + """Env with body-level data and command manager (for stable new-terms termination tests).""" + + class _Env: + pass + + env = _Env() + env.scene = scene_bodies + env.action_manager = MockActionManagerWarp(action_wp[0], action_wp[1]) + env.command_manager = MockCommandManager(cmd_tensor, cmd_term) + env.num_envs = NUM_ENVS + env.device = DEVICE + env.episode_length_buf = episode_length_buf + env._episode_length_buf_wp = wp.from_torch(episode_length_buf) + env.step_dt = 0.02 + env.max_episode_length = 500 + env.max_episode_length_s = 10.0 + # stable termination_manager needed for time_out + env.termination_manager = MockTerminationManager() + return env + + +@pytest.fixture() +def all_joints_cfg(): + return MockSceneEntityCfg("robot", list(range(NUM_JOINTS)), NUM_JOINTS, DEVICE) + + +@pytest.fixture() +def sensor_cfg(): + return MockSensorCfg("contact_sensor", BODY_IDS) + + +# ============================================================================ +# Termination parity tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +class TestTerminationParity: + """Verify experimental termination Warp kernels match stable torch implementations.""" + + def test_root_height_below_minimum(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + min_h = 0.5 + expected = stable_term.root_height_below_minimum(stable_env, minimum_height=min_h, asset_cfg=cfg) + actual = run_warp_term(warp_term.root_height_below_minimum, warp_env, minimum_height=min_h, asset_cfg=cfg) + actual_cap = run_warp_term_captured( + warp_term.root_height_below_minimum, warp_env, minimum_height=min_h, asset_cfg=cfg + ) + assert_equal(actual, expected) + assert_equal(actual_cap, expected) + + def test_joint_pos_out_of_manual_limit(self, warp_env, stable_env, all_joints_cfg): + cfg = all_joints_cfg + bounds = (-1.0, 1.0) + expected = stable_term.joint_pos_out_of_manual_limit(stable_env, bounds=bounds, asset_cfg=cfg) + actual = run_warp_term(warp_term.joint_pos_out_of_manual_limit, warp_env, bounds=bounds, asset_cfg=cfg) + actual_cap = run_warp_term_captured( + warp_term.joint_pos_out_of_manual_limit, warp_env, bounds=bounds, asset_cfg=cfg + ) + assert_equal(actual, expected) + assert_equal(actual_cap, expected) + + +# ============================================================================ +# Termination parity tests (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestTerminationParityNewTerms: + """Verify termination Warp kernels for newly migrated terms match stable torch implementations.""" + + def test_time_out(self, warp_env_bodies, stable_env_bodies): + expected = stable_term.time_out(stable_env_bodies) + actual = run_warp_term(warp_term.time_out, warp_env_bodies) + actual_cap = run_warp_term_captured(warp_term.time_out, warp_env_bodies) + assert_equal(actual, expected) + assert_equal(actual_cap, expected) + + def test_illegal_contact(self, warp_env_bodies, stable_env_bodies, sensor_cfg): + threshold = 1.0 + expected = stable_term.illegal_contact(stable_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg) + actual = run_warp_term(warp_term.illegal_contact, warp_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg) + actual_cap = run_warp_term_captured( + warp_term.illegal_contact, warp_env_bodies, threshold=threshold, sensor_cfg=sensor_cfg + ) + assert_equal(actual, expected) + assert_equal(actual_cap, expected) + + +# ============================================================================ +# Capture-then-mutate-then-replay termination tests (from test_mdp_warp_parity.py) +# ============================================================================ + + +def _mutate_art_data(art_data: MockArticulationData, warp_env, rng_seed: int = 200): + """Mutate every data array in-place so captured graphs see fresh values.""" + mutate_art_data(art_data, warp_env, rng_seed=rng_seed) + + +def _mutate_body_data(art_data: MockArticulationData, rng_seed=200): + """Mutate body-level and root-level data in-place so captured graphs see fresh values.""" + mutate_body_data(art_data, rng_seed=rng_seed) + + +class TestCapturedDataMutationTerminations: + """Capture a graph, mutate buffer data in-place, replay -- results must match stable on the *new* data. + + This verifies termination MDP functions are truly capture-safe. + """ + + def _capture_mutate_check_term(self, warp_fn, stable_fn, warp_env, stable_env, art_data, **kwargs): + out = wp.zeros((NUM_ENVS,), dtype=wp.bool, device=DEVICE) + warp_fn(warp_env, out, **kwargs) + with wp.ScopedCapture() as cap: + warp_fn(warp_env, out, **kwargs) + _mutate_art_data(art_data, warp_env) + wp.capture_launch(cap.graph) + assert_equal(wp.to_torch(out).clone(), stable_fn(stable_env, **kwargs)) + + def test_root_height_below_minimum(self, warp_env, stable_env, art_data, all_joints_cfg): + self._capture_mutate_check_term( + warp_term.root_height_below_minimum, + stable_term.root_height_below_minimum, + warp_env, + stable_env, + art_data, + minimum_height=0.5, + asset_cfg=all_joints_cfg, + ) + + def test_joint_pos_out_of_manual_limit(self, warp_env, stable_env, art_data, all_joints_cfg): + # joint_pos_out_of_manual_limit uses a 2D kernel that only writes True + # (never clears to False), so the output must be zeroed before each call. + # We include the zeroing inside the captured graph. + bounds = (-1.0, 1.0) + cfg = all_joints_cfg + out = wp.zeros((NUM_ENVS,), dtype=wp.bool, device=DEVICE) + # warm-up + out.zero_() + warp_term.joint_pos_out_of_manual_limit(warp_env, out, bounds=bounds, asset_cfg=cfg) + # capture (including the zero) + with wp.ScopedCapture() as cap: + out.zero_() + warp_term.joint_pos_out_of_manual_limit(warp_env, out, bounds=bounds, asset_cfg=cfg) + _mutate_art_data(art_data, warp_env) + wp.capture_launch(cap.graph) + expected = stable_term.joint_pos_out_of_manual_limit(stable_env, bounds=bounds, asset_cfg=cfg) + assert_equal(wp.to_torch(out).clone(), expected) + + +# ============================================================================ +# Capture-mutate-replay termination tests for new terms (from test_mdp_warp_parity_new_terms.py) +# ============================================================================ + + +class TestCapturedDataMutationTerminationsNewTerms: + """Capture graph, mutate buffer data, replay -- verify new-terms termination results match stable.""" + + def test_time_out(self, warp_env_bodies, stable_env_bodies, art_data_bodies): + out = wp.zeros((NUM_ENVS,), dtype=wp.bool, device=DEVICE) + warp_term.time_out(warp_env_bodies, out) + with wp.ScopedCapture() as cap: + warp_term.time_out(warp_env_bodies, out) + # Mutate episode length in-place + warp_env_bodies.episode_length_buf[:] = torch.randint(0, 600, (NUM_ENVS,), dtype=torch.int64, device=DEVICE) + wp.capture_launch(cap.graph) + expected = stable_term.time_out(stable_env_bodies) + assert_equal(wp.to_torch(out).clone(), expected) diff --git a/source/isaaclab_experimental/test/utils/test_manager_call_switch.py b/source/isaaclab_experimental/test/utils/test_manager_call_switch.py new file mode 100644 index 000000000000..b4745939eaa0 --- /dev/null +++ b/source/isaaclab_experimental/test/utils/test_manager_call_switch.py @@ -0,0 +1,475 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for ManagerCallSwitch config loading, mode resolution, and dispatch.""" + +from __future__ import annotations + +import json +import os +import unittest + +import warp as wp +from isaaclab_experimental.utils.manager_call_switch import ManagerCallMode, ManagerCallSwitch + + +@wp.kernel +def _add_one(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)): + i = wp.tid() + b[i] = a[i] + 1.0 + + +class TestManagerCallMode(unittest.TestCase): + """Tests for the ManagerCallMode enum.""" + + def test_enum_values(self): + self.assertEqual(ManagerCallMode.STABLE, 0) + self.assertEqual(ManagerCallMode.WARP_NOT_CAPTURED, 1) + self.assertEqual(ManagerCallMode.WARP_CAPTURED, 2) + + def test_ordering(self): + self.assertLess(ManagerCallMode.STABLE, ManagerCallMode.WARP_NOT_CAPTURED) + self.assertLess(ManagerCallMode.WARP_NOT_CAPTURED, ManagerCallMode.WARP_CAPTURED) + + +# ====================================================================== +# Config loading +# ====================================================================== + + +class TestConfigLoading(unittest.TestCase): + """Tests for ManagerCallSwitch config parsing from dict, JSON, env var, and None.""" + + def test_none_uses_default(self): + switch = ManagerCallSwitch(cfg_source=None) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_CAPTURED) + + def test_dict_config(self): + switch = ManagerCallSwitch(cfg_source={"default": 1}) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_NOT_CAPTURED) + + def test_dict_per_manager_override(self): + switch = ManagerCallSwitch(cfg_source={"default": 2, "RewardManager": 0}) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.STABLE) + self.assertEqual(switch.get_mode_for_manager("ActionManager"), ManagerCallMode.WARP_CAPTURED) + + def test_dict_without_default_key(self): + """A dict missing 'default' should inherit from DEFAULT_CONFIG.""" + switch = ManagerCallSwitch(cfg_source={"RewardManager": 0}) + # default should be 2 (from DEFAULT_CONFIG) + self.assertEqual(switch.get_mode_for_manager("ActionManager"), ManagerCallMode.WARP_CAPTURED) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.STABLE) + + def test_json_string_config(self): + cfg_str = json.dumps({"default": 1, "ObservationManager": 0}) + switch = ManagerCallSwitch(cfg_source=cfg_str) + self.assertEqual(switch.get_mode_for_manager("ObservationManager"), ManagerCallMode.STABLE) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_NOT_CAPTURED) + + def test_empty_string_uses_default(self): + switch = ManagerCallSwitch(cfg_source="") + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_CAPTURED) + + def test_env_var_fallback(self): + """When cfg_source is None, should read from MANAGER_CALL_CONFIG env var.""" + old = os.environ.get(ManagerCallSwitch.ENV_VAR) + try: + os.environ[ManagerCallSwitch.ENV_VAR] = json.dumps({"default": 0}) + switch = ManagerCallSwitch(cfg_source=None) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.STABLE) + finally: + if old is None: + os.environ.pop(ManagerCallSwitch.ENV_VAR, None) + else: + os.environ[ManagerCallSwitch.ENV_VAR] = old + + def test_invalid_json_raises(self): + with self.assertRaises(json.JSONDecodeError): + ManagerCallSwitch(cfg_source="not valid json") + + def test_invalid_mode_value_raises(self): + with self.assertRaises(ValueError): + ManagerCallSwitch(cfg_source={"default": 99}) + + def test_non_int_mode_raises(self): + with self.assertRaises(TypeError): + ManagerCallSwitch(cfg_source={"default": "fast"}) + + def test_invalid_cfg_type_raises(self): + with self.assertRaises(TypeError): + ManagerCallSwitch(cfg_source=42) + + +# ====================================================================== +# Mode resolution and capping +# ====================================================================== + + +class TestModeResolution(unittest.TestCase): + """Tests for mode resolution, MAX_MODE_OVERRIDES, and dynamic capturability.""" + + def test_max_mode_override_caps_default(self): + """Scene is capped to WARP_NOT_CAPTURED by MAX_MODE_OVERRIDES.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + # Scene has a static cap of WARP_NOT_CAPTURED + self.assertEqual( + switch.get_mode_for_manager("Scene"), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + # Other managers should not be capped + self.assertEqual( + switch.get_mode_for_manager("RewardManager"), + ManagerCallMode.WARP_CAPTURED, + ) + + def test_max_modes_kwarg(self): + """max_modes kwarg should cap managers beyond MAX_MODE_OVERRIDES.""" + switch = ManagerCallSwitch( + cfg_source={"default": 2}, + max_modes={"RewardManager": ManagerCallMode.WARP_NOT_CAPTURED}, + ) + self.assertEqual( + switch.get_mode_for_manager("RewardManager"), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + + def test_register_manager_capturability_downgrades(self): + """register_manager_capturability(False) should cap a manager to WARP_NOT_CAPTURED.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_CAPTURED) + + switch.register_manager_capturability("RewardManager", capturable=False) + self.assertEqual( + switch.get_mode_for_manager("RewardManager"), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + + def test_register_capturability_true_is_noop(self): + """register_manager_capturability(True) should not change anything.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + switch.register_manager_capturability("RewardManager", capturable=True) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.WARP_CAPTURED) + + def test_register_capturability_does_not_upgrade(self): + """If a manager is already capped to NOT_CAPTURED, registering capturable=False again shouldn't change it.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + switch.register_manager_capturability("RewardManager", capturable=False) + switch.register_manager_capturability("RewardManager", capturable=False) + self.assertEqual( + switch.get_mode_for_manager("RewardManager"), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + + def test_capturability_interacts_with_static_cap(self): + """Dynamic capturability should respect existing static caps.""" + switch = ManagerCallSwitch( + cfg_source={"default": 2}, + max_modes={"RewardManager": ManagerCallMode.WARP_NOT_CAPTURED}, + ) + # Already capped, register_capturability(False) should be harmless + switch.register_manager_capturability("RewardManager", capturable=False) + self.assertEqual( + switch.get_mode_for_manager("RewardManager"), + ManagerCallMode.WARP_NOT_CAPTURED, + ) + + def test_mode_0_not_affected_by_cap(self): + """A manager explicitly set to STABLE (0) should stay at 0 even with cap at 1.""" + switch = ManagerCallSwitch(cfg_source={"default": 2, "RewardManager": 0}) + switch.register_manager_capturability("RewardManager", capturable=False) + self.assertEqual(switch.get_mode_for_manager("RewardManager"), ManagerCallMode.STABLE) + + +# ====================================================================== +# Stage dispatch +# ====================================================================== + + +class TestStageDispatch(unittest.TestCase): + """Tests for call_stage routing through stable / warp-eager / warp-captured paths.""" + + def test_stable_mode_calls_stable_fn(self): + switch = ManagerCallSwitch(cfg_source={"default": 0}) + called = {"stable": False, "warp": False} + + def stable_fn(): + called["stable"] = True + return "stable_result" + + def warp_fn(): + called["warp"] = True + return "warp_result" + + result = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn}, + stable_call={"fn": stable_fn}, + ) + self.assertTrue(called["stable"]) + self.assertFalse(called["warp"]) + self.assertEqual(result, "stable_result") + + def test_stable_mode_without_stable_call_raises(self): + switch = ManagerCallSwitch(cfg_source={"default": 0}) + with self.assertRaises(ValueError): + switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": lambda: None}, + stable_call=None, + ) + + def test_warp_eager_mode_calls_warp_fn(self): + switch = ManagerCallSwitch(cfg_source={"default": 1}) + called = {"stable": False, "warp": False} + + def stable_fn(): + called["stable"] = True + return "stable_result" + + def warp_fn(): + called["warp"] = True + return "warp_result" + + result = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn}, + stable_call={"fn": stable_fn}, + ) + self.assertFalse(called["stable"]) + self.assertTrue(called["warp"]) + self.assertEqual(result, "warp_result") + + def test_output_transform(self): + """The 'output' key in call spec should transform the return value.""" + switch = ManagerCallSwitch(cfg_source={"default": 1}) + + def warp_fn(): + return [1, 2, 3] + + result = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn, "output": len}, + ) + self.assertEqual(result, 3) + + def test_args_and_kwargs_forwarded(self): + """call_stage should forward args and kwargs from the call spec.""" + switch = ManagerCallSwitch(cfg_source={"default": 1}) + received = {} + + def warp_fn(a, b, key=None): + received["a"] = a + received["b"] = b + received["key"] = key + + switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn, "args": (1, 2), "kwargs": {"key": "val"}}, + ) + self.assertEqual(received, {"a": 1, "b": 2, "key": "val"}) + + +class TestStageDispatchCaptured(unittest.TestCase): + """Tests for WARP_CAPTURED mode (requires GPU).""" + + def setUp(self): + self.device = "cuda:0" + + def test_captured_mode_produces_correct_output(self): + """WARP_CAPTURED should capture and replay a warp kernel correctly.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + src = wp.full(4, value=5.0, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def warp_fn(): + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + # First call: warm-up + capture + result = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn}, + ) + self.assertAlmostEqual(result.numpy()[0], 6.0, places=5) + + # Replay + wp.copy(src, wp.full(4, value=10.0, dtype=wp.float32, device=self.device)) + result2 = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": warp_fn}, + ) + self.assertIs(result, result2, "Replay must return same reference") + self.assertAlmostEqual(result2.numpy()[0], 11.0, places=5) + + def test_captured_warmup_call_count(self): + """WARP_CAPTURED first call should invoke fn exactly 2 times (warm-up + capture).""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + src = wp.zeros(4, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + call_count = [0] + + def warp_fn(): + call_count[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + # First call_stage: warm-up (1) + capture (2) + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2, "First captured call should invoke fn twice (warm-up + capture)") + + # Second call_stage: replay only — no new fn invocation + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2, "Replay should not invoke fn again") + + # Third call_stage: still replay + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2) + + def test_captured_warmup_handles_hasattr_guard(self): + """Warm-up should flush first-call allocations so capture doesn't record them. + + This simulates the real-world pattern where MDP terms allocate scratch + buffers on first call using hasattr guards. + """ + switch = ManagerCallSwitch(cfg_source={"default": 2}) + src = wp.ones(8, dtype=wp.float32, device=self.device) + holder = {} + + def fn_with_guard(): + if "buf" not in holder: + # First-call allocation — must happen during warm-up, not capture + holder["buf"] = wp.zeros(8, dtype=wp.float32, device=self.device) + wp.launch(_add_one, dim=8, inputs=[src, holder["buf"]], device=self.device) + return holder["buf"] + + # Should not raise — warm-up handles the allocation outside capture context + result = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": fn_with_guard}, + ) + result_np = result.numpy() + for val in result_np: + self.assertAlmostEqual(val, 2.0, places=5) + + # Replay should also work (allocation already done, only kernel replays) + wp.copy(src, wp.full(8, value=5.0, dtype=wp.float32, device=self.device)) + result2 = switch.call_stage( + stage="RewardManager_compute", + warp_call={"fn": fn_with_guard}, + ) + for val in result2.numpy(): + self.assertAlmostEqual(val, 6.0, places=5) + + def test_warp_eager_no_warmup(self): + """WARP_NOT_CAPTURED mode should call fn exactly once per call (no warm-up).""" + switch = ManagerCallSwitch(cfg_source={"default": 1}) + call_count = [0] + + def warp_fn(): + call_count[0] += 1 + + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 1, "Eager mode should call fn exactly once") + + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2, "Eager mode should call fn again each time") + + +# ====================================================================== +# Graph invalidation +# ====================================================================== + + +class TestGraphInvalidation(unittest.TestCase): + """Tests for invalidate_graphs on ManagerCallSwitch.""" + + def setUp(self): + self.device = "cuda:0" + + def test_invalidate_forces_recapture(self): + switch = ManagerCallSwitch(cfg_source={"default": 2}) + src = wp.full(4, value=1.0, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + call_count = [0] + + def warp_fn(): + call_count[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + # First capture + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2) # warm-up + capture + + # Replay — no new calls + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 2) + + # Invalidate + switch.invalidate_graphs() + + # Should re-warm-up + re-capture + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": warp_fn}) + self.assertEqual(call_count[0], 4) + + +# ====================================================================== +# resolve_manager_class +# ====================================================================== + + +class TestResolveManagerClass(unittest.TestCase): + """Tests for resolve_manager_class.""" + + def test_stable_resolves_to_isaaclab_managers(self): + switch = ManagerCallSwitch(cfg_source={"default": 0}) + cls = switch.resolve_manager_class("RewardManager") + self.assertTrue(cls.__module__.startswith("isaaclab.managers")) + + def test_warp_resolves_to_experimental_managers(self): + switch = ManagerCallSwitch(cfg_source={"default": 1}) + cls = switch.resolve_manager_class("RewardManager") + self.assertTrue(cls.__module__.startswith("isaaclab_experimental")) + + def test_mode_override(self): + """mode_override should bypass the config for class resolution.""" + switch = ManagerCallSwitch(cfg_source={"default": 2}) + cls = switch.resolve_manager_class("RewardManager", mode_override=ManagerCallMode.STABLE) + self.assertTrue(cls.__module__.startswith("isaaclab.managers")) + + def test_invalid_manager_raises(self): + switch = ManagerCallSwitch(cfg_source={"default": 0}) + with self.assertRaises(AttributeError): + switch.resolve_manager_class("NonexistentManager") + + +# ====================================================================== +# Manager name parsing +# ====================================================================== + + +class TestManagerNameParsing(unittest.TestCase): + """Tests for stage name → manager name extraction.""" + + def test_valid_stage_name(self): + switch = ManagerCallSwitch(cfg_source={"default": 1}) + # Dispatch a stage with valid name format + called = [False] + + def fn(): + called[0] = True + + switch.call_stage(stage="RewardManager_compute", warp_call={"fn": fn}) + self.assertTrue(called[0]) + + def test_invalid_stage_name_raises(self): + switch = ManagerCallSwitch(cfg_source={"default": 1}) + with self.assertRaises(ValueError): + switch.call_stage(stage="nounderscore", warp_call={"fn": lambda: None}) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/isaaclab_experimental/test/utils/test_warp_graph_cache.py b/source/isaaclab_experimental/test/utils/test_warp_graph_cache.py new file mode 100644 index 000000000000..1b9c10a76895 --- /dev/null +++ b/source/isaaclab_experimental/test/utils/test_warp_graph_cache.py @@ -0,0 +1,249 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for WarpGraphCache capture/replay and warm-up behavior.""" + +from __future__ import annotations + +import unittest + +import warp as wp +from isaaclab_experimental.utils.warp_graph_cache import WarpGraphCache + + +@wp.kernel +def _add_one(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)): + """Simple warp kernel: b[i] = a[i] + 1.""" + i = wp.tid() + b[i] = a[i] + 1.0 + + +class TestWarpGraphCache(unittest.TestCase): + """Tests for :class:`WarpGraphCache`.""" + + def setUp(self): + self.device = "cuda:0" + self.cache = WarpGraphCache() + + # ------------------------------------------------------------------ + # Warm-up + # ------------------------------------------------------------------ + + def test_warmup_runs_before_capture(self): + """The function should be called eagerly (warm-up) before graph capture. + + We verify this by counting total invocations on the first call. + Warm-up = 1, capture = 1, so fn should be called exactly 2 times. + """ + call_count = [0] + src = wp.zeros(4, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def counted_launch(): + call_count[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + # First call: warm-up + capture + self.cache.capture_or_replay("stage_a", counted_launch) + self.assertEqual(call_count[0], 2, "First call should invoke fn twice (warm-up + capture)") + + # Second call: replay only + self.cache.capture_or_replay("stage_a", counted_launch) + self.assertEqual(call_count[0], 2, "Replay should NOT invoke fn again") + + def test_warmup_flushes_first_call_allocations(self): + """Warm-up should handle first-call allocations so capture is clean. + + Simulates a hasattr guard pattern: allocate a buffer on first call only. + Without warm-up, the allocation would be recorded in the graph. + """ + holder = {} + src = wp.ones(8, dtype=wp.float32, device=self.device) + + def fn_with_hasattr_guard(): + if "buf" not in holder: + holder["buf"] = wp.zeros(8, dtype=wp.float32, device=self.device) + wp.launch(_add_one, dim=8, inputs=[src, holder["buf"]], device=self.device) + return holder["buf"] + + # Should not raise — warm-up handles the allocation outside capture + result = self.cache.capture_or_replay("guarded", fn_with_hasattr_guard) + self.assertIsNotNone(result) + + # Verify the kernel produced correct output + result_np = result.numpy() + for val in result_np: + self.assertAlmostEqual(val, 2.0, places=5) + + # ------------------------------------------------------------------ + # Capture / replay correctness + # ------------------------------------------------------------------ + + def test_capture_produces_correct_output(self): + """After capture, replaying the graph should produce correct results.""" + src = wp.full(4, value=3.0, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def my_fn(): + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + result = self.cache.capture_or_replay("compute", my_fn) + result_np = result.numpy() + for val in result_np: + self.assertAlmostEqual(val, 4.0, places=5) + + def test_replay_uses_updated_input(self): + """Replay should re-read from the same input buffer (pointer-stable). + + CUDA graph replay re-executes the same kernel on the same memory + addresses. If we update the input buffer in-place, the output + should reflect the new values. + """ + src = wp.full(4, value=1.0, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def my_fn(): + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + # Capture + self.cache.capture_or_replay("replay_test", my_fn) + + # Update input in-place + wp.copy(src, wp.full(4, value=10.0, dtype=wp.float32, device=self.device)) + + # Replay — should see updated input + result = self.cache.capture_or_replay("replay_test", my_fn) + result_np = result.numpy() + for val in result_np: + self.assertAlmostEqual(val, 11.0, places=5) + + def test_cached_result_is_same_reference(self): + """Replay should return the exact same object reference as capture.""" + src = wp.zeros(4, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def my_fn(): + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + result1 = self.cache.capture_or_replay("ref_test", my_fn) + result2 = self.cache.capture_or_replay("ref_test", my_fn) + self.assertIs(result1, result2, "Replay must return the same object reference") + + def test_multiple_stages_independent(self): + """Different stages should be captured and replayed independently.""" + src_a = wp.full(4, value=1.0, dtype=wp.float32, device=self.device) + dst_a = wp.zeros(4, dtype=wp.float32, device=self.device) + src_b = wp.full(4, value=5.0, dtype=wp.float32, device=self.device) + dst_b = wp.zeros(4, dtype=wp.float32, device=self.device) + + def fn_a(): + wp.launch(_add_one, dim=4, inputs=[src_a, dst_a], device=self.device) + return dst_a + + def fn_b(): + wp.launch(_add_one, dim=4, inputs=[src_b, dst_b], device=self.device) + return dst_b + + result_a = self.cache.capture_or_replay("stage_a", fn_a) + result_b = self.cache.capture_or_replay("stage_b", fn_b) + + self.assertAlmostEqual(result_a.numpy()[0], 2.0, places=5) + self.assertAlmostEqual(result_b.numpy()[0], 6.0, places=5) + + # Replay both + result_a2 = self.cache.capture_or_replay("stage_a", fn_a) + result_b2 = self.cache.capture_or_replay("stage_b", fn_b) + self.assertIs(result_a, result_a2) + self.assertIs(result_b, result_b2) + + # ------------------------------------------------------------------ + # Invalidation + # ------------------------------------------------------------------ + + def test_invalidate_all(self): + """invalidate() with no args should drop all cached graphs.""" + src = wp.zeros(4, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + call_count = [0] + + def my_fn(): + call_count[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst], device=self.device) + return dst + + self.cache.capture_or_replay("s1", my_fn) + self.assertEqual(call_count[0], 2) # warm-up + capture + + self.cache.invalidate() + + # After invalidation, next call should re-warm-up and re-capture + self.cache.capture_or_replay("s1", my_fn) + self.assertEqual(call_count[0], 4) # 2 more (warm-up + capture) + + def test_invalidate_single_stage(self): + """invalidate(stage) should only drop the named stage.""" + src = wp.zeros(4, dtype=wp.float32, device=self.device) + dst_a = wp.zeros(4, dtype=wp.float32, device=self.device) + dst_b = wp.zeros(4, dtype=wp.float32, device=self.device) + + count_a = [0] + count_b = [0] + + def fn_a(): + count_a[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst_a], device=self.device) + return dst_a + + def fn_b(): + count_b[0] += 1 + wp.launch(_add_one, dim=4, inputs=[src, dst_b], device=self.device) + return dst_b + + self.cache.capture_or_replay("a", fn_a) + self.cache.capture_or_replay("b", fn_b) + self.assertEqual(count_a[0], 2) + self.assertEqual(count_b[0], 2) + + # Invalidate only "a" + self.cache.invalidate("a") + + self.cache.capture_or_replay("a", fn_a) + self.cache.capture_or_replay("b", fn_b) + self.assertEqual(count_a[0], 4, "Stage 'a' should re-capture after invalidation") + self.assertEqual(count_b[0], 2, "Stage 'b' should replay (not re-capture)") + + def test_invalidate_nonexistent_stage_is_noop(self): + """Invalidating a stage that was never captured should not raise.""" + self.cache.invalidate("nonexistent") # should not raise + + # ------------------------------------------------------------------ + # Args / kwargs forwarding + # ------------------------------------------------------------------ + + def test_args_and_kwargs_forwarded(self): + """capture_or_replay should forward args and kwargs to fn.""" + src = wp.full(4, value=2.0, dtype=wp.float32, device=self.device) + dst = wp.zeros(4, dtype=wp.float32, device=self.device) + + def my_fn(a, b, device="cuda:0"): + wp.launch(_add_one, dim=4, inputs=[a, b], device=device) + return b + + result = self.cache.capture_or_replay( + "args_test", + my_fn, + args=(src, dst), + kwargs={"device": self.device}, + ) + self.assertAlmostEqual(result.numpy()[0], 3.0, places=5) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/isaaclab_mimic/changelog.d/.gitkeep b/source/isaaclab_mimic/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_mimic/config/extension.toml b/source/isaaclab_mimic/config/extension.toml index 5b498ae58652..f53461c3fc6f 100644 --- a/source/isaaclab_mimic/config/extension.toml +++ b/source/isaaclab_mimic/config/extension.toml @@ -1,7 +1,7 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "1.0.16" +version = "1.2.5" # Description category = "isaaclab" diff --git a/source/isaaclab_mimic/docs/CHANGELOG.rst b/source/isaaclab_mimic/docs/CHANGELOG.rst index 09b79c8cdd80..782cf6f7d220 100644 --- a/source/isaaclab_mimic/docs/CHANGELOG.rst +++ b/source/isaaclab_mimic/docs/CHANGELOG.rst @@ -1,6 +1,78 @@ Changelog --------- +1.2.5 (2026-04-14) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated mobility path utilities to import from ``isaacsim.replicator.experimental.mobility_gen``. + + +1.2.4 (2026-04-06) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Made performance enhancing changes to data generation pipeline (elimate large tensor usage, reduce asyncio overhead and blocking) +* Locked h5py dependency to last stable version 3.15.1 to prevent package import errors on Windows with version 3.16.0. + +Added +^^^^^ + +* Added data generation test cases for all tasks (single and multi environment). + + +1.2.3 (2026-03-12) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Add nvidia-srl-usd-to-urdf dependency to isaaclab_mimic extension. + + +1.2.2 (2026-03-10) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Add h5py dependency to isaaclab_mimic extension. + + +1.2.1 (2026-02-25) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed cuRobo planner quaternion handling and Warp API compatibility for Isaac Lab 3.0. +* Fixed Rerun visualization in cuRobo plan visualizer. +* Added ``--visualizer kit`` to SkillGen documentation for all non-headless commands. + + +1.2.0 (2026-02-23) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Update data generator to support Isaac Lab 3.0. +* Use unique quaternion for GR1 pick place env Mimic actions. +* Discard failed Mimic demos by default for Franka stacking task. + + +1.1.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Changed the quaternion ordering to match warp, PhysX, and Newton native XYZW quaternion ordering. + 1.0.16 (2025-11-10) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py index 37fabe4a4b47..6851a90256f2 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py @@ -6,6 +6,7 @@ """Base class for data generator.""" import asyncio +import contextlib import copy import logging from typing import Any @@ -32,6 +33,16 @@ from .datagen_info_pool import DataGenInfoPool +@contextlib.asynccontextmanager +async def _optional_lock(lock): + """Async context manager that acquires the lock only if it is not None.""" + if lock is not None: + async with lock: + yield + else: + yield + + def transform_source_data_segment_using_delta_object_pose( src_eef_poses: torch.Tensor, delta_obj_pose: torch.Tensor, @@ -663,10 +674,7 @@ async def generate( # noqa: C901 for subtask_constraint in self.env_cfg.task_constraint_configs: runtime_subtask_constraints_dict.update(subtask_constraint.generate_runtime_subtask_constraints()) - # save generated data in these variables - generated_states = [] - generated_obs = [] - generated_actions = [] + # Track if the generated trajectory was successful generated_success = False # some eef-specific state variables used during generation @@ -693,7 +701,8 @@ async def generate( # noqa: C901 # While loop that runs per time step while True: - async with self.src_demo_datagen_info_pool.asyncio_lock: + await asyncio.sleep(0) + async with _optional_lock(self.src_demo_datagen_info_pool.asyncio_lock): if len(self.src_demo_datagen_info_pool.datagen_infos) > prev_src_demo_datagen_info_pool_size: # src_demo_datagen_info_pool at this point may be updated with new demos, # So we need to update subtask boundaries again @@ -864,26 +873,24 @@ async def generate( # noqa: C901 # Update visualization if motion planner is available if motion_planner and motion_planner.visualize_spheres: - current_joints = self.env.scene["robot"].data.joint_pos[env_id] + current_joints = self.env.scene["robot"].data.joint_pos.torch[env_id] motion_planner._update_visualization_at_joint_positions(current_joints) eef_waypoint_dict[eef_name] = waypoint multi_waypoint = MultiWaypoint(eef_waypoint_dict) + await asyncio.sleep(0) + # Execute the next waypoints for all eefs - exec_results = await multi_waypoint.execute( + exec_success = await multi_waypoint.execute( env=self.env, success_term=success_term, env_id=env_id, env_action_queue=env_action_queue, ) - # Update execution state buffers - if len(exec_results["states"]) > 0: - generated_states.extend(exec_results["states"]) - generated_obs.extend(exec_results["observations"]) - generated_actions.extend(exec_results["actions"]) - generated_success = generated_success or exec_results["success"] + # Update success state + generated_success = generated_success or exec_success # Get the navigation state if self.env_cfg.datagen_config.use_navigation_controller: @@ -982,10 +989,6 @@ async def generate( # noqa: C901 if all(eef_subtasks_done.values()): break - # Merge numpy arrays - if len(generated_actions) > 0: - generated_actions = torch.cat(generated_actions, dim=0) - # Set success to the recorded episode data and export to file self.env.recorder_manager.set_success_to_episodes( env_id_tensor, torch.tensor([[generated_success]], dtype=torch.bool, device=self.env.device) @@ -995,9 +998,6 @@ async def generate( # noqa: C901 results = dict( initial_state=new_initial_state, - states=generated_states, - observations=generated_obs, - actions=generated_actions, success=generated_success, ) return results diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py index 18d1f6716d4a..1761cf9beaa7 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py @@ -78,6 +78,7 @@ def env_loop( env_action_queue: asyncio.Queue, shared_datagen_info_pool: DataGenInfoPool, asyncio_event_loop: asyncio.AbstractEventLoop, + data_gen_tasks: asyncio.Future | None = None, ): """Main asyncio loop for the environment. @@ -87,6 +88,8 @@ def env_loop( env_action_queue: The asyncio queue to handle actions to for executing actions. shared_datagen_info_pool: The shared datagen info pool that stores source demo info. asyncio_event_loop: The main asyncio event loop. + data_gen_tasks: The gathered async data generation future. When provided, the loop + will exit early if all tasks finish unexpectedly (e.g. due to an unhandled exception). """ global num_success, num_failures, num_attempts env_id_tensor = torch.tensor([0], dtype=torch.int64, device=env.device) @@ -97,17 +100,20 @@ def env_loop( # check if any environment needs to be reset while waiting for actions while env_action_queue.qsize() != env.num_envs: asyncio_event_loop.run_until_complete(asyncio.sleep(0)) + if data_gen_tasks is not None and data_gen_tasks.done(): + exc = data_gen_tasks.exception() + if exc is not None: + raise exc + return while not env_reset_queue.empty(): env_id_tensor[0] = env_reset_queue.get_nowait() env.reset(env_ids=env_id_tensor) env_reset_queue.task_done() actions = torch.zeros(env.action_space.shape) - - # get actions from all the data generators - for i in range(env.num_envs): - # an async-blocking call to get an action from a data generator - env_id, action = asyncio_event_loop.run_until_complete(env_action_queue.get()) + get_tasks = [env_action_queue.get() for _ in range(env.num_envs)] + results = asyncio_event_loop.run_until_complete(asyncio.gather(*get_tasks)) + for env_id, action in results: actions[env_id] = action # perform action on environment @@ -140,7 +146,8 @@ def env_loop( if env.sim.is_stopped(): break - env.close() + # Do not close env here: async data generator tasks may still be running. + # Caller must close env after cancelling and awaiting those tasks. def setup_env_config( @@ -151,6 +158,7 @@ def setup_env_config( device: str, generation_num_trials: int | None = None, recorder_cfg: RecorderManagerBaseCfg | None = None, + dataset_compression: bool = True, ) -> tuple[Any, Any]: """Configure the environment for data generation. @@ -161,6 +169,8 @@ def setup_env_config( num_envs: Number of environments to run device: Device to run on generation_num_trials: Optional override for number of trials + recorder_cfg: Recorder manager configuration + dataset_compression: Whether to enable dataset compression Returns: tuple containing: @@ -197,6 +207,8 @@ def setup_env_config( env_cfg.recorders.dataset_export_dir_path = output_dir env_cfg.recorders.dataset_filename = output_file_name + env_cfg.recorders.dataset_compression = dataset_compression + if env_cfg.datagen_config.generation_keep_failed: env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_FAILED_IN_SEPARATE_FILES else: @@ -229,8 +241,7 @@ def setup_async_generation( asyncio_event_loop = asyncio.get_event_loop() env_reset_queue = asyncio.Queue() env_action_queue = asyncio.Queue() - shared_datagen_info_pool_lock = asyncio.Lock() - shared_datagen_info_pool = DataGenInfoPool(env, env.cfg, env.device, asyncio_lock=shared_datagen_info_pool_lock) + shared_datagen_info_pool = DataGenInfoPool(env, env.cfg, env.device) shared_datagen_info_pool.load_from_dataset_file(input_file) print(f"Loaded {shared_datagen_info_pool.num_datagen_infos} to datagen info pool") diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/waypoint.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/waypoint.py index 964cc2a49dda..a0f4083e868a 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/waypoint.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/waypoint.py @@ -381,11 +381,8 @@ async def execute( env_action_queue: The asyncio queue to put the action into. Returns: - A dictionary containing the state, observation, action, and success of the multi-waypoint actions. + The success of the multi-waypoint actions. """ - # current state - state = env.scene.get_state(is_relative=True) - # construct action from target poses and gripper actions target_eef_pose_dict = {eef_name: waypoint.pose for eef_name, waypoint in self.waypoints.items()} gripper_action_dict = {eef_name: waypoint.gripper_action for eef_name, waypoint in self.waypoints.items()} @@ -412,18 +409,11 @@ async def execute( # step environment if env_action_queue is None: - obs, _, _, _, _ = env.step(play_action) + env.step(play_action) else: await env_action_queue.put((env_id, play_action[0])) await env_action_queue.join() - obs = env.obs_buf success = bool(success_term.func(env, **success_term.params)[env_id]) - result = dict( - states=[state], - observations=[obs], - actions=[play_action], - success=success, - ) - return result + return success diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py index f3a2705a68b6..30a52d3a4fd0 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py @@ -159,3 +159,52 @@ }, disable_env_checker=True, ) + +## +# GR1T2 Pick Place with Pink IK - Absolute Pose Control +## + +gym.register( + id="Isaac-PickPlace-GR1T2-Abs-Mimic-v0", + entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.pickplace_gr1t2_mimic_env_cfg:PickPlaceGR1T2MimicEnvCfg", + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-PickPlace-GR1T2-WaistEnabled-Abs-Mimic-v0", + entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", + kwargs={ + "env_cfg_entry_point": ( + f"{__name__}.pickplace_gr1t2_waist_enabled_mimic_env_cfg:PickPlaceGR1T2WaistEnabledMimicEnvCfg" + ), + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-v0", + entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", + kwargs={"env_cfg_entry_point": f"{__name__}.nutpour_gr1t2_mimic_env_cfg:NutPourGR1T2MimicEnvCfg"}, + disable_env_checker=True, +) + +gym.register( + id="Isaac-ExhaustPipe-GR1T2-Pink-IK-Abs-Mimic-v0", + entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", + kwargs={"env_cfg_entry_point": f"{__name__}.exhaustpipe_gr1t2_mimic_env_cfg:ExhaustPipeGR1T2MimicEnvCfg"}, + disable_env_checker=True, +) + +## +# Locomanipulation G1 with Pink IK - Absolute Pose Control +## + +gym.register( + id="Isaac-Locomanipulation-G1-Abs-Mimic-v0", + entry_point=f"{__name__}.locomanipulation_g1_mimic_env:LocomanipulationG1MimicEnv", + kwargs={"env_cfg_entry_point": f"{__name__}.locomanipulation_g1_mimic_env_cfg:LocomanipulationG1MimicEnvCfg"}, + disable_env_checker=True, +) diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/exhaustpipe_gr1t2_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/exhaustpipe_gr1t2_mimic_env_cfg.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/exhaustpipe_gr1t2_mimic_env_cfg.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/exhaustpipe_gr1t2_mimic_env_cfg.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env_cfg.py index 197852602d39..8f7d6af8df7f 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env_cfg.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env_cfg.py @@ -25,7 +25,7 @@ def __post_init__(self): # Override the existing values self.datagen_config.name = "demo_src_stack_isaac_lab_task_D0" self.datagen_config.generation_guarantee = True - self.datagen_config.generation_keep_failed = True + self.datagen_config.generation_keep_failed = False self.datagen_config.generation_num_trials = 10 self.datagen_config.generation_select_src_per_subtask = True self.datagen_config.generation_transform_first_robot_pose = False diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_visuomotor_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_visuomotor_mimic_env_cfg.py index ed63e973b2d0..816d85f611c4 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_visuomotor_mimic_env_cfg.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_visuomotor_mimic_env_cfg.py @@ -24,7 +24,7 @@ def __post_init__(self): # Override the existing values self.datagen_config.name = "isaac_lab_franka_stack_ik_rel_visuomotor_D0" self.datagen_config.generation_guarantee = True - self.datagen_config.generation_keep_failed = True + self.datagen_config.generation_keep_failed = False self.datagen_config.generation_num_trials = 10 self.datagen_config.generation_select_src_per_subtask = True self.datagen_config.generation_transform_first_robot_pose = False diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env.py b/source/isaaclab_mimic/isaaclab_mimic/envs/locomanipulation_g1_mimic_env.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/locomanipulation_g1_mimic_env.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/locomanipulation_g1_mimic_env_cfg.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/locomanipulation_g1_mimic_env_cfg.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/locomanipulation_g1_mimic_env_cfg.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/nutpour_gr1t2_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/nutpour_gr1t2_mimic_env_cfg.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/nutpour_gr1t2_mimic_env_cfg.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/nutpour_gr1t2_mimic_env_cfg.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_mimic_env.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_mimic_env.py similarity index 90% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_mimic_env.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_mimic_env.py index 9ec65040ef95..eaef73dcad43 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_mimic_env.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_mimic_env.py @@ -62,8 +62,8 @@ def target_eef_pose_to_action( target_left_eef_pos, left_target_rot = PoseUtils.unmake_pose(target_eef_pose_dict["left"]) target_right_eef_pos, right_target_rot = PoseUtils.unmake_pose(target_eef_pose_dict["right"]) - target_left_eef_rot_quat = PoseUtils.quat_from_matrix(left_target_rot) - target_right_eef_rot_quat = PoseUtils.quat_from_matrix(right_target_rot) + target_left_eef_rot_quat = PoseUtils.quat_unique(PoseUtils.quat_from_matrix(left_target_rot)) + target_right_eef_rot_quat = PoseUtils.quat_unique(PoseUtils.quat_from_matrix(right_target_rot)) # gripper actions left_gripper_action = gripper_action_dict["left"] @@ -75,10 +75,10 @@ def target_eef_pose_to_action( quat_noise_left = action_noise_dict["left"] * torch.randn_like(target_left_eef_rot_quat) quat_noise_right = action_noise_dict["right"] * torch.randn_like(target_right_eef_rot_quat) - target_left_eef_pos += pos_noise_left - target_right_eef_pos += pos_noise_right - target_left_eef_rot_quat += quat_noise_left - target_right_eef_rot_quat += quat_noise_right + target_left_eef_pos = target_left_eef_pos + pos_noise_left + target_right_eef_pos = target_right_eef_pos + pos_noise_right + target_left_eef_rot_quat = target_left_eef_rot_quat + quat_noise_left + target_right_eef_rot_quat = target_right_eef_rot_quat + quat_noise_right return torch.cat( ( diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_mimic_env_cfg.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_mimic_env_cfg.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_mimic_env_cfg.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_waist_enabled_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_waist_enabled_mimic_env_cfg.py similarity index 100% rename from source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/pickplace_gr1t2_waist_enabled_mimic_env_cfg.py rename to source/isaaclab_mimic/isaaclab_mimic/envs/pickplace_gr1t2_waist_enabled_mimic_env_cfg.py diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py deleted file mode 100644 index e7fe847c42e8..000000000000 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/pinocchio_envs/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: Apache-2.0 - -"""Sub-package with environment wrappers for Isaac Lab Mimic.""" - -import gymnasium as gym - -gym.register( - id="Isaac-PickPlace-GR1T2-Abs-Mimic-v0", - entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", - kwargs={ - "env_cfg_entry_point": f"{__name__}.pickplace_gr1t2_mimic_env_cfg:PickPlaceGR1T2MimicEnvCfg", - }, - disable_env_checker=True, -) - -gym.register( - id="Isaac-PickPlace-GR1T2-WaistEnabled-Abs-Mimic-v0", - entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", - kwargs={ - "env_cfg_entry_point": ( - f"{__name__}.pickplace_gr1t2_waist_enabled_mimic_env_cfg:PickPlaceGR1T2WaistEnabledMimicEnvCfg" - ), - }, - disable_env_checker=True, -) - -gym.register( - id="Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-v0", - entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", - kwargs={"env_cfg_entry_point": f"{__name__}.nutpour_gr1t2_mimic_env_cfg:NutPourGR1T2MimicEnvCfg"}, - disable_env_checker=True, -) - -gym.register( - id="Isaac-ExhaustPipe-GR1T2-Pink-IK-Abs-Mimic-v0", - entry_point=f"{__name__}.pickplace_gr1t2_mimic_env:PickPlaceGR1T2MimicEnv", - kwargs={"env_cfg_entry_point": f"{__name__}.exhaustpipe_gr1t2_mimic_env_cfg:ExhaustPipeGR1T2MimicEnvCfg"}, - disable_env_checker=True, -) - -gym.register( - id="Isaac-Locomanipulation-G1-Abs-Mimic-v0", - entry_point=f"{__name__}.locomanipulation_g1_mimic_env:LocomanipulationG1MimicEnv", - kwargs={"env_cfg_entry_point": f"{__name__}.locomanipulation_g1_mimic_env_cfg:LocomanipulationG1MimicEnvCfg"}, - disable_env_checker=True, -) diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py index dca2945822a3..431a890d510d 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/g1_locomanipulation_sdg_env.py @@ -11,6 +11,7 @@ from isaaclab.envs.common import ViewerCfg from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.sensors import CameraCfg from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg from isaaclab.utils import configclass @@ -18,8 +19,10 @@ from isaaclab.utils.datasets import EpisodeData from isaaclab_mimic.locomanipulation_sdg.data_classes import LocomanipulationSDGInputData +from isaaclab_mimic.locomanipulation_sdg.occupancy_map_utils import OccupancyMap from isaaclab_mimic.locomanipulation_sdg.scene_utils import HasPose, SceneBody, SceneFixture +from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp from isaaclab_tasks.manager_based.locomanipulation.pick_place.locomanipulation_g1_env_cfg import ( LocomanipulationG1EnvCfg, LocomanipulationG1SceneCfg, @@ -30,8 +33,8 @@ from .locomanipulation_sdg_env import LocomanipulationSDGEnv from .locomanipulation_sdg_env_cfg import LocomanipulationSDGEnvCfg, LocomanipulationSDGRecorderManagerCfg -NUM_FORKLIFTS = 6 -NUM_BOXES = 12 +NUM_FORKLIFTS = 0 +NUM_BOXES = 0 @configclass @@ -41,7 +44,7 @@ class G1LocomanipulationSDGSceneCfg(LocomanipulationG1SceneCfg): init_state=AssetBaseCfg.InitialStateCfg( pos=[-2, -3.55, -0.3], # rot=[0, 0, 0, 1]), - rot=[0.9238795, 0, 0, -0.3826834], + rot=[0, 0, -0.3826834, 0.9238795], ), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", @@ -49,46 +52,54 @@ class G1LocomanipulationSDGSceneCfg(LocomanipulationG1SceneCfg): ), ) - robot_pov_cam = CameraCfg( - prim_path="/World/envs/env_.*/Robot/torso_link/d435_link/camera", - update_period=0.0, - height=160, - width=256, - data_types=["rgb"], - spawn=sim_utils.PinholeCameraCfg(focal_length=8.0, clipping_range=(0.1, 20.0)), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.9848078, 0.0, -0.1736482, 0.0), convention="world"), - ) - - -# Add forklifts -for i in range(NUM_FORKLIFTS): - setattr( - G1LocomanipulationSDGSceneCfg, - f"forklift_{i}", - AssetBaseCfg( - prim_path=f"/World/envs/env_.*/Forklift{i}", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), - spawn=UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Forklift/forklift.usd", - rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + def add_robot_pov_cam(self, height, width): + robot_pov_cam = CameraCfg( + prim_path="/World/envs/env_.*/Robot/torso_link/d435_link/camera", + update_period=0.0, + height=height, + width=width, + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg(focal_length=8.0, clipping_range=(0.1, 20.0)), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, -0.1736482, 0.0, 0.9848078), convention="world"), + ) + setattr(self, "robot_pov_cam", robot_pov_cam) + + def add_background_asset(self, background_usd_path: str): + background = AssetBaseCfg( + prim_path="/World/envs/env_.*/Background", + init_state=AssetBaseCfg.InitialStateCfg( + pos=[0, 0, 0], + rot=[0.0, 0.0, 0.0, 1.0], ), - ), - ) - -# Add boxes -for i in range(NUM_BOXES): - setattr( - G1LocomanipulationSDGSceneCfg, - f"box_{i}", - AssetBaseCfg( - prim_path=f"/World/envs/env_.*/Box{i}", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), spawn=UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_681.usd", - rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + usd_path=background_usd_path, ), - ), - ) + ) + + setattr(self, "background", background) + + def add_forklifts(self, num_forklifts: int): + for i in range(num_forklifts): + forklift = AssetBaseCfg( + prim_path=f"/World/envs/env_.*/Forklift{i}", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Forklift/forklift.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + ), + ) + setattr(self, f"forklift_{i}", forklift) + + def add_boxes(self, num_boxes: int): + for i in range(num_boxes): + box = AssetBaseCfg( + prim_path=f"/World/envs/env_.*/Box{i}", + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.0, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Simple_Warehouse/Props/SM_CardBoxB_01_681.usd", + ), + ) + setattr(self, f"box_{i}", box) @configclass @@ -117,16 +128,20 @@ class G1LocomanipulationSDGEnvCfg(LocomanipulationG1EnvCfg, LocomanipulationSDGE # Scene settings scene: G1LocomanipulationSDGSceneCfg = G1LocomanipulationSDGSceneCfg( - num_envs=1, env_spacing=2.5, replicate_physics=True + num_envs=1, env_spacing=2.5, replicate_physics=False ) recorders: LocomanipulationSDGRecorderManagerCfg = LocomanipulationSDGRecorderManagerCfg() observations: G1LocomanipulationSDGObservationsCfg = G1LocomanipulationSDGObservationsCfg() + background_usd_path: str | None = None + background_occupancy_yaml_file: str | None = None + high_res_video: bool = False + def __post_init__(self): """Post initialization.""" # general settings self.decimation = 4 - self.episode_length_s = 100.0 + self.episode_length_s = 50.0 # simulation settings self.sim.dt = 1 / 200 # 200Hz self.sim.render_interval = 6 @@ -137,9 +152,40 @@ def __post_init__(self): # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) + # Override success condition: check placement relative to the drop-off table (packing_table_2), + # since this env has two tables: packing_table (pick-up) and packing_table_2 (drop-off). + self.terminations.success = DoneTerm( + func=locomanip_mdp.task_done_pick_place_table_frame, + params={ + "task_link_name": "right_wrist_yaw_link", + "table_cfg": SceneEntityCfg("packing_table_2"), + }, + ) + class G1LocomanipulationSDGEnv(LocomanipulationSDGEnv): def __init__(self, cfg: G1LocomanipulationSDGEnvCfg, **kwargs): + if cfg.background_usd_path is not None: + self._num_forklifts = 0 + self._num_boxes = 0 + set_ground_invisible = True + cfg.scene.add_background_asset(cfg.background_usd_path) + else: + self._num_forklifts = NUM_FORKLIFTS + self._num_boxes = NUM_BOXES + set_ground_invisible = False + + if cfg.high_res_video: + cfg.scene.add_robot_pov_cam(540, 960) + else: + cfg.scene.add_robot_pov_cam(160, 256) + + cfg.scene.add_forklifts(self._num_forklifts) + cfg.scene.add_boxes(self._num_boxes) + + if set_ground_invisible: + cfg.scene.ground.spawn.visible = False + super().__init__(cfg) self.sim.set_camera_view([10.5, 10.5, 10.5], [0.0, 0.0, 0.5]) self._upper_body_dim = self.action_manager.get_term("upper_body_ik").action_dim @@ -160,16 +206,17 @@ def load_input_data(self, episode_data: EpisodeData, step: int) -> Locomanipulat object_pose = dataset_state["rigid_object"]["object"]["root_pose"] + base_pose = episode_data.get_initial_state()["articulation"]["robot"]["root_pose"] data = LocomanipulationSDGInputData( left_hand_pose_target=dataset_action[0:7], right_hand_pose_target=dataset_action[7:14], left_hand_joint_positions_target=dataset_action[14:21], right_hand_joint_positions_target=dataset_action[21:28], - base_pose=episode_data.get_initial_state()["articulation"]["robot"]["root_pose"], + base_pose=base_pose, object_pose=object_pose, fixture_pose=torch.tensor( - [0.0, 0.55, -0.3, 1.0, 0.0, 0.0, 0.0] - ), # Table pose is not recorded for this env. + [0.0, 0.55, -0.3, 0.0, 0.0, 0.0, 1.0], device=base_pose.device + ), # Table pose is not recorded for this env. Quaternion in XYZW format (identity). ) return data @@ -182,6 +229,18 @@ def build_action_vector( right_hand_joint_positions_target: torch.Tensor, base_velocity_target: torch.Tensor, ): + """Build the action vector for the G1 locomanipulation environment. + + Action layout (upper_body_dim = left_pose + right_pose + left_fingers + right_fingers = 28): + + Indices Content + [0:7] left hand pose (pos[0:3] + quat[3:7]) + [7:14] right hand pose (pos[7:10] + quat[10:14]) + [14:21] left hand finger joints (7) + [21:28] right hand finger joints (7) + [upper_body_dim:upper_body_dim+3] base velocity (vx, vy, yaw_rate) + [upper_body_dim+3] base height + """ action = torch.zeros(self.action_space.shape) # Set base height @@ -240,40 +299,50 @@ def get_object(self) -> HasPose: return SceneBody(self.scene, "object", "sm_steeringwheel_a01_01") def get_start_fixture(self) -> SceneFixture: - return SceneFixture( + return SceneFixture.from_boundary( self.scene, "packing_table", - occupancy_map_boundary=np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), - occupancy_map_resolution=0.05, + np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), + 0.05, ) def get_end_fixture(self) -> SceneFixture: - return SceneFixture( + return SceneFixture.from_boundary( self.scene, "packing_table_2", - occupancy_map_boundary=np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), - occupancy_map_resolution=0.05, + np.array([[-1.45, -0.45], [1.45, -0.45], [1.45, 0.45], [-1.45, 0.45]]), + 0.05, ) def get_obstacle_fixtures(self): obstacles = [ - SceneFixture( + SceneFixture.from_boundary( self.scene, f"forklift_{i}", - occupancy_map_boundary=np.array([[-1.0, -1.9], [1.0, -1.9], [1.0, 2.1], [-1.0, 2.1]]), - occupancy_map_resolution=0.05, + np.array([[-1.0, -1.9], [1.0, -1.9], [1.0, 2.1], [-1.0, 2.1]]), + 0.05, ) - for i in range(NUM_FORKLIFTS) + for i in range(self._num_forklifts) ] obstacles += [ - SceneFixture( + SceneFixture.from_boundary( self.scene, f"box_{i}", - occupancy_map_boundary=np.array([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]), - occupancy_map_resolution=0.05, + np.array([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]), + 0.05, ) - for i in range(NUM_BOXES) + for i in range(self._num_boxes) ] return obstacles + + def get_background_fixture(self) -> SceneFixture | None: + if "background" not in self.scene.keys(): + return None + + background_map = OccupancyMap.from_ros_yaml( + ros_yaml_path=self.cfg.background_occupancy_yaml_file, + ) + background_fixture = SceneFixture(self.scene, "background", background_map) + return background_fixture diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py index 83ae8e656846..5163ebc5f738 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env.py @@ -89,3 +89,7 @@ def get_end_fixture(self) -> SceneFixture: def get_obstacle_fixtures(self) -> list[SceneFixture]: """Get the set of obstacle fixtures.""" raise NotImplementedError + + def get_background_fixture(self) -> SceneFixture | None: + """Get the background fixture body.""" + raise NotImplementedError diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py index f3c5dd6c47cb..2ccefe4d0c7b 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/envs/locomanipulation_sdg_env_cfg.py @@ -8,17 +8,15 @@ from isaaclab.envs.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import TerminationTermCfg as DoneTerm -from isaaclab.managers.recorder_manager import RecorderTerm, RecorderTermCfg +from isaaclab.managers.recorder_manager import RecorderTermCfg from isaaclab.utils import configclass -from .locomanipulation_sdg_env import LocomanipulationSDGOutputDataRecorder - @configclass class LocomanipulationSDGOutputDataRecorderCfg(RecorderTermCfg): """Configuration for the step policy observation recorder term.""" - class_type: type[RecorderTerm] = LocomanipulationSDGOutputDataRecorder + class_type: type | str = "{DIR}.locomanipulation_sdg_env:LocomanipulationSDGOutputDataRecorder" @configclass diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py index b2fdbdfb8527..87c66042bb05 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/occupancy_map_utils.py @@ -220,7 +220,9 @@ def from_ros_image( data = 255 - data freespace_mask = data < free_thresh - occupied_mask = data > occupied_thresh + + # To handle unknown areas as occupied + occupied_mask = ~freespace_mask return OccupancyMap.from_masks( freespace_mask=freespace_mask, occupied_mask=occupied_mask, resolution=resolution, origin=origin @@ -477,10 +479,7 @@ def check_world_point_in_bounds(self, point: Point2d) -> bool: x_px = int(pixel[0, 0]) y_px = int(pixel[0, 1]) - if (x_px < 0) or (x_px >= self.width_pixels()) or (y_px < 0) or (y_px >= self.height_pixels()): - return False - - return True + return self.check_pixel_in_bounds(x_px, y_px) def check_world_point_in_freespace(self, point: Point2d) -> bool: """Check if a world coordinate is inside the freespace region of the occupancy map @@ -500,6 +499,21 @@ def check_world_point_in_freespace(self, point: Point2d) -> bool: freespace = self.freespace_mask() return bool(freespace[y_px, x_px]) + def check_pixel_in_bounds(self, x_px: int, y_px: int) -> bool: + """Check if a pixel coordinate is inside the bounds of the occupancy map. + + Args: + x (int): The x coordinate. + y (int): The y coordinate. + + Returns: + bool: True if the coordinate is inside the bounds of the occupancy map. + """ + if (x_px < 0) or (x_px >= self.width_pixels()) or (y_px < 0) or (y_px >= self.height_pixels()): + return False + + return True + def transformed(self, transform: np.ndarray) -> "OccupancyMap": return transform_occupancy_map(self, transform) @@ -680,10 +694,13 @@ def occupancy_map_add_to_stage( draw = ImageDraw.Draw(image) line_coordinates = [] path_pixels = occupancy_map.world_to_pixel_numpy(draw_path) - for i in range(len(path_pixels)): - line_coordinates.append(int(path_pixels[i, 0])) - line_coordinates.append(int(path_pixels[i, 1])) width_pixels = draw_path_line_width_meter / occupancy_map.resolution + circle_radius = int(width_pixels / 2) + for i in range(len(path_pixels)): + x, y = int(path_pixels[i, 0]), int(path_pixels[i, 1]) + line_coordinates.append(x) + line_coordinates.append(y) + draw.ellipse([x - circle_radius, y - circle_radius, x + circle_radius, y + circle_radius], fill="red") draw.line(line_coordinates, fill="green", width=int(width_pixels / 2), joint="curve") # need to flip, ros uses inverted coordinates on y axis @@ -696,6 +713,7 @@ def occupancy_map_add_to_stage( # Add model modelRoot = UsdGeom.Xform.Define(stage, path) Usd.ModelAPI(modelRoot).SetKind(Kind.Tokens.component) + UsdGeom.Imageable(modelRoot).MakeInvisible() # Add mesh mesh = UsdGeom.Mesh.Define(stage, os.path.join(path, "mesh")) diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py index f3e203f401e8..46ade12e70a6 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/path_utils.py @@ -6,7 +6,7 @@ import torch -from isaacsim.replicator.mobility_gen.impl.path_planner import compress_path, generate_paths +from isaacsim.replicator.experimental.mobility_gen.impl.path_planner import compress_path, generate_paths from .occupancy_map_utils import OccupancyMap from .scene_utils import HasPose2d @@ -171,7 +171,7 @@ def find_nearest(self, point: torch.Tensor) -> tuple[torch.Tensor, float, tuple[ return min_pt, min_pt_dist_along_path, min_pt_seg, min_pt_dist_to_seg -def plan_path(start: HasPose2d, end: HasPose2d, occupancy_map: OccupancyMap) -> torch.Tensor: +def plan_path(start: HasPose2d, end: HasPose2d, occupancy_map: OccupancyMap) -> torch.Tensor | None: """Plan collision-free path between start and end positions. Args: @@ -196,6 +196,25 @@ def plan_path(start: HasPose2d, end: HasPose2d, occupancy_map: OccupancyMap) -> start_yx_pixels = start_xy_pixels[..., 0, ::-1] end_yx_pixels = end_xy_pixels[..., 0, ::-1] + # Check if end_yx_pixels are inside the occupancy map bounds + map_height, map_width = occupancy_map.freespace_mask().shape + start_y, start_x = int(start_yx_pixels[0]), int(start_yx_pixels[1]) + end_y, end_x = int(end_yx_pixels[0]), int(end_yx_pixels[1]) + + if not occupancy_map.check_pixel_in_bounds(start_x, start_y): + print( + f"Warning: start_yx_pixels ({start_y}, {start_x}) is outside occupancy map bounds " + f"(height={map_height}, width={map_width})" + ) + return None + + if not occupancy_map.check_pixel_in_bounds(end_x, end_y): + print( + f"Warning: end_yx_pixels ({end_y}, {end_x}) is outside occupancy map bounds " + f"(height={map_height}, width={map_width})" + ) + return None + # Generate path using the mobility path planner path_planner_output = generate_paths(start=start_yx_pixels, freespace=occupancy_map.freespace_mask()) diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py index dc9c969171c0..a0eb00fb4a58 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/scene_utils.py @@ -7,8 +7,10 @@ import numpy as np import torch +import warp as wp import isaaclab.utils.math as math_utils +from isaaclab.sim.views import FrameView from .occupancy_map_utils import OccupancyMap, intersect_occupancy_maps from .transform_utils import transform_mul @@ -83,7 +85,8 @@ def __init__(self, scene, entity_name: str, body_name: str): def get_pose(self): """Get the 3D pose of the entity.""" - pose = self.scene[self.entity_name].data.body_link_state_w[ + body_link_state_w = self.scene[self.entity_name].data.body_link_state_w.torch + pose = body_link_state_w[ :, self.scene[self.entity_name].data.body_names.index(self.body_name), :7, @@ -98,19 +101,30 @@ def __init__(self, scene, entity_name: str): self.scene = scene self.entity_name = entity_name + def _get_xform_view(self) -> FrameView: + """Return the FrameView for this asset, refreshing it if prims were not yet cloned.""" + xform_prim = self.scene[self.entity_name] + if xform_prim.count == 0: + # The view was created before environment cloning; rebuild it now that prims exist. + xform_prim = FrameView(xform_prim._prim_path, device=xform_prim.device) + self.scene.extras[self.entity_name] = xform_prim + return xform_prim + def get_pose(self): """Get the 3D pose of the entity.""" - xform_prim = self.scene[self.entity_name] - position, orientation = xform_prim.get_world_poses() + xform_prim = self._get_xform_view() + pos_w, quat_w = xform_prim.get_world_poses() + position = pos_w.torch + orientation = quat_w.torch pose = torch.cat([position, orientation], dim=-1) return pose def set_pose(self, pose: torch.Tensor): """Set the 3D pose of the entity.""" - xform_prim = self.scene[self.entity_name] + xform_prim = self._get_xform_view() position = pose[..., :3] orientation = pose[..., 3:] - xform_prim.set_world_poses(position, orientation, None) + xform_prim.set_world_poses(wp.from_torch(position.contiguous()), wp.from_torch(orientation.contiguous()), None) class RelativePose(HasPose): @@ -124,8 +138,9 @@ def get_pose(self): """Get the 3D pose of the entity.""" parent_pose = self.parent.get_pose() + relative_pose = self.relative_pose.to(parent_pose.device) - pose = transform_mul(parent_pose, self.relative_pose) + pose = transform_mul(parent_pose, relative_pose) return pose @@ -133,23 +148,53 @@ def get_pose(self): class SceneFixture(SceneAsset, HasOccupancyMap): """A helper class for working with assets in a scene that have an associated occupancy map.""" - def __init__( - self, scene, entity_name: str, occupancy_map_boundary: np.ndarray, occupancy_map_resolution: float = 0.05 - ): + def __init__(self, scene, entity_name: str, local_occupancy_map: OccupancyMap): + """Initialize a SceneFixture from a local occupancy map + + Args: + scene: The scene + entity_name: The name of the entity + local_occupancy_map: The local occupancy map + """ SceneAsset.__init__(self, scene, entity_name) - self.occupancy_map_boundary = occupancy_map_boundary - self.occupancy_map_resolution = occupancy_map_resolution + self.local_occupancy_map = local_occupancy_map + + @classmethod + def from_boundary( + cls, scene, entity_name: str, occupancy_map_boundary: np.ndarray, occupancy_map_resolution: float = 0.05 + ) -> "SceneFixture": + """Create a SceneFixture from a known boundary/resolution pair + + Args: + scene: The scene + entity_name: The name of the entity + occupancy_map_boundary: The boundary of the occupancy map + occupancy_map_resolution: The resolution of the occupancy map + + Returns: + SceneFixture: The SceneFixture + """ + occupancy_map = OccupancyMap.from_occupancy_boundary( + boundary=occupancy_map_boundary, resolution=occupancy_map_resolution + ) + return cls(scene, entity_name, occupancy_map) def get_occupancy_map(self): - local_occupancy_map = OccupancyMap.from_occupancy_boundary( - boundary=self.occupancy_map_boundary, resolution=self.occupancy_map_resolution - ) + """Get the occupancy map of the SceneFixture - transform = self.get_transform_2d().detach().cpu().numpy() + Returns: + OccupancyMap: The occupancy map + """ + if self.local_occupancy_map is None: + raise RuntimeError("SceneFixture requires an occupancy map before querying it.") - occupancy_map = local_occupancy_map.transformed(transform) + transform = self.get_transform_2d().detach().cpu().numpy() + # get_world_poses() may return a batched (num_envs, 3, 3) or empty (0, 3, 3) tensor. + # For a fixed background asset placed at the world origin, fall back to identity when empty. + if transform.ndim == 3: + transform = transform[0] if transform.shape[0] > 0 else np.eye(3) - return occupancy_map + return self.local_occupancy_map.transformed(transform) def place_randomly( diff --git a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py index 73406a187ffd..8d832656c235 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py +++ b/source/isaaclab_mimic/isaaclab_mimic/locomanipulation_sdg/transform_utils.py @@ -44,5 +44,8 @@ def transform_inv(transform: torch.Tensor) -> torch.Tensor: def transform_relative_pose(world_pose: torch.Tensor, src_frame_pose: torch.Tensor, dst_frame_pose: torch.Tensor): """Compute the relative pose with respect to a source frame, and apply this relative pose to a destination frame.""" + device = dst_frame_pose.device + world_pose = world_pose.to(device) + src_frame_pose = src_frame_pose.to(device) pose = transform_mul(dst_frame_pose, transform_mul(transform_inv(src_frame_pose), world_pose)) return pose diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py index a7e1ebb36252..c03db9c32921 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py @@ -193,7 +193,7 @@ def __init__( # Use env-local base translation for multi-env rendering consistency env_origin = self.env.scene.env_origins[env_id, :3] - base_translation = (self.robot.data.root_pos_w[env_id, :3] - env_origin).detach().cpu().numpy() + base_translation = (self.robot.data.root_pos_w.torch[env_id, :3] - env_origin).detach().cpu().numpy() self.plan_visualizer = PlanVisualizer( robot_name=self.config.robot_name, recording_id=f"curobo_plan_{env_id}", @@ -457,10 +457,12 @@ def get_attached_pose(self, link_name: str, joint_state: JointState | None = Non link_poses = {} if link_state.links_position is not None and link_state.links_quaternion is not None: for i, link in enumerate(link_state.link_names): + # cuRobo kinematics returns quaternions in (w, x, y, z) format link_poses[link] = self._make_pose( position=link_state.links_position[..., i, :], quaternion=link_state.links_quaternion[..., i, :], name=link, + quat_is_xyzw=False, ) # For attached object link, use ee_link from robot config as parent @@ -625,22 +627,23 @@ def _sync_object_poses_with_isaaclab(self) -> None: # Get current pose from Lab (may be on CPU or CUDA depending on --device flag) obj = rigid_objects[object_name] env_origin = self.env.scene.env_origins[self.env_id] - current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin - current_quat_raw = obj.data.root_quat_w[self.env_id] # (w, x, y, z) + current_pos_raw = obj.data.root_pos_w.torch[self.env_id] - env_origin + current_quat_raw = obj.data.root_quat_w.torch[self.env_id] # (x, y, z, w) # Convert to cuRobo device and extract float values for pose list current_pos = self._to_curobo_device(current_pos_raw) current_quat = self._to_curobo_device(current_quat_raw) - # Convert to cuRobo pose format [x, y, z, w, x, y, z] + # Convert to cuRobo pose format [pos_x, pos_y, pos_z, qw, qx, qy, qz] + # Isaac Lab quaternion format: (x, y, z, w) -> cuRobo format: (w, x, y, z) pose_list = [ float(current_pos[0].item()), float(current_pos[1].item()), float(current_pos[2].item()), - float(current_quat[0].item()), - float(current_quat[1].item()), - float(current_quat[2].item()), - float(current_quat[3].item()), + float(current_quat[3].item()), # w + float(current_quat[0].item()), # x + float(current_quat[1].item()), # y + float(current_quat[2].item()), # z ] # Update object pose in cuRobo's world model @@ -665,8 +668,8 @@ def _sync_object_poses_with_isaaclab(self) -> None: # Get current pose and update in collision checker obj = rigid_objects[object_name] env_origin = self.env.scene.env_origins[self.env_id] - current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin - current_quat_raw = obj.data.root_quat_w[self.env_id] + current_pos_raw = obj.data.root_pos_w.torch[self.env_id] - env_origin + current_quat_raw = obj.data.root_quat_w.torch[self.env_id] current_pos = self._to_curobo_device(current_pos_raw) current_quat = self._to_curobo_device(current_quat_raw) @@ -929,7 +932,7 @@ def _get_current_joint_state_for_curobo(self) -> JointState: and zero velocity/acceleration. """ # Fetch joint position (shape: [1, num_joints]) - joint_pos_raw: torch.Tensor = self.robot.data.joint_pos[self.env_id, :].unsqueeze(0) + joint_pos_raw: torch.Tensor = self.robot.data.joint_pos.torch[self.env_id, :].unsqueeze(0) joint_vel_raw: torch.Tensor = torch.zeros_like(joint_pos_raw) joint_acc_raw: torch.Tensor = torch.zeros_like(joint_pos_raw) @@ -997,41 +1000,48 @@ def _make_pose( *, name: str | None = None, normalize_rotation: bool = False, + quat_is_xyzw: bool = True, ) -> Pose: """Create a cuRobo Pose with sensible defaults and device/dtype alignment. Auto-populates missing fields with identity values and ensures tensors are - on the cuRobo device with the correct dtype. + on the cuRobo device with the correct dtype. Handles quaternion format conversion + from Isaac Lab's (x, y, z, w) to cuRobo's (w, x, y, z) format when needed. Args: position: Optional position as Tensor/ndarray/list. Defaults to [0, 0, 0]. - quaternion: Optional quaternion as Tensor/ndarray/list (w, x, y, z). Defaults to [1, 0, 0, 0]. + quaternion: Optional quaternion as Tensor/ndarray/list. Defaults to identity quaternion. name: Optional name of the link that this pose represents. normalize_rotation: Whether to normalize the quaternion inside Pose. + quat_is_xyzw: If True, quaternion is in Isaac Lab format (x, y, z, w) and will be + converted to cuRobo format. If False, quaternion is already in cuRobo (w, x, y, z) format. Returns: Pose: A cuRobo Pose on the configured cuRobo device and dtype. """ - # Defaults if position is None: position = torch.tensor([0.0, 0.0, 0.0], dtype=self.tensor_args.dtype, device=self.tensor_args.device) if quaternion is None: - quaternion = torch.tensor( + quaternion_wxyz = torch.tensor( [1.0, 0.0, 0.0, 0.0], dtype=self.tensor_args.dtype, device=self.tensor_args.device ) + else: + if not isinstance(quaternion, torch.Tensor): + quaternion = torch.tensor(quaternion, dtype=self.tensor_args.dtype, device=self.tensor_args.device) + else: + quaternion = self._to_curobo_device(quaternion) + + if quat_is_xyzw: + quaternion_wxyz = torch.roll(quaternion, shifts=1, dims=-1) + else: + quaternion_wxyz = quaternion - # Convert to tensors if needed if not isinstance(position, torch.Tensor): position = torch.tensor(position, dtype=self.tensor_args.dtype, device=self.tensor_args.device) else: position = self._to_curobo_device(position) - if not isinstance(quaternion, torch.Tensor): - quaternion = torch.tensor(quaternion, dtype=self.tensor_args.dtype, device=self.tensor_args.device) - else: - quaternion = self._to_curobo_device(quaternion) - - return Pose(position=position, quaternion=quaternion, name=name, normalize_rotation=normalize_rotation) + return Pose(position=position, quaternion=quaternion_wxyz, name=name, normalize_rotation=normalize_rotation) def _set_active_links(self, links: list[str], active: bool) -> None: """Configure collision checking for specific robot links. @@ -1395,7 +1405,7 @@ def _linearly_retime_plan( return plan deltas = path[1:] - path[:-1] - distances = torch.norm(deltas, dim=-1) + distances = torch.linalg.norm(deltas, dim=-1) waypoints = [path[0]] for distance, waypoint in zip(distances, path[1:]): @@ -1409,7 +1419,7 @@ def _linearly_retime_plan( if len(waypoints) > 1: deltas = waypoints[1:] - waypoints[:-1] - distances = torch.norm(deltas, dim=-1) + distances = torch.linalg.norm(deltas, dim=-1) cum_distances = torch.cat([torch.zeros(1, device=distances.device), torch.cumsum(distances, dim=0)]) if len(waypoints) < 2 or cum_distances[-1] < 1e-6: @@ -1563,7 +1573,7 @@ def _update_visualization_at_joint_positions(self, joint_positions: torch.Tensor if self.frame_counter % self.sphere_update_freq != 0: return - original_joints: torch.Tensor = self.robot.data.joint_pos[self.env_id].clone() + original_joints: torch.Tensor = self.robot.data.joint_pos.torch[self.env_id].clone() try: # Ensure joint positions are on environment device for robot commands @@ -1690,7 +1700,7 @@ def _create_sphere_config(self, sphere_idx: int, sphere, robot_link_count: int) opacity = 0.9 if is_attached else 0.5 # Calculate position in world frame (do not use env_origin) - root_translation = (self.robot.data.root_pos_w[self.env_id, :3]).detach().cpu().numpy() + root_translation = self.robot.data.root_pos_w.torch[self.env_id, :3].detach().cpu().numpy() position = sphere.position.cpu().numpy() if hasattr(sphere.position, "cpu") else sphere.position if not is_attached: position = position + root_translation @@ -1777,7 +1787,7 @@ def update_world_and_plan_motion( gripper_closed = expected_attached_object is not None self._set_gripper_state(gripper_closed) current_attached = self.get_attached_objects() - gripper_pos = self.robot.data.joint_pos[env_id, -2:] + gripper_pos = self.robot.data.joint_pos.torch[env_id, -2:] self.logger.debug(f"Current attached objects: {current_attached}") @@ -1799,13 +1809,13 @@ def update_world_and_plan_motion( if expected_attached_object in rigid_objects: obj = rigid_objects[expected_attached_object] origin = self.env.scene.env_origins[env_id] - obj_pos = obj.data.root_pos_w[env_id] - origin + obj_pos = obj.data.root_pos_w.torch[env_id] - origin self.logger.debug(f"Isaac Lab object position: {obj_pos}") # Debug end-effector position ee_frame_cfg = SceneEntityCfg("ee_frame") ee_frame = self.env.scene[ee_frame_cfg.name] - ee_pos = ee_frame.data.target_pos_w[env_id, 0, :] - origin + ee_pos = ee_frame.data.target_pos_w.torch[env_id, 0, :] - origin self.logger.debug(f"End-effector position: {ee_pos}") # Debug distance diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py index 0f5252e208c9..f467ebdff0a6 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py @@ -27,6 +27,8 @@ except ImportError: raise ImportError("Rerun is not installed!") +_RR_HAS_TRANSFORM_AXES = hasattr(rr, "TransformAxes3D") + from curobo.types.state import JointState import isaaclab.utils.math as PoseUtils @@ -628,15 +630,17 @@ def _to_rerun_mesh(mesh: "trimesh.Trimesh") -> "rr.Mesh3D": # Always update transform (objects may move between calls) # NOTE: World scene objects are already in correct world coordinates, no offset needed - rr.log( - rr_path, - rr.Transform3D( - translation=tform[:3, 3], - mat3x3=tform[:3, :3], - axis_length=0.25, - ), - static=False, - ) + if _RR_HAS_TRANSFORM_AXES: + # rerun >= 0.28: axis_length moved to TransformAxes3D + rr.log(rr_path, rr.Transform3D(translation=tform[:3, 3], mat3x3=tform[:3, :3]), static=False) + rr.log(rr_path, rr.TransformAxes3D(axis_length=0.25), static=False) + else: + # rerun <= 0.27: axis_length on Transform3D directly + rr.log( + rr_path, + rr.Transform3D(translation=tform[:3, 3], mat3x3=tform[:3, :3], axis_length=0.25), + static=False, + ) # Geometry: send only once per node to avoid duplicates if rr_path not in self._logged_geometry: diff --git a/source/isaaclab_mimic/setup.py b/source/isaaclab_mimic/setup.py index c43d268fe260..e3d9e2dc6ac3 100644 --- a/source/isaaclab_mimic/setup.py +++ b/source/isaaclab_mimic/setup.py @@ -22,8 +22,14 @@ "tomli", # jupyter notebook "ipywidgets==8.1.5", + # data collection + "h5py==3.15.1", ] +# nvidia-srl-usd-to-urdf depends on usd-core which has no aarch64 wheels +if platform.machine() != "aarch64": + INSTALL_REQUIRES.append("nvidia-srl-usd-to-urdf") + # Extra dependencies for IL agents EXTRAS_REQUIRE = {"robomimic": []} @@ -50,14 +56,11 @@ extras_require=EXTRAS_REQUIRE, license="Apache-2.0", include_package_data=True, - python_requires=">=3.10", + python_requires=">=3.12", classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab_mimic/test/mimic_test_utils.py b/source/isaaclab_mimic/test/mimic_test_utils.py new file mode 100644 index 000000000000..1ffd7d466282 --- /dev/null +++ b/source/isaaclab_mimic/test/mimic_test_utils.py @@ -0,0 +1,83 @@ +# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Shared test utilities for isaaclab_mimic dataset generation tests.""" + +import contextlib +import os +import signal +import subprocess + +SUBPROCESS_GRACE_PERIOD = 15 +"""Grace period [s] after SIGTERM before sending SIGKILL.""" + +SUBPROCESS_DRAIN_TIMEOUT = 5 +"""Timeout [s] for draining remaining pipe output after killing a process group.""" + + +def _kill_process_group(pgid: int, sig: int): + """Send *sig* to every process in the group, ignoring errors if already dead.""" + with contextlib.suppress(ProcessLookupError): + os.killpg(pgid, sig) + + +def run_script(command: list[str], timeout: int = 5000) -> subprocess.CompletedProcess: + """Run a script in a subprocess and return a CompletedProcess. + + The Kit / Omniverse runtime's ``simulation_app.close()`` can hang + indefinitely when another ``SimulationApp`` instance is alive in the parent + test process (shared GPU / IPC resources). To avoid blocking the test + suite we use ``Popen`` with an explicit timeout: + + 1. Wait up to *timeout* seconds for the process to finish. + 2. On timeout send ``SIGTERM`` to the **entire process group** and wait + :data:`SUBPROCESS_GRACE_PERIOD` seconds. + 3. If still alive, ``SIGKILL`` the process group and collect remaining + output. + + The subprocess is started in its own session (``start_new_session=True``) + so that signals reach Kit child processes, and pipe draining after a kill + uses a bounded timeout to avoid blocking on orphaned FD holders. + + The captured *stdout* / *stderr* are returned regardless of how the process + terminated so that callers can validate the script's printed output. + + Args: + command: The command to execute as a list of strings. + timeout: Maximum time [s] to wait for the process to finish. + """ + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + start_new_session=True, + ) + pgid = os.getpgid(process.pid) + try: + stdout, stderr = process.communicate(timeout=timeout) + except subprocess.TimeoutExpired: + _kill_process_group(pgid, signal.SIGTERM) + try: + stdout, stderr = process.communicate(timeout=SUBPROCESS_GRACE_PERIOD) + except subprocess.TimeoutExpired: + _kill_process_group(pgid, signal.SIGKILL) + try: + stdout, stderr = process.communicate(timeout=SUBPROCESS_DRAIN_TIMEOUT) + except subprocess.TimeoutExpired: + # Orphaned grandchildren may still hold pipe FDs open. + # Close our ends so we don't block forever, then reap the + # zombie leader. + process.stdout.close() + process.stderr.close() + process.wait() + stdout, stderr = "", "" + + return subprocess.CompletedProcess( + args=command, + returncode=process.returncode, + stdout=stdout or "", + stderr=stderr or "", + ) diff --git a/source/isaaclab_mimic/test/test_curobo_planner_cube_stack.py b/source/isaaclab_mimic/test/test_curobo_planner_cube_stack.py index 4c532f62ef62..8dea1cb14f74 100644 --- a/source/isaaclab_mimic/test/test_curobo_planner_cube_stack.py +++ b/source/isaaclab_mimic/test/test_curobo_planner_cube_stack.py @@ -84,7 +84,7 @@ def _execute_gripper_action( _env_step_with_action(env, action) -DOWN_FACING_QUAT = torch.tensor([0.0, 1.0, 0.0, 0.0], dtype=torch.float32) +DOWN_FACING_QUAT = torch.tensor([1.0, 0.0, 0.0, 0.0], dtype=torch.float32) @pytest.fixture(scope="class") @@ -164,7 +164,7 @@ def _pose_from_xy_quat(self, xy: torch.Tensor, z: float, quat: torch.Tensor) -> def _get_cube_pos(self, cube_name: str) -> torch.Tensor: """Return the current world position of a cube's root (x, y, z).""" obj: RigidObject = self.env.scene[cube_name] - return obj.data.root_pos_w[0, :3].clone().detach() + return obj.data.root_pos_w.torch[0, :3].clone().detach() def _place_pose_over_cube(self, cube_name: str, height_offset: float) -> torch.Tensor: """Compute a goal pose directly above the named cube using the latest pose.""" diff --git a/source/isaaclab_mimic/test/test_curobo_planner_franka.py b/source/isaaclab_mimic/test/test_curobo_planner_franka.py index 9e5adf724c2f..7413bd3e2f9e 100644 --- a/source/isaaclab_mimic/test/test_curobo_planner_franka.py +++ b/source/isaaclab_mimic/test/test_curobo_planner_franka.py @@ -20,6 +20,7 @@ import gymnasium as gym import torch +import warp as wp import isaaclab.utils.assets as _al_assets import isaaclab.utils.math as math_utils @@ -39,11 +40,11 @@ # Predefined EE goals for the test # Each entry is a tuple of: (goal specification, goal ID) predefined_ee_goals_and_ids = [ - ({"pos": [0.70, -0.25, 0.25], "quat": [0.0, 0.707, 0.0, 0.707]}, "Behind wall, left"), - ({"pos": [0.70, 0.25, 0.25], "quat": [0.0, 0.707, 0.0, 0.707]}, "Behind wall, right"), - ({"pos": [0.65, 0.0, 0.45], "quat": [0.0, 1.0, 0.0, 0.0]}, "Behind wall, center, high"), - ({"pos": [0.80, -0.15, 0.35], "quat": [0.0, 0.5, 0.0, 0.866]}, "Behind wall, far left"), - ({"pos": [0.80, 0.15, 0.35], "quat": [0.0, 0.5, 0.0, 0.866]}, "Behind wall, far right"), + ({"pos": [0.70, -0.25, 0.25], "quat": [0.707, 0.0, 0.707, 0.0]}, "Behind wall, left"), + ({"pos": [0.70, 0.25, 0.25], "quat": [0.707, 0.0, 0.707, 0.0]}, "Behind wall, right"), + ({"pos": [0.65, 0.0, 0.45], "quat": [1.0, 0.0, 0.0, 0.0]}, "Behind wall, center, high"), + ({"pos": [0.80, -0.15, 0.35], "quat": [0.5, 0.0, 0.866, 0.0]}, "Behind wall, far left"), + ({"pos": [0.80, 0.15, 0.35], "quat": [0.5, 0.0, 0.866, 0.0]}, "Behind wall, far right"), ] @@ -141,7 +142,7 @@ def _set_arm_positions(self, q: torch.Tensor) -> None: q_full = torch.cat([q, fingers], dim=-1) else: q_full = q - self.robot.write_joint_position_to_sim(q_full) + self.robot.write_joint_position_to_sim(wp.from_torch(q_full.to(self.env.device))) @pytest.mark.parametrize("goal_spec, goal_id", predefined_ee_goals_and_ids) def test_plan_to_predefined_goal(self, goal_spec, goal_id) -> None: diff --git a/source/isaaclab_mimic/test/test_generate_dataset.py b/source/isaaclab_mimic/test/test_generate_dataset.py deleted file mode 100644 index 8568ab4ec01d..000000000000 --- a/source/isaaclab_mimic/test/test_generate_dataset.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: Apache-2.0 - -"""Test dataset generation for Isaac Lab Mimic workflow.""" - -from isaaclab.app import AppLauncher - -# launch omniverse app -simulation_app = AppLauncher(headless=True).app - -import os -import subprocess -import tempfile - -import pytest - -from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path - -DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix="_Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0") -NUCLEUS_DATASET_PATH = os.path.join(ISAACLAB_NUCLEUS_DIR, "Tests", "Mimic", "dataset.hdf5") -EXPECTED_SUCCESSFUL_ANNOTATIONS = 10 - - -@pytest.fixture -def setup_test_environment(): - """Set up the environment for testing.""" - # Create the datasets directory if it does not exist - if not os.path.exists(DATASETS_DOWNLOAD_DIR): - print("Creating directory : ", DATASETS_DOWNLOAD_DIR) - os.makedirs(DATASETS_DOWNLOAD_DIR) - - # Try to download the dataset from Nucleus - try: - retrieve_file_path(NUCLEUS_DATASET_PATH, DATASETS_DOWNLOAD_DIR) - except Exception as e: - print(e) - print("Could not download dataset from Nucleus") - pytest.fail( - "The dataset required for this test is currently unavailable. Dataset path: " + NUCLEUS_DATASET_PATH - ) - - # Set the environment variable PYTHONUNBUFFERED to 1 to get all text outputs in result.stdout - pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") - os.environ["PYTHONUNBUFFERED"] = "1" - - # Automatically detect the workflow root (backtrack from current file location) - current_dir = os.path.dirname(os.path.abspath(__file__)) - workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) - - # Run the command to generate core configs - config_command = [ - workflow_root + "/isaaclab.sh", - "-p", - os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/annotate_demos.py"), - "--task", - "Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0", - "--input_file", - DATASETS_DOWNLOAD_DIR + "/dataset.hdf5", - "--output_file", - DATASETS_DOWNLOAD_DIR + "/annotated_dataset.hdf5", - "--auto", - "--headless", - ] - print(config_command) - - # Execute the command and capture the result - result = subprocess.run(config_command, capture_output=True, text=True) - - print(f"Annotate demos result: {result.returncode}\n\n\n\n\n\n\n\n\n\n\n\n") - - # Print the result for debugging purposes - print("Config generation result:") - print(result.stdout) # Print standard output from the command - print(result.stderr) # Print standard error from the command - - # Check if the config generation was successful - assert result.returncode == 0, result.stderr - - # Check that at least one task was completed successfully by parsing stdout - # Look for the line that reports successful task completions - success_line = None - for line in result.stdout.split("\n"): - if "Successful task completions:" in line: - success_line = line - break - - assert success_line is not None, "Could not find 'Successful task completions:' in output" - - # Extract the number from the line - try: - successful_count = int(success_line.split(":")[-1].strip()) - assert successful_count == EXPECTED_SUCCESSFUL_ANNOTATIONS, ( - f"Expected 10 successful annotations but got {successful_count}" - ) - except (ValueError, IndexError) as e: - pytest.fail(f"Could not parse successful task count from line: '{success_line}'. Error: {e}") - - # Yield the workflow root for use in tests - yield workflow_root - - # Cleanup: restore the original environment variable - if pythonunbuffered_env_var_: - os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_ - else: - del os.environ["PYTHONUNBUFFERED"] - - -@pytest.mark.isaacsim_ci -def test_generate_dataset(setup_test_environment): - """Test the dataset generation script.""" - workflow_root = setup_test_environment - - # Define the command to run the dataset generation script - command = [ - workflow_root + "/isaaclab.sh", - "-p", - os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), - "--input_file", - DATASETS_DOWNLOAD_DIR + "/annotated_dataset.hdf5", - "--output_file", - DATASETS_DOWNLOAD_DIR + "/generated_dataset.hdf5", - "--generation_num_trials", - "1", - "--headless", - ] - - # Call the script and capture output - result = subprocess.run(command, capture_output=True, text=True) - - # Print the result for debugging purposes - print("Dataset generation result:") - print(result.stdout) # Print standard output from the command - print(result.stderr) # Print standard error from the command - - # Check if the script executed successfully - assert result.returncode == 0, result.stderr - - # Check for specific output - expected_output = "successes/attempts. Exiting" - assert expected_output in result.stdout diff --git a/source/isaaclab_mimic/test/test_generate_dataset_franka_state.py b/source/isaaclab_mimic/test/test_generate_dataset_franka_state.py new file mode 100644 index 000000000000..cea7d31ea374 --- /dev/null +++ b/source/isaaclab_mimic/test/test_generate_dataset_franka_state.py @@ -0,0 +1,173 @@ +# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Test dataset generation for Isaac Lab Mimic workflow.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os +import sys +import tempfile + +import pytest +from mimic_test_utils import run_script + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix="_Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0") +NUCLEUS_DATASET_PATH = os.path.join(ISAACLAB_NUCLEUS_DIR, "Tests", "Mimic", "dataset.hdf5") + +_SUBPROCESS_TIMEOUT = 5000 + + +@pytest.fixture +def setup_test_environment(): + """Set up the environment for testing.""" + # Create the datasets directory if it does not exist + if not os.path.exists(DATASETS_DOWNLOAD_DIR): + print("Creating directory : ", DATASETS_DOWNLOAD_DIR) + os.makedirs(DATASETS_DOWNLOAD_DIR) + + # Try to download the dataset from Nucleus. + # retrieve_file_path mirrors the remote directory tree under the download + # dir and returns the actual local path of the downloaded file. + try: + downloaded_dataset_path = retrieve_file_path(NUCLEUS_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + except Exception as e: + print(e) + print("Could not download dataset from Nucleus") + pytest.fail( + "The dataset required for this test is currently unavailable. Dataset path: " + NUCLEUS_DATASET_PATH + ) + + # Verify the downloaded file actually exists on disk + assert os.path.isfile(downloaded_dataset_path), ( + f"retrieve_file_path returned '{downloaded_dataset_path}' but the file does not exist on disk." + ) + + # Set the environment variable PYTHONUNBUFFERED to 1 to get all text outputs in result.stdout + pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") + os.environ["PYTHONUNBUFFERED"] = "1" + + # Automatically detect the workflow root (backtrack from current file location) + current_dir = os.path.dirname(os.path.abspath(__file__)) + workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) + + annotated_output_path = os.path.join(DATASETS_DOWNLOAD_DIR, "annotated_dataset.hdf5") + + # Run the annotate_demos script directly (bypassing isaaclab.sh) so that + # stdout is properly captured. When launched through the CLI wrapper the + # Omniverse/Kit runtime redirects OS-level file descriptors during + # SimulationApp init, swallowing all print() output. + config_command = [ + sys.executable, + os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/annotate_demos.py"), + "--task", + "Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0", + "--input_file", + downloaded_dataset_path, + "--output_file", + annotated_output_path, + "--auto", + "--headless", + ] + print(config_command) + + result = run_script(config_command, timeout=_SUBPROCESS_TIMEOUT) + + print(f"Annotate demos result: {result.returncode}\n") + + # Print the result for debugging purposes + print("Config generation result:") + print(result.stdout) # Print standard output from the command + print(result.stderr) # Print standard error from the command + + # Check that at least one task was completed successfully by parsing stdout. + # Note: we cannot rely on the process exit code because simulation_app.close() + # triggers Kit runtime cleanup that resets the exit code to 0 (or the process + # may have been killed after a cleanup hang, yielding -SIGKILL). + combined_output = result.stdout + "\n" + result.stderr + success_line = None + for line in combined_output.split("\n"): + if "Successful task completions:" in line: + success_line = line + break + + assert success_line is not None, ( + f"Could not find 'Successful task completions:' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + + # Extract the number from the line + try: + successful_count = int(success_line.split(":")[-1].strip()) + except (ValueError, IndexError) as e: + pytest.fail(f"Could not parse successful task count from line: '{success_line}'. Error: {e}") + + assert successful_count > 0, "No successful annotations found." + + # Also verify the annotated output file was created + assert os.path.exists(annotated_output_path), f"Annotated dataset file was not created at {annotated_output_path}" + + # Yield the workflow root for use in tests + yield workflow_root + + # Cleanup: restore the original environment variable + if pythonunbuffered_env_var_: + os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_ + else: + del os.environ["PYTHONUNBUFFERED"] + + +def _run_generation(workflow_root: str, input_file: str, output_file: str, num_envs: int): + """Build the generation command, run it, and assert success.""" + command = [ + sys.executable, + os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), + "--input_file", + input_file, + "--output_file", + output_file, + "--num_envs", + str(num_envs), + "--generation_num_trials", + "1", + "--headless", + ] + + result = run_script(command, timeout=_SUBPROCESS_TIMEOUT) + + print(f"State-based dataset generation result (num_envs={num_envs}):") + print(result.stdout) + print(result.stderr) + + assert os.path.exists(output_file), ( + f"Generated dataset file was not created at {output_file}.\n" + f"returncode: {result.returncode}\nstderr: {result.stderr}" + ) + + combined_output = result.stdout + "\n" + result.stderr + expected_output = "successes/attempts. Exiting" + assert expected_output in combined_output, ( + f"Could not find '{expected_output}' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + + +def test_generate_dataset_franka_state(setup_test_environment): + """Test dataset generation for the state-based cube-stack environment (single env).""" + workflow_root = setup_test_environment + annotated_input_path = os.path.join(DATASETS_DOWNLOAD_DIR, "annotated_dataset.hdf5") + generated_output_path = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset.hdf5") + _run_generation(workflow_root, annotated_input_path, generated_output_path, num_envs=1) + + +def test_generate_dataset_franka_state_multi_env(setup_test_environment): + """Test dataset generation for the state-based cube-stack environment (5 envs).""" + workflow_root = setup_test_environment + annotated_input_path = os.path.join(DATASETS_DOWNLOAD_DIR, "annotated_dataset.hdf5") + generated_output_path = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_multi_env.hdf5") + _run_generation(workflow_root, annotated_input_path, generated_output_path, num_envs=5) diff --git a/source/isaaclab_mimic/test/test_generate_dataset_franka_visuomotor.py b/source/isaaclab_mimic/test/test_generate_dataset_franka_visuomotor.py new file mode 100644 index 000000000000..4f4fe70d8a59 --- /dev/null +++ b/source/isaaclab_mimic/test/test_generate_dataset_franka_visuomotor.py @@ -0,0 +1,110 @@ +# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Test dataset generation for Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-Mimic-v0.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os +import sys +import tempfile + +import pytest +from mimic_test_utils import run_script + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +_TASK_NAME = "Isaac-Stack-Cube-Franka-IK-Rel-Visuomotor-Mimic-v0" +DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix=f"_{_TASK_NAME}") +NUCLEUS_ANNOTATED_DATASET_PATH = os.path.join( + ISAACLAB_NUCLEUS_DIR, "Mimic", "Tests", "annotated_dataset_franka_visuomotor_test.hdf5" +) + + +@pytest.fixture +def setup_visuomotor_test_environment(): + """Download the pre-annotated dataset and prepare the test environment.""" + if not os.path.exists(DATASETS_DOWNLOAD_DIR): + os.makedirs(DATASETS_DOWNLOAD_DIR) + + try: + downloaded_dataset_path = retrieve_file_path(NUCLEUS_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + except Exception as e: + print(e) + pytest.fail( + "The pre-annotated dataset required for this test is currently unavailable. " + f"Dataset path: {NUCLEUS_ANNOTATED_DATASET_PATH}" + ) + + assert os.path.isfile(downloaded_dataset_path), ( + f"retrieve_file_path returned '{downloaded_dataset_path}' but the file does not exist on disk." + ) + + pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") + os.environ["PYTHONUNBUFFERED"] = "1" + + current_dir = os.path.dirname(os.path.abspath(__file__)) + workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) + + yield workflow_root, downloaded_dataset_path + + if pythonunbuffered_env_var_: + os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_ + else: + del os.environ["PYTHONUNBUFFERED"] + + +def _run_generation(workflow_root: str, input_file: str, output_file: str, num_envs: int): + """Build the generation command, run it, and assert success.""" + command = [ + sys.executable, + os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), + "--task", + _TASK_NAME, + "--input_file", + input_file, + "--output_file", + output_file, + "--num_envs", + str(num_envs), + "--generation_num_trials", + "1", + "--enable_cameras", + "--headless", + ] + + result = run_script(command) + + print(f"Visuomotor dataset generation result (num_envs={num_envs}):") + print(result.stdout) + print(result.stderr) + + assert os.path.exists(output_file), ( + f"Generated dataset file was not created at {output_file}.\n" + f"returncode: {result.returncode}\nstderr: {result.stderr}" + ) + + combined_output = result.stdout + "\n" + result.stderr + expected_output = "successes/attempts. Exiting" + assert expected_output in combined_output, ( + f"Could not find '{expected_output}' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + + +def test_generate_dataset_franka_visuomotor(setup_visuomotor_test_environment): + """Test dataset generation for the visuomotor cube-stack environment (single env).""" + workflow_root, input_file = setup_visuomotor_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=1) + + +def test_generate_dataset_franka_visuomotor_multi_env(setup_visuomotor_test_environment): + """Test dataset generation for the visuomotor cube-stack environment (5 envs).""" + workflow_root, input_file = setup_visuomotor_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_multi_env.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=5) diff --git a/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_nutpour.py b/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_nutpour.py new file mode 100644 index 000000000000..ed41a827752e --- /dev/null +++ b/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_nutpour.py @@ -0,0 +1,110 @@ +# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Test dataset generation for Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-v0.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os +import sys +import tempfile + +import pytest +from mimic_test_utils import run_script + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +_TASK_NAME = "Isaac-NutPour-GR1T2-Pink-IK-Abs-Mimic-v0" +DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix=f"_{_TASK_NAME}") +NUCLEUS_ANNOTATED_DATASET_PATH = os.path.join( + ISAACLAB_NUCLEUS_DIR, "Mimic", "Tests", "annotated_dataset_gr1_nut_pouring_test.hdf5" +) + + +@pytest.fixture +def setup_nutpour_gr1t2_test_environment(): + """Download the pre-annotated dataset and prepare the test environment.""" + if not os.path.exists(DATASETS_DOWNLOAD_DIR): + os.makedirs(DATASETS_DOWNLOAD_DIR) + + try: + downloaded_dataset_path = retrieve_file_path(NUCLEUS_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + except Exception as e: + print(e) + pytest.fail( + "The pre-annotated dataset required for this test is currently unavailable. " + f"Dataset path: {NUCLEUS_ANNOTATED_DATASET_PATH}" + ) + + assert os.path.isfile(downloaded_dataset_path), ( + f"retrieve_file_path returned '{downloaded_dataset_path}' but the file does not exist on disk." + ) + + pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") + os.environ["PYTHONUNBUFFERED"] = "1" + + current_dir = os.path.dirname(os.path.abspath(__file__)) + workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) + + yield workflow_root, downloaded_dataset_path + + if pythonunbuffered_env_var_: + os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_ + else: + del os.environ["PYTHONUNBUFFERED"] + + +def _run_generation(workflow_root: str, input_file: str, output_file: str, num_envs: int): + """Build the generation command, run it, and assert success.""" + command = [ + sys.executable, + os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), + "--task", + _TASK_NAME, + "--input_file", + input_file, + "--output_file", + output_file, + "--num_envs", + str(num_envs), + "--generation_num_trials", + "1", + "--enable_cameras", + "--headless", + ] + + result = run_script(command) + + print(f"NutPour GR1T2 dataset generation result (num_envs={num_envs}):") + print(result.stdout) + print(result.stderr) + + assert os.path.exists(output_file), ( + f"Generated dataset file was not created at {output_file}.\n" + f"returncode: {result.returncode}\nstderr: {result.stderr}" + ) + + combined_output = result.stdout + "\n" + result.stderr + expected_output = "successes/attempts. Exiting" + assert expected_output in combined_output, ( + f"Could not find '{expected_output}' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + + +def test_generate_dataset_gr1t2_nutpour(setup_nutpour_gr1t2_test_environment): + """Test dataset generation for the GR1T2 nut-pour environment (single env).""" + workflow_root, input_file = setup_nutpour_gr1t2_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=1) + + +def test_generate_dataset_gr1t2_nutpour_multi_env(setup_nutpour_gr1t2_test_environment): + """Test dataset generation for the GR1T2 nut-pour environment (5 envs).""" + workflow_root, input_file = setup_nutpour_gr1t2_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_multi_env.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=5) diff --git a/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_pickplace.py b/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_pickplace.py new file mode 100644 index 000000000000..bd2017b5a7b0 --- /dev/null +++ b/source/isaaclab_mimic/test/test_generate_dataset_gr1t2_pickplace.py @@ -0,0 +1,109 @@ +# Copyright (c) 2024-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Test dataset generation for Isaac-PickPlace-GR1T2-Abs-Mimic-v0.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os +import sys +import tempfile + +import pytest +from mimic_test_utils import run_script + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +_TASK_NAME = "Isaac-PickPlace-GR1T2-Abs-Mimic-v0" +DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix=f"_{_TASK_NAME}") +NUCLEUS_ANNOTATED_DATASET_PATH = os.path.join( + ISAACLAB_NUCLEUS_DIR, "Mimic", "Tests", "annotated_dataset_gr1_steering_wheel_test.hdf5" +) + + +@pytest.fixture +def setup_pickplace_gr1t2_test_environment(): + """Download the pre-annotated dataset and prepare the test environment.""" + if not os.path.exists(DATASETS_DOWNLOAD_DIR): + os.makedirs(DATASETS_DOWNLOAD_DIR) + + try: + downloaded_dataset_path = retrieve_file_path(NUCLEUS_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + except Exception as e: + print(e) + pytest.fail( + "The pre-annotated dataset required for this test is currently unavailable. " + f"Dataset path: {NUCLEUS_ANNOTATED_DATASET_PATH}" + ) + + assert os.path.isfile(downloaded_dataset_path), ( + f"retrieve_file_path returned '{downloaded_dataset_path}' but the file does not exist on disk." + ) + + pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") + os.environ["PYTHONUNBUFFERED"] = "1" + + current_dir = os.path.dirname(os.path.abspath(__file__)) + workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) + + yield workflow_root, downloaded_dataset_path + + if pythonunbuffered_env_var_: + os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_ + else: + del os.environ["PYTHONUNBUFFERED"] + + +def _run_generation(workflow_root: str, input_file: str, output_file: str, num_envs: int): + """Build the generation command, run it, and assert success.""" + command = [ + sys.executable, + os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), + "--task", + _TASK_NAME, + "--input_file", + input_file, + "--output_file", + output_file, + "--num_envs", + str(num_envs), + "--generation_num_trials", + "1", + "--headless", + ] + + result = run_script(command) + + print(f"PickPlace GR1T2 dataset generation result (num_envs={num_envs}):") + print(result.stdout) + print(result.stderr) + + assert os.path.exists(output_file), ( + f"Generated dataset file was not created at {output_file}.\n" + f"returncode: {result.returncode}\nstderr: {result.stderr}" + ) + + combined_output = result.stdout + "\n" + result.stderr + expected_output = "successes/attempts. Exiting" + assert expected_output in combined_output, ( + f"Could not find '{expected_output}' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) + + +def test_generate_dataset_gr1t2_pickplace(setup_pickplace_gr1t2_test_environment): + """Test dataset generation for the GR1T2 pick-place environment (single env).""" + workflow_root, input_file = setup_pickplace_gr1t2_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=1) + + +def test_generate_dataset_gr1t2_pickplace_multi_env(setup_pickplace_gr1t2_test_environment): + """Test dataset generation for the GR1T2 pick-place environment (5 envs).""" + workflow_root, input_file = setup_pickplace_gr1t2_test_environment + output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_multi_env.hdf5") + _run_generation(workflow_root, input_file, output_file, num_envs=5) diff --git a/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py b/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py index 7f5afc7d664c..b88c9b9a2eff 100644 --- a/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py +++ b/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py @@ -11,10 +11,11 @@ simulation_app = AppLauncher(headless=True).app import os -import subprocess +import sys import tempfile import pytest +from mimic_test_utils import run_script from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path @@ -23,6 +24,8 @@ ISAACLAB_NUCLEUS_DIR, "Mimic", "franka_stack_datasets", "annotated_dataset_skillgen.hdf5" ) +_SUBPROCESS_TIMEOUT = 5000 + @pytest.fixture def setup_skillgen_test_environment(): @@ -32,8 +35,15 @@ def setup_skillgen_test_environment(): print("Creating directory : ", DATASETS_DOWNLOAD_DIR) os.makedirs(DATASETS_DOWNLOAD_DIR) - # Download the SkillGen annotated dataset from Nucleus into DATASETS_DOWNLOAD_DIR - retrieve_file_path(NUCLEUS_SKILLGEN_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + # Download the SkillGen annotated dataset from Nucleus into DATASETS_DOWNLOAD_DIR. + # retrieve_file_path mirrors the remote directory tree under the download + # dir and returns the actual local path of the downloaded file. + downloaded_dataset_path = retrieve_file_path(NUCLEUS_SKILLGEN_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR) + + # Verify the downloaded file actually exists on disk + assert os.path.isfile(downloaded_dataset_path), ( + f"retrieve_file_path returned '{downloaded_dataset_path}' but the file does not exist on disk." + ) # Set the environment variable PYTHONUNBUFFERED to 1 to get all text outputs in result.stdout pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED") @@ -43,8 +53,8 @@ def setup_skillgen_test_environment(): current_dir = os.path.dirname(os.path.abspath(__file__)) workflow_root = os.path.abspath(os.path.join(current_dir, "../../..")) - # Yield the workflow root for use in tests - yield workflow_root + # Yield the workflow root and the actual downloaded file path for use in tests + yield workflow_root, downloaded_dataset_path # Cleanup: restore the original environment variable if pythonunbuffered_env_var_: @@ -55,14 +65,14 @@ def setup_skillgen_test_environment(): def test_generate_dataset_skillgen(setup_skillgen_test_environment): """Test dataset generation with SkillGen enabled.""" - workflow_root = setup_skillgen_test_environment + workflow_root, input_file = setup_skillgen_test_environment - input_file = os.path.join(DATASETS_DOWNLOAD_DIR, "annotated_dataset_skillgen.hdf5") output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_skillgen.hdf5") + # Run the script directly (bypassing isaaclab.sh) so that stdout is + # properly captured (see _run_script docstring for details). command = [ - workflow_root + "/isaaclab.sh", - "-p", + sys.executable, os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"), "--device", "cpu", @@ -80,12 +90,21 @@ def test_generate_dataset_skillgen(setup_skillgen_test_environment): "Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0", ] - result = subprocess.run(command, capture_output=True, text=True) + result = run_script(command, timeout=_SUBPROCESS_TIMEOUT) print("SkillGen dataset generation result:") print(result.stdout) print(result.stderr) - assert result.returncode == 0, result.stderr + # Verify the generated dataset file was created. + assert os.path.exists(output_file), ( + f"Generated SkillGen dataset file was not created at {output_file}.\n" + f"returncode: {result.returncode}\nstderr: {result.stderr}" + ) + + # Check for the expected completion message in output + combined_output = result.stdout + "\n" + result.stderr expected_output = "successes/attempts. Exiting" - assert expected_output in result.stdout + assert expected_output in combined_output, ( + f"Could not find '{expected_output}' in output.\nstdout: {result.stdout}\nstderr: {result.stderr}" + ) diff --git a/source/isaaclab_mimic/test/test_selection_strategy.py b/source/isaaclab_mimic/test/test_selection_strategy.py index ac58be34db0f..e378932f72c5 100644 --- a/source/isaaclab_mimic/test/test_selection_strategy.py +++ b/source/isaaclab_mimic/test/test_selection_strategy.py @@ -54,13 +54,13 @@ def test_select_source_demo_identity_orientations_object_strategy(nearest_neighb # Generate object poses for cluster 1 with varying translations src_object_poses_in_world_cluster_1 = [ - torch.eye(4) + torch.tensor([[0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, -1.0]]) + torch.eye(4) + torch.tensor([[0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, -1.0, 0.0]]) for i in range(cluster_1_range_min, cluster_1_range_max) ] # Generate object poses for cluster 2 similarly src_object_poses_in_world_cluster_2 = [ - torch.eye(4) + torch.tensor([[0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, -1.0]]) + torch.eye(4) + torch.tensor([[0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, 0.0, i], [0.0, 0.0, -1.0, 0.0]]) for i in range(cluster_2_range_min, cluster_2_range_max) ] @@ -160,13 +160,13 @@ def test_select_source_demo_identity_orientations_robot_distance_strategy(neares # Generate random transformed object poses for cluster 1 with varying translations # This represents the first object pose for the transformed subtask segment for each source demo transformed_eef_pose_cluster_1 = [ - torch.eye(4) + torch.tensor([[0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, -1]]) + torch.eye(4) + torch.tensor([[0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, i], [0, 0, -1, 0]]) for i in range(cluster_1_range_min, cluster_1_range_max) ] # Generate object poses for cluster 2 similarly transformed_eef_pose_cluster_2 = [ - torch.eye(4) + torch.tensor([[0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, -1]]) + torch.eye(4) + torch.tensor([[0, 0, 0, i], [0, 0, 0, i], [0, 0, 0, i], [0, 0, -1, 0]]) for i in range(cluster_2_range_min, cluster_2_range_max) ] diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_articulation.py b/source/isaaclab_newton/benchmark/assets/benchmark_articulation.py new file mode 100644 index 000000000000..c9f11a5defb5 --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_articulation.py @@ -0,0 +1,1296 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for Articulation class (Newton backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the Articulation class. Each method is benchmarked under three scenarios: + +1. **Torch List**: Inputs are PyTorch tensors with list indices (via deprecated wrappers). +2. **Torch Tensor**: Inputs are PyTorch tensors with tensor indices (via deprecated wrappers). +3. **Warp Mask**: Inputs are warp arrays with boolean masks (via ``_mask`` methods). + +Usage: + python benchmark_articulation.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] [--num_joints J] + +Example: + python benchmark_articulation.py --num_iterations 1000 --warmup_steps 10 + python benchmark_articulation.py --mode torch_list # Only run list-based benchmarks + python benchmark_articulation.py --mode warp_mask # Only run warp mask benchmarks +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark Articulation methods (Newton backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=12, help="Number of bodies") +parser.add_argument("--num_joints", type=int, default=11, help="Number of joints") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor, warp_mask)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") +parser.add_argument("--no_shape_checks", action="store_true", help="Disable shape/dtype assertions") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings + +import numpy as np +import torch +import warp as wp +from isaaclab_newton.test.mock_interfaces import ( + MockNewtonArticulationView, + MockWrenchComposer, + create_mock_newton_manager, +) + +from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Also suppress logging warnings +logging.getLogger("isaaclab_newton").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +# ============================================================================= +# Index Helpers +# ============================================================================= + + +def make_tensor_env_ids(num_instances: int, device: str) -> torch.Tensor: + """Create a tensor of environment IDs.""" + return torch.arange(num_instances, dtype=torch.int32, device=device) + + +def make_tensor_joint_ids(num_joints: int, device: str) -> torch.Tensor: + """Create a tensor of joint IDs.""" + return torch.arange(num_joints, dtype=torch.int32, device=device) + + +def make_tensor_body_ids(num_bodies: int, device: str) -> torch.Tensor: + """Create a tensor of body IDs.""" + return torch.arange(num_bodies, dtype=torch.int32, device=device) + + +# ============================================================================= +# Test Articulation Factory +# ============================================================================= + + +def create_test_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + device: str = "cuda:0", +): + """Create a test Articulation instance with mocked dependencies.""" + from isaaclab_newton.assets.articulation.articulation import Articulation + + joint_names = [f"joint_{i}" for i in range(num_joints)] + body_names = [f"body_{i}" for i in range(num_bodies)] + + articulation = object.__new__(Articulation) + + articulation.cfg = ArticulationCfg( + prim_path="/World/Robot", + soft_joint_pos_limit_factor=1.0, + actuators={}, + ) + + # Create Newton mock view + mock_view = MockNewtonArticulationView( + num_instances=num_instances, + num_bodies=num_bodies, + num_joints=num_joints, + device=device, + joint_names=joint_names, + body_names=body_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + object.__setattr__(articulation, "_root_view", mock_view) + object.__setattr__(articulation, "_device", device) + object.__setattr__(articulation, "_check_shapes", not args.no_shape_checks) + + # Create ArticulationData instance (NewtonManager already mocked at call site) + from isaaclab_newton.assets.articulation.articulation_data import ArticulationData + + data = ArticulationData(mock_view, device) + object.__setattr__(articulation, "_data", data) + + # Create mock wrench composers + mock_inst_wrench = MockWrenchComposer(articulation) + mock_perm_wrench = MockWrenchComposer(articulation) + object.__setattr__(articulation, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(articulation, "_permanent_wrench_composer", mock_perm_wrench) + + # Set up other required attributes + object.__setattr__(articulation, "actuators", {}) + object.__setattr__(articulation, "_has_implicit_actuators", False) + object.__setattr__(articulation, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__( + articulation, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + ) + object.__setattr__( + articulation, "_ALL_JOINT_INDICES", wp.array(np.arange(num_joints, dtype=np.int32), device=device) + ) + object.__setattr__(articulation, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_JOINT_MASK", wp.ones((num_joints,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_BODY_MASK", wp.ones((num_bodies,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_FIXED_TENDON_INDICES", wp.array([], dtype=wp.int32, device=device)) + object.__setattr__(articulation, "_ALL_FIXED_TENDON_MASK", wp.zeros((0,), dtype=wp.bool, device=device)) + object.__setattr__(articulation, "_ALL_SPATIAL_TENDON_INDICES", wp.array([], dtype=wp.int32, device=device)) + object.__setattr__(articulation, "_ALL_SPATIAL_TENDON_MASK", wp.zeros((0,), dtype=wp.bool, device=device)) + + # Initialize joint targets + object.__setattr__( + articulation, "_joint_pos_target_sim", wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device) + ) + object.__setattr__( + articulation, "_joint_vel_target_sim", wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device) + ) + object.__setattr__( + articulation, + "_joint_effort_target_sim", + wp.zeros((num_instances, num_joints), dtype=wp.float32, device=device), + ) + + return articulation, mock_view + + +# ============================================================================= +# Input Generators (Torch-only for Newton backend) +# ============================================================================= + + +# --- Root Link Pose --- +def gen_root_link_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Pose --- +def gen_root_com_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link Velocity --- +def gen_root_link_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Velocity --- +def gen_root_com_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root State (Deprecated) --- +def gen_root_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM State (Deprecated) --- +def gen_root_com_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link State (Deprecated) --- +def gen_root_link_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Joint State --- +def gen_joint_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Position --- +def gen_joint_position_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_position_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Velocity --- +def gen_joint_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Stiffness --- +def gen_joint_stiffness_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "stiffness": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_stiffness_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "stiffness": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Damping --- +def gen_joint_damping_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "damping": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_damping_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "damping": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Position Limit --- +def gen_joint_position_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + lower = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * -3.14 + upper = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * 3.14 + return { + "limits": torch.cat([lower, upper], dim=-1), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_position_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + lower = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * -3.14 + upper = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * 3.14 + return { + "limits": torch.cat([lower, upper], dim=-1), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Velocity Limit --- +def gen_joint_velocity_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 10.0, + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_velocity_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 10.0, + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Effort Limit --- +def gen_joint_effort_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 100.0 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_effort_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 100.0 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Armature --- +def gen_joint_armature_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "armature": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.1 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_armature_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "armature": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.1 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Friction Coefficient --- +def gen_joint_friction_coefficient_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "joint_friction_coeff": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.5 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_friction_coefficient_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "joint_friction_coeff": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.5 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Position Target --- +def gen_set_joint_position_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_position_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Velocity Target --- +def gen_set_joint_velocity_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_velocity_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Effort Target --- +def gen_set_joint_effort_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_effort_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Masses --- +def gen_set_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set CoMs --- +def gen_set_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set Inertias --- +def gen_set_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set External Force and Torque --- +def gen_set_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_set_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# ============================================================================= +# Warp Mask Input Generators (for _mask methods) +# ============================================================================= + + +def _env_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_instances,), dtype=wp.bool, device=config.device) + + +def _joint_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_joints,), dtype=wp.bool, device=config.device) + + +def _body_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_bodies,), dtype=wp.bool, device=config.device) + + +# --- Root Link Pose (mask) --- +def gen_root_link_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root COM Pose (mask) --- +def gen_root_com_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root Link Velocity (mask) --- +def gen_root_link_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root COM Velocity (mask) --- +def gen_root_com_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Joint State (mask) --- +def gen_joint_state_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Position (mask) --- +def gen_joint_position_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Velocity (mask) --- +def gen_joint_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Stiffness (mask) --- +def gen_joint_stiffness_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "stiffness": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Damping (mask) --- +def gen_joint_damping_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "damping": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Position Limit (mask) --- +def gen_joint_position_limit_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + lower = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * -3.14 + upper = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * 3.14 + return { + "limits": torch.cat([lower, upper], dim=-1), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Velocity Limit (mask) --- +def gen_joint_velocity_limit_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 10.0, + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Effort Limit (mask) --- +def gen_joint_effort_limit_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "limits": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 100.0 + ), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Armature (mask) --- +def gen_joint_armature_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "armature": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.1 + ), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Joint Friction Coefficient (mask) --- +def gen_joint_friction_coefficient_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "joint_friction_coeff": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.5 + ), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Joint Position Target (mask) --- +def gen_set_joint_position_target_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Joint Velocity Target (mask) --- +def gen_set_joint_velocity_target_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Joint Effort Target (mask) --- +def gen_set_joint_effort_target_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "joint_mask": _joint_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Masses (mask) --- +def gen_set_masses_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set CoMs (mask) --- +def gen_set_coms_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Inertias (mask) --- +def gen_set_inertias_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Root State (Deprecated, no _mask equivalent) --- + MethodBenchmarkDefinition( + name="write_root_state_to_sim", + method_name="write_root_state_to_sim", + input_generators={ + "torch_list": gen_root_state_torch_list, + "torch_tensor": gen_root_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_com_state_to_sim", + method_name="write_root_com_state_to_sim", + input_generators={ + "torch_list": gen_root_com_state_torch_list, + "torch_tensor": gen_root_com_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_link_state_to_sim", + method_name="write_root_link_state_to_sim", + input_generators={ + "torch_list": gen_root_link_state_torch_list, + "torch_tensor": gen_root_link_state_torch_tensor, + }, + category="root_state", + ), + # --- Root Pose / Velocity --- + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim", + method_name="write_root_link_pose_to_sim", + input_generators={ + "torch_list": gen_root_link_pose_torch_list, + "torch_tensor": gen_root_link_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim_mask", + method_name="write_root_link_pose_to_sim_mask", + input_generators={"warp_mask": gen_root_link_pose_warp_mask}, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim", + method_name="write_root_com_pose_to_sim", + input_generators={ + "torch_list": gen_root_com_pose_torch_list, + "torch_tensor": gen_root_com_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim_mask", + method_name="write_root_com_pose_to_sim_mask", + input_generators={"warp_mask": gen_root_com_pose_warp_mask}, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim", + method_name="write_root_link_velocity_to_sim", + input_generators={ + "torch_list": gen_root_link_velocity_torch_list, + "torch_tensor": gen_root_link_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim_mask", + method_name="write_root_link_velocity_to_sim_mask", + input_generators={"warp_mask": gen_root_link_velocity_warp_mask}, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim", + method_name="write_root_com_velocity_to_sim", + input_generators={ + "torch_list": gen_root_com_velocity_torch_list, + "torch_tensor": gen_root_com_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim_mask", + method_name="write_root_com_velocity_to_sim_mask", + input_generators={"warp_mask": gen_root_com_velocity_warp_mask}, + category="root_velocity", + ), + # --- Joint State --- + MethodBenchmarkDefinition( + name="write_joint_state_to_sim", + method_name="write_joint_state_to_sim", + input_generators={ + "torch_list": gen_joint_state_torch_list, + "torch_tensor": gen_joint_state_torch_tensor, + }, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_state_to_sim_mask", + method_name="write_joint_state_to_sim_mask", + input_generators={"warp_mask": gen_joint_state_warp_mask}, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_position_to_sim", + method_name="write_joint_position_to_sim", + input_generators={ + "torch_list": gen_joint_position_torch_list, + "torch_tensor": gen_joint_position_torch_tensor, + }, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_position_to_sim_mask", + method_name="write_joint_position_to_sim_mask", + input_generators={"warp_mask": gen_joint_position_warp_mask}, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_to_sim", + method_name="write_joint_velocity_to_sim", + input_generators={ + "torch_list": gen_joint_velocity_torch_list, + "torch_tensor": gen_joint_velocity_torch_tensor, + }, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_to_sim_mask", + method_name="write_joint_velocity_to_sim_mask", + input_generators={"warp_mask": gen_joint_velocity_warp_mask}, + category="joint_state", + ), + # --- Joint Params --- + MethodBenchmarkDefinition( + name="write_joint_stiffness_to_sim", + method_name="write_joint_stiffness_to_sim", + input_generators={ + "torch_list": gen_joint_stiffness_torch_list, + "torch_tensor": gen_joint_stiffness_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_stiffness_to_sim_mask", + method_name="write_joint_stiffness_to_sim_mask", + input_generators={"warp_mask": gen_joint_stiffness_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_damping_to_sim", + method_name="write_joint_damping_to_sim", + input_generators={ + "torch_list": gen_joint_damping_torch_list, + "torch_tensor": gen_joint_damping_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_damping_to_sim_mask", + method_name="write_joint_damping_to_sim_mask", + input_generators={"warp_mask": gen_joint_damping_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_position_limit_to_sim", + method_name="write_joint_position_limit_to_sim", + input_generators={ + "torch_list": gen_joint_position_limit_torch_list, + "torch_tensor": gen_joint_position_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_position_limit_to_sim_mask", + method_name="write_joint_position_limit_to_sim_mask", + input_generators={"warp_mask": gen_joint_position_limit_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_limit_to_sim", + method_name="write_joint_velocity_limit_to_sim", + input_generators={ + "torch_list": gen_joint_velocity_limit_torch_list, + "torch_tensor": gen_joint_velocity_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_limit_to_sim_mask", + method_name="write_joint_velocity_limit_to_sim_mask", + input_generators={"warp_mask": gen_joint_velocity_limit_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_effort_limit_to_sim", + method_name="write_joint_effort_limit_to_sim", + input_generators={ + "torch_list": gen_joint_effort_limit_torch_list, + "torch_tensor": gen_joint_effort_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_effort_limit_to_sim_mask", + method_name="write_joint_effort_limit_to_sim_mask", + input_generators={"warp_mask": gen_joint_effort_limit_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_armature_to_sim", + method_name="write_joint_armature_to_sim", + input_generators={ + "torch_list": gen_joint_armature_torch_list, + "torch_tensor": gen_joint_armature_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_armature_to_sim_mask", + method_name="write_joint_armature_to_sim_mask", + input_generators={"warp_mask": gen_joint_armature_warp_mask}, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_friction_coefficient_to_sim", + method_name="write_joint_friction_coefficient_to_sim", + input_generators={ + "torch_list": gen_joint_friction_coefficient_torch_list, + "torch_tensor": gen_joint_friction_coefficient_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_friction_coefficient_to_sim_mask", + method_name="write_joint_friction_coefficient_to_sim_mask", + input_generators={"warp_mask": gen_joint_friction_coefficient_warp_mask}, + category="joint_params", + ), + # --- Joint Targets --- + MethodBenchmarkDefinition( + name="set_joint_position_target", + method_name="set_joint_position_target", + input_generators={ + "torch_list": gen_set_joint_position_target_torch_list, + "torch_tensor": gen_set_joint_position_target_torch_tensor, + }, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_position_target_mask", + method_name="set_joint_position_target_mask", + input_generators={"warp_mask": gen_set_joint_position_target_warp_mask}, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_velocity_target", + method_name="set_joint_velocity_target", + input_generators={ + "torch_list": gen_set_joint_velocity_target_torch_list, + "torch_tensor": gen_set_joint_velocity_target_torch_tensor, + }, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_velocity_target_mask", + method_name="set_joint_velocity_target_mask", + input_generators={"warp_mask": gen_set_joint_velocity_target_warp_mask}, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_effort_target", + method_name="set_joint_effort_target", + input_generators={ + "torch_list": gen_set_joint_effort_target_torch_list, + "torch_tensor": gen_set_joint_effort_target_torch_tensor, + }, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_effort_target_mask", + method_name="set_joint_effort_target_mask", + input_generators={"warp_mask": gen_set_joint_effort_target_warp_mask}, + category="joint_targets", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_set_masses_torch_list, + "torch_tensor": gen_set_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_masses_mask", + method_name="set_masses_mask", + input_generators={"warp_mask": gen_set_masses_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_set_coms_torch_list, + "torch_tensor": gen_set_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms_mask", + method_name="set_coms_mask", + input_generators={"warp_mask": gen_set_coms_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_set_inertias_torch_list, + "torch_tensor": gen_set_inertias_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias_mask", + method_name="set_inertias_mask", + input_generators={"warp_mask": gen_set_inertias_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_set_external_force_and_torque_torch_list, + "torch_tensor": gen_set_external_force_and_torque_torch_tensor, + }, + category="external_wrench", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted — joint_ids and body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _make_fill_ratio_mask_generator(base_mask_gen_fn, fill_ratio): + """Create a mask generator with a given fill ratio. + + Sets a random subset of the env_mask entries to True. Data stays full-sized (mask methods expect full data). + """ + + def generator(config): + base_inputs = base_mask_gen_fn(config) + n = max(1, int(config.num_instances * fill_ratio)) + # Create a mask with n random entries set to True + perm = torch.randperm(config.num_instances, device=config.device) + mask_tensor = torch.zeros(config.num_instances, dtype=torch.bool, device=config.device) + mask_tensor[perm[:n]] = True + base_inputs["env_mask"] = wp.from_torch(mask_tensor, dtype=wp.bool) + return base_inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from existing generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + generators = {} + # Add tensor fill variants from torch_tensor generators + if "torch_tensor" in bm.input_generators: + base_gen = bm.input_generators["torch_tensor"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + # Add mask fill variants from warp_mask generators + if "warp_mask" in bm.input_generators: + base_gen = bm.input_generators["warp_mask"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"mask_{suffix}"] = _make_fill_ratio_mask_generator(base_gen, ratio) + if generators: + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=args.num_joints, + device=args.device, + mode=args.mode, + ) + + # Patch the NewtonManager for both articulation and articulation_data modules + with ( + create_mock_newton_manager( + "isaaclab_newton.assets.articulation.articulation_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + create_mock_newton_manager( + "isaaclab_newton.assets.articulation.articulation.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + ): + # Create the test articulation + articulation, _ = create_test_articulation( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + num_joints=config.num_joints, + device=config.device, + ) + + print( + f"Benchmarking Articulation (Newton) with {config.num_instances} instances, {config.num_bodies} bodies," + f" {config.num_joints} joints..." + ) + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="newton_articulation_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, articulation) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, articulation) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_articulation_data.py b/source/isaaclab_newton/benchmark/assets/benchmark_articulation_data.py new file mode 100644 index 000000000000..59fb541767bc --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_articulation_data.py @@ -0,0 +1,342 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for ArticulationData class (Newton backend). + +This module provides a benchmarking framework to measure the performance of all properties +in the Newton ArticulationData class. Each property is run multiple times with randomized mock data, +and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_articulation_data.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] [--num_joints J] + +Example: + python benchmark_articulation_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for ArticulationData class (Newton backend).", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=12, help="Number of bodies") +parser.add_argument("--num_joints", type=int, default=11, help="Number of joints") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings + +import numpy as np +import warp as wp +from isaaclab_newton.test.mock_interfaces import MockNewtonArticulationView, create_mock_newton_manager + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties - skip these +DEPRECATED_PROPERTIES = { + "root_pose_w", + "root_pos_w", + "root_quat_w", + "root_vel_w", + "root_lin_vel_w", + "root_ang_vel_w", + "root_lin_vel_b", + "root_ang_vel_b", + "body_pose_w", + "body_pos_w", + "body_quat_w", + "body_vel_w", + "body_lin_vel_w", + "body_ang_vel_w", + "body_acc_w", + "body_lin_acc_w", + "body_ang_acc_w", + "com_pos_b", + "com_quat_b", + "joint_limits", + "joint_friction", + "fixed_tendon_limit", + "applied_torque", + "computed_torque", + "joint_dynamic_friction", + "joint_effort_target", + "joint_viscous_friction", + "joint_velocity_limits", + # Combined state properties marked as deprecated + "root_state_w", + "root_link_state_w", + "root_com_state_w", + "body_state_w", + "body_link_state_w", + "body_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = { + "fixed_tendon_stiffness", + "fixed_tendon_damping", + "fixed_tendon_limit_stiffness", + "fixed_tendon_rest_length", + "fixed_tendon_offset", + "fixed_tendon_pos_limits", + "spatial_tendon_stiffness", + "spatial_tendon_damping", + "spatial_tendon_limit_stiffness", + "spatial_tendon_offset", + "body_incoming_joint_wrench_b", +} + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_fixed_tendon_damping", + "default_fixed_tendon_limit", + "default_fixed_tendon_limit_stiffness", + "default_fixed_tendon_offset", + "default_fixed_tendon_pos_limits", + "default_fixed_tendon_rest_length", + "default_fixed_tendon_stiffness", + "default_inertia", + "default_joint_armature", + "default_joint_damping", + "default_joint_dynamic_friction_coeff", + "default_joint_friction", + "default_joint_friction_coeff", + "default_joint_limits", + "default_joint_pos_limits", + "default_joint_stiffness", + "default_joint_viscous_friction_coeff", + "default_mass", + "default_spatial_tendon_damping", + "default_spatial_tendon_limit_stiffness", + "default_spatial_tendon_offset", + "default_spatial_tendon_stiffness", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "body_names", + "joint_names", + "fixed_tendon_names", + "spatial_tendon_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ALL_BODY_MASK", + "ALL_JOINT_MASK", + "ENV_MASK", + "BODY_MASK", + "JOINT_MASK", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "root_link_lin_vel_w": ["root_link_vel_w"], + "root_link_ang_vel_w": ["root_link_vel_w"], + "root_link_lin_vel_b": ["root_link_vel_b"], + "root_link_ang_vel_b": ["root_link_vel_b"], + "root_com_pos_w": ["root_com_pose_w"], + "root_com_quat_w": ["root_com_pose_w"], + "root_com_lin_vel_b": ["root_com_vel_b"], + "root_com_ang_vel_b": ["root_com_vel_b"], + "root_com_lin_vel_w": ["root_com_vel_w"], + "root_com_ang_vel_w": ["root_com_vel_w"], + "root_link_pos_w": ["root_link_pose_w"], + "root_link_quat_w": ["root_link_pose_w"], + "body_link_lin_vel_w": ["body_link_vel_w"], + "body_link_ang_vel_w": ["body_link_vel_w"], + "body_link_pos_w": ["body_link_pose_w"], + "body_link_quat_w": ["body_link_pose_w"], + "body_com_pos_w": ["body_com_pose_w"], + "body_com_quat_w": ["body_com_pose_w"], + "body_com_lin_vel_w": ["body_com_vel_w"], + "body_com_ang_vel_w": ["body_com_vel_w"], + "body_com_lin_acc_w": ["body_com_acc_w"], + "body_com_ang_acc_w": ["body_com_acc_w"], + "body_com_quat_b": ["body_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(articulation_data) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(articulation_data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(articulation_data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockNewtonArticulationView: + """Set up the mock environment for benchmarking.""" + mock_view = MockNewtonArticulationView( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + num_joints=config.num_joints, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=args.num_joints, + device=args.device, + ) + + # Patch the NewtonManager for the articulation_data module + with create_mock_newton_manager( + "isaaclab_newton.assets.articulation.articulation_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ): + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Import ArticulationData inside the patch context + from isaaclab_newton.assets.articulation.articulation_data import ArticulationData + + # Create ArticulationData instance + articulation_data = ArticulationData(mock_view, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(articulation_data) + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + N, L, J = cfg.num_instances, cfg.num_bodies, cfg.num_joints + dev = cfg.device + + # Update root transforms + root_tf_np = np.random.randn(N, 1, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + mock_view.set_mock_root_transforms(wp.array(root_tf_np, dtype=wp.transformf, device=dev)) + + # Update root velocities + root_vel_np = np.random.randn(N, 1, 6).astype(np.float32) + mock_view.set_mock_root_velocities(wp.array(root_vel_np, dtype=wp.spatial_vectorf, device=dev)) + + # Update link transforms + link_tf_np = np.random.randn(N, 1, L, 7).astype(np.float32) + link_tf_np[..., 3:7] /= np.linalg.norm(link_tf_np[..., 3:7], axis=-1, keepdims=True) + mock_view.set_mock_link_transforms(wp.array(link_tf_np, dtype=wp.transformf, device=dev)) + + # Update link velocities + link_vel_np = np.random.randn(N, 1, L, 6).astype(np.float32) + mock_view.set_mock_link_velocities(wp.array(link_vel_np, dtype=wp.spatial_vectorf, device=dev)) + + # Update DOF state + mock_view.set_mock_dof_positions( + wp.array(np.random.randn(N, 1, J).astype(np.float32), dtype=wp.float32, device=dev) + ) + mock_view.set_mock_dof_velocities( + wp.array(np.random.randn(N, 1, J).astype(np.float32), dtype=wp.float32, device=dev) + ) + + # Update body properties + mock_view.set_mock_coms( + wp.array(np.random.randn(N, 1, L, 3).astype(np.float32), dtype=wp.vec3f, device=dev) + ) + mock_view.set_mock_inertias( + wp.array(np.random.randn(N, 1, L, 9).astype(np.float32), dtype=wp.mat33f, device=dev) + ) + mock_view.set_mock_masses( + wp.array((np.random.rand(N, 1, L) * 10 + 0.1).astype(np.float32), dtype=wp.float32, device=dev) + ) + + # Invalidate timestamp to trigger recomputation + articulation_data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="newton_articulation_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=articulation_data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object.py b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object.py new file mode 100644 index 000000000000..e0df122d4f00 --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object.py @@ -0,0 +1,623 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObject class (Newton backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the RigidObject class. Each method is benchmarked under three scenarios: + +1. **Torch List**: Inputs are PyTorch tensors with list indices (via deprecated wrappers). +2. **Torch Tensor**: Inputs are PyTorch tensors with tensor indices (via deprecated wrappers). +3. **Warp Mask**: Inputs are warp arrays with boolean masks (via ``_mask`` methods). + +Usage: + python benchmark_rigid_object.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] + +Example: + python benchmark_rigid_object.py --num_iterations 1000 --warmup_steps 10 + python benchmark_rigid_object.py --mode torch_list # Only run list-based benchmarks + python benchmark_rigid_object.py --mode warp_mask # Only run warp mask benchmarks +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark RigidObject methods (Newton backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=1, help="Number of bodies") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor, warp_mask)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") +parser.add_argument("--no_shape_checks", action="store_true", help="Disable shape/dtype assertions") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings + +import numpy as np +import torch +import warp as wp +from isaaclab_newton.test.mock_interfaces import ( + MockNewtonArticulationView, + MockWrenchComposer, + create_mock_newton_manager, +) + +from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Also suppress logging warnings +logging.getLogger("isaaclab_newton").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +# ============================================================================= +# Index Helpers +# ============================================================================= + + +def make_tensor_env_ids(num_instances: int, device: str) -> torch.Tensor: + """Create a tensor of environment IDs.""" + return torch.arange(num_instances, dtype=torch.int32, device=device) + + +def make_tensor_body_ids(num_bodies: int, device: str) -> torch.Tensor: + """Create a tensor of body IDs.""" + return torch.arange(num_bodies, dtype=torch.int32, device=device) + + +# ============================================================================= +# Test RigidObject Factory +# ============================================================================= + + +def create_test_rigid_object( + num_instances: int = 2, + num_bodies: int = 1, + device: str = "cuda:0", +): + """Create a test RigidObject instance with mocked dependencies.""" + from isaaclab_newton.assets.rigid_object.rigid_object import RigidObject + + body_names = [f"body_{i}" for i in range(num_bodies)] + + rigid_object = object.__new__(RigidObject) + + rigid_object.cfg = RigidObjectCfg( + prim_path="/World/Object", + ) + + # Create Newton mock view + mock_view = MockNewtonArticulationView( + num_instances=num_instances, + num_bodies=num_bodies, + num_joints=0, + device=device, + joint_names=[], + body_names=body_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + object.__setattr__(rigid_object, "_root_view", mock_view) + object.__setattr__(rigid_object, "_device", device) + object.__setattr__(rigid_object, "_check_shapes", not args.no_shape_checks) + + # Create RigidObjectData instance (NewtonManager already mocked at call site) + from isaaclab_newton.assets.rigid_object.rigid_object_data import RigidObjectData + + data = RigidObjectData(mock_view, device) + object.__setattr__(rigid_object, "_data", data) + + # Create mock wrench composers + mock_inst_wrench = MockWrenchComposer(rigid_object) + mock_perm_wrench = MockWrenchComposer(rigid_object) + object.__setattr__(rigid_object, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(rigid_object, "_permanent_wrench_composer", mock_perm_wrench) + + # Set up other required attributes + object.__setattr__(rigid_object, "actuators", {}) + object.__setattr__(rigid_object, "_ALL_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device)) + object.__setattr__( + rigid_object, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + ) + object.__setattr__(rigid_object, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(rigid_object, "_ALL_BODY_MASK", wp.ones((num_bodies,), dtype=wp.bool, device=device)) + + # set information about rigid body into data + data.body_names = body_names + + return rigid_object, mock_view + + +# ============================================================================= +# Input Generators (Torch-only for Newton backend) +# ============================================================================= + + +# --- Root Link Pose --- +def gen_root_link_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Pose --- +def gen_root_com_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link Velocity --- +def gen_root_link_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Velocity --- +def gen_root_com_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Set Masses --- +def gen_set_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set CoMs --- +def gen_set_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set Inertias --- +def gen_set_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set External Force and Torque --- +def gen_set_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_set_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# ============================================================================= +# Warp Mask Input Generators (for _mask methods) +# ============================================================================= + + +def _env_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_instances,), dtype=wp.bool, device=config.device) + + +def _body_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_bodies,), dtype=wp.bool, device=config.device) + + +# --- Root Link Pose (mask) --- +def gen_root_link_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root COM Pose (mask) --- +def gen_root_com_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root Link Velocity (mask) --- +def gen_root_link_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Root COM Velocity (mask) --- +def gen_root_com_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Set Masses (mask) --- +def gen_set_masses_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set CoMs (mask) --- +def gen_set_coms_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Inertias (mask) --- +def gen_set_inertias_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Root Pose / Velocity --- + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim", + method_name="write_root_link_pose_to_sim", + input_generators={ + "torch_list": gen_root_link_pose_torch_list, + "torch_tensor": gen_root_link_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim_mask", + method_name="write_root_link_pose_to_sim_mask", + input_generators={"warp_mask": gen_root_link_pose_warp_mask}, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim", + method_name="write_root_com_pose_to_sim", + input_generators={ + "torch_list": gen_root_com_pose_torch_list, + "torch_tensor": gen_root_com_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim_mask", + method_name="write_root_com_pose_to_sim_mask", + input_generators={"warp_mask": gen_root_com_pose_warp_mask}, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim", + method_name="write_root_link_velocity_to_sim", + input_generators={ + "torch_list": gen_root_link_velocity_torch_list, + "torch_tensor": gen_root_link_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim_mask", + method_name="write_root_link_velocity_to_sim_mask", + input_generators={"warp_mask": gen_root_link_velocity_warp_mask}, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim", + method_name="write_root_com_velocity_to_sim", + input_generators={ + "torch_list": gen_root_com_velocity_torch_list, + "torch_tensor": gen_root_com_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim_mask", + method_name="write_root_com_velocity_to_sim_mask", + input_generators={"warp_mask": gen_root_com_velocity_warp_mask}, + category="root_velocity", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_set_masses_torch_list, + "torch_tensor": gen_set_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_masses_mask", + method_name="set_masses_mask", + input_generators={"warp_mask": gen_set_masses_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_set_coms_torch_list, + "torch_tensor": gen_set_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms_mask", + method_name="set_coms_mask", + input_generators={"warp_mask": gen_set_coms_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_set_inertias_torch_list, + "torch_tensor": gen_set_inertias_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias_mask", + method_name="set_inertias_mask", + input_generators={"warp_mask": gen_set_inertias_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_set_external_force_and_torque_torch_list, + "torch_tensor": gen_set_external_force_and_torque_torch_tensor, + }, + category="external_wrench", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted — body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _make_fill_ratio_mask_generator(base_mask_gen_fn, fill_ratio): + """Create a mask generator with a given fill ratio. + + Sets a random subset of the env_mask entries to True. Data stays full-sized (mask methods expect full data). + """ + + def generator(config): + base_inputs = base_mask_gen_fn(config) + n = max(1, int(config.num_instances * fill_ratio)) + # Create a mask with n random entries set to True + perm = torch.randperm(config.num_instances, device=config.device) + mask_tensor = torch.zeros(config.num_instances, dtype=torch.bool, device=config.device) + mask_tensor[perm[:n]] = True + base_inputs["env_mask"] = wp.from_torch(mask_tensor, dtype=wp.bool) + return base_inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from existing generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + generators = {} + # Add tensor fill variants from torch_tensor generators + if "torch_tensor" in bm.input_generators: + base_gen = bm.input_generators["torch_tensor"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + # Add mask fill variants from warp_mask generators + if "warp_mask" in bm.input_generators: + base_gen = bm.input_generators["warp_mask"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"mask_{suffix}"] = _make_fill_ratio_mask_generator(base_gen, ratio) + if generators: + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + mode=args.mode, + ) + + # Patch the NewtonManager for both rigid_object and rigid_object_data modules + with ( + create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object.rigid_object_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object.rigid_object.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + ): + # Create the test rigid object + rigid_object, _ = create_test_rigid_object( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + device=config.device, + ) + + print(f"Benchmarking RigidObject (Newton) with {config.num_instances} instances, {config.num_bodies} bodies...") + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="newton_rigid_object_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, rigid_object) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, rigid_object) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection.py b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection.py new file mode 100644 index 000000000000..f75c7d0d03b8 --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection.py @@ -0,0 +1,654 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectCollection class (Newton backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the RigidObjectCollection class. Each method is benchmarked under three scenarios: + +1. **Torch List**: Inputs are PyTorch tensors with list indices (via deprecated wrappers). +2. **Torch Tensor**: Inputs are PyTorch tensors with tensor indices (via deprecated wrappers). +3. **Warp Mask**: Inputs are warp arrays with boolean masks (via ``_mask`` methods). + +Usage: + python benchmark_rigid_object_collection.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] + +Example: + python benchmark_rigid_object_collection.py --num_iterations 1000 --warmup_steps 10 + python benchmark_rigid_object_collection.py --mode torch_list # Only run list-based benchmarks + python benchmark_rigid_object_collection.py --mode warp_mask # Only run warp mask benchmarks +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark RigidObjectCollection methods (Newton backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=3, help="Number of bodies (object types)") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor, warp_mask)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") +parser.add_argument("--no_shape_checks", action="store_true", help="Disable shape/dtype assertions") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings + +import numpy as np +import torch +import warp as wp +from isaaclab_newton.test.mock_interfaces import ( + MockWrenchComposer, + create_mock_newton_manager, +) +from isaaclab_newton.test.mock_interfaces.views import MockNewtonCollectionView + +from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Also suppress logging warnings +logging.getLogger("isaaclab_newton").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +# ============================================================================= +# Index Helpers +# ============================================================================= + + +def make_tensor_env_ids(num_instances: int, device: str) -> torch.Tensor: + """Create a tensor of environment IDs.""" + return torch.arange(num_instances, dtype=torch.int32, device=device) + + +def make_tensor_body_ids(num_bodies: int, device: str) -> torch.Tensor: + """Create a tensor of body IDs.""" + return torch.arange(num_bodies, dtype=torch.int32, device=device) + + +# ============================================================================= +# Test RigidObjectCollection Factory +# ============================================================================= + + +def create_test_collection( + num_instances: int = 2, + num_bodies: int = 3, + device: str = "cuda:0", +): + """Create a test RigidObjectCollection instance with mocked dependencies.""" + from isaaclab_newton.assets.rigid_object_collection.rigid_object_collection import RigidObjectCollection + + object_names = [f"object_{i}" for i in range(num_bodies)] + + collection = object.__new__(RigidObjectCollection) + + # Create a minimal config with dummy rigid objects + from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg + + rigid_objects = {name: RigidObjectCfg(prim_path=f"/World/{name}") for name in object_names} + collection.cfg = RigidObjectCollectionCfg(rigid_objects=rigid_objects) + + # Create Newton mock view + mock_view = MockNewtonCollectionView( + num_envs=num_instances, + num_bodies=num_bodies, + device=device, + body_names=object_names, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + object.__setattr__(collection, "_root_view", mock_view) + object.__setattr__(collection, "_device", device) + object.__setattr__(collection, "_body_names_list", object_names) + object.__setattr__(collection, "_check_shapes", not args.no_shape_checks) + + # Create RigidObjectCollectionData instance (NewtonManager already mocked at call site) + from isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data import RigidObjectCollectionData + + data = RigidObjectCollectionData(mock_view, num_bodies, device) + object.__setattr__(collection, "_data", data) + + # Create mock wrench composers + mock_inst_wrench = MockWrenchComposer(collection) + mock_perm_wrench = MockWrenchComposer(collection) + object.__setattr__(collection, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(collection, "_permanent_wrench_composer", mock_perm_wrench) + + # Set up other required attributes + object.__setattr__( + collection, "_ALL_ENV_INDICES", wp.array(np.arange(num_instances, dtype=np.int32), device=device) + ) + object.__setattr__(collection, "_ALL_BODY_INDICES", wp.array(np.arange(num_bodies, dtype=np.int32), device=device)) + object.__setattr__(collection, "_ALL_ENV_MASK", wp.ones((num_instances,), dtype=wp.bool, device=device)) + object.__setattr__(collection, "_ALL_BODY_MASK", wp.ones((num_bodies,), dtype=wp.bool, device=device)) + + # Temporary 2D wrench buffer for write_data_to_sim + object.__setattr__( + collection, + "_wrench_buffer", + wp.zeros((num_instances, num_bodies), dtype=wp.spatial_vectorf, device=device), + ) + + return collection, mock_view + + +# ============================================================================= +# Input Generators (Torch-only for Newton backend) +# ============================================================================= + + +# --- Body Link Pose --- +def gen_body_link_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_link_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Body COM Pose --- +def gen_body_com_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_com_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Body Link Velocity --- +def gen_body_link_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_link_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Body COM Velocity --- +def gen_body_com_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_com_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set Masses --- +def gen_set_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set CoMs --- +def gen_set_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set Inertias --- +def gen_set_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set External Force and Torque --- +def gen_set_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_set_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# ============================================================================= +# Warp Mask Input Generators (for _mask methods) +# ============================================================================= + + +def _env_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_instances,), dtype=wp.bool, device=config.device) + + +def _body_mask(config: MethodBenchmarkRunnerConfig) -> wp.array: + return wp.ones((config.num_bodies,), dtype=wp.bool, device=config.device) + + +# --- Body Link Pose (mask) --- +def gen_body_link_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Body COM Pose (mask) --- +def gen_body_com_pose_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_mask": _env_mask(config), + } + + +# --- Body Link Velocity (mask) --- +def gen_body_link_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_mask": _env_mask(config), + } + + +# --- Body COM Velocity (mask) --- +def gen_body_com_velocity_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_mask": _env_mask(config), + } + + +# --- Set Masses (mask) --- +def gen_set_masses_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set CoMs (mask) --- +def gen_set_coms_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# --- Set Inertias (mask) --- +def gen_set_inertias_warp_mask(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "body_mask": _body_mask(config), + "env_mask": _env_mask(config), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Body Link Pose --- + MethodBenchmarkDefinition( + name="write_body_link_pose_to_sim", + method_name="write_body_link_pose_to_sim", + input_generators={ + "torch_list": gen_body_link_pose_torch_list, + "torch_tensor": gen_body_link_pose_torch_tensor, + }, + category="body_pose", + ), + MethodBenchmarkDefinition( + name="write_body_link_pose_to_sim_mask", + method_name="write_body_link_pose_to_sim_mask", + input_generators={"warp_mask": gen_body_link_pose_warp_mask}, + category="body_pose", + ), + # --- Body COM Pose --- + MethodBenchmarkDefinition( + name="write_body_com_pose_to_sim", + method_name="write_body_com_pose_to_sim", + input_generators={ + "torch_list": gen_body_com_pose_torch_list, + "torch_tensor": gen_body_com_pose_torch_tensor, + }, + category="body_pose", + ), + MethodBenchmarkDefinition( + name="write_body_com_pose_to_sim_mask", + method_name="write_body_com_pose_to_sim_mask", + input_generators={"warp_mask": gen_body_com_pose_warp_mask}, + category="body_pose", + ), + # --- Body Link Velocity --- + MethodBenchmarkDefinition( + name="write_body_link_velocity_to_sim", + method_name="write_body_link_velocity_to_sim", + input_generators={ + "torch_list": gen_body_link_velocity_torch_list, + "torch_tensor": gen_body_link_velocity_torch_tensor, + }, + category="body_velocity", + ), + MethodBenchmarkDefinition( + name="write_body_link_velocity_to_sim_mask", + method_name="write_body_link_velocity_to_sim_mask", + input_generators={"warp_mask": gen_body_link_velocity_warp_mask}, + category="body_velocity", + ), + # --- Body COM Velocity --- + MethodBenchmarkDefinition( + name="write_body_com_velocity_to_sim", + method_name="write_body_com_velocity_to_sim", + input_generators={ + "torch_list": gen_body_com_velocity_torch_list, + "torch_tensor": gen_body_com_velocity_torch_tensor, + }, + category="body_velocity", + ), + MethodBenchmarkDefinition( + name="write_body_com_velocity_to_sim_mask", + method_name="write_body_com_velocity_to_sim_mask", + input_generators={"warp_mask": gen_body_com_velocity_warp_mask}, + category="body_velocity", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_set_masses_torch_list, + "torch_tensor": gen_set_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_masses_mask", + method_name="set_masses_mask", + input_generators={"warp_mask": gen_set_masses_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_set_coms_torch_list, + "torch_tensor": gen_set_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms_mask", + method_name="set_coms_mask", + input_generators={"warp_mask": gen_set_coms_warp_mask}, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_set_inertias_torch_list, + "torch_tensor": gen_set_inertias_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias_mask", + method_name="set_inertias_mask", + input_generators={"warp_mask": gen_set_inertias_warp_mask}, + category="body_props", + ), + # --- External Force and Torque --- + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_set_external_force_and_torque_torch_list, + "torch_tensor": gen_set_external_force_and_torque_torch_tensor, + }, + category="external_wrench", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted -- body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _make_fill_ratio_mask_generator(base_mask_gen_fn, fill_ratio): + """Create a mask generator with a given fill ratio. + + Sets a random subset of the env_mask entries to True. Data stays full-sized (mask methods expect full data). + """ + + def generator(config): + base_inputs = base_mask_gen_fn(config) + n = max(1, int(config.num_instances * fill_ratio)) + # Create a mask with n random entries set to True + perm = torch.randperm(config.num_instances, device=config.device) + mask_tensor = torch.zeros(config.num_instances, dtype=torch.bool, device=config.device) + mask_tensor[perm[:n]] = True + base_inputs["env_mask"] = wp.from_torch(mask_tensor, dtype=wp.bool) + return base_inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from existing generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + generators = {} + # Add tensor fill variants from torch_tensor generators + if "torch_tensor" in bm.input_generators: + base_gen = bm.input_generators["torch_tensor"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + # Add mask fill variants from warp_mask generators + if "warp_mask" in bm.input_generators: + base_gen = bm.input_generators["warp_mask"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"mask_{suffix}"] = _make_fill_ratio_mask_generator(base_gen, ratio) + if generators: + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + mode=args.mode, + ) + + # Patch the NewtonManager for both collection and collection_data modules + with ( + create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object_collection.rigid_object_collection.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ), + ): + # Create the test collection + collection, _ = create_test_collection( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + device=config.device, + ) + + print( + f"Benchmarking RigidObjectCollection (Newton) with {config.num_instances} instances, " + f"{config.num_bodies} bodies..." + ) + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="newton_rigid_object_collection_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, collection) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, collection) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection_data.py b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection_data.py new file mode 100644 index 000000000000..e397b42d2ec4 --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_collection_data.py @@ -0,0 +1,252 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectCollectionData class (Newton backend). + +This module provides a benchmarking framework to measure the performance of all properties +in the Newton RigidObjectCollectionData class. Each property is run multiple times with +randomized mock data, and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_rigid_object_collection_data.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] + +Example: + python benchmark_rigid_object_collection_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for RigidObjectCollectionData class (Newton backend).", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=3, help="Number of bodies (object types)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings + +import numpy as np +import warp as wp +from isaaclab_newton.test.mock_interfaces import create_mock_newton_manager +from isaaclab_newton.test.mock_interfaces.views import MockNewtonCollectionView + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties - skip these +DEPRECATED_PROPERTIES = { + "default_body_state", + "body_state_w", + "body_link_state_w", + "body_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = set() + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_inertia", + "default_mass", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "body_names", + "object_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ENV_MASK", + "ALL_OBJECT_MASK", + "OBJECT_MASK", + "num_bodies", + "num_instances", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "body_link_pos_w": ["body_link_pose_w"], + "body_link_quat_w": ["body_link_pose_w"], + "body_link_lin_vel_w": ["body_link_vel_w"], + "body_link_ang_vel_w": ["body_link_vel_w"], + "body_com_pos_w": ["body_com_pose_w"], + "body_com_quat_w": ["body_com_pose_w"], + "body_com_lin_vel_w": ["body_com_vel_w"], + "body_com_ang_vel_w": ["body_com_vel_w"], + "body_com_lin_acc_w": ["body_com_acc_w"], + "body_com_ang_acc_w": ["body_com_acc_w"], + "body_com_quat_b": ["body_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(data) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockNewtonCollectionView: + """Set up the mock environment for benchmarking.""" + mock_view = MockNewtonCollectionView( + num_envs=config.num_instances, + num_bodies=config.num_bodies, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + ) + + # Patch the NewtonManager for the collection_data module + with create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ): + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Import RigidObjectCollectionData inside the patch context + from isaaclab_newton.assets.rigid_object_collection.rigid_object_collection_data import ( + RigidObjectCollectionData, + ) + + # Create RigidObjectCollectionData instance + data = RigidObjectCollectionData(mock_view, config.num_bodies, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(data) + + N, B = config.num_instances, config.num_bodies + dev = config.device + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + # Update root transforms (shape: N, B, 7) + root_tf_np = np.random.randn(N, B, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + mock_view._root_transforms = wp.array(root_tf_np, dtype=wp.transformf, device=dev) + + # Update root velocities (shape: N, B, 6) + root_vel_np = np.random.randn(N, B, 6).astype(np.float32) + mock_view._root_velocities = wp.array(root_vel_np, dtype=wp.spatial_vectorf, device=dev) + + # Update body properties (attributes have trailing link dim of 1: N, B, 1) + mock_view._attributes["body_com"] = wp.array( + np.random.randn(N, B, 1, 3).astype(np.float32), dtype=wp.vec3f, device=dev + ) + mock_view._attributes["body_mass"] = wp.array( + (np.random.rand(N, B, 1) * 10 + 0.1).astype(np.float32), dtype=wp.float32, device=dev + ) + mock_view._attributes["body_inertia"] = wp.array( + np.random.randn(N, B, 1, 9).astype(np.float32), dtype=wp.mat33f, device=dev + ) + + # Re-create simulation bindings to pick up the new mock data + data._create_simulation_bindings() + + # Invalidate timestamp to trigger recomputation + data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="newton_rigid_object_collection_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_data.py b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_data.py new file mode 100644 index 000000000000..b8889e1fcd2c --- /dev/null +++ b/source/isaaclab_newton/benchmark/assets/benchmark_rigid_object_data.py @@ -0,0 +1,285 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectData class (Newton backend). + +This module provides a benchmarking framework to measure the performance of all properties +in the Newton RigidObjectData class. Each property is run multiple times with randomized mock data, +and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_rigid_object_data.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] + +Example: + python benchmark_rigid_object_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for RigidObjectData class (Newton backend).", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings + +import numpy as np +import warp as wp +from isaaclab_newton.test.mock_interfaces import MockNewtonArticulationView, create_mock_newton_manager + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties - skip these +DEPRECATED_PROPERTIES = { + "default_root_state", + "root_pose_w", + "root_pos_w", + "root_quat_w", + "root_vel_w", + "root_lin_vel_w", + "root_ang_vel_w", + "root_lin_vel_b", + "root_ang_vel_b", + "body_pose_w", + "body_pos_w", + "body_quat_w", + "body_vel_w", + "body_lin_vel_w", + "body_ang_vel_w", + "body_acc_w", + "body_lin_acc_w", + "body_ang_acc_w", + "com_pos_b", + "com_quat_b", + # Combined state properties marked as deprecated + "root_state_w", + "root_link_state_w", + "root_com_state_w", + "body_state_w", + "body_link_state_w", + "body_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = set() + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_inertia", + "default_mass", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "body_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ENV_MASK", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "root_link_lin_vel_w": ["root_link_vel_w"], + "root_link_ang_vel_w": ["root_link_vel_w"], + "root_link_lin_vel_b": ["root_link_vel_b"], + "root_link_ang_vel_b": ["root_link_vel_b"], + "root_com_pos_w": ["root_com_pose_w"], + "root_com_quat_w": ["root_com_pose_w"], + "root_com_lin_vel_b": ["root_com_vel_b"], + "root_com_ang_vel_b": ["root_com_vel_b"], + "root_com_lin_vel_w": ["root_com_vel_w"], + "root_com_ang_vel_w": ["root_com_vel_w"], + "root_link_pos_w": ["root_link_pose_w"], + "root_link_quat_w": ["root_link_pose_w"], + "body_link_lin_vel_w": ["body_link_vel_w"], + "body_link_ang_vel_w": ["body_link_vel_w"], + "body_link_pos_w": ["body_link_pose_w"], + "body_link_quat_w": ["body_link_pose_w"], + "body_com_pos_w": ["body_com_pose_w"], + "body_com_quat_w": ["body_com_pose_w"], + "body_com_lin_vel_w": ["body_com_vel_w"], + "body_com_ang_vel_w": ["body_com_vel_w"], + "body_com_lin_acc_w": ["body_com_acc_w"], + "body_com_ang_acc_w": ["body_com_acc_w"], + "body_com_quat_b": ["body_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(rigid_object_data) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(rigid_object_data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(rigid_object_data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockNewtonArticulationView: + """Set up the mock environment for benchmarking.""" + mock_view = MockNewtonArticulationView( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + num_joints=0, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=1, + num_joints=0, + device=args.device, + ) + + # Patch the NewtonManager for the rigid_object_data module + with create_mock_newton_manager( + "isaaclab_newton.assets.rigid_object.rigid_object_data.SimulationManager", + gravity=(0.0, 0.0, -9.81), + ): + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Import RigidObjectData inside the patch context + from isaaclab_newton.assets.rigid_object.rigid_object_data import RigidObjectData + + # Create RigidObjectData instance + rigid_object_data = RigidObjectData(mock_view, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(rigid_object_data) + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + N, L = cfg.num_instances, cfg.num_bodies + dev = cfg.device + + # Update root transforms + root_tf_np = np.random.randn(N, 1, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + mock_view.set_mock_root_transforms(wp.array(root_tf_np, dtype=wp.transformf, device=dev)) + + # Update root velocities + root_vel_np = np.random.randn(N, 1, 6).astype(np.float32) + mock_view.set_mock_root_velocities(wp.array(root_vel_np, dtype=wp.spatial_vectorf, device=dev)) + + # Update link transforms + link_tf_np = np.random.randn(N, 1, L, 7).astype(np.float32) + link_tf_np[..., 3:7] /= np.linalg.norm(link_tf_np[..., 3:7], axis=-1, keepdims=True) + mock_view.set_mock_link_transforms(wp.array(link_tf_np, dtype=wp.transformf, device=dev)) + + # Update link velocities + link_vel_np = np.random.randn(N, 1, L, 6).astype(np.float32) + mock_view.set_mock_link_velocities(wp.array(link_vel_np, dtype=wp.spatial_vectorf, device=dev)) + + # Update body properties + mock_view.set_mock_coms( + wp.array(np.random.randn(N, 1, L, 3).astype(np.float32), dtype=wp.vec3f, device=dev) + ) + mock_view.set_mock_inertias( + wp.array(np.random.randn(N, 1, L, 9).astype(np.float32), dtype=wp.mat33f, device=dev) + ) + mock_view.set_mock_masses( + wp.array((np.random.rand(N, 1, L) * 10 + 0.1).astype(np.float32), dtype=wp.float32, device=dev) + ) + + # Invalidate timestamp to trigger recomputation + rigid_object_data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="newton_rigid_object_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=rigid_object_data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_newton/changelog.d/.gitkeep b/source/isaaclab_newton/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_newton/changelog.d/clone-plan-visualizer-cleanup.minor.rst b/source/isaaclab_newton/changelog.d/clone-plan-visualizer-cleanup.minor.rst new file mode 100644 index 000000000000..6fed4677a471 --- /dev/null +++ b/source/isaaclab_newton/changelog.d/clone-plan-visualizer-cleanup.minor.rst @@ -0,0 +1,9 @@ +Removed +^^^^^^^ + +* **Breaking:** Removed + ``isaaclab_newton.cloner.newton_replicate.create_newton_visualizer_prebuild_clone_fn``. + Callers that need a Newton model for visualization should call + :func:`~isaaclab_newton.cloner.newton_replicate.newton_visualizer_prebuild` + directly with the ``(sources, destinations, env_ids, mask, positions)`` bundle + derived from :meth:`~isaaclab.sim.SimulationContext.get_clone_plans`. diff --git a/source/isaaclab_newton/changelog.d/mtrepte-expand_viz_markers.skip b/source/isaaclab_newton/changelog.d/mtrepte-expand_viz_markers.skip new file mode 100644 index 000000000000..a23b7c7322b3 --- /dev/null +++ b/source/isaaclab_newton/changelog.d/mtrepte-expand_viz_markers.skip @@ -0,0 +1 @@ +Marker visualization changes are covered by the isaaclab fragment. diff --git a/source/isaaclab_newton/changelog.d/mym-newton-manager-abstraction.rst b/source/isaaclab_newton/changelog.d/mym-newton-manager-abstraction.rst new file mode 100644 index 000000000000..1ad8a9265830 --- /dev/null +++ b/source/isaaclab_newton/changelog.d/mym-newton-manager-abstraction.rst @@ -0,0 +1,14 @@ +Changed +^^^^^^^ + +* Changed :class:`~isaaclab_newton.physics.NewtonManager` to dispatch through + solver-specific manager subclasses while preserving the existing + ``NewtonCfg(solver_cfg=...)`` configuration pattern. + +Deprecated +^^^^^^^^^^ + +* Deprecated :attr:`~isaaclab_newton.physics.NewtonSolverCfg.solver_type` for + manager dispatch in favor of + :attr:`~isaaclab_newton.physics.NewtonSolverCfg.class_type`. Existing configs + remain valid, but new code should rely on ``class_type``. diff --git a/source/isaaclab_newton/changelog.d/pr-5458-merge-develop.rst b/source/isaaclab_newton/changelog.d/pr-5458-merge-develop.rst new file mode 100644 index 000000000000..30eb959531c2 --- /dev/null +++ b/source/isaaclab_newton/changelog.d/pr-5458-merge-develop.rst @@ -0,0 +1,13 @@ +Removed +^^^^^^^ + +* Removed the unimplemented ``ArticulationData.body_incoming_joint_wrench_b`` + accessor. Add :class:`~isaaclab.sensors.JointWrenchSensorCfg` to the scene + and read :attr:`~isaaclab.sensors.JointWrenchSensorData.force` and + :attr:`~isaaclab.sensors.JointWrenchSensorData.torque` instead. + +Fixed +^^^^^ + +* Fixed :class:`~isaaclab_newton.sensors.JointWrenchSensor` initialization for + USD assets whose articulation root is nested below the configured asset prim. diff --git a/source/isaaclab_newton/changelog.d/rschmitt_decouple_rednerer_camera.rst b/source/isaaclab_newton/changelog.d/rschmitt_decouple_rednerer_camera.rst new file mode 100644 index 000000000000..1c3efcb5c33e --- /dev/null +++ b/source/isaaclab_newton/changelog.d/rschmitt_decouple_rednerer_camera.rst @@ -0,0 +1,4 @@ +Changed +^^^^^^^^ + +* Modified the newton renderer to use the new patterns from renderer/camera decoupling. diff --git a/source/isaaclab_newton/config/extension.toml b/source/isaaclab_newton/config/extension.toml new file mode 100644 index 000000000000..0a8eed8000c2 --- /dev/null +++ b/source/isaaclab_newton/config/extension.toml @@ -0,0 +1,21 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.5.26" + +# Description +title = "Newton simulation interfaces for IsaacLab core package" +description="Extension providing IsaacLab with Newton specific abstractions." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "newton"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_newton" diff --git a/source/isaaclab_newton/docs/CHANGELOG.rst b/source/isaaclab_newton/docs/CHANGELOG.rst new file mode 100644 index 000000000000..d9626a890476 --- /dev/null +++ b/source/isaaclab_newton/docs/CHANGELOG.rst @@ -0,0 +1,598 @@ +Changelog +--------- + +0.5.26 (2026-04-30) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.sensors.JointWrenchSensor`. + + +0.5.25 (2026-04-28) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.physics.KaminoSolverCfg` to support Newton's Kamino + solver backend, a Proximal-ADMM based solver for constrained rigid multi-body dynamics. +* Added fused :meth:`~isaaclab_newton.assets.Articulation.write_joint_state_to_sim_index` + and :meth:`~isaaclab_newton.assets.Articulation.write_joint_state_to_sim_mask` that + write joint position and velocity in a single kernel launch instead of two. + +Changed +^^^^^^^ + +* Removed dead state-buffer output parameters from 8 root pose/velocity warp kernels + in :mod:`~isaaclab_newton.assets.kernels`, reducing kernel argument marshalling + overhead. + +Fixed +^^^^^ + +* Replaced boolean ``_fk_dirty`` and ``_kamino_needs_fk`` flags with per-world + reset masks (``_world_reset_mask`` and ``_fk_reset_mask``). Asset write methods + now call :meth:`~isaaclab_newton.physics.NewtonManager.invalidate_fk` with + ``env_mask``/``env_ids`` and ``articulation_ids``, so ``eval_fk`` and + ``SolverKamino.reset()`` only operate on dirtied environments. Rigid object + and rigid object collection write methods now also trigger FK invalidation. +* Fixed CUDA error 700 (illegal memory access) when calling ``SolverKamino.reset()`` + after CUDA graph capture. ``StateKamino.from_newton()`` lazily allocates + ``body_f_total``, ``joint_q_prev``, and ``joint_lambdas`` via ``wp.clone``/``wp.zeros`` + during the first ``step()`` inside graph capture. These memory-pool addresses become + stale without a warm-up ``wp.capture_launch`` replay to pin them before any eager + ``solver.reset()`` call. + + +0.5.24 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.physics.NewtonShapeCfg` exposing + per-shape collision defaults (``margin``, ``gap``) via + :attr:`~isaaclab_newton.physics.NewtonCfg.default_shape_cfg`. + :meth:`~isaaclab_newton.physics.NewtonManager.create_builder` now + forwards the wrapper onto Newton's upstream + ``ModelBuilder.default_shape_cfg`` via + :func:`~isaaclab.utils.checked_apply`. The previous code only set + ``gap`` and left ``margin`` at Newton's upstream default of ``0.0``, + causing all non-Anymal-D robots to fail to learn rough-terrain + locomotion on triangle-mesh terrain. ``RoughPhysicsCfg`` opts in to + ``margin=0.01``. + + +0.5.23 (2026-04-24) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated :class:`~isaaclab_newton.sim.views.NewtonSiteFrameView` to match the + new :class:`~isaaclab.sim.views.BaseFrameView` ProxyArray return contract. + See the ``isaaclab`` 4.6.15 changelog for migration guidance. + + +0.5.22 (2026-04-23) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Properties on the following data classes now return + :class:`~isaaclab.utils.warp.ProxyArray` instead of raw ``wp.array``: + :class:`~isaaclab_newton.assets.articulation.ArticulationData`, + :class:`~isaaclab_newton.assets.rigid_object.RigidObjectData`, + :class:`~isaaclab_newton.assets.rigid_object_collection.RigidObjectCollectionData`, + :class:`~isaaclab_newton.sensors.contact_sensor.ContactSensorData`, + :class:`~isaaclab_newton.sensors.frame_transformer.FrameTransformerData`, + :class:`~isaaclab_newton.sensors.imu.ImuData`, and + :class:`~isaaclab_newton.sensors.pva.PvaData`. + Use ``.torch`` for a cached zero-copy ``torch.Tensor`` view, or ``.warp`` for + the underlying ``wp.array``. Implicit torch operations (arithmetic, + ``torch.*`` functions) work during the deprecation period but emit a warning. + + +0.5.21 (2026-04-23) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed flakiness in ``test_body_root_state_properties`` by bounding the random spin velocity so + numerical drift stays within the position tolerance over the simulated trajectory. + + +0.5.20 (2026-04-22) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.sim.views.XformPrimView` providing the Newton + backend implementation for xform prim views. + +Changed +^^^^^^^ + +* Renamed :class:`~isaaclab_newton.sim.views.NewtonSiteXformPrimView` to + :class:`~isaaclab_newton.sim.views.NewtonSiteFrameView`. Old name is kept as a deprecated alias. + + +0.5.19 (2026-04-22) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated ``write_data_to_sim`` in :class:`~isaaclab_newton.assets.Articulation`, + :class:`~isaaclab_newton.assets.RigidObject`, and :class:`~isaaclab_newton.assets.RigidObjectCollection` + to use the dual-buffer :class:`~isaaclab.utils.wrench_composer.WrenchComposer`. Composed wrenches are + applied after body-frame composition. +* Updated the PhysX Tensor API docstring link in :class:`~isaaclab_newton.assets.ArticulationData` + from ``omni.physics.tensors.impl.api`` to ``omni.physics.tensors.api`` to track the upstream + Isaac Sim module relocation (the ``impl`` submodule was removed). + + +0.5.18 (2026-04-21) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Upgraded Newton from ``2684d75`` to ``a27277e``. Includes collision improvements, contact quality fixes, + hydroelastic contact optimization, and memory usage fixes in CollisionPipeline. For details see + ``Newton changelog ``. +* Pinned ``mujoco`` and ``mujoco-warp`` to ``3.6.0`` to align with the Newton library. + + +0.5.17 (2026-04-20) +~~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed Newton visualization colors drifting from the USD stage by calling + :func:`~isaaclab.sim.utils.newton_model_utils.replace_newton_shape_colors` + after the model is finalized in :class:`~isaaclab_newton.physics.NewtonManager`. + +Changed +^^^^^^^ + +* Changed Newton Warp tiled camera outputs to clear with a light linear gray + (0xFFEEEEEE, 93% gray, fully opaque) background via ``SensorTiledCamera.ClearData`` + in :class:`~isaaclab_newton.renderers.NewtonWarpRenderer`. + +0.5.16 (2026-04-17) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed incorrect attribute name ``contact_margin`` on Newton + ``ShapeConfig`` in + :meth:`~isaaclab_newton.physics.NewtonManager.create_builder`. The + field was renamed to ``gap`` in Newton PR #1732. The typo created a + dead attribute so the intended 1 cm default shape gap was never applied. + + +0.5.15 (2026-04-16) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.sensors.pva.Pva` sensor wrapping Newton's + body state (``body_q``, ``body_qd``, ``body_qdd``) to provide world-frame + pose and body-frame velocities/accelerations. + + +0.5.14 (2026-04-14) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.sensors.Imu` sensor wrapping Newton's + ``SensorIMU``, providing angular velocity and linear acceleration in the + sensor's body frame. + + +0.5.13 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.physics.NewtonCollisionPipelineCfg` to expose Newton + collision pipeline parameters via :attr:`~isaaclab_newton.physics.NewtonCfg.collision_cfg`. +* Added :attr:`~isaaclab_newton.physics.MJWarpSolverCfg.tolerance` for solver convergence control. + +Fixed +^^^^^ + +* Fixed truthiness check on hydroelastic config dict in collision pipeline + initialization. An explicit ``is not None`` check is now used so that + :class:`~isaaclab_newton.physics.newton_collision_cfg.HydroelasticSDFCfg` + with all-default values is no longer silently skipped. + + +0.5.12 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``set_friction_index/mask`` and ``set_restitution_index/mask`` methods to + Newton assets for native material property randomization. + + +0.5.11 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.sensors.frame_transformer.FrameTransformer` sensor + wrapping Newton's ``SensorFrameTransform``. Supports per-env source/target site + registration, wildcard body matching, and zero-copy transform views. + + +0.5.10 (2026-04-05) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed NaN after env reset caused by stale ``body_q`` in the collision + pipeline. Added :meth:`~isaaclab_newton.physics.NewtonManager.invalidate_fk` + so articulation write methods trigger ``eval_fk`` before the next + ``collide()``. + +Fixed +^^^^^ + +* Fixed ``test_body_incoming_joint_wrench_b_single_joint`` computing the expected + wrench in the parent body's frame instead of the child body's frame. The expected + wrench is now expressed in the child body's own frame and body indices are resolved + by name to be robust across backends. + + +0.5.9 (2026-03-13) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed overly tight numerical tolerances in + ``test_object_state_properties`` for + :class:`~isaaclab_newton.assets.RigidObjectCollection` that caused + spurious failures on CPU. Aligned tolerances with the equivalent + rigid object test (``test_rigid_object.py``, ``atol=2e-3, rtol=2e-3``). + + +0.5.8 (2026-03-13) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fix ``test_filter_enables_force_matrix`` failing with ``TypeError`` due to + ``pytest.mark.flaky(reruns=3)`` being incompatible with the installed + ``flaky`` plugin. Replace with ``@flaky(max_runs=4, min_passes=1)`` decorator. + + +0.5.7 (2026-03-13) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Removed verbose ``logger.info`` calls from + :class:`~isaaclab_newton.assets.RigidObject`, + :class:`~isaaclab_newton.assets.RigidObjectCollection`, and + :class:`~isaaclab_newton.assets.Articulation` initialization that logged body + names, joint names, and instance counts. Articulation joint parameter tables and + actuator group summaries are retained. + + +0.5.6 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed dtype mismatch in :class:`~isaaclab_newton.assets.RigidObjectCollection` + where ``write_body_com_pose_to_sim_index`` and ``write_body_link_velocity_to_sim_index`` + passed ``body_com_pose_b`` (``wp.transformf``) instead of ``body_com_pos_b`` + (``wp.vec3f``) to the underlying warp kernels. + +* Fixed :attr:`~isaaclab_newton.assets.ArticulationData.body_inertia`, + :attr:`~isaaclab_newton.assets.RigidObjectData.body_inertia`, and + :attr:`~isaaclab_newton.assets.RigidObjectCollectionData.body_inertia` + returning raw ``mat33f`` arrays instead of ``(N, B, 9)`` float32. The + previous ptr-based reshape assumed ``float32`` with ``ndim == 4``, but + Newton returns ``mat33f`` dtype with ``ndim == 2``. Fixed the pointer + aliasing to correctly reinterpret each 36-byte ``mat33f`` element as 9 + contiguous ``float32`` values. + + +0.5.5 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :class:`~isaaclab_newton.renderers.NewtonWarpRenderer` to raise a clear + ``RuntimeError`` when the Newton model is unavailable instead of deferring to + a confusing ``AttributeError`` on ``render_context.world_count``. + + +0.5.4 (2026-02-28) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added contact sensor support via :class:`newton.sensors.SensorContact` with + Isaac Lab pattern conversion (``.*`` to fnmatch, USD path normalization) + inlined in :meth:`~isaaclab_newton.physics.NewtonManager.add_contact_sensor`. + +Changed +^^^^^^^ + +* Changed :class:`~isaaclab_newton.sensors.contact_sensor.ContactSensor` to + flatten Newton's per-world nested ``sensing_objs`` and ``counterparts`` + attributes. + +Fixed +^^^^^ + +* Fixed ``RigidObjectData.body_inertia`` shape from ``(N, B, 3, 3)`` to ``(N, B, 9)``. + + +0.5.3 (2026-03-09) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :attr:`~isaaclab_newton.assets.RigidObjectData.body_inertia` to return a + ``(num_instances, num_bodies, 9)`` float32 strided view, matching the articulation fix in 0.5.2. + +* Fixed non-contiguous array handling in ``RigidObjectData`` position, quaternion, and + spatial-vector extraction helpers. The ``source`` buffer shape and kernel dispatch ``dim`` + now use the input array's shape instead of the (possibly uninitialized) output shape. + + +0.5.2 (2026-03-06) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :attr:`~isaaclab_newton.assets.ArticulationData.body_inertia` in + :class:`~isaaclab_newton.assets.ArticulationData` to return a ``(num_instances, num_bodies, 9)`` + float32 array as documented, instead of a ``(num_instances, num_bodies, 3, 3)`` array. The + ``(N, B, 3, 3)`` shape caused a broadcasting error in + :class:`~isaaclab.envs.mdp.events.randomize_rigid_body_mass` and a dimension mismatch when the + ``write_body_inertia_to_buffer_*`` kernels were called via + :meth:`~isaaclab_newton.assets.Articulation.set_inertias_index` and + :meth:`~isaaclab_newton.assets.Articulation.set_inertias_mask`. The fix creates a ``(N, B, 9)`` + view over the same memory using explicit strides, collapsing the two contiguous trailing + dimensions without copying data. + +* Fixed ``AttributeError: 'NoneType' object has no attribute 'device'`` in + :meth:`~isaaclab_newton.physics.NewtonManager.step` when ``use_cuda_graph=True`` but the CUDA + graph was not captured (e.g., when RTX/Fabric USD sync is active). The step condition now + checks ``cls._graph is not None`` directly instead of repeating the capture-time heuristic. + + +0.5.1 (2026-03-06) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.assets.RigidObjectCollection` and + :class:`~isaaclab_newton.assets.RigidObjectCollectionData` for managing + collections of independent rigid bodies. Uses a single + ``ArticulationView`` with a combined fnmatch pattern to get direct + ``(num_envs, num_bodies)`` bindings into Newton's state, avoiding the + scatter/gather overhead needed by PhysX. + +* Added :class:`~isaaclab_newton.test.mock_interfaces.views.MockNewtonCollectionView` + for unit testing the collection data class without simulation. + +* Added Newton backend to the rigid object collection interface conformance + tests (``test_rigid_object_collection_iface.py``). + + +0.5.0 (2026-03-06) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added full Newton articulation test suite (``test_articulation.py``) — 194 passed, + 8 skipped, 4 xfailed — adapted from PhysX tests with Newton-specific imports, sim + config, and solver tolerance adjustments. + +* Added full Newton rigid body test suite (``test_rigid_object.py``) — 74 passed, + 25 skipped — adapted from PhysX tests with Newton-specific mass/COM APIs and + ``_newton_sim_context()`` helper for device/gravity/dt configuration. + +Fixed +^^^^^ + +* Fixed ``ArticulationData`` and ``RigidObjectData`` to rebind simulation pointers + on full sim reset via ``PHYSICS_READY`` callback, preventing stale warp array + references after ``sim.reset()`` recreates the Newton model. + +* Fixed ``ArticulationData`` to force ``eval_fk`` after joint state writes so that + link poses are consistent with joint positions before the next ``sim.step()``. + +* Fixed lazy initialization of ``TimestampedBuffer`` properties in + ``RigidObjectData`` (velocity-in-body-frame and deprecated state properties) + that were left as ``None`` and caused ``AttributeError`` on first access. + +* Fixed ``None`` guards for timestamp invalidation in ``RigidObject`` write methods + (``write_root_pose_to_sim``, ``write_root_velocity_to_sim``) to avoid + ``AttributeError`` when optional buffers have not been initialized. + +* Fixed ``is_contiguous`` usage in ``RigidObjectData`` — warp 1.12.0rc2 exposes it + as a property, not a method. + +* Fixed ``body_com_pose_b`` → ``body_com_pos_b`` kernel input naming in + ``RigidObjectData`` for ``root_com_pose_w`` and ``root_link_vel_w`` properties. + +* Fixed ``wp.from_torch()`` called on warp arrays in ``RigidObjectData`` body + inertia binding — replaced with direct ``.view()``/``.reshape()`` on warp arrays. + +* Improved CPU support in ``NewtonManager``: added device guards for CUDA graph + operations that are not available on CPU. + +* Fixed explicit mask resolution in asset write methods to correctly handle both + index-based and mask-based sparse writes. + + +0.4.1 (2026-03-03) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fix asset writer methods in :class:`~isaaclab_newton.assets.Articulation` and + :class:`~isaaclab_newton.assets.RigidObject` to use public data properties + instead of internal timestamped buffer ``.data`` fields, removing redundant + manual timestamp updates. + + +0.4.0 (2026-03-01) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_newton.physics.NewtonManager` physics backend with + MuJoCo-Warp, XPBD, and Featherstone solvers, CUDA-graph support, and + backend-agnostic callback dispatch via :class:`~isaaclab.physics.PhysicsEvent`. + +Changed +^^^^^^^ + +* Implemented ``newton_replicate`` to build per-environment worlds from USD + prototypes using Newton's ``ModelBuilder``. + +* Renamed ``NewtonContactSensorCfg`` to ``ContactSensorCfg`` and made it + backend-agnostic with lazy ``class_type`` resolution. + +* Pinned ``mujoco-warp==3.5.0`` and ``warp-lang==1.12.0rc2`` in ``setup.py``. + + +0.3.0 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_newton.test.mock_interfaces` test infrastructure module with + structured mock views, factory functions, and unit tests — mirroring the + ``isaaclab_physx`` mock interface pattern: + + * :class:`~isaaclab_newton.test.mock_interfaces.views.MockNewtonArticulationView`: + extracted from monolithic ``mock_newton.py`` into its own module with lazy + initialization, individual ``set_mock_*`` methods, ``_noop_setters`` flag, + and numpy-based ``set_random_mock_data()``. + + * Factory functions: ``create_mock_articulation_view()``, + ``create_mock_quadruped_view()``, ``create_mock_humanoid_view()`` for + convenient test setup. + +* Added unit tests for mock interfaces: + ``test_mock_articulation_view.py`` and ``test_factories.py``. + +Changed +^^^^^^^ + +* Restructured ``mock_newton.py``: moved ``MockNewtonArticulationView`` to + ``views/mock_articulation_view.py`` and removed ``torch`` dependency from + the mock module (replaced with ``numpy`` for random data generation). + + +0.2.3 (2026-02-27) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_newton.assets.Articulation` and + :class:`~isaaclab_newton.assets.RigidObject` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask`. + + +0.2.2 (2026-02-26) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_newton.assets.Articulation` and + :class:`~isaaclab_newton.assets.RigidObject` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask`. + + +0.2.1 (2026-02-25) + +Removed +^^^^^^^ + +imgui-bundle dependency. + +0.2.0 (2026-02-24) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_newton.assets` module containing Newton-specific asset implementations: + + * :class:`~isaaclab_newton.assets.Articulation` and :class:`~isaaclab_newton.assets.ArticulationData`: + Newton-specific implementation for articulated rigid body systems (e.g., robots). Extends + :class:`~isaaclab.assets.BaseArticulation` with Newton's ``ArticulationView`` API for + GPU-accelerated simulation of multi-joint systems. + + * :class:`~isaaclab_newton.assets.RigidObject` and :class:`~isaaclab_newton.assets.RigidObjectData`: + Newton-specific implementation for single rigid body assets. Extends + :class:`~isaaclab.assets.BaseRigidObject` with Newton's simulation API for efficient + rigid body state queries and writes. + +* Added warp kernel modules for fused GPU computations: + + * :mod:`isaaclab_newton.assets.kernels` — shared kernels for root state extraction, + velocity transforms, COM/link frame conversions, and data write-back. + * :mod:`isaaclab_newton.assets.articulation.kernels` — articulation-specific kernels + for joint state, soft limits, actuator state updates, and friction properties. + +* All ``.data.*`` properties use ``wp.array`` with structured warp types + (``wp.vec3f``, ``wp.quatf``, ``wp.transformf``, ``wp.spatial_vectorf``), + matching the same convention used by ``isaaclab_physx``. + +* All write methods follow the ``_index`` / ``_mask`` split for explicit + sparse-index vs. boolean-mask semantics. + + +0.1.0 (2026-02-16) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added empty package diff --git a/source/isaaclab_newton/docs/README.md b/source/isaaclab_newton/docs/README.md new file mode 100644 index 000000000000..7ca4c1e09ff2 --- /dev/null +++ b/source/isaaclab_newton/docs/README.md @@ -0,0 +1 @@ +# Isaac Lab Newton Simulation interfaces diff --git a/source/isaaclab_newton/isaaclab_newton/__init__.py b/source/isaaclab_newton/isaaclab_newton/__init__.py new file mode 100644 index 000000000000..00050699bc8c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing the Newton simulation interfaces for IsaacLab core package.""" + +import os +import toml + +# Conveniences to other module directories via relative paths +ISAACLAB_NEWTON_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +# Find config/extension.toml: bundled inside the package (wheel install) or in the +# parent directory (editable install). +_pkg_dir = os.path.dirname(os.path.abspath(__file__)) +_toml_path = os.path.join(_pkg_dir, "config", "extension.toml") +if not os.path.isfile(_toml_path): + _toml_path = os.path.join(ISAACLAB_NEWTON_EXT_DIR, "config", "extension.toml") + +ISAACLAB_NEWTON_METADATA = toml.load(_toml_path) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +# Configure the module-level variables +__version__ = ISAACLAB_NEWTON_METADATA["package"]["version"] diff --git a/source/isaaclab_newton/isaaclab_newton/assets/__init__.py b/source/isaaclab_newton/isaaclab_newton/assets/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/assets/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/assets/__init__.pyi new file mode 100644 index 000000000000..b9359445960c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/__init__.pyi @@ -0,0 +1,17 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", + "RigidObject", + "RigidObjectData", + "RigidObjectCollection", + "RigidObjectCollectionData", +] + +from .articulation import Articulation, ArticulationData +from .rigid_object import RigidObject, RigidObjectData +from .rigid_object_collection import RigidObjectCollection, RigidObjectCollectionData diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.pyi new file mode 100644 index 000000000000..9e482491fe54 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", +] + +from .articulation import Articulation +from .articulation_data import ArticulationData diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py new file mode 100644 index 000000000000..c3c6eca044f7 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -0,0 +1,3952 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Flag for pyright to ignore type errors in this file. +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import logging +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp +from newton import JointType +from newton.selection import ArticulationView +from newton.solvers import SolverNotifyFlags +from prettytable import PrettyTable + +from pxr import UsdPhysics + +from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.physics import PhysicsEvent +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims +from isaaclab.utils.string import resolve_matching_names, resolve_matching_names_values +from isaaclab.utils.types import ArticulationActions +from isaaclab.utils.version import get_isaac_sim_version, has_kit +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.assets.articulation import kernels as articulation_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +from .articulation_data import ArticulationData + +if TYPE_CHECKING: + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + +# import logger +logger = logging.getLogger(__name__) + + +class Articulation(BaseArticulation): + """An articulation asset class. + + An articulation is a collection of rigid bodies connected by joints. The joints can be either + fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. + However, the articulation class has currently been tested with revolute and prismatic joints. + The class supports both floating-base and fixed-base articulations. The type of articulation + is determined based on the root joint of the articulation. If the root joint is fixed, then + the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base + system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. + + For an asset to be considered an articulation, the root prim of the asset must have the + `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using + the reduced coordinate formulation. On playing the simulation, the physics engine parses the + articulation root prim and creates the corresponding articulation in the physics engine. The + articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. + + The articulation class also provides the functionality to augment the simulation of an articulated + system with custom actuator models. These models can either be explicit or implicit, as detailed in + the :mod:`isaaclab.actuators` module. The actuator models are specified using the + :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the + corresponding actuator models, when the simulation is played. + + During the simulation step, the articulation class first applies the actuator models to compute + the joint commands based on the user-specified targets. These joint commands are then applied + into the simulation. The joint commands can be either position, velocity, or effort commands. + As an example, the following snippet shows how this can be used for position commands: + + .. code-block:: python + + # an example instance of the articulation class + my_articulation = Articulation(cfg) + + # set joint position targets + my_articulation.set_joint_position_target(position) + # propagate the actuator models and apply the computed commands into the simulation + my_articulation.write_data_to_sim() + + # step the simulation using the simulation context + sim_context.step() + + # update the articulation state, where dt is the simulation time step + my_articulation.update(dt) + + .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html + + """ + + cfg: ArticulationCfg + """Configuration instance for the articulations.""" + + __backend_name__: str = "newton" + """The name of the backend for the articulation.""" + + actuators: dict + """Dictionary of actuator instances for the articulation. + + The keys are the actuator names and the values are the actuator instances. The actuator instances + are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` + attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. + """ + + def __init__(self, cfg: ArticulationCfg): + """Initialize the articulation. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> ArticulationData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation is a fixed-base or floating-base system.""" + return self.root_view.is_fixed_base + + @property + def num_joints(self) -> int: + """Number of joints in articulation.""" + return self.root_view.joint_dof_count + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons in articulation.""" + return 0 + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons in articulation.""" + return 0 + + @property + def num_bodies(self) -> int: + """Number of bodies in articulation.""" + return self.root_view.link_count + + @property + def num_shapes_per_body(self) -> list[int]: + """Number of collision shapes per body in the articulation. + + This property returns a list where each element represents the number of collision + shapes for the corresponding body in the articulation. This is cached for efficient + access during material property randomization and other operations. + + Returns: + List of integers representing the number of shapes per body. + """ + if not hasattr(self, "_num_shapes_per_body"): + self._num_shapes_per_body = [] + for shapes in self._root_view.body_shapes: + self._num_shapes_per_body.append(len(shapes)) + return self._num_shapes_per_body + + @property + def joint_names(self) -> list[str]: + """Ordered names of joints in articulation.""" + return self.root_view.joint_dof_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered names of fixed tendons in articulation.""" + return [] + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered names of spatial tendons in articulation.""" + return [] + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in articulation.""" + return self.root_view.link_names + + @property + def root_view(self) -> ArticulationView: + """Root view for the asset. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the articulation. + + .. caution:: + If both `env_ids` and `env_mask` are provided, then `env_mask` takes precedence over `env_ids`. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # use ellipses object to skip initial indices. + if (env_ids is None) or (env_ids == slice(None)): + env_ids = slice(None) + # reset actuators + for actuator in self.actuators.values(): + actuator.reset(env_ids) + # reset external wrenches. + self._instantaneous_wrench_composer.reset(env_ids, env_mask) + self._permanent_wrench_composer.reset(env_ids, env_mask) + + def write_data_to_sim(self): + """Write external wrenches and joint commands to the simulation. + + If any explicit actuators are present, then the actuator models are used to compute the + joint commands. Otherwise, the joint commands are directly set into the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + else: + composer = self._permanent_wrench_composer + composer.compose_to_body_frame() + wp.launch( + shared_kernels.update_wrench_array_with_force_and_torque, + dim=(self.num_instances, self.num_bodies), + device=self.device, + inputs=[ + composer.out_force_b, + composer.out_torque_b, + self._data._sim_bind_body_external_wrench, + self._ALL_ENV_MASK, + self._ALL_BODY_MASK, + ], + ) + self._instantaneous_wrench_composer.reset() + + # apply actuator models + self._apply_actuator_model() + # write actions into simulation via Newton bindings + self.data._sim_bind_joint_effort.assign(self._joint_effort_target_sim) + # position and velocity targets only for implicit actuators + if self._has_implicit_actuators: + self.data._sim_bind_joint_position_target.assign(self._joint_pos_target_sim) + self.data._sim_bind_joint_velocity_target.assign(self._joint_vel_target_sim) + + def update(self, dt: float): + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return resolve_matching_names(name_keys, self.body_names, preserve_order) + + def find_joints( + self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. + joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + if joint_subset is None: + joint_subset = self.joint_names + # find joints + return resolve_matching_names(name_keys, joint_subset, preserve_order) + + def find_fixed_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint + names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means + all joints in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + # tendons follow the joint names they are attached to + tendon_subsets = self.fixed_tendon_names + # find tendons + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + def find_spatial_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.spatial_tendon_names + # find tendons + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + """ + Operations - State Writers. + """ + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_link_pose_to_sim_mask(root_pose=root_pose, env_mask=env_mask) + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_link_pose_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_pose, + env_ids, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + self.data._fk_timestamp = -1.0 # Forces a kinematic update to get the latest body link poses. + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") + + wp.launch( + shared_kernels.set_root_link_pose_to_sim_mask, + dim=root_pose.shape[0], + inputs=[ + root_pose, + env_mask, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + self.data._fk_timestamp = -1.0 # Forces a kinematic update to get the latest body link poses. + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would call + # write_root_link_pose_to_sim after this. + wp.launch( + shared_kernels.set_root_com_pose_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_pose, + self.data.body_com_pos_b, + env_ids, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + self.data._fk_timestamp = -1.0 # Forces a kinematic update to get the latest body link poses. + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") + wp.launch( + shared_kernels.set_root_com_pose_to_sim_mask, + dim=root_pose.shape[0], + inputs=[ + root_pose, + self.data.body_com_pos_b, + env_mask, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + self.data._fk_timestamp = -1.0 # Forces a kinematic update to get the latest body link poses. + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids), 6) or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_com_velocity_to_sim_mask(root_velocity=root_velocity, env_mask=env_mask) + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids), 6) or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_com_velocity_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + env_ids, + self.data._num_bodies, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") + wp.launch( + shared_kernels.set_root_com_velocity_to_sim_mask, + dim=root_velocity.shape[0], + inputs=[ + root_velocity, + env_mask, + self.data._num_bodies, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (len(env_ids), 6) or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. + wp.launch( + shared_kernels.set_root_link_velocity_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pos_b, + self.data.root_link_pose_w, + env_ids, + self.data._num_bodies, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") + wp.launch( + shared_kernels.set_root_link_velocity_to_sim_mask, + dim=root_velocity.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pos_b, + self.data.root_link_pose_w, + env_mask, + self.data._num_bodies, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Only invalidate if the buffer has been accessed (not None). + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + + def write_joint_state_to_sim_index( + self, + *, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint positions and velocities in a single fused kernel launch. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)). + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") + wp.launch( + articulation_kernels.write_joint_state_data_index, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + position, + velocity, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_pos, + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + # Invalidate FK timestamp so body poses are recomputed on next access. + self.data._fk_timestamp = -1.0 + SimulationManager.invalidate_fk() + if self.data._body_link_vel_w is not None: + self.data._body_link_vel_w.timestamp = -1.0 + if self.data._body_com_pose_b is not None: + self.data._body_com_pose_b.timestamp = -1.0 + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_joint_state_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint positions and velocities over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + position: Joint positions. Shape is (num_instances, num_joints). + velocity: Joint velocities. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(position, (env_mask, joint_mask), wp.float32, "position") + self.assert_shape_and_dtype_mask(velocity, (env_mask, joint_mask), wp.float32, "velocity") + wp.launch( + articulation_kernels.write_joint_state_data_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + position, + velocity, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_pos, + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + # Invalidate FK timestamp so body poses are recomputed on next access. + self.data._fk_timestamp = -1.0 + SimulationManager.invalidate_fk() + if self.data._body_link_vel_w is not None: + self.data._body_link_vel_w.timestamp = -1.0 + if self.data._body_com_pose_b is not None: + self.data._body_com_pose_b.timestamp = -1.0 + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_joint_position_to_sim_index( + self, + *, + position: torch.Tensor, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint positions over selected environment indices into the simulation. + + .. note:: + This method expects partial or full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + position, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_pos, + ], + device=self.device, + ) + # Invalidate FK timestamp so body poses are recomputed on next access. + self.data._fk_timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + # Need to invalidate the buffer to trigger the update with the new root pose. + # Only invalidate if the buffer has been accessed (not None). + if self.data._body_link_vel_w is not None: + self.data._body_link_vel_w.timestamp = -1.0 + if self.data._body_com_pose_b is not None: + self.data._body_com_pose_b.timestamp = -1.0 + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_joint_position_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint positions over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + position: Joint positions. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(position, (env_mask, joint_mask), wp.float32, "position") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + position, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_pos, + ], + device=self.device, + ) + # Invalidate FK timestamp so body poses are recomputed on next access. + self.data._fk_timestamp = -1.0 + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + # Need to invalidate the buffer to trigger the update with the new root pose. + # Only invalidate if the buffer has been accessed (not None). + if self.data._body_link_vel_w is not None: + self.data._body_link_vel_w.timestamp = -1.0 + if self.data._body_com_pose_b is not None: + self.data._body_com_pose_b.timestamp = -1.0 + if self.data._body_com_pose_w is not None: + self.data._body_com_pose_w.timestamp = -1.0 + if self.data._body_state_w is not None: + self.data._body_state_w.timestamp = -1.0 + if self.data._body_link_state_w is not None: + self.data._body_link_state_w.timestamp = -1.0 + if self.data._body_com_state_w is not None: + self.data._body_com_state_w.timestamp = -1.0 + + def write_joint_velocity_to_sim_index( + self, + *, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint velocities to the simulation. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + articulation_kernels.write_joint_vel_data_index, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + velocity, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + + def write_joint_velocity_to_sim_mask( + self, + *, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint velocities over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + velocity: Joint velocities. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(velocity, (env_mask, joint_mask), wp.float32, "velocity") + wp.launch( + articulation_kernels.write_joint_vel_data_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + velocity, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + + """ + Operations - Simulation Parameters Writers. + """ + + def write_joint_stiffness_to_sim_index( + self, + *, + stiffness: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint stiffness over selected environment indices into the simulation. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_stiffness, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_stiffness, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_stiffness_to_sim_mask( + self, + *, + stiffness: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint stiffness over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Joint stiffness. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + stiffness, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_stiffness, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask(stiffness, (env_mask, joint_mask), wp.float32, "stiffness") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + stiffness, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_stiffness, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_damping_to_sim_index( + self, + *, + damping: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint damping over selected environment indices into the simulation. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Joint damping. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + damping, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_damping, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + damping, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_damping, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_damping_to_sim_mask( + self, + *, + damping: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint damping over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Joint damping. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + damping, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_damping, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask(damping, (env_mask, joint_mask), wp.float32, "damping") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + damping, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_damping, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_position_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + warn_limit_violation: bool = True, + ): + """Write joint position limits over selected environment indices into the simulation. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + + clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. + if isinstance(limits, float): + raise ValueError("Joint position limits must be a tensor or array, not a float.") + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") + wp.launch( + articulation_kernels.write_joint_limit_data_to_buffer_index, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + self.cfg.soft_joint_pos_limit_factor, + env_ids, + joint_ids, + ], + outputs=[ + self.data._sim_bind_joint_pos_limits_lower, + self.data._sim_bind_joint_pos_limits_upper, + self.data._soft_joint_pos_limits, + self.data._default_joint_pos, + clamped_defaults, + ], + device=self.device, + ) + # Log a warning if the default joint positions are outside of the new limits. + if clamped_defaults.numpy()[0] > 0: + violation_message = ( + "Some default joint positions are outside of the range of the new joint limits. Default joint positions" + " will be clamped to be within the new joint limits." + ) + if warn_limit_violation: + logger.warning(violation_message) + else: + logger.info(violation_message) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_position_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + warn_limit_violation: bool = True, + ): + """Write joint position limits over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint limits. Shape is (num_instances, num_joints, 2). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) + if isinstance(limits, float): + raise ValueError("Joint position limits must be a tensor or array, not a float.") + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.vec2f, "limits") + wp.launch( + articulation_kernels.write_joint_limit_data_to_buffer_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + limits, + self.cfg.soft_joint_pos_limit_factor, + env_mask, + joint_mask, + ], + outputs=[ + self.data._sim_bind_joint_pos_limits_lower, + self.data._sim_bind_joint_pos_limits_upper, + self.data._soft_joint_pos_limits, + self.data._default_joint_pos, + clamped_defaults, + ], + device=self.device, + ) + # Log a warning if the default joint positions are outside of the new limits. + if clamped_defaults.numpy()[0] > 0: + violation_message = ( + "Some default joint positions are outside of the range of the new joint limits. Default joint positions" + " will be clamped to be within the new joint limits." + ) + if warn_limit_violation: + logger.warning(violation_message) + else: + logger.info(violation_message) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_velocity_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint max velocity over selected environment indices into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_vel_limits, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_vel_limits, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_velocity_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint max velocity over selected environment mask into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint max velocity. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + limits, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_vel_limits, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32, "limits") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + limits, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_vel_limits, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_effort_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint effort limits over selected environment indices into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_effort_limits, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_effort_limits, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_effort_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint effort limits over selected environment mask into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limits: Joint torque limits. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + limits, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_effort_limits, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32, "limits") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + limits, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_effort_limits, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_armature_to_sim_index( + self, + *, + armature: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Write joint armature over selected environment indices into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + armature: Joint armature. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(armature, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + armature, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_armature, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + armature, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_armature, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_armature_to_sim_mask( + self, + *, + armature: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint armature over selected environment mask into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + armature: Joint armature. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(armature, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + armature, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_armature, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask(armature, (env_mask, joint_mask), wp.float32, "armature") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + armature, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_armature, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_friction_coefficient_to_sim_index( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + r"""Write joint friction coefficients over selected environment indices into the simulation. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(joint_friction_coeff, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + joint_friction_coeff, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_friction_coeff, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype( + joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff" + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + joint_friction_coeff, + env_ids, + joint_ids, + ], + outputs=[ + self.data.joint_friction_coeff, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + def write_joint_friction_coefficient_to_sim_mask( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + r"""Write joint friction coefficients over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + if isinstance(joint_friction_coeff, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + joint_friction_coeff, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_friction_coeff, + ], + device=self.device, + ) + else: + self.assert_shape_and_dtype_mask( + joint_friction_coeff, (env_mask, joint_mask), wp.float32, "joint_friction_coeff" + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + joint_friction_coeff, + env_mask, + joint_mask, + ], + outputs=[ + self.data.joint_friction_coeff, + ], + device=self.device, + ) + # tell the physics engine that some of the joint properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.JOINT_DOF_PROPERTIES) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)). + body_ids: Body indices. If None, then all bodies are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_mass, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + body_mask = self._resolve_mask(body_mask, self._ALL_BODY_MASK) + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32, "masses") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + masses, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_mass, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (len(env_ids), len(body_ids), 3). In warp + the expected shape is (num_instances, num_bodies), with dtype wp.vec3f. + body_ids: Body indices. If None, then all bodies are used. + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f, "coms") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_com_position_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_com_pos_b, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (num_instances, num_bodies, 3) or + (num_instances, num_bodies, 7) (transformf convention — only position is used). In warp + the expected shape is (num_instances, num_bodies), with dtype wp.vec3f or wp.transformf. + body_mask: Body mask. If None, then all bodies are used. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + body_mask = self._resolve_mask(body_mask, self._ALL_BODY_MASK) + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f, "coms") + wp.launch( + shared_kernels.write_body_com_position_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + coms, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_com_pos_b, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9). In warp + the expected shape is (num_instances, num_bodies, 9), with dtype wp.float32. + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_inertia_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_inertia, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + body_mask = self._resolve_mask(body_mask, self._ALL_BODY_MASK) + self.assert_shape_and_dtype_mask(inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,)) + wp.launch( + shared_kernels.write_body_inertia_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + inertias, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_inertia, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_joint_position_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set joint position targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_pos_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_position_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint position targets into internal buffers using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint position targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + target, + env_mask, + joint_mask, + ], + outputs=[ + self.data._joint_pos_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_velocity_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set joint velocity targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_vel_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_velocity_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint velocity targets into internal buffers using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint velocity targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + target, + env_mask, + joint_mask, + ], + outputs=[ + self.data._joint_vel_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_effort_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set joint efforts into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_effort_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_effort_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint efforts into internal buffers using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + target: Joint effort targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. Shape is (num_joints,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + joint_mask = self._resolve_mask(joint_mask, self._ALL_JOINT_MASK) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], joint_mask.shape[0]), + inputs=[ + target, + env_mask, + joint_mask, + ], + outputs=[ + self.data._joint_effort_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + """ + Operations - Tendons. + """ + + def set_fixed_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers using indices. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers using masks. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Fixed tendon stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_fixed_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers using indices. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim_index` + function. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers using masks. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Fixed tendon damping. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_fixed_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using indices. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using masks. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_fixed_tendon_position_limit_index( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon position limit into internal buffers using indices. + + This function does not apply the tendon position limit to the simulation. It only fills the buffers with + the desired values. To apply the tendon position limit, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit: Fixed tendon position limit. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the position limit for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_position_limit_mask( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon position limit into internal buffers using masks. + + This function does not apply the tendon position limit to the simulation. It only fills the buffers with + the desired values. To apply the tendon position limit, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit: Fixed tendon position limit. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_fixed_tendon_rest_length_index( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon rest length into internal buffers using indices. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_rest_length_mask( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon rest length into internal buffers using masks. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + rest_length: Fixed tendon rest length. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_fixed_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon offset into internal buffers using indices. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_fixed_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon offset into internal buffers using masks. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + offset: Fixed tendon offset. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + Shape is (num_fixed_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def write_fixed_tendon_properties_to_sim_index( + self, + *, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using indices. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + fixed_tendon_ids: The fixed tendon indices to write the properties for. Defaults to None + (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def write_fixed_tendon_properties_to_sim_mask( + self, + *, + env_mask: wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using masks. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_spatial_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers using indices. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_spatial_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers using masks. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + stiffness: Spatial tendon stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + Shape is (num_spatial_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_spatial_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers using indices. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_spatial_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers using masks. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + damping: Spatial tendon damping. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + Shape is (num_spatial_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_spatial_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using indices. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None + (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_spatial_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using masks. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + Shape is (num_spatial_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def set_spatial_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon offset into internal buffers using indices. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def set_spatial_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon offset into internal buffers using masks. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + offset: Spatial tendon offset. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + Shape is (num_spatial_tendons,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + def write_spatial_tendon_properties_to_sim_index( + self, + *, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using indices. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + def write_spatial_tendon_properties_to_sim_mask( + self, + *, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using masks. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + raise NotImplementedError() + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + + if self.cfg.articulation_root_prim_path is not None: + # The articulation root prim path is specified explicitly, so we can just use this. + root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path + else: + # No articulation root prim path was specified, so we need to search + # for it. We search for this in the first environment and then + # create a regex that matches all environments. + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + # Find all articulation root prims in the first environment. + first_env_root_prims = get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + # Now we convert the found articulation root from the first + # environment back into a regex that matches all environments. + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path + + # -- articulation + self._root_view = ArticulationView( + SimulationManager.get_model(), + root_prim_path_expr.replace(".*", "*"), + verbose=False, + exclude_joint_types=[JointType.FREE, JointType.FIXED], + ) + # Register view with Newton manager so sensors (e.g. FrameTransformer) can find it. + SimulationManager.get_physics_sim_view().append(self._root_view) + + # container for data access + self._data = ArticulationData(self.root_view, self.device) + + # Register callback to rebind simulation data after a full reset (model/state recreation). + self._physics_ready_handle = SimulationManager.register_callback( + lambda _: self._data._create_simulation_bindings(), + PhysicsEvent.PHYSICS_READY, + name=f"articulation_rebind_{self.cfg.prim_path}", + ) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + self._process_actuators_cfg() + self._process_tendons() + # validate configuration + self._validate_cfg() + # update the robot data + self.update(0.0) + # log joint information + self._log_articulation_info() + # Let the articulation data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _clear_callbacks(self) -> None: + """Clears all registered callbacks, including the physics-ready rebind handle.""" + super()._clear_callbacks() + if hasattr(self, "_physics_ready_handle") and self._physics_ready_handle is not None: + self._physics_ready_handle.deregister() + self._physics_ready_handle = None + + def _create_buffers(self): + self._ALL_INDICES = wp.array(np.arange(self.num_instances, dtype=np.int32), device=self.device) + self._ALL_ENV_MASK = wp.ones((self.num_instances,), dtype=wp.bool, device=self.device) + self._ALL_JOINT_INDICES = wp.array(np.arange(self.num_joints, dtype=np.int32), device=self.device) + self._ALL_JOINT_MASK = wp.ones((self.num_joints,), dtype=wp.bool, device=self.device) + self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) + self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + self._ALL_FIXED_TENDON_INDICES = wp.array(np.arange(self.num_fixed_tendons, dtype=np.int32), device=self.device) + self._ALL_FIXED_TENDON_MASK = wp.ones((self.num_fixed_tendons,), dtype=wp.bool, device=self.device) + self._ALL_SPATIAL_TENDON_INDICES = wp.array( + np.arange(self.num_spatial_tendons, dtype=np.int32), device=self.device + ) + self._ALL_SPATIAL_TENDON_MASK = wp.ones((self.num_spatial_tendons,), dtype=wp.bool, device=self.device) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # asset named data + self.data.joint_names = self.joint_names + self.data.body_names = self.body_names + # tendon names are set in _process_tendons function + + # -- joint commands (sent to the simulation after actuator processing) + self._joint_pos_target_sim = wp.zeros_like(self.data.joint_pos_target.warp, device=self.device) + self._joint_vel_target_sim = wp.zeros_like(self.data.joint_pos_target.warp, device=self.device) + self._joint_effort_target_sim = wp.zeros_like(self.data.joint_pos_target.warp, device=self.device) + + # soft joint position limits (recommended not to be too close to limits). + wp.launch( + articulation_kernels.update_soft_joint_pos_limits, + dim=(self.num_instances, self.num_joints), + inputs=[ + self.data.joint_pos_limits, + self.cfg.soft_joint_pos_limit_factor, + ], + outputs=[ + self.data.soft_joint_pos_limits, + ], + device=self.device, + ) + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- root state + # Note we cast to tuple to avoid torch/numpy type mismatch. + default_root_pose = tuple(self.cfg.init_state.pos) + tuple(self.cfg.init_state.rot) + default_root_vel = tuple(self.cfg.init_state.lin_vel) + tuple(self.cfg.init_state.ang_vel) + default_root_pose = np.tile(np.array(default_root_pose, dtype=np.float32), (self.num_instances, 1)) + default_root_vel = np.tile(np.array(default_root_vel, dtype=np.float32), (self.num_instances, 1)) + self.data.default_root_pose = wp.array(default_root_pose, dtype=wp.transformf, device=self.device) + self.data.default_root_vel = wp.array(default_root_vel, dtype=wp.spatial_vectorf, device=self.device) + + # -- joint state + pos_idx_list, _, pos_val_list = resolve_matching_names_values(self.cfg.init_state.joint_pos, self.joint_names) + vel_idx_list, _, vel_val_list = resolve_matching_names_values(self.cfg.init_state.joint_vel, self.joint_names) + wp.launch( + articulation_kernels.update_default_joint_values, + dim=(self.num_instances, len(pos_idx_list)), + inputs=[ + wp.array(pos_val_list, dtype=wp.float32, device=self.device), + wp.array(pos_idx_list, dtype=wp.int32, device=self.device), + ], + outputs=[ + self.data.default_joint_pos, + ], + device=self.device, + ) + wp.launch( + articulation_kernels.update_default_joint_values, + dim=(self.num_instances, len(vel_idx_list)), + inputs=[ + wp.array(vel_val_list, dtype=wp.float32, device=self.device), + wp.array(vel_idx_list, dtype=wp.int32, device=self.device), + ], + outputs=[ + self.data.default_joint_vel, + ], + device=self.device, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + self._root_view = None + + """ + Internal helpers -- Actuators. + """ + + def _process_actuators_cfg(self): + """Process and apply articulation joint properties.""" + # create actuators + self.actuators = dict() + # flag for implicit actuators + # if this is false, we by-pass certain checks when doing actuator-related operations + self._has_implicit_actuators = False + + # iterate over all actuator configurations + for actuator_name, actuator_cfg in self.cfg.actuators.items(): + # type annotation for type checkers + actuator_cfg: ActuatorBaseCfg + # create actuator group + joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) + # check if any joints are found + if len(joint_names) == 0: + raise ValueError( + f"No joints found for actuator group: {actuator_name} with joint name expression:" + f" {actuator_cfg.joint_names_expr}." + ) + # resolve joint indices + # we pass a slice if all joints are selected to avoid indexing overhead + if len(joint_names) == self.num_joints: + joint_ids = slice(None) + else: + joint_ids = torch.tensor(joint_ids, device=self.device, dtype=torch.int32) + # create actuator collection + # note: for efficiency avoid indexing when over all indices + actuator: ActuatorBase = actuator_cfg.class_type( + cfg=actuator_cfg, + joint_names=joint_names, + joint_ids=joint_ids, + num_envs=self.num_instances, + device=self.device, + stiffness=self._data.joint_stiffness.torch[:, joint_ids], + damping=self._data.joint_damping.torch[:, joint_ids], + armature=self._data.joint_armature.torch[:, joint_ids], + friction=self._data.joint_friction_coeff.torch[:, joint_ids], + effort_limit=self._data.joint_effort_limits.torch[:, joint_ids].clone(), + velocity_limit=self._data.joint_vel_limits.torch[:, joint_ids], + ) + # store actuator group + self.actuators[actuator_name] = actuator + # set the passed gains and limits into the simulation + if isinstance(actuator, ImplicitActuator): + self._has_implicit_actuators = True + # the gains and limits are set into the simulation since actuator model is implicit + self.write_joint_stiffness_to_sim_index(stiffness=actuator.stiffness, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim_index(damping=actuator.damping, joint_ids=actuator.joint_indices) + else: + # the gains and limits are processed by the actuator model + # we set gains to zero, and torque limit to a high value in simulation to avoid any interference + self.write_joint_stiffness_to_sim_index(stiffness=0.0, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim_index(damping=0.0, joint_ids=actuator.joint_indices) + + # Set common properties into the simulation + self.write_joint_effort_limit_to_sim_index( + limits=actuator.effort_limit_sim, joint_ids=actuator.joint_indices + ) + self.write_joint_velocity_limit_to_sim_index( + limits=actuator.velocity_limit_sim, joint_ids=actuator.joint_indices + ) + self.write_joint_armature_to_sim_index(armature=actuator.armature, joint_ids=actuator.joint_indices) + self.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=actuator.friction, joint_ids=actuator.joint_indices + ) + + # Store the configured values from the actuator model + # note: this is the value configured in the actuator model (for implicit and explicit actuators) + joint_ids = actuator.joint_indices + if joint_ids == slice(None): + joint_ids = self._ALL_JOINT_INDICES + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.stiffness, + self._ALL_INDICES, + joint_ids, + ], + outputs=[ + self.data._sim_bind_joint_stiffness_sim, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.damping, + self._ALL_INDICES, + joint_ids, + ], + outputs=[ + self.data._sim_bind_joint_damping_sim, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.armature, + self._ALL_INDICES, + joint_ids, + ], + outputs=[ + self.data._sim_bind_joint_armature, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.friction, + self._ALL_INDICES, + joint_ids, + ], + outputs=[ + self.data._sim_bind_joint_friction_coeff, + ], + device=self.device, + ) + + # perform some sanity checks to ensure actuators are prepared correctly + total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) + if total_act_joints != (self.num_joints - self.num_fixed_tendons): + logger.warning( + "Not all actuators are configured! Total number of actuated joints not equal to number of" + f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." + ) + + if self.cfg.actuator_value_resolution_debug_print: + t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActutatorCfg Value", "Applied"]) + for actuator_group, actuator in self.actuators.items(): + group_count = 0 + for property, resolution_details in actuator.joint_property_resolution_table.items(): + for prop_idx, resolution_detail in enumerate(resolution_details): + actuator_group_str = actuator_group if group_count == 0 else "" + property_str = property if prop_idx == 0 else "" + fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail] + t.add_row([actuator_group_str, property_str, *fmt]) + group_count += 1 + logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}") + + def _process_tendons(self): + """Process fixed and spatial tendons.""" + # create a list to store the fixed tendon names + self._fixed_tendon_names = list() + self._spatial_tendon_names = list() + # parse fixed tendons properties if they exist + if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0: + raise NotImplementedError("Fixed and spatial tendons are not supported yet.") + + def _apply_actuator_model(self): + """Processes joint commands for the articulation by forwarding them to the actuators. + + The actions are first processed using actuator models. Depending on the robot configuration, + the actuator models compute the joint level simulation commands and sets them into the PhysX buffers. + """ + # process actions per group + for actuator in self.actuators.values(): + # prepare input for actuator model based on cached data + # TODO : A tensor dict would be nice to do the indexing of all tensors together + control_action = ArticulationActions( + joint_positions=self._data.joint_pos_target.torch[:, actuator.joint_indices], + joint_velocities=self._data.joint_vel_target.torch[:, actuator.joint_indices], + joint_efforts=self._data.joint_effort_target.torch[:, actuator.joint_indices], + joint_indices=actuator.joint_indices, + ) + # compute joint command from the actuator model + control_action = actuator.compute( + control_action, + joint_pos=self._data.joint_pos.torch[:, actuator.joint_indices], + joint_vel=self._data.joint_vel.torch[:, actuator.joint_indices], + ) + # update targets (these are set into the simulation) + joint_indices = actuator.joint_indices + if actuator.joint_indices == slice(None) or actuator.joint_indices is None: + joint_indices = self._ALL_JOINT_INDICES + if hasattr(actuator, "gear_ratio"): + gear_ratio = actuator.gear_ratio + else: + gear_ratio = None + wp.launch( + articulation_kernels.update_targets, + dim=(self.num_instances, joint_indices.shape[0]), + inputs=[ + control_action.joint_positions, + control_action.joint_velocities, + control_action.joint_efforts, + joint_indices, + ], + outputs=[ + self._joint_pos_target_sim, + self._joint_vel_target_sim, + self._joint_effort_target_sim, + ], + device=self.device, + ) + # update state of the actuator model + wp.launch( + articulation_kernels.update_actuator_state_model, + dim=(self.num_instances, joint_indices.shape[0]), + inputs=[ + actuator.computed_effort, + actuator.applied_effort, + gear_ratio, + actuator.velocity_limit, + joint_indices, + ], + outputs=[ + self._data.computed_torque, + self._data.applied_torque, + self._data.gear_ratio, + self._data.soft_joint_vel_limits, + ], + device=self.device, + ) + + """ + Internal helpers -- Debugging. + """ + + def _validate_cfg(self): + """Validate the configuration after processing. + + .. note:: + This function should be called only after the configuration has been processed and the buffers have been + created. Otherwise, some settings that are altered during processing may not be validated. + For instance, the actuator models may change the joint max velocity limits. + """ + # Skip validation if there are no joints (e.g., fixed-base articulation with 0 DOF) + if self.num_joints == 0: + return + + # check that the default values are within the limits + joint_pos_limits_lower = self._data.joint_pos_limits_lower.torch[0] + joint_pos_limits_upper = self._data.joint_pos_limits_upper.torch[0] + default_joint_pos = self._data.default_joint_pos.torch[0] + out_of_range = default_joint_pos < joint_pos_limits_lower + out_of_range |= default_joint_pos > joint_pos_limits_upper + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + # throw error if any of the default joint positions are out of the limits + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default positions out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = [joint_pos_limits_lower[idx], joint_pos_limits_upper[idx]] + joint_pos = default_joint_pos[idx] + # add to message + msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + # check that the default joint velocities are within the limits + joint_max_vel = self._data.joint_vel_limits.torch[0] + default_joint_vel = self._data.default_joint_vel.torch[0] + out_of_range = torch.abs(default_joint_vel) > joint_max_vel + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default velocities out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] + joint_vel = default_joint_vel[idx] + # add to message + msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + def _log_articulation_info(self): + """Log information about the articulation. + + .. note:: We purposefully read the values from the simulator to ensure that the values are configured as + expected. + """ + + # define custom formatters for large numbers and limit ranges + def format_large_number(_, v: float) -> str: + """Format large numbers using scientific notation.""" + if abs(v) >= 1e3: + return f"{v:.1e}" + else: + return f"{v:.3f}" + + def format_limits(_, v: tuple[float, float]) -> str: + """Format limit ranges using scientific notation.""" + if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: + return f"[{v[0]:.1e}, {v[1]:.1e}]" + else: + return f"[{v[0]:.3f}, {v[1]:.3f}]" + + # read out all joint parameters from simulation + # -- gains + # Use data properties which have already been cloned and stored during initialization + # This avoids issues with indexedarray or empty arrays from root_view + stiffnesses = self.data.joint_stiffness.torch[0].cpu().tolist() + dampings = self.data.joint_damping.torch[0].cpu().tolist() + # -- properties + armatures = self.data.joint_armature.torch[0].cpu().tolist() + # For friction, use the individual components from data + friction_coeff = self.data.joint_friction_coeff.torch[0].cpu() + static_frictions = friction_coeff.tolist() + # -- limits + # joint_pos_limits is vec2f array, convert to torch and extract [lower, upper] pairs + position_limits_torch = self.data.joint_pos_limits.torch[0].cpu() # shape: (num_joints, 2) + position_limits = [tuple(pos_limit.tolist()) for pos_limit in position_limits_torch] + velocity_limits = self.data.joint_vel_limits.torch[0].cpu().tolist() + effort_limits = self.data.joint_effort_limits.torch[0].cpu().tolist() + # create table for term information + joint_table = PrettyTable() + joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" + # build field names based on Isaac Sim version + field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] + field_names.extend(["Static Friction"]) + field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) + joint_table.field_names = field_names + + # apply custom formatters to numeric columns + joint_table.custom_format["Stiffness"] = format_large_number + joint_table.custom_format["Damping"] = format_large_number + joint_table.custom_format["Armature"] = format_large_number + joint_table.custom_format["Static Friction"] = format_large_number + joint_table.custom_format["Position Limits"] = format_limits + joint_table.custom_format["Velocity Limits"] = format_large_number + joint_table.custom_format["Effort Limits"] = format_large_number + + # set alignment of table columns + joint_table.align["Name"] = "l" + # add info on each term + for index, name in enumerate(self.joint_names): + # build row data based on Isaac Sim version + row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] + if has_kit() and get_isaac_sim_version().major < 5: + row_data.append(static_frictions[index]) + else: + row_data.extend([static_frictions[index]]) + row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) + # add row to table + joint_table.add_row(row_data) + # convert table to string + logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) + + # read out all fixed tendon parameters from simulation + if self.num_fixed_tendons > 0: + raise NotImplementedError("Fixed tendons are not supported yet.") + + if self.num_spatial_tendons > 0: + raise NotImplementedError("Spatial tendons are not supported yet.") + + def _resolve_env_ids(self, env_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array: + """Resolve environment indices to a warp array. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_joint_ids(self, joint_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve joint indices to a warp array or tensor. + + Args: + joint_ids: Joint indices. If None, then all indices are used. + + Returns: + A warp array of joint indices or a tensor of joint indices. + """ + if isinstance(joint_ids, list): + return wp.array(joint_ids, dtype=wp.int32, device=self.device) + if (joint_ids is None) or (joint_ids == slice(None)): + return self._ALL_JOINT_INDICES + if isinstance(joint_ids, torch.Tensor): + if joint_ids.dtype == torch.int64: + joint_ids = joint_ids.to(torch.int32) + return wp.from_torch(joint_ids, dtype=wp.int32) + return joint_ids + + def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve body indices to a warp array or tensor. + + Args: + body_ids: Body indices. If None, then all indices are used. + + Returns: + A warp array of body indices or a tensor of body indices. + """ + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if (body_ids is None) or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + def _resolve_fixed_tendon_ids( + self, tendon_ids: Sequence[int] | torch.Tensor | wp.array | None + ) -> wp.array | torch.Tensor: + """Resolve tendon indices to a warp array or tensor. + + Args: + tendon_ids: Tendon indices. If None, then all indices are used. + + Returns: + A warp array of tendon indices or a tensor of tendon indices. + """ + if isinstance(tendon_ids, list): + return wp.array(tendon_ids, dtype=wp.int32, device=self.device) + if (tendon_ids is None) or (tendon_ids == slice(None)): + return self._ALL_FIXED_TENDON_INDICES + return tendon_ids + + def _resolve_spatial_tendon_ids( + self, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None + ) -> wp.array | torch.Tensor: + """Resolve spatial tendon indices to a warp array or tensor. + + Args: + spatial_tendon_ids: Spatial tendon indices. If None, then all indices are used. + + Returns: + A warp array of spatial tendon indices or a tensor of spatial tendon indices. + """ + if isinstance(spatial_tendon_ids, list): + return wp.array(spatial_tendon_ids, dtype=wp.int32, device=self.device) + if (spatial_tendon_ids is None) or (spatial_tendon_ids == slice(None)): + return self._ALL_SPATIAL_TENDON_INDICES + return spatial_tendon_ids + + def _resolve_mask(self, mask: wp.array | torch.Tensor | None, full_mask: wp.array) -> wp.array: + """Resolve a mask to a warp array. + + Args: + mask: Mask. If None, then all indices are used. + + Returns: + A warp array of mask. + """ + if mask is None: + return full_mask + + if isinstance(mask, torch.Tensor): + return wp.from_torch(mask, dtype=wp.bool) + return mask + + """ + Deprecated methods. + """ + + def write_joint_friction_coefficient_to_sim( + self, + joint_friction_coeff: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Deprecated, same as :meth:`write_joint_friction_coefficient_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_friction_coefficient_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_friction_coefficient_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=full_data, + ) + + def write_root_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_com_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_com_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_com_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_joint_state_to_sim( + self, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Deprecated, same as :meth:`write_joint_state_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_state_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_state_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_state_to_sim_index(position=position, velocity=velocity, joint_ids=joint_ids, env_ids=env_ids) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py new file mode 100644 index 000000000000..a22ba73e1725 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py @@ -0,0 +1,2120 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray +from isaaclab.utils.warp.utils import capture_unsafe + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.assets.articulation import kernels as articulation_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +if TYPE_CHECKING: + from newton.selection import ArticulationView + +# import logger +logger = logging.getLogger(__name__) + +_LAZY_CAPTURE_REASON = ( + "This is a lazily-computed derived property guarded by a Python timestamp check " + "that is invisible during graph replay. Use Tier 1 base data (root_link_pose_w, " + "root_com_vel_w, body_link_pose_w, body_com_vel_w, joint_pos, joint_vel) and " + "inline the computation in your warp kernel. See GRAPH_CAPTURE_MIGRATION.md." +) + + +class ArticulationData(BaseArticulationData): + """Data container for an articulation. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + """ + + __backend_name__: str = "newton" + """The name of the backend for the articulation data.""" + + def __init__(self, root_view: ArticulationView, device: str): + """Initializes the articulation data. + + Args: + root_view: The root articulation view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root articulation view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: ArticulationView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + self._fk_timestamp = 0.0 + + # Convert to direction vector + gravity = wp.to_torch(SimulationManager.get_model().gravity)[0] + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = gravity_dir.repeat(self._root_view.count, 1) + forward_vec = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray(wp.from_torch(gravity_dir, dtype=wp.vec3f)) + self.FORWARD_VEC_B = ProxyArray(wp.from_torch(forward_vec, dtype=wp.vec3f)) + + self._create_simulation_bindings() + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the articulation data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the articulation data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the articulation data is already primed. + """ + if self._is_primed: + raise ValueError("The articulation data is already primed.") + self._is_primed = True + + def update(self, dt: float) -> None: + """Updates the data for the articulation. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + # FK is current after a sim step — keep fk_timestamp in sync unless it was explicitly invalidated + if self._fk_timestamp >= 0.0: + self._fk_timestamp = self._sim_timestamp + # Trigger an update of the joint and body com acceleration buffers at a higher frequency + # since we do finite differencing. + self.joint_acc + self.body_com_acc_w + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + """ + Defaults - Initial state. + """ + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose ``[pos, quat]`` in the local environment frame. + + The position and quaternion are of the articulation root's actor frame. Shape is (num_instances), + dtype = wp.transformf. In torch this resolves to (num_instances, 7). + """ + return self._default_root_pose_ta + + @default_root_pose.setter + def default_root_pose(self, value: wp.array) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_pose.assign(value) + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. + + The linear and angular velocities are of the articulation root's center of mass frame. + Shape is (num_instances), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + """ + return self._default_root_vel_ta + + @default_root_vel.setter + def default_root_vel(self, value: wp.array) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_vel.assign(value) + + @property + def default_joint_pos(self) -> ProxyArray: + """Default joint positions of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + return self._default_joint_pos_ta + + @default_joint_pos.setter + def default_joint_pos(self, value: wp.array) -> None: + """Set the default joint positions. + + Args: + value: The default joint positions. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_pos.assign(value) + + @property + def default_joint_vel(self) -> ProxyArray: + """Default joint velocities of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + return self._default_joint_vel_ta + + @default_joint_vel.setter + def default_joint_vel(self, value: wp.array) -> None: + """Set the default joint velocities. + + Args: + value: The default joint velocities. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_vel.assign(value) + + """ + Joint commands -- Set into simulation. + """ + + @property + def joint_pos_target(self) -> ProxyArray: + """Joint position targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_pos_target_ta + + @property + def joint_vel_target(self) -> ProxyArray: + """Joint velocity targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_vel_target_ta + + @property + def joint_effort_target(self) -> ProxyArray: + """Joint effort targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_effort_target_ta + + """ + Joint commands -- Explicit actuators. + """ + + @property + def computed_torque(self) -> ProxyArray: + """Joint torques computed from the actuator model (before clipping). + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is the raw torque output from the actuator mode, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + return self._computed_torque_ta + + @property + def applied_torque(self) -> ProxyArray: + """Joint torques applied from the actuator model (after clipping). + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. + """ + return self._applied_torque_ta + + """ + Joint properties + """ + + @property + def joint_stiffness(self) -> ProxyArray: + """Joint stiffness provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + return self._joint_stiffness_ta + + @property + def joint_damping(self) -> ProxyArray: + """Joint damping provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + return self._joint_damping_ta + + @property + def joint_armature(self) -> ProxyArray: + """Joint armature provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + return self._joint_armature_ta + + @property + def joint_friction_coeff(self) -> ProxyArray: + """Joint static friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + return self._joint_friction_coeff_ta + + @property + def joint_pos_limits_lower(self) -> ProxyArray: + """Joint position limits lower provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_pos_limits_lower_ta + + @property + def joint_pos_limits_upper(self) -> ProxyArray: + """Joint position limits upper provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_pos_limits_upper_ta + + @property + def joint_pos_limits(self) -> ProxyArray: + """Joint position limits provided to the simulation. + + Shape is (num_instances, num_joints, 2), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + if self._joint_pos_limits is None: + self._joint_pos_limits = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.vec2f, device=self.device + ) + self._joint_pos_limits_ta = ProxyArray(self._joint_pos_limits) + wp.launch( + articulation_kernels.concat_joint_pos_limits_lower_and_upper, + dim=(self._num_instances, self._num_joints), + inputs=[ + self._sim_bind_joint_pos_limits_lower, + self._sim_bind_joint_pos_limits_upper, + ], + outputs=[ + self._joint_pos_limits, + ], + device=self.device, + ) + return self._joint_pos_limits_ta + + @property + def joint_vel_limits(self) -> ProxyArray: + """Joint maximum velocity provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + return self._joint_vel_limits_ta + + @property + def joint_effort_limits(self) -> ProxyArray: + """Joint maximum effort provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + return self._joint_effort_limits_ta + + """ + Joint properties - Custom. + """ + + @property + def soft_joint_pos_limits(self) -> ProxyArray: + r"""Soft joint positions limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + return self._soft_joint_pos_limits_ta + + @property + def soft_joint_vel_limits(self) -> ProxyArray: + """Soft joint velocity limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. For instance, in a variable gear ratio actuator model. + """ + return self._soft_joint_vel_limits_ta + + @property + def gear_ratio(self) -> ProxyArray: + """Gear ratio for relating motor torques to applied Joint torques. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + return self._gear_ratio_ta + + """ + Fixed tendon properties. + """ + + @property + def fixed_tendon_stiffness(self) -> ProxyArray: + """Fixed tendon stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + raise NotImplementedError + + @property + def fixed_tendon_damping(self) -> ProxyArray: + """Fixed tendon damping provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + raise NotImplementedError + + @property + def fixed_tendon_limit_stiffness(self) -> ProxyArray: + """Fixed tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + raise NotImplementedError + + @property + def fixed_tendon_rest_length(self) -> ProxyArray: + """Fixed tendon rest length provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + raise NotImplementedError + + @property + def fixed_tendon_offset(self) -> ProxyArray: + """Fixed tendon offset provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + raise NotImplementedError + + @property + def fixed_tendon_pos_limits(self) -> ProxyArray: + """Fixed tendon position limits provided to the simulation. + + Shape is (num_instances, num_fixed_tendons, 2), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_fixed_tendons, 2). + """ + raise NotImplementedError + + """ + Spatial tendon properties. + """ + + @property + def spatial_tendon_stiffness(self) -> ProxyArray: + """Spatial tendon stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + raise NotImplementedError + + @property + def spatial_tendon_damping(self) -> ProxyArray: + """Spatial tendon damping provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + raise NotImplementedError + + @property + def spatial_tendon_limit_stiffness(self) -> ProxyArray: + """Spatial tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + raise NotImplementedError + + @property + def spatial_tendon_offset(self) -> ProxyArray: + """Spatial tendon offset provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + raise NotImplementedError + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + return self._root_link_pose_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_root_link_vel_from_root_com_vel, + dim=self._num_instances, + inputs=[ + self.root_com_vel_w.warp, + self.root_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._root_link_vel_w.data, + ], + device=self.device, + ) + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_com_pose_w(self) -> ProxyArray: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + wp.launch( + shared_kernels.get_root_com_pose_from_root_link_pose, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._root_com_pose_w.data, + ], + device=self.device, + ) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + return self._root_com_vel_w_ta + + """ + Body state properties. + """ + + @property + def body_mass(self) -> ProxyArray: + """Body mass ``wp.float32`` in the world frame. + + Shape is (num_instances, num_bodies), dtype = wp.float32. In torch this resolves to (num_instances, num_bodies). + """ + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Flattened body inertia in the world frame. + + Shape is (num_instances, num_bodies, 9), dtype = wp.float32. In torch this resolves to + (num_instances, num_bodies, 9). + """ + return self._body_inertia_ta + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._fk_timestamp < self._sim_timestamp: + SimulationManager.forward() + self._fk_timestamp = self._sim_timestamp + return self._body_link_pose_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. + """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_link_vel_from_body_com_vel, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_com_vel_w.warp, + self.body_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_link_vel_w.data, + ], + device=self.device, + ) + self._body_link_vel_w.timestamp = self._sim_timestamp + + return self._body_link_vel_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_com_pose_from_body_link_pose, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_com_pose_w.data, + ], + device=self.device, + ) + self._body_com_pose_w.timestamp = self._sim_timestamp + + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + """ + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.derive_body_acceleration_from_body_com_velocities, + dim=(self._num_instances, self._num_bodies), + device=self.device, + inputs=[ + self._sim_bind_body_com_vel_w, + SimulationManager.get_dt(), + self._previous_body_com_vel, + ], + outputs=[ + self._body_com_acc_w.data, + ], + ) + # set the buffer data and timestamp + self._body_com_acc_w.timestamp = self._sim_timestamp + # update the previous velocity + return self._body_com_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the center of mass location relative to its body's link frame. + """ + return self._body_com_pos_b_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + warnings.warn( + "In Newton, body com pose always has unit quaternion. Consider using body_com_pos_b instead." + "Querying this property requires appending a unit quaternion to the position which is expensive.", + category=UserWarning, + stacklevel=2, + ) + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # set the buffer data and timestamp + wp.launch( + shared_kernels.make_dummy_body_com_pose_b, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_com_pose_b.data, + ], + device=self.device, + ) + self._body_com_pose_b.timestamp = self._sim_timestamp + return self._body_com_pose_b_ta + + """ + Joint state properties. + """ + + @property + def joint_pos(self) -> ProxyArray: + """Joint positions of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + return self._joint_pos_ta + + @property + def joint_vel(self) -> ProxyArray: + """Joint velocities of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + return self._joint_vel_ta + + @property + def joint_acc(self) -> ProxyArray: + """Joint acceleration of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._joint_acc.timestamp < self._sim_timestamp: + # note: we use finite differencing to compute acceleration + time_elapsed = self._sim_timestamp - self._joint_acc.timestamp + wp.launch( + articulation_kernels.get_joint_acc_from_joint_vel, + dim=(self._num_instances, self._num_joints), + inputs=[ + self.joint_vel.warp, + self._previous_joint_vel, + time_elapsed, + ], + outputs=[ + self._joint_acc.data, + ], + device=self.device, + ) + self._joint_acc.timestamp = self._sim_timestamp + return self._joint_acc_ta + + """ + Derived Properties. + """ + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.GRAVITY_VEC_W.warp, self.root_link_quat_w.warp], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + return self._projected_gravity_b_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + + Shape is (num_instances), dtype = wp.float32. In torch this resolves to (num_instances,). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.root_heading_w, + dim=self._num_instances, + inputs=[self.FORWARD_VEC_B.warp, self.root_link_quat_w.warp], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + return self._heading_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's actor frame with respect to + its actor frame. + """ + if self._root_link_lin_vel_b is None: + self._root_link_lin_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_link_lin_vel_b_ta = ProxyArray(self._root_link_lin_vel_b.data) + if self._root_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_lin_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_link_lin_vel_b.data], + device=self.device, + ) + self._root_link_lin_vel_b.timestamp = self._sim_timestamp + return self._root_link_lin_vel_b_ta + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's actor frame with respect to + its actor frame. + """ + if self._root_link_ang_vel_b is None: + self._root_link_ang_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_link_ang_vel_b_ta = ProxyArray(self._root_link_ang_vel_b.data) + if self._root_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_ang_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_link_ang_vel_b.data], + device=self.device, + ) + self._root_link_ang_vel_b.timestamp = self._sim_timestamp + return self._root_link_ang_vel_b_ta + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's center of mass frame with respect to + its actor frame. + """ + if self._root_com_lin_vel_b is None: + self._root_com_lin_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_com_lin_vel_b_ta = ProxyArray(self._root_com_lin_vel_b.data) + if self._root_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_lin_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_com_lin_vel_b.data], + device=self.device, + ) + self._root_com_lin_vel_b.timestamp = self._sim_timestamp + return self._root_com_lin_vel_b_ta + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's center of mass frame with respect to + its actor frame. + """ + if self._root_com_ang_vel_b is None: + self._root_com_ang_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_com_ang_vel_b_ta = ProxyArray(self._root_com_ang_vel_b.data) + if self._root_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_ang_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_com_ang_vel_b.data], + device=self.device, + ) + self._root_com_ang_vel_b.timestamp = self._sim_timestamp + return self._root_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + self._root_link_pos_w = self._get_pos_from_transform(self._root_link_pos_w, self.root_link_pose_w.warp) + if self._root_link_pos_w_ta is None: + self._root_link_pos_w_ta = ProxyArray(self._root_link_pos_w) + return self._root_link_pos_w_ta + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + self._root_link_quat_w = self._get_quat_from_transform(self._root_link_quat_w, self.root_link_pose_w.warp) + if self._root_link_quat_w_ta is None: + self._root_link_quat_w_ta = ProxyArray(self._root_link_quat_w) + return self._root_link_quat_w_ta + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root linear velocity in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + self._root_link_lin_vel_w = self._get_top_from_spatial_vector( + self._root_link_lin_vel_w, self.root_link_vel_w.warp + ) + if self._root_link_lin_vel_w_ta is None: + self._root_link_lin_vel_w_ta = ProxyArray(self._root_link_lin_vel_w) + return self._root_link_lin_vel_w_ta + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + self._root_link_ang_vel_w = self._get_bottom_from_spatial_vector( + self._root_link_ang_vel_w, self.root_link_vel_w.warp + ) + if self._root_link_ang_vel_w_ta is None: + self._root_link_ang_vel_w_ta = ProxyArray(self._root_link_ang_vel_w) + return self._root_link_ang_vel_w_ta + + @property + def root_com_pos_w(self) -> ProxyArray: + """Root center of mass position in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the center of mass frame of the root rigid body relative to the world. + """ + self._root_com_pos_w = self._get_pos_from_transform(self._root_com_pos_w, self.root_com_pose_w.warp) + if self._root_com_pos_w_ta is None: + self._root_com_pos_w_ta = ProxyArray(self._root_com_pos_w) + return self._root_com_pos_w_ta + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root center of mass orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the principal axes of inertia of the root rigid body relative to the world. + """ + self._root_com_quat_w = self._get_quat_from_transform(self._root_com_quat_w, self.root_com_pose_w.warp) + if self._root_com_quat_w_ta is None: + self._root_com_quat_w_ta = ProxyArray(self._root_com_quat_w) + return self._root_com_quat_w_ta + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root center of mass linear velocity in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + self._root_com_lin_vel_w = self._get_top_from_spatial_vector(self._root_com_lin_vel_w, self.root_com_vel_w.warp) + if self._root_com_lin_vel_w_ta is None: + self._root_com_lin_vel_w_ta = ProxyArray(self._root_com_lin_vel_w) + return self._root_com_lin_vel_w_ta + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root center of mass angular velocity in simulation world frame. + + Shape is (num_instances), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + self._root_com_ang_vel_w = self._get_bottom_from_spatial_vector( + self._root_com_ang_vel_w, self.root_com_vel_w.warp + ) + if self._root_com_ang_vel_w_ta is None: + self._root_com_ang_vel_w_ta = ProxyArray(self._root_com_ang_vel_w) + return self._root_com_ang_vel_w_ta + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + self._body_link_pos_w = self._get_pos_from_transform(self._body_link_pos_w, self.body_link_pose_w.warp) + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._body_link_pos_w) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. + """ + self._body_link_quat_w = self._get_quat_from_transform(self._body_link_quat_w, self.body_link_pose_w.warp) + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._body_link_quat_w) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' actor frame relative to the world. + """ + self._body_link_lin_vel_w = self._get_top_from_spatial_vector( + self._body_link_lin_vel_w, self.body_link_vel_w.warp + ) + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._body_link_lin_vel_w) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' actor frame relative to the world. + """ + self._body_link_ang_vel_w = self._get_bottom_from_spatial_vector( + self._body_link_ang_vel_w, self.body_link_vel_w.warp + ) + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._body_link_ang_vel_w) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' center of mass frame. + """ + self._body_com_pos_w = self._get_pos_from_transform(self._body_com_pos_w, self.body_com_pose_w.warp) + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._body_com_pos_w) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the principal axes of inertia of the articulation bodies. + """ + self._body_com_quat_w = self._get_quat_from_transform(self._body_com_quat_w, self.body_com_pose_w.warp) + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._body_com_quat_w) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame. + """ + self._body_com_lin_vel_w = self._get_top_from_spatial_vector(self._body_com_lin_vel_w, self.body_com_vel_w.warp) + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._body_com_lin_vel_w) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame. + """ + self._body_com_ang_vel_w = self._get_bottom_from_spatial_vector( + self._body_com_ang_vel_w, self.body_com_vel_w.warp + ) + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._body_com_ang_vel_w) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear acceleration of the articulation bodies' center of mass frame. + """ + self._body_com_lin_acc_w = self._get_top_from_spatial_vector(self._body_com_lin_acc_w, self.body_com_acc_w.warp) + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._body_com_lin_acc_w) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular acceleration of the articulation bodies' center of mass frame. + """ + self._body_com_ang_acc_w = self._get_bottom_from_spatial_vector( + self._body_com_ang_acc_w, self.body_com_acc_w.warp + ) + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._body_com_ang_acc_w) + return self._body_com_ang_acc_w_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link + frames. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + self._body_com_quat_b = self._get_quat_from_transform(self._body_com_quat_b, self.body_com_pose_b.warp) + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._body_com_quat_b) + return self._body_com_quat_b_ta + + def _create_simulation_bindings(self) -> None: + """Create simulation bindings for the root data. + + Direct simulation bindings are pointers to the simulation data, their data is not copied, and should + only be updated using warp kernels. Any modifications made to the bindings will be reflected in the simulation. + Hence we encourage users to carefully think about the data they modify and in which order it should be updated. + + .. caution:: This is possible if and only if the properties that we access are strided from newton and not + indexed. Newton willing this is the case all the time, but we should pay attention to this if things look off. + """ + # Short-hand for the number of instances, number of links, and number of joints. + self._num_instances = self._root_view.count + self._num_joints = self._root_view.joint_dof_count + self._num_bodies = self._root_view.link_count + self._num_fixed_tendons = 0 # self._root_view.max_fixed_tendons + self._num_spatial_tendons = 0 # self._root_view.max_spatial_tendons + + # -- root properties + self._sim_bind_root_link_pose_w = self._root_view.get_root_transforms(SimulationManager.get_state_0())[:, 0] + self._sim_bind_root_com_vel_w = self._root_view.get_root_velocities(SimulationManager.get_state_0()) + if self._sim_bind_root_com_vel_w is not None: + if self._root_view.is_fixed_base: + self._sim_bind_root_com_vel_w = self._sim_bind_root_com_vel_w[:, 0, 0] + else: + self._sim_bind_root_com_vel_w = self._sim_bind_root_com_vel_w[:, 0] + # -- body properties + self._sim_bind_body_com_pos_b = self._root_view.get_attribute("body_com", SimulationManager.get_model())[:, 0] + self._sim_bind_body_link_pose_w = self._root_view.get_link_transforms(SimulationManager.get_state_0())[:, 0] + self._sim_bind_body_com_vel_w = self._root_view.get_link_velocities(SimulationManager.get_state_0()) + if self._sim_bind_body_com_vel_w is not None: + self._sim_bind_body_com_vel_w = self._sim_bind_body_com_vel_w[:, 0] + self._sim_bind_body_mass = self._root_view.get_attribute("body_mass", SimulationManager.get_model())[:, 0] + # Newton stores body_inertia as (N, 1, B) mat33f — the [:, 0] removes the padding dim + # giving (N, B) mat33f. Reinterpret as (N, B, 9) float32 via pointer aliasing. + # Each mat33f element is 9 contiguous float32 values (36 bytes), so the inner stride is 4. + # The slice may be non-contiguous in the outer dims, so we preserve those strides. + _body_inertia_raw = self._root_view.get_attribute("body_inertia", SimulationManager.get_model())[:, 0] + self._sim_bind_body_inertia = wp.array( + ptr=_body_inertia_raw.ptr, + dtype=wp.float32, + shape=(self._num_instances, self._num_bodies, 9), + strides=(_body_inertia_raw.strides[0], _body_inertia_raw.strides[1], 4), + device=_body_inertia_raw.device, + copy=False, + ) + self._sim_bind_body_external_wrench = self._root_view.get_attribute("body_f", SimulationManager.get_state_0())[ + :, 0 + ] + try: + self._sim_bind_body_parent_f = self._root_view.get_attribute( + "body_parent_f", SimulationManager.get_state_0() + )[:, 0] + except Exception: + self._sim_bind_body_parent_f = None + # -- joint properties + if self._num_joints > 0: + self._sim_bind_joint_pos_limits_lower = self._root_view.get_attribute( + "joint_limit_lower", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_pos_limits_upper = self._root_view.get_attribute( + "joint_limit_upper", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_stiffness_sim = self._root_view.get_attribute( + "joint_target_ke", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_damping_sim = self._root_view.get_attribute( + "joint_target_kd", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_armature = self._root_view.get_attribute( + "joint_armature", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_friction_coeff = self._root_view.get_attribute( + "joint_friction", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_vel_limits_sim = self._root_view.get_attribute( + "joint_velocity_limit", SimulationManager.get_model() + )[:, 0] + self._sim_bind_joint_effort_limits_sim = self._root_view.get_attribute( + "joint_effort_limit", SimulationManager.get_model() + )[:, 0] + # -- joint states + self._sim_bind_joint_pos = self._root_view.get_dof_positions(SimulationManager.get_state_0())[:, 0] + self._sim_bind_joint_vel = self._root_view.get_dof_velocities(SimulationManager.get_state_0())[:, 0] + # -- joint commands (sent to the simulation) + self._sim_bind_joint_effort = self._root_view.get_attribute("joint_f", SimulationManager.get_control())[ + :, 0 + ] + self._sim_bind_joint_position_target = self._root_view.get_attribute( + "joint_target_pos", SimulationManager.get_control() + )[:, 0] + self._sim_bind_joint_velocity_target = self._root_view.get_attribute( + "joint_target_vel", SimulationManager.get_control() + )[:, 0] + else: + # No joints (e.g., free-floating rigid body) - set bindings to empty arrays + self._sim_bind_joint_pos_limits_lower = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_pos_limits_upper = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_stiffness_sim = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_damping_sim = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._sim_bind_joint_armature = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._sim_bind_joint_friction_coeff = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_vel_limits_sim = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_effort_limits_sim = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_pos = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._sim_bind_joint_vel = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._sim_bind_joint_effort = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._sim_bind_joint_position_target = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + self._sim_bind_joint_velocity_target = wp.zeros( + (self._num_instances, 0), dtype=wp.float32, device=self.device + ) + + # Re-pin ProxyArray wrappers to the newly created sim bindings. + # On first init, _create_buffers() handles this after all buffers exist. + if hasattr(self, "_root_link_pose_w_ta"): + self._pin_proxy_arrays() + + def _create_buffers(self) -> None: + """Create buffers for the root data.""" + super()._create_buffers() + + # Initialize history for finite differencing. If the articulation is fixed, the root com velocity is not + # available, so we use zeros. + if self._root_view.get_root_velocities(SimulationManager.get_state_0()) is None: + logger.warning( + "Failed to get root com velocity. If the articulation is fixed, this is expected. " + "Setting root com velocity to zeros." + ) + self._sim_bind_root_com_vel_w = wp.zeros( + (self._num_instances), dtype=wp.spatial_vectorf, device=self.device + ) + self._sim_bind_body_com_vel_w = wp.zeros( + (self._num_instances, self._num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + # -- default root pose and velocity + self._default_root_pose = wp.zeros((self._num_instances,), dtype=wp.transformf, device=self.device) + self._default_root_vel = wp.zeros((self._num_instances,), dtype=wp.spatial_vectorf, device=self.device) + # -- default joint positions and velocities + self._default_joint_pos = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._default_joint_vel = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + # -- joint commands (sent to the actuator from the user) + self._joint_pos_target = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_vel_target = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_effort_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + # -- computed joint efforts from the actuator models + self._computed_torque = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._applied_torque = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + # -- joint properties for the actuator models + if self._num_joints > 0: + self._actuator_stiffness = wp.clone(self._sim_bind_joint_stiffness_sim) + self._actuator_damping = wp.clone(self._sim_bind_joint_damping_sim) + else: + self._actuator_stiffness = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._actuator_damping = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + # -- other data that are filled based on explicit actuator models + self._joint_dynamic_friction = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_viscous_friction = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._soft_joint_vel_limits = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._gear_ratio = wp.ones((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + # -- update the soft joint position limits + self._soft_joint_pos_limits = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.vec2f, device=self.device + ) + + # Initialize history for finite differencing + if self._num_joints > 0: + self._previous_joint_vel = wp.clone( + self._root_view.get_dof_velocities(SimulationManager.get_state_0())[:, 0] + ) + else: + self._previous_joint_vel = wp.zeros((self._num_instances, 0), dtype=wp.float32, device=self.device) + self._previous_body_com_vel = wp.clone(self._sim_bind_body_com_vel_w) + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_vel_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._root_link_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._body_link_vel_w = TimestampedBuffer( + shape=(self._num_instances, self._num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer( + shape=(self._num_instances, self._num_bodies), dtype=wp.transformf, device=self.device + ) + # -- com frame w.r.t. world frame + self._root_com_pose_w = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.transformf, device=self.device) + self._root_com_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._root_com_acc_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._body_com_pose_w = TimestampedBuffer( + shape=(self._num_instances, self._num_bodies), dtype=wp.transformf, device=self.device + ) + self._body_com_acc_w = TimestampedBuffer( + shape=(self._num_instances, self._num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + # -- derived properties (these are cached to avoid repeated memory allocations) + self._projected_gravity_b = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.vec3f, device=self.device) + self._heading_w = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.float32, device=self.device) + # -- joint state + self._joint_acc = TimestampedBuffer( + shape=(self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + # Empty memory pre-allocations + self._root_link_lin_vel_b = None + self._root_link_ang_vel_b = None + self._root_com_lin_vel_b = None + self._root_com_ang_vel_b = None + self._joint_pos_limits = None + self._root_state_w = None + self._root_link_state_w = None + self._root_com_state_w = None + self._body_com_quat_b = None + self._root_link_pos_w = None + self._root_link_quat_w = None + self._root_link_lin_vel_w = None + self._root_link_ang_vel_w = None + self._root_com_pos_w = None + self._root_com_quat_w = None + self._root_com_lin_vel_w = None + self._root_com_ang_vel_w = None + self._body_state_w = None + self._body_link_state_w = None + self._body_com_state_w = None + self._body_link_pos_w = None + self._body_link_quat_w = None + self._body_link_lin_vel_w = None + self._body_link_ang_vel_w = None + self._body_com_pos_w = None + self._body_com_quat_w = None + self._body_com_lin_vel_w = None + self._body_com_ang_vel_w = None + self._body_com_lin_acc_w = None + self._body_com_ang_acc_w = None + self._default_root_state = None + + # Pin all ProxyArray wrappers to current buffers. + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create or rebind all pinned ProxyArray wrappers. + + Called from :meth:`_create_buffers` on first initialization and from + :meth:`_create_simulation_bindings` after a full simulation reset when + the solver recreates its internal arrays. + """ + is_rebind = hasattr(self, "_root_link_pose_w_ta") + + if is_rebind: + # Rebind sim-bound ProxyArrays to new solver arrays + self._root_link_pose_w_ta = ProxyArray(self._sim_bind_root_link_pose_w) + self._root_com_vel_w_ta = ProxyArray(self._sim_bind_root_com_vel_w) + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._joint_pos_ta = ProxyArray(self._sim_bind_joint_pos) + self._joint_vel_ta = ProxyArray(self._sim_bind_joint_vel) + self._joint_stiffness_ta = ProxyArray(self._sim_bind_joint_stiffness_sim) + self._joint_damping_ta = ProxyArray(self._sim_bind_joint_damping_sim) + self._joint_armature_ta = ProxyArray(self._sim_bind_joint_armature) + self._joint_friction_coeff_ta = ProxyArray(self._sim_bind_joint_friction_coeff) + self._joint_pos_limits_lower_ta = ProxyArray(self._sim_bind_joint_pos_limits_lower) + self._joint_pos_limits_upper_ta = ProxyArray(self._sim_bind_joint_pos_limits_upper) + self._joint_vel_limits_ta = ProxyArray(self._sim_bind_joint_vel_limits_sim) + self._joint_effort_limits_ta = ProxyArray(self._sim_bind_joint_effort_limits_sim) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._sim_bind_body_inertia) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + else: + # First-time creation: pin ProxyArrays to current buffers + # Category 1: sim-bound and pre-allocated buffers + # Sim-bound pointers are re-created on full reset; _create_simulation_bindings() + # calls rebind() on each ProxyArray to keep them in sync. + self._root_link_pose_w_ta = ProxyArray(self._sim_bind_root_link_pose_w) + self._root_com_vel_w_ta = ProxyArray(self._sim_bind_root_com_vel_w) + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._joint_pos_ta = ProxyArray(self._sim_bind_joint_pos) + self._joint_vel_ta = ProxyArray(self._sim_bind_joint_vel) + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + self._default_joint_pos_ta = ProxyArray(self._default_joint_pos) + self._default_joint_vel_ta = ProxyArray(self._default_joint_vel) + self._joint_pos_target_ta = ProxyArray(self._joint_pos_target) + self._joint_vel_target_ta = ProxyArray(self._joint_vel_target) + self._joint_effort_target_ta = ProxyArray(self._joint_effort_target) + self._computed_torque_ta = ProxyArray(self._computed_torque) + self._applied_torque_ta = ProxyArray(self._applied_torque) + self._joint_stiffness_ta = ProxyArray(self._sim_bind_joint_stiffness_sim) + self._joint_damping_ta = ProxyArray(self._sim_bind_joint_damping_sim) + self._joint_armature_ta = ProxyArray(self._sim_bind_joint_armature) + self._joint_friction_coeff_ta = ProxyArray(self._sim_bind_joint_friction_coeff) + self._joint_pos_limits_lower_ta = ProxyArray(self._sim_bind_joint_pos_limits_lower) + self._joint_pos_limits_upper_ta = ProxyArray(self._sim_bind_joint_pos_limits_upper) + self._joint_vel_limits_ta = ProxyArray(self._sim_bind_joint_vel_limits_sim) + self._joint_effort_limits_ta = ProxyArray(self._sim_bind_joint_effort_limits_sim) + self._soft_joint_pos_limits_ta = ProxyArray(self._soft_joint_pos_limits) + self._soft_joint_vel_limits_ta = ProxyArray(self._soft_joint_vel_limits) + self._gear_ratio_ta = ProxyArray(self._gear_ratio) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._sim_bind_body_inertia) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + + # Category 2: TimestampedBuffer properties + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w.data) + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w.data) + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w.data) + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w.data) + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + self._heading_w_ta = ProxyArray(self._heading_w.data) + self._joint_acc_ta = ProxyArray(self._joint_acc.data) + + # -- deprecated state properties (lazy); type annotations declared once here + self._root_state_w_ta: ProxyArray | None = None + self._root_link_state_w_ta: ProxyArray | None = None + self._root_com_state_w_ta: ProxyArray | None = None + self._default_root_state_ta: ProxyArray | None = None + self._body_state_w_ta: ProxyArray | None = None + self._body_link_state_w_ta: ProxyArray | None = None + self._body_com_state_w_ta: ProxyArray | None = None + + # Invalidate lazy sliced ProxyArrays AND their backing wp.arrays so they are + # re-created from fresh data on next access. On first init the backing fields + # are already None (set by _create_buffers), so the assignments below are + # harmless no-ops. On rebind they reset stale pointers into freed transform + # memory after a sim reset. + self._root_link_pos_w_ta: ProxyArray | None = None + self._root_link_pos_w = None + self._root_link_quat_w_ta: ProxyArray | None = None + self._root_link_quat_w = None + self._root_link_lin_vel_w_ta: ProxyArray | None = None + self._root_link_lin_vel_w = None + self._root_link_ang_vel_w_ta: ProxyArray | None = None + self._root_link_ang_vel_w = None + self._root_com_pos_w_ta: ProxyArray | None = None + self._root_com_pos_w = None + self._root_com_quat_w_ta: ProxyArray | None = None + self._root_com_quat_w = None + self._root_com_lin_vel_w_ta: ProxyArray | None = None + self._root_com_lin_vel_w = None + self._root_com_ang_vel_w_ta: ProxyArray | None = None + self._root_com_ang_vel_w = None + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_pos_w = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_quat_w = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w = None + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_pos_w = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_quat_w = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w = None + self._body_com_quat_b_ta: ProxyArray | None = None + self._body_com_quat_b = None + self._joint_pos_limits_ta: ProxyArray | None = None + self._joint_pos_limits = None + self._root_link_lin_vel_b_ta: ProxyArray | None = None + self._root_link_lin_vel_b = None + self._root_link_ang_vel_b_ta: ProxyArray | None = None + self._root_link_ang_vel_b = None + self._root_com_lin_vel_b_ta: ProxyArray | None = None + self._root_com_lin_vel_b = None + self._root_com_ang_vel_b_ta: ProxyArray | None = None + self._root_com_ang_vel_b = None + + """ + Internal helpers. + """ + + def _get_pos_from_transform(self, source: wp.array | None, transform: wp.array) -> wp.array: + """Generates a position array from a transform array. + + Args: + transform: The transform array. Shape is (N) dtype=wp.transformf. + + Returns: + The position array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if transform.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches transform.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(transform.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the position part of the transform. + if not transform.is_contiguous: + # Launch the right kernel based on the shape of the transform array. + if len(transform.shape) > 1: + wp.launch( + shared_kernels.split_transform_to_pos_2d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_transform_to_pos_1d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + return source + + def _get_quat_from_transform(self, source: wp.array | None, transform: wp.array) -> wp.array: + """Generates a quaternion array from a transform array. + + Args: + transform: The transform array. Shape is (N) dtype=wp.transformf. + + Returns: + The quaternion array. Shape is (N) dtype=wp.quatf. + """ + # Check if we already created the lazy buffer. + if source is None: + if transform.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches transform.shape since each element is quatf (already contains 4 floats) + source = wp.zeros(transform.shape, dtype=wp.quatf, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the quaternion part of the transform. + if not transform.is_contiguous: + # Launch the right kernel based on the shape of the transform array. + if len(transform.shape) > 1: + wp.launch( + shared_kernels.split_transform_to_quat_2d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_transform_to_quat_1d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + def _get_top_from_spatial_vector(self, source: wp.array | None, spatial_vector: wp.array) -> wp.array: + """Gets the top part of a spatial vector array. + + For instance the linear velocity is the top part of a velocity vector. + + Args: + spatial_vector: The spatial vector array. Shape is (N) dtype=wp.spatial_vectorf. + + Returns: + The top part of the spatial vector array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if spatial_vector.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches spatial_vector.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(spatial_vector.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the top part of the spatial vector. + if not spatial_vector.is_contiguous: + # Launch the right kernel based on the shape of the spatial_vector array. + if len(spatial_vector.shape) > 1: + wp.launch( + shared_kernels.split_spatial_vector_to_top_2d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_spatial_vector_to_top_1d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + def _get_bottom_from_spatial_vector(self, source: wp.array | None, spatial_vector: wp.array) -> wp.array: + """Gets the bottom part of a spatial vector array. + + For instance the angular velocity is the bottom part of a velocity vector. + + Args: + spatial_vector: The spatial vector array. Shape is (N) dtype=wp.spatial_vectorf. + + Returns: + The bottom part of the spatial vector array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if spatial_vector.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches spatial_vector.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(spatial_vector.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the bottom part of the spatial vector. + if not spatial_vector.is_contiguous: + # Launch the right kernel based on the shape of the spatial_vector array. + if len(spatial_vector.shape) > 1: + wp.launch( + shared_kernels.split_spatial_vector_to_bottom_2d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_spatial_vector_to_bottom_1d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + """ + Deprecated properties. + """ + + @property + def root_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_state_w is None: + self._root_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_state_w_ta = ProxyArray(self._root_state_w.data) + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=(self._num_instances), + inputs=[ + self.root_link_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" + warnings.warn( + "The `root_link_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_link_state_w is None: + self._root_link_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_link_state_w_ta = ProxyArray(self._root_link_state_w.data) + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.root_link_vel_w.warp, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_com_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_com_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_com_state_w is None: + self._root_com_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_com_state_w_ta = ProxyArray(self._root_com_state_w.data) + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w_ta + + @property + def default_root_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. + + + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular + velocities are of its center of mass frame. Shape is (num_instances, 13). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_root_state is None: + self._default_root_state = wp.zeros((self._num_instances), dtype=shared_kernels.vec13f, device=self.device) + self._default_root_state_ta = ProxyArray(self._default_root_state) + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self._default_root_pose, + self._default_root_vel, + ], + outputs=[ + self._default_root_state, + ], + device=self.device, + ) + return self._default_root_state_ta + + @property + def body_state_w(self) -> ProxyArray: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular + velocities are of the articulation links's center of mass frame. + """ + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_state_w is None: + self._body_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + self._body_state_w_ta = ProxyArray(self._body_state_w.data) + if self._body_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_com_vel_w.warp, + ], + outputs=[ + self._body_state_w.data, + ], + device=self.device, + ) + self._body_state_w.timestamp = self._sim_timestamp + + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + """ + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_link_state_w is None: + self._body_link_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + self._body_link_state_w_ta = ProxyArray(self._body_link_state_w.data) + if self._body_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_link_vel_w.warp, + ], + outputs=[ + self._body_link_state_w.data, + ], + device=self.device, + ) + self._body_link_state_w.timestamp = self._sim_timestamp + + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principal inertia. + """ + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_com_state_w is None: + self._body_com_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + self._body_com_state_w_ta = ProxyArray(self._body_com_state_w.data) + if self._body_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_com_pose_w.warp, + self.body_com_vel_w.warp, + ], + outputs=[ + self._body_com_state_w.data, + ], + device=self.device, + ) + self._body_com_state_w.timestamp = self._sim_timestamp + + return self._body_com_state_w_ta diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/kernels.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/kernels.py new file mode 100644 index 000000000000..5e66b867c09a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/kernels.py @@ -0,0 +1,620 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + +""" +Articulation-specific warp functions. +""" + + +@wp.func +def compute_soft_joint_pos_limits_func( + joint_pos_limits: wp.vec2f, + soft_limit_factor: wp.float32, +): + """Compute the soft joint position limits. + + Args: + joint_pos_limits: The joint position limits. + soft_limit_factor: The soft limit factor. + + Returns: + The soft joint position limits. + """ + joint_pos_mean = (joint_pos_limits[0] + joint_pos_limits[1]) / 2.0 + joint_pos_range = joint_pos_limits[1] - joint_pos_limits[0] + return wp.vec2f( + joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor, + joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor, + ) + + +""" +Articulation-specific warp kernels. +""" + + +@wp.kernel +def get_joint_acc_from_joint_vel( + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + dt: wp.float32, + joint_acc: wp.array2d(dtype=wp.float32), +): + """Compute the joint acceleration from the joint velocity using finite differencing. + + This kernel computes the joint acceleration by taking the difference between the current + and previous joint velocities, divided by the time step. It also updates the previous + joint velocity buffer with the current values. + + Args: + joint_vel: Input array of current joint velocities. Shape is (num_envs, num_joints). + prev_joint_vel: Input/output array of previous joint velocities. Shape is (num_envs, num_joints). + This buffer is updated with the current joint velocities after computing acceleration. + dt: Input time step (scalar) used for finite differencing. + joint_acc: Output array where joint accelerations are written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + joint_acc[i, j] = (joint_vel[i, j] - prev_joint_vel[i, j]) / dt + prev_joint_vel[i, j] = joint_vel[i, j] + + +@wp.kernel +def write_joint_vel_data_index( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint velocity data to the output buffers. + + This kernel writes joint velocity data from the input array to the output buffers. + It also updates the previous joint velocity buffer and resets the joint acceleration to 0.0. + + Args: + in_data: Input array containing joint velocity data. Shape is (num_selected_envs, num_selected_joints). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + joint_vel: Output array where joint velocities are written. Shape is (num_envs, num_joints). + prev_joint_vel: Output array where previous joint velocities are written. Shape is + (num_envs, num_joints). + joint_acc: Output array where joint accelerations are reset to 0.0. Shape is + (num_envs, num_joints). + """ + i, j = wp.tid() + joint_vel[env_ids[i], joint_ids[j]] = in_data[i, j] + prev_joint_vel[env_ids[i], joint_ids[j]] = in_data[i, j] + joint_acc[env_ids[i], joint_ids[j]] = 0.0 + + +@wp.kernel +def write_joint_vel_data_mask( + in_data: wp.array2d(dtype=wp.float32), + env_mask: wp.array(dtype=wp.bool), + joint_mask: wp.array(dtype=wp.bool), + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint velocity data to the output buffers. + + This kernel writes joint velocity data from the input array to the output buffers. + It also updates the previous joint velocity buffer and resets the joint acceleration to 0.0. + + Args: + in_data: Input array containing joint velocity data. Shape is (num_envs, num_joints). + env_mask: Input array of environment mask. Shape is (num_envs,). + joint_mask: Input array of joint mask. Shape is (num_joints,). + joint_vel: Output array where joint velocities are written. Shape is (num_envs, num_joints). + prev_joint_vel: Output array where previous joint velocities are written. Shape is + (num_envs, num_joints). + joint_acc: Output array where joint accelerations are reset to 0.0. Shape is + (num_envs, num_joints). + """ + i, j = wp.tid() + if env_mask[i] and joint_mask[j]: + joint_vel[i, j] = in_data[i, j] + prev_joint_vel[i, j] = in_data[i, j] + joint_acc[i, j] = 0.0 + + +@wp.kernel +def write_joint_state_data_index( + pos_data: wp.array2d(dtype=wp.float32), + vel_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint position and velocity data in a single kernel launch. + + Args: + pos_data: Input joint positions. Shape is (num_selected_envs, num_selected_joints). + vel_data: Input joint velocities. Shape is (num_selected_envs, num_selected_joints). + env_ids: Environment indices. Shape is (num_selected_envs,). + joint_ids: Joint indices. Shape is (num_selected_joints,). + joint_pos: Output joint positions. Shape is (num_envs, num_joints). + joint_vel: Output joint velocities. Shape is (num_envs, num_joints). + prev_joint_vel: Output previous joint velocities. Shape is (num_envs, num_joints). + joint_acc: Output joint accelerations (reset to 0). Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + joint_pos[env_ids[i], joint_ids[j]] = pos_data[i, j] + joint_vel[env_ids[i], joint_ids[j]] = vel_data[i, j] + prev_joint_vel[env_ids[i], joint_ids[j]] = vel_data[i, j] + joint_acc[env_ids[i], joint_ids[j]] = 0.0 + + +@wp.kernel +def write_joint_state_data_mask( + pos_data: wp.array2d(dtype=wp.float32), + vel_data: wp.array2d(dtype=wp.float32), + env_mask: wp.array(dtype=wp.bool), + joint_mask: wp.array(dtype=wp.bool), + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint position and velocity data in a single kernel launch using masks. + + Args: + pos_data: Input joint positions. Shape is (num_envs, num_joints). + vel_data: Input joint velocities. Shape is (num_envs, num_joints). + env_mask: Environment mask. Shape is (num_envs,). + joint_mask: Joint mask. Shape is (num_joints,). + joint_pos: Output joint positions. Shape is (num_envs, num_joints). + joint_vel: Output joint velocities. Shape is (num_envs, num_joints). + prev_joint_vel: Output previous joint velocities. Shape is (num_envs, num_joints). + joint_acc: Output joint accelerations (reset to 0). Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + if env_mask[i] and joint_mask[j]: + joint_pos[i, j] = pos_data[i, j] + joint_vel[i, j] = vel_data[i, j] + prev_joint_vel[i, j] = vel_data[i, j] + joint_acc[i, j] = 0.0 + + +@wp.kernel +def write_joint_limit_data_to_buffer_index( + in_data: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + joint_pos_limits_lower: wp.array2d(dtype=wp.float32), + joint_pos_limits_upper: wp.array2d(dtype=wp.float32), + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), + default_joint_pos: wp.array2d(dtype=wp.float32), + clamped_defaults: wp.array(dtype=wp.int32), +): + """Write joint limit data to the output buffers and compute soft limits. + + This kernel writes joint position limits from the input array to the output buffer, + computes soft joint position limits, and clamps default joint positions if they + fall outside the limits. + + Args: + in_data: Input array containing joint position limits as vec2f (lower, upper). + Shape is (num_selected_envs, num_selected_joints). + soft_limit_factor: Input scalar factor for computing soft limits (typically 0.0-1.0). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + joint_pos_limits_lower: Output array where joint position limits lower are written. Shape is + (num_envs, num_joints). + joint_pos_limits_upper: Output array where joint position limits upper are written. Shape is + (num_envs, num_joints). + soft_joint_pos_limits: Output array where soft joint position limits are written. + Shape is (num_envs, num_joints). + default_joint_pos: Input/output array of default joint positions. If any values fall + outside the limits, they are clamped. Shape is (num_envs, num_joints). + clamped_defaults: Output 1-element array flag indicating whether any default joint + positions were clamped. Non-zero if any clamping occurred. Shape is (1,). + """ + i, j = wp.tid() + joint_pos_limits_lower[env_ids[i], joint_ids[j]] = in_data[i, j][0] + joint_pos_limits_upper[env_ids[i], joint_ids[j]] = in_data[i, j][1] + if ( + default_joint_pos[env_ids[i], joint_ids[j]] < joint_pos_limits_lower[env_ids[i], joint_ids[j]] + ) or default_joint_pos[env_ids[i], joint_ids[j]] > joint_pos_limits_upper[env_ids[i], joint_ids[j]]: + wp.atomic_add(clamped_defaults, 0, 1) + default_joint_pos[env_ids[i], joint_ids[j]] = wp.clamp( + default_joint_pos[env_ids[i], joint_ids[j]], + joint_pos_limits_lower[env_ids[i], joint_ids[j]], + joint_pos_limits_upper[env_ids[i], joint_ids[j]], + ) + soft_joint_pos_limits[env_ids[i], joint_ids[j]] = compute_soft_joint_pos_limits_func( + wp.vec2f(joint_pos_limits_lower[env_ids[i], joint_ids[j]], joint_pos_limits_upper[env_ids[i], joint_ids[j]]), + soft_limit_factor, + ) + + +@wp.kernel +def write_joint_limit_data_to_buffer_mask( + in_data: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + env_mask: wp.array(dtype=wp.bool), + joint_mask: wp.array(dtype=wp.bool), + joint_pos_limits_lower: wp.array2d(dtype=wp.float32), + joint_pos_limits_upper: wp.array2d(dtype=wp.float32), + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), + default_joint_pos: wp.array2d(dtype=wp.float32), + clamped_defaults: wp.array(dtype=wp.int32), +): + """Write joint limit data to the output buffers and compute soft limits. + + This kernel writes joint position limits from the input array to the output buffer, + computes soft joint position limits, and clamps default joint positions if they + fall outside the limits. + + Args: + in_data: Input array containing joint position limits as vec2f (lower, upper). + Shape is (num_envs, num_joints). + soft_limit_factor: Input scalar factor for computing soft limits (typically 0.0-1.0). + env_mask: Input array of environment mask. Shape is (num_envs,). + joint_mask: Input array of joint mask. Shape is (num_joints,). + joint_pos_limits_lower: Output array where joint position limits lower are written. Shape is + (num_envs, num_joints). + joint_pos_limits_upper: Output array where joint position limits upper are written. Shape is + (num_envs, num_joints). + soft_joint_pos_limits: Output array where soft joint position limits are written. + Shape is (num_envs, num_joints). + default_joint_pos: Input/output array of default joint positions. If any values fall + outside the limits, they are clamped. Shape is (num_envs, num_joints). + clamped_defaults: Output 1-element array flag indicating whether any default joint + positions were clamped. Non-zero if any clamping occurred. Shape is (1,). + """ + i, j = wp.tid() + if env_mask[i] and joint_mask[j]: + joint_pos_limits_lower[i, j] = in_data[i, j][0] + joint_pos_limits_upper[i, j] = in_data[i, j][1] + if (default_joint_pos[i, j] < joint_pos_limits_lower[i, j]) or default_joint_pos[i, j] > joint_pos_limits_upper[ + i, j + ]: + wp.atomic_add(clamped_defaults, 0, 1) + default_joint_pos[i, j] = wp.clamp( + default_joint_pos[i, j], + joint_pos_limits_lower[i, j], + joint_pos_limits_upper[i, j], + ) + soft_joint_pos_limits[i, j] = compute_soft_joint_pos_limits_func( + wp.vec2f(joint_pos_limits_lower[i, j], joint_pos_limits_upper[i, j]), soft_limit_factor + ) + + +@wp.kernel +def write_joint_friction_data_to_buffer( + in_friction: wp.array2d(dtype=wp.float32), + in_dynamic_friction: wp.array2d(dtype=wp.float32), + in_viscous_friction: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_friction: wp.array2d(dtype=wp.float32), + out_dynamic_friction: wp.array2d(dtype=wp.float32), + out_viscous_friction: wp.array2d(dtype=wp.float32), + friction_props: wp.array3d(dtype=wp.float32), +): + """Write joint friction data to the output buffers. + + This kernel writes joint friction coefficients from input arrays to output buffers + and updates the friction properties array used by the physics simulation. + + Args: + in_friction: Input array containing joint friction coefficients. Shape is + (num_envs, num_joints) or (num_selected_envs, num_selected_joints) depending + on from_mask. Can be None if not provided. + in_dynamic_friction: Input array containing joint dynamic friction coefficients. + Shape is (num_envs, num_joints) or (num_selected_envs, num_selected_joints). + Can be None if not provided. + in_viscous_friction: Input array containing joint viscous friction coefficients. + Shape is (num_envs, num_joints) or (num_selected_envs, num_selected_joints). + Can be None if not provided. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into input arrays. If False, input arrays + are indexed directly using the thread indices. + out_friction: Output array where joint friction coefficients are written. Shape is + (num_envs, num_joints). + out_dynamic_friction: Output array where joint dynamic friction coefficients are written. + Shape is (num_envs, num_joints). + out_viscous_friction: Output array where joint viscous friction coefficients are written. + Shape is (num_envs, num_joints). + friction_props: Output array where friction properties are written for the physics + simulation. Shape is (num_envs, num_joints, 3) where the last dimension contains + [friction, dynamic_friction, viscous_friction]. + """ + i, j = wp.tid() + # First update the output buffers + if from_mask: + out_friction[env_ids[i], joint_ids[j]] = in_friction[env_ids[i], joint_ids[j]] + if in_dynamic_friction: + out_dynamic_friction[env_ids[i], joint_ids[j]] = in_dynamic_friction[env_ids[i], joint_ids[j]] + if in_viscous_friction: + out_viscous_friction[env_ids[i], joint_ids[j]] = in_viscous_friction[env_ids[i], joint_ids[j]] + else: + out_friction[env_ids[i], joint_ids[j]] = in_friction[i, j] + if in_dynamic_friction: + out_dynamic_friction[env_ids[i], joint_ids[j]] = in_dynamic_friction[i, j] + if in_viscous_friction: + out_viscous_friction[env_ids[i], joint_ids[j]] = in_viscous_friction[i, j] + # Then update the friction properties + friction_props[env_ids[i], joint_ids[j], 0] = out_friction[env_ids[i], joint_ids[j]] + if in_dynamic_friction: + friction_props[env_ids[i], joint_ids[j], 1] = out_dynamic_friction[env_ids[i], joint_ids[j]] + if in_viscous_friction: + friction_props[env_ids[i], joint_ids[j], 2] = out_viscous_friction[env_ids[i], joint_ids[j]] + + +@wp.kernel +def write_joint_friction_param_to_buffer( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + buffer_index: wp.int32, + from_mask: bool, + out_data: wp.array2d(dtype=wp.float32), + out_buffer: wp.array3d(dtype=wp.float32), +): + """Write a joint friction parameter to the output buffers. + + This kernel writes a single joint friction parameter (e.g., dynamic or viscous friction) + from the input array to both a 2D output array and a specific slice of a 3D buffer array. + + Args: + in_data: Input array containing joint friction parameter values. Shape is + (num_envs, num_joints) or (num_selected_envs, num_selected_joints) depending + on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + buffer_index: Input scalar index specifying which slice of the 3D buffer to write to. + Typically 0 for friction, 1 for dynamic friction, or 2 for viscous friction. + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into in_data. If False, in_data is indexed + directly using the thread indices. + out_data: Output array where friction parameter values are written. Shape is + (num_envs, num_joints). + out_buffer: Output 3D array where friction parameter values are written to the specified + slice. Shape is (num_envs, num_joints, num_friction_params). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + out_buffer[env_ids[i], joint_ids[j], buffer_index] = in_data[env_ids[i], joint_ids[j]] + else: + out_data[env_ids[i], joint_ids[j]] = in_data[i, j] + out_buffer[env_ids[i], joint_ids[j], buffer_index] = in_data[i, j] + + +@wp.kernel +def float_data_to_buffer_with_indices( + in_data: wp.float32, + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + out_data: wp.array2d(dtype=wp.float32), +): + """Write a scalar float value to a 2D buffer at specified indices. + + This kernel broadcasts a single scalar float value to all specified (env_id, joint_id) + locations in the output buffer. + + Args: + in_data: Input scalar float value to broadcast. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + out_data: Output array where the scalar value is written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out_data[env_ids[i], joint_ids[j]] = in_data + + +@wp.kernel +def float_data_to_buffer_with_mask( + in_data: wp.float32, + env_mask: wp.array(dtype=wp.bool), + joint_mask: wp.array(dtype=wp.bool), + out_data: wp.array2d(dtype=wp.float32), +): + """Write a scalar float value to a 2D buffer at specified mask. + + This kernel broadcasts a single scalar float value to all the positions that are marked as True in the environment + and joint masks. + + Args: + in_data: Input scalar float value to broadcast. + env_mask: Input array of environment mask. Shape is (num_envs,). + joint_mask: Input array of joint mask. Shape is (num_joints,). + out_data: Output array where the scalar value is written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + if env_mask[i] and joint_mask[j]: + out_data[i, j] = in_data + + +@wp.kernel +def update_soft_joint_pos_limits( + joint_pos_limits: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), +): + """Update soft joint position limits based on hard limits and a soft limit factor. + + This kernel computes soft joint position limits from hard joint position limits using + a soft limit factor. Soft limits are typically used to provide a safety margin before + reaching the hard limits. + + Args: + joint_pos_limits: Input array of hard joint position limits as vec2f (lower, upper). + Shape is (num_envs, num_joints). + soft_limit_factor: Input scalar factor for computing soft limits (typically 0.0-1.0). + A value of 1.0 means soft limits equal hard limits, while smaller values create + a tighter range. + soft_joint_pos_limits: Output array where soft joint position limits are written. + Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + soft_joint_pos_limits[i, j] = compute_soft_joint_pos_limits_func(joint_pos_limits[i, j], soft_limit_factor) + + +@wp.kernel +def update_default_joint_values( + source: wp.array(dtype=wp.float32), + ids: wp.array(dtype=wp.int32), + target: wp.array2d(dtype=wp.float32), +): + """Update default joint values from a source array using joint indices. + + This kernel writes values from a 1D source array to specific joint indices in a 2D + target array for all environments. + + Args: + source: Input array containing joint values to write. Shape is (num_joints,). + ids: Input array of joint indices specifying which joints to update. Shape is + (num_selected_joints,). + target: Output array where joint values are written. Shape is (num_envs, num_joints). + Values are written to target[i, ids[j]] for all environments i. + """ + i, j = wp.tid() + target[i, ids[j]] = source[j] + + +@wp.kernel +def update_targets( + source_joint_positions: wp.array2d(dtype=wp.float32), + source_joint_velocities: wp.array2d(dtype=wp.float32), + source_joint_efforts: wp.array2d(dtype=wp.float32), + joint_indices: wp.array(dtype=wp.int32), + target_joint_positions: wp.array2d(dtype=wp.float32), + target_joint_velocities: wp.array2d(dtype=wp.float32), + target_joint_efforts: wp.array2d(dtype=wp.float32), +): + """Update joint target values from source arrays using joint indices. + + This kernel copies joint positions, velocities, and efforts from source arrays to + target arrays, remapping joint indices using the provided joint_indices array. + Only non-None source arrays are processed. + + Args: + source_joint_positions: Input array of source joint positions. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_joint_velocities: Input array of source joint velocities. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_joint_efforts: Input array of source joint efforts. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + joint_indices: Input array of joint indices for remapping. Shape is + (num_selected_joints,). Specifies which joints in the target arrays to update. + target_joint_positions: Output array where joint positions are written. Shape is + (num_envs, num_joints). + target_joint_velocities: Output array where joint velocities are written. Shape is + (num_envs, num_joints). + target_joint_efforts: Output array where joint efforts are written. Shape is + (num_envs, num_joints). + """ + i, j = wp.tid() + if source_joint_positions: + target_joint_positions[i, joint_indices[j]] = source_joint_positions[i, j] + if source_joint_velocities: + target_joint_velocities[i, joint_indices[j]] = source_joint_velocities[i, j] + if source_joint_efforts: + target_joint_efforts[i, joint_indices[j]] = source_joint_efforts[i, j] + + +@wp.kernel +def update_actuator_state_model( + source_computed_effort: wp.array2d(dtype=wp.float32), + source_applied_effort: wp.array2d(dtype=wp.float32), + source_gear_ratio: wp.array2d(dtype=wp.float32), + source_vel_limits: wp.array2d(dtype=wp.float32), + joint_indices: wp.array(dtype=wp.int32), + target_computed_effort: wp.array2d(dtype=wp.float32), + target_applied_effort: wp.array2d(dtype=wp.float32), + target_gear_ratio: wp.array2d(dtype=wp.float32), + target_soft_joint_vel_limits: wp.array2d(dtype=wp.float32), +): + """Update actuator state model parameters from source arrays using joint indices. + + This kernel copies actuator state model parameters (computed effort, applied effort, + gear ratio, and velocity limits) from source arrays to target arrays, remapping + joint indices using the provided joint_indices array. + + Args: + source_computed_effort: Input array of source computed effort values. Shape is + (num_envs, num_selected_joints). + source_applied_effort: Input array of source applied effort values. Shape is + (num_envs, num_selected_joints). + source_gear_ratio: Input array of source gear ratio values. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_vel_limits: Input array of source velocity limit values. Shape is + (num_envs, num_selected_joints). + joint_indices: Input array of joint indices for remapping. Shape is + (num_selected_joints,). Specifies which joints in the target arrays to update. + target_computed_effort: Output array where computed effort values are written. + Shape is (num_envs, num_joints). + target_applied_effort: Output array where applied effort values are written. + Shape is (num_envs, num_joints). + target_gear_ratio: Output array where gear ratio values are written. Shape is + (num_envs, num_joints). + target_soft_joint_vel_limits: Output array where soft joint velocity limits are + written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + target_computed_effort[i, joint_indices[j]] = source_computed_effort[i, j] + target_applied_effort[i, joint_indices[j]] = source_applied_effort[i, j] + target_soft_joint_vel_limits[i, joint_indices[j]] = source_vel_limits[i, j] + if source_gear_ratio: + target_gear_ratio[i, joint_indices[j]] = source_gear_ratio[i, j] + + +@wp.kernel +def extract_friction_properties( + friction_props: wp.array3d(dtype=wp.float32), + out_friction: wp.array2d(dtype=wp.float32), + out_dynamic_friction: wp.array2d(dtype=wp.float32), + out_viscous_friction: wp.array2d(dtype=wp.float32), +): + """Extract friction properties from a 3D array into separate 2D arrays. + + This kernel extracts the three friction components (static friction, dynamic friction, + and viscous friction) from a 3D friction properties array into three separate 2D arrays. + + Args: + friction_props: Input 3D array containing friction properties. Shape is + (num_envs, num_joints, 3) where the last dimension contains + [friction, dynamic_friction, viscous_friction]. + out_friction: Output array where static friction coefficients are written. + Shape is (num_envs, num_joints). + out_dynamic_friction: Output array where dynamic friction coefficients are written. + Shape is (num_envs, num_joints). + out_viscous_friction: Output array where viscous friction coefficients are written. + Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out_friction[i, j] = friction_props[i, j, 0] + out_dynamic_friction[i, j] = friction_props[i, j, 1] + out_viscous_friction[i, j] = friction_props[i, j, 2] + + +@wp.kernel +def concat_joint_pos_limits_lower_and_upper( + joint_pos_limits_lower: wp.array2d(dtype=wp.float32), + joint_pos_limits_upper: wp.array2d(dtype=wp.float32), + joint_pos_limits: wp.array2d(dtype=wp.vec2f), +): + """Concatenate joint position limits lower and upper in a single array. + + Args: + joint_pos_limits_lower: Input array of joint position limits lower. Shape is (num_envs, num_joints). + joint_pos_limits_upper: Input array of joint position limits upper. Shape is (num_envs, num_joints). + joint_pos_limits: Output array where joint position limits are written. Shape is (num_envs, num_joints, 2). + """ + i, j = wp.tid() + joint_pos_limits[i, j] = wp.vec2f(joint_pos_limits_lower[i, j], joint_pos_limits_upper[i, j]) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/kernels.py b/source/isaaclab_newton/isaaclab_newton/assets/kernels.py new file mode 100644 index 000000000000..a6435af0dbb8 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/kernels.py @@ -0,0 +1,1324 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + +vec13f = wp.types.vector(length=13, dtype=wp.float32) + +""" +Shared @wp.func helpers. +""" + + +@wp.func +def update_wrench_with_force_and_torque( + force: wp.vec3f, + torque: wp.vec3f, +) -> wp.spatial_vectorf: + return wp.spatial_vector(force, torque, wp.float32) + + +@wp.func +def get_link_vel_from_root_com_vel_func( + com_vel: wp.spatial_vectorf, + link_pose: wp.transformf, + body_com_pos_b: wp.vec3f, +): + """Compute link velocity from center-of-mass velocity. + + Transforms a COM spatial velocity into a link-frame velocity by projecting + the angular velocity contribution from the COM offset relative to the link frame. + + Args: + com_vel: COM spatial velocity (angular, linear). + link_pose: Link pose in world frame. + body_com_pos_b: COM position in body (link) frame. + + Returns: + Link spatial velocity (angular, linear). + """ + projected_vel = wp.cross( + wp.spatial_bottom(com_vel), + wp.quat_rotate(wp.transform_get_rotation(link_pose), -body_com_pos_b), + ) + return wp.spatial_vector(wp.spatial_top(com_vel) + projected_vel, wp.spatial_bottom(com_vel)) + + +@wp.func +def get_com_pose_from_link_pose_func( + link_pose: wp.transformf, + body_com_pos_b: wp.vec3f, +): + """Compute COM pose in world frame from link pose and body-frame COM offset. + + Args: + link_pose: Link pose in world frame. + body_com_pos_b: COM position in body (link) frame. + + Returns: + COM pose in world frame. + """ + return link_pose * wp.transformf(body_com_pos_b, wp.quatf(0.0, 0.0, 0.0, 1.0)) + + +@wp.func +def concat_pose_and_vel_to_state_func( + pose: wp.transformf, + vel: wp.spatial_vectorf, +) -> vec13f: + """Concatenate a pose and velocity into a 13-element state vector. + + The state vector layout is [pos(3), quat(4), ang_vel(3), lin_vel(3)]. + + Args: + pose: Pose as a transform (position + quaternion). + vel: Spatial velocity (angular, linear). + + Returns: + 13-element state vector. + """ + return vec13f( + pose[0], pose[1], pose[2], pose[3], pose[4], pose[5], pose[6], vel[0], vel[1], vel[2], vel[3], vel[4], vel[5] + ) + + +@wp.func +def compute_heading_w_func( + forward_vec: wp.vec3f, + quat: wp.quatf, +): + """Compute heading angle (yaw) in world frame from a forward vector and orientation. + + Rotates the forward vector by the quaternion and computes atan2(y, x). + + Args: + forward_vec: Forward direction vector in body frame. + quat: Orientation quaternion. + + Returns: + Heading angle in radians. + """ + forward_w = wp.quat_rotate(quat, forward_vec) + return wp.atan2(forward_w[1], forward_w[0]) + + +@wp.func +def set_state_transforms_func( + state: vec13f, + transform: wp.transformf, +) -> vec13f: + """Set the pose portion (first 7 elements) of a 13-element state vector. + + Overwrites elements [0..6] (position + quaternion) with the given transform, + leaving the velocity portion [7..12] unchanged. + + Args: + state: 13-element state vector to modify. + transform: New pose (position + quaternion). + + Returns: + Updated 13-element state vector. + """ + state[0] = transform[0] + state[1] = transform[1] + state[2] = transform[2] + state[3] = transform[3] + state[4] = transform[4] + state[5] = transform[5] + state[6] = transform[6] + return state + + +@wp.func +def set_state_velocities_func( + state: vec13f, + velocity: wp.spatial_vectorf, +) -> vec13f: + """Set the velocity portion (last 6 elements) of a 13-element state vector. + + Overwrites elements [7..12] (angular + linear velocity) with the given spatial velocity, + leaving the pose portion [0..6] unchanged. + + Args: + state: 13-element state vector to modify. + velocity: New spatial velocity (angular, linear). + + Returns: + Updated 13-element state vector. + """ + state[7] = velocity[0] + state[8] = velocity[1] + state[9] = velocity[2] + state[10] = velocity[3] + state[11] = velocity[4] + state[12] = velocity[5] + return state + + +@wp.func +def get_link_velocity_in_com_frame_func( + link_velocity_w: wp.spatial_vectorf, + link_pose_w: wp.transformf, + body_com_pos_b: wp.vec3f, +): + """Compute COM velocity from link velocity by accounting for the COM offset. + + Transforms a link-frame spatial velocity into a COM-frame velocity by adding + the cross-product contribution of the COM offset rotated into the world frame. + + Args: + link_velocity_w: Link spatial velocity in world frame (angular, linear). + link_pose_w: Link pose in world frame. + body_com_pos_b: COM position in body (link) frame. + + Returns: + COM spatial velocity in world frame (angular, linear). + """ + return wp.spatial_vector( + wp.spatial_top(link_velocity_w) + + wp.cross( + wp.spatial_bottom(link_velocity_w), + wp.quat_rotate(wp.transform_get_rotation(link_pose_w), body_com_pos_b), + ), + wp.spatial_bottom(link_velocity_w), + ) + + +@wp.func +def get_com_pose_in_link_frame_func( + com_pose_w: wp.transformf, + com_pos_b: wp.vec3f, +): + """Compute link pose in world frame from COM pose by inverting the body-frame COM offset. + + This is the inverse of ``get_com_pose_from_link_pose_func``. Given the COM pose in + world frame and the COM position offset in body frame, it recovers the link pose in + world frame. Newton COM always has identity orientation, so only position is needed. + + Args: + com_pose_w: COM pose in world frame. + com_pos_b: COM position in body (link) frame. + + Returns: + Link pose in world frame. + """ + com_rot_w = wp.transform_get_rotation(com_pose_w) + link_pos_w = wp.transform_get_translation(com_pose_w) - wp.quat_rotate(com_rot_w, com_pos_b) + return wp.transform(link_pos_w, com_rot_w) + + +""" +Root-level @wp.kernel (1D — used by RigidObject + Articulation). +""" + + +@wp.kernel +def get_root_link_vel_from_root_com_vel( + com_vel: wp.array(dtype=wp.spatial_vectorf), + link_pose: wp.array(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + link_vel: wp.array(dtype=wp.spatial_vectorf), +): + """Compute root link velocity from root center-of-mass velocity. + + This kernel transforms the root COM velocity into link-frame velocity by projecting + the angular velocity contribution from the COM offset. + + Args: + com_vel: Input array of root COM spatial velocities. Shape is (num_envs,). + link_pose: Input array of root link poses in world frame. Shape is (num_envs,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is (num_envs, num_bodies). + Only the first body (index 0) is used for the root. + link_vel: Output array where root link velocities are written. Shape is (num_envs,). + """ + i = wp.tid() + link_vel[i] = get_link_vel_from_root_com_vel_func(com_vel[i], link_pose[i], body_com_pos_b[i, 0]) + + +@wp.kernel +def get_root_com_pose_from_root_link_pose( + link_pose: wp.array(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + com_pose_w: wp.array(dtype=wp.transformf), +): + """Compute root COM pose from root link pose. + + This kernel transforms the root link pose to the root COM pose using the body COM offset. + + Args: + link_pose: Input array of root link poses in world frame. Shape is (num_envs,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is (num_envs, num_bodies). + Only the first body (index 0) is used for the root. + com_pose_w: Output array where root COM poses are written. Shape is (num_envs,). + """ + i = wp.tid() + com_pose_w[i] = get_com_pose_from_link_pose_func(link_pose[i], body_com_pos_b[i, 0]) + + +@wp.kernel +def concat_root_pose_and_vel_to_state( + pose: wp.array(dtype=wp.transformf), + vel: wp.array(dtype=wp.spatial_vectorf), + state: wp.array(dtype=vec13f), +): + """Concatenate root pose and velocity into a 13-element state vector. + + This kernel combines a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) into a single 13-element state vector. + + Args: + pose: Input array of root poses in world frame. Shape is (num_envs,). + vel: Input array of root spatial velocities. Shape is (num_envs,). + state: Output array where concatenated state vectors are written. Shape is (num_envs,). + """ + i = wp.tid() + state[i] = concat_pose_and_vel_to_state_func(pose[i], vel[i]) + + +@wp.kernel +def split_state_to_root_pose_and_vel( + state: wp.array2d(dtype=wp.float32), + pose: wp.array(dtype=wp.transformf), + vel: wp.array(dtype=wp.spatial_vectorf), +): + """Split a 13-element state vector into root pose and velocity. + + This kernel extracts a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) from a 13-element state vector. + + Args: + state: Input array of root states. Shape is (num_envs, 13). + pose: Output array where root poses are written. Shape is (num_envs,). + vel: Output array where root spatial velocities are written. Shape is (num_envs,). + """ + i = wp.tid() + # Extract pose: [pos(3), quat(4)] = state[0:7] + pose[i] = wp.transform( + wp.vec3f(state[i, 0], state[i, 1], state[i, 2]), wp.quatf(state[i, 3], state[i, 4], state[i, 5], state[i, 6]) + ) + # Extract velocity: [ang_vel(3), lin_vel(3)] = state[7:13] + vel[i] = wp.spatial_vector( + wp.vec3f(state[i, 7], state[i, 8], state[i, 9]), # angular velocity + wp.vec3f(state[i, 10], state[i, 11], state[i, 12]), # linear velocity + ) + + +""" +Body-level @wp.kernel (2D — used by Articulation + RigidObjectCollection). +""" + + +@wp.kernel +def get_body_link_vel_from_body_com_vel( + body_com_vel: wp.array2d(dtype=wp.spatial_vectorf), + body_link_pose: wp.array2d(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + body_link_vel: wp.array2d(dtype=wp.spatial_vectorf), +): + """Compute body link velocities from body COM velocities for all bodies. + + This kernel transforms COM velocities into link-frame velocities by projecting + the angular velocity contribution from the COM offset, for each body in each environment. + + Args: + body_com_vel: Input array of body COM spatial velocities. Shape is (num_envs, num_bodies). + body_link_pose: Input array of body link poses in world frame. Shape is (num_envs, num_bodies). + body_com_pos_b: Input array of body COM positions in body frame. Shape is (num_envs, num_bodies). + body_link_vel: Output array where body link velocities are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + body_link_vel[i, j] = get_link_vel_from_root_com_vel_func( + body_com_vel[i, j], body_link_pose[i, j], body_com_pos_b[i, j] + ) + + +@wp.kernel +def get_body_com_pose_from_body_link_pose( + body_link_pose: wp.array2d(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + body_com_pose_w: wp.array2d(dtype=wp.transformf), +): + """Compute body COM poses from body link poses for all bodies. + + This kernel transforms link poses to COM poses using the body COM offset in the body frame. + + Args: + body_link_pose: Input array of body link poses in world frame. Shape is (num_envs, num_bodies). + body_com_pos_b: Input array of body COM positions in body frame. Shape is (num_envs, num_bodies). + body_com_pose_w: Output array where body COM poses in world frame are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + body_com_pose_w[i, j] = get_com_pose_from_link_pose_func(body_link_pose[i, j], body_com_pos_b[i, j]) + + +@wp.kernel +def concat_body_pose_and_vel_to_state( + pose: wp.array2d(dtype=wp.transformf), + vel: wp.array2d(dtype=wp.spatial_vectorf), + state: wp.array2d(dtype=vec13f), +): + """Concatenate body pose and velocity into 13-element state vectors for all bodies. + + This kernel combines a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) into a single 13-element state vector, for each body in each environment. + + Args: + pose: Input array of body poses in world frame. Shape is (num_envs, num_bodies). + vel: Input array of body spatial velocities. Shape is (num_envs, num_bodies). + state: Output array where concatenated state vectors are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + state[i, j] = concat_pose_and_vel_to_state_func(pose[i, j], vel[i, j]) + + +""" +Derived property kernels. +""" + + +@wp.kernel +def quat_apply_inverse_1D_kernel( + gravity: wp.array(dtype=wp.vec3f), + quat: wp.array(dtype=wp.quatf), + projected_gravity: wp.array(dtype=wp.vec3f), +): + """Apply inverse quaternion rotation to gravity vectors (1D). + + This kernel rotates gravity vectors into the local frame of each environment + using the inverse of the provided quaternion. + + Args: + gravity: Input array of gravity vectors in world frame. Shape is (num_envs,). + quat: Input array of quaternions representing orientations. Shape is (num_envs,). + projected_gravity: Output array where projected gravity vectors are written. + Shape is (num_envs,). + """ + i = wp.tid() + projected_gravity[i] = wp.quat_rotate_inv(quat[i], gravity[i]) + + +@wp.kernel +def root_heading_w( + forward_vec: wp.array(dtype=wp.vec3f), + quat: wp.array(dtype=wp.quatf), + heading_w: wp.array(dtype=wp.float32), +): + """Compute root heading angle in the world frame. + + This kernel computes the heading angle (yaw) by rotating the forward vector + by the root quaternion and computing atan2 of the resulting x and y components. + + Args: + forward_vec: Input array of forward direction vectors. Shape is (num_envs,). + quat: Input array of root quaternions. Shape is (num_envs,). + heading_w: Output array where heading angles (radians) are written. Shape is (num_envs,). + """ + i = wp.tid() + heading_w[i] = compute_heading_w_func(forward_vec[i], quat[i]) + + +@wp.kernel +def quat_apply_inverse_2D_kernel( + vec: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + result: wp.array2d(dtype=wp.vec3f), +): + """Apply inverse quaternion rotation to vectors (2D). + + This kernel rotates vectors into the local frame of each body in each environment + using the inverse of the provided quaternion. + + Args: + vec: Input array of vectors in world frame. Shape is (num_envs, num_bodies). + quat: Input array of quaternions representing orientations. Shape is (num_envs, num_bodies). + result: Output array where rotated vectors are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + result[i, j] = wp.quat_rotate_inv(quat[i, j], vec[i, j]) + + +@wp.kernel +def body_heading_w( + forward_vec: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + heading_w: wp.array2d(dtype=wp.float32), +): + """Compute body heading angles in the world frame for all bodies. + + This kernel computes heading angles (yaw) by rotating forward vectors + by body quaternions and computing atan2 of the resulting x and y components. + + Args: + forward_vec: Input array of forward direction vectors. Shape is (num_envs, num_bodies). + quat: Input array of body quaternions. Shape is (num_envs, num_bodies). + heading_w: Output array where heading angles (radians) are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + heading_w[i, j] = compute_heading_w_func(forward_vec[i, j], quat[i, j]) + + +""" +Root-level write kernels (1D — used by RigidObject + Articulation). +""" + + +@wp.kernel +def set_root_link_pose_to_sim_index( + data: wp.array(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root link pose data to simulation buffers. + + This kernel writes root link poses from the input array to the output buffer. + + Args: + data: Input array of root link poses. Shape is (num_selected_envs,). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + root_link_pose_w: Output array where root link poses are written. Shape is (num_envs,). + """ + i = wp.tid() + root_link_pose_w[env_ids[i]] = data[i] + + +@wp.kernel +def set_root_link_pose_to_sim_mask( + data: wp.array(dtype=wp.transformf), + env_mask: wp.array(dtype=wp.bool), + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root link pose data to simulation buffers. + + This kernel writes root link poses from the input array to the output buffer. + + Args: + data: Input array of root link poses. Shape is (num_instances,). + env_mask: Input array of environment mask. Shape is (num_instances,). + root_link_pose_w: Output array where root link poses are written. Shape is (num_envs,). + """ + i = wp.tid() + if env_mask[i]: + root_link_pose_w[i] = data[i] + + +@wp.kernel +def set_root_com_pose_to_sim_index( + data: wp.array(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + env_ids: wp.array(dtype=wp.int32), + root_com_pose_w: wp.array(dtype=wp.transformf), + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root COM pose data to simulation buffers. + + This kernel writes root COM poses from the input array to the output buffers + and computes the corresponding link pose from the COM pose. + + Args: + data: Input array of root COM poses. Shape is (num_selected_envs,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + root_com_pose_w: Output array where root COM poses are written. Shape is (num_envs,). + root_link_pose_w: Output array where root link poses (derived from COM) are written. + Shape is (num_envs,). + """ + i = wp.tid() + root_com_pose_w[env_ids[i]] = data[i] + # Get the com pose in the link frame + root_link_pose_w[env_ids[i]] = get_com_pose_in_link_frame_func( + root_com_pose_w[env_ids[i]], body_com_pos_b[env_ids[i], 0] + ) + + +@wp.kernel +def set_root_com_pose_to_sim_mask( + data: wp.array(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + env_mask: wp.array(dtype=wp.bool), + root_com_pose_w: wp.array(dtype=wp.transformf), + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root COM pose data to simulation buffers. + + This kernel writes root COM poses from the input array to the output buffers + and computes the corresponding link pose from the COM pose. + + Args: + data: Input array of root COM poses. Shape is (num_instances,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + env_mask: Input array of environment mask. Shape is (num_instances,). + root_com_pose_w: Output array where root COM poses are written. Shape is (num_envs,). + root_link_pose_w: Output array where root link poses (derived from COM) are written. + Shape is (num_envs,). + """ + i = wp.tid() + if env_mask[i]: + root_com_pose_w[i] = data[i] + # Get the com pose in the link frame + root_link_pose_w[i] = get_com_pose_in_link_frame_func(root_com_pose_w[i], body_com_pos_b[i, 0]) + + +@wp.kernel +def set_root_com_velocity_to_sim_index( + data: wp.array(dtype=wp.spatial_vectorf), + env_ids: wp.array(dtype=wp.int32), + num_bodies: wp.int32, + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root COM velocity data to simulation buffers. + + This kernel writes root COM velocities from the input array to the output buffers + and zeros out the body acceleration buffer to prevent reporting stale values. + + Args: + data: Input array of root COM spatial velocities. Shape is (num_selected_envs,). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + num_bodies: Input scalar number of bodies per environment. + root_com_velocity_w: Output array where root COM velocities are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. Shape is + (num_envs, num_bodies). + """ + i = wp.tid() + root_com_velocity_w[env_ids[i]] = data[i] + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[env_ids[i], j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_root_com_velocity_to_sim_mask( + data: wp.array(dtype=wp.spatial_vectorf), + env_mask: wp.array(dtype=wp.bool), + num_bodies: wp.int32, + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root COM velocity data to simulation buffers. + + This kernel writes root COM velocities from the input array to the output buffers + and zeros out the body acceleration buffer to prevent reporting stale values. + + Args: + data: Input array of root COM spatial velocities. Shape is (num_instances,). + env_mask: Input array of environment mask. Shape is (num_instances,). + num_bodies: Input scalar number of bodies per environment. + root_com_velocity_w: Output array where root COM velocities are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. Shape is + (num_envs, num_bodies). + """ + i = wp.tid() + if env_mask[i]: + root_com_velocity_w[i] = data[i] + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[i, j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_root_link_velocity_to_sim_index( + data: wp.array(dtype=wp.spatial_vectorf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + link_pose_w: wp.array(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + num_bodies: wp.int32, + root_link_velocity_w: wp.array(dtype=wp.spatial_vectorf), + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root link velocity data to simulation buffers. + + This kernel writes root link velocities from the input array to the output buffers, + computes the corresponding COM velocity from the link velocity, and zeros out + the body acceleration buffer. + + Args: + data: Input array of root link spatial velocities. Shape is (num_selected_envs,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + link_pose_w: Input array of root link poses in world frame. Shape is (num_envs,). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + num_bodies: Input scalar number of bodies per environment. + root_link_velocity_w: Output array where root link velocities are written. + Shape is (num_envs,). + root_com_velocity_w: Output array where root COM velocities (derived from link) + are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + """ + i = wp.tid() + root_link_velocity_w[env_ids[i]] = data[i] + # Get the link velocity in the com frame + root_com_velocity_w[env_ids[i]] = get_link_velocity_in_com_frame_func( + root_link_velocity_w[env_ids[i]], link_pose_w[env_ids[i]], body_com_pos_b[env_ids[i], 0] + ) + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[env_ids[i], j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_root_link_velocity_to_sim_mask( + data: wp.array(dtype=wp.spatial_vectorf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + link_pose_w: wp.array(dtype=wp.transformf), + env_mask: wp.array(dtype=wp.bool), + num_bodies: wp.int32, + root_link_velocity_w: wp.array(dtype=wp.spatial_vectorf), + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root link velocity data to simulation buffers. + + This kernel writes root link velocities from the input array to the output buffers, + computes the corresponding COM velocity from the link velocity, and zeros out + the body acceleration buffer. + + Args: + data: Input array of root link spatial velocities. Shape is (num_instances,). + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + link_pose_w: Input array of root link poses in world frame. Shape is (num_envs,). + env_mask: Input array of environment mask. Shape is (num_instances,). + num_bodies: Input scalar number of bodies per environment. + root_link_velocity_w: Output array where root link velocities are written. + Shape is (num_envs,). + root_com_velocity_w: Output array where root COM velocities (derived from link) + are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + """ + i = wp.tid() + if env_mask[i]: + root_link_velocity_w[i] = data[i] + # Get the link velocity in the com frame + root_com_velocity_w[i] = get_link_velocity_in_com_frame_func( + root_link_velocity_w[i], link_pose_w[i], body_com_pos_b[i, 0] + ) + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[i, j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +""" +Body-level write kernels (2D — used by RigidObjectCollection). +""" + + +@wp.kernel +def set_body_link_pose_to_sim( + data: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_link_pose_w: wp.array2d(dtype=wp.transformf), + body_link_state_w: wp.array2d(dtype=vec13f), + body_state_w: wp.array2d(dtype=vec13f), +): + """Write body link pose data to simulation buffers. + + This kernel writes body link poses from the input array to the output buffers + and optionally updates the corresponding state vectors, for each body in each environment. + + Args: + data: Input array of body link poses. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_link_pose_w: Output array where body link poses are written. + Shape is (num_envs, num_bodies). + body_link_state_w: Output array where body link states are updated (pose portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_state_w: Output array where body states are updated (pose portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + """ + i, j = wp.tid() + if from_mask: + body_link_pose_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + if body_link_state_w: + body_link_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_link_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + else: + body_link_pose_w[env_ids[i], body_ids[j]] = data[i, j] + if body_link_state_w: + body_link_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_link_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + + +@wp.kernel +def set_body_com_pose_to_sim( + data: wp.array2d(dtype=wp.transformf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_com_pose_w: wp.array2d(dtype=wp.transformf), + body_link_pose_w: wp.array2d(dtype=wp.transformf), + body_com_state_w: wp.array2d(dtype=vec13f), + body_link_state_w: wp.array2d(dtype=vec13f), + body_state_w: wp.array2d(dtype=vec13f), +): + """Write body COM pose data to simulation buffers. + + This kernel writes body COM poses from the input array to the output buffers, + computes the corresponding link poses from the COM poses, and optionally updates + the corresponding state vectors, for each body in each environment. + + Args: + data: Input array of body COM poses. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_com_pose_w: Output array where body COM poses are written. + Shape is (num_envs, num_bodies). + body_link_pose_w: Output array where body link poses (derived from COM) are written. + Shape is (num_envs, num_bodies). + body_com_state_w: Output array where body COM states are updated (pose portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_link_state_w: Output array where body link states are updated (pose portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_state_w: Output array where body states are updated (pose portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + """ + i, j = wp.tid() + if from_mask: + body_com_pose_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + if body_com_state_w: + body_com_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_com_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + else: + body_com_pose_w[env_ids[i], body_ids[j]] = data[i, j] + if body_com_state_w: + body_com_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_com_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + # Get the link pose from com pose + body_link_pose_w[env_ids[i], body_ids[j]] = get_com_pose_in_link_frame_func( + body_com_pose_w[env_ids[i], body_ids[j]], body_com_pos_b[env_ids[i], body_ids[j]] + ) + if body_link_state_w: + body_link_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_link_state_w[env_ids[i], body_ids[j]], body_link_pose_w[env_ids[i], body_ids[j]] + ) + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_transforms_func( + body_state_w[env_ids[i], body_ids[j]], body_link_pose_w[env_ids[i], body_ids[j]] + ) + + +@wp.kernel +def set_body_com_velocity_to_sim( + data: wp.array2d(dtype=wp.spatial_vectorf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_com_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), + body_state_w: wp.array2d(dtype=vec13f), + body_com_state_w: wp.array2d(dtype=vec13f), +): + """Write body COM velocity data to simulation buffers. + + This kernel writes body COM velocities from the input array to the output buffers, + optionally updates the corresponding state vectors, and zeros out the body + acceleration buffer, for each body in each environment. + + Args: + data: Input array of body COM spatial velocities. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_com_velocity_w: Output array where body COM velocities are written. + Shape is (num_envs, num_bodies). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + body_state_w: Output array where body states are updated (velocity portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_com_state_w: Output array where body COM states are updated (velocity portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + """ + i, j = wp.tid() + if from_mask: + body_com_velocity_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + if body_com_state_w: + body_com_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_com_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + else: + body_com_velocity_w[env_ids[i], body_ids[j]] = data[i, j] + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + if body_com_state_w: + body_com_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_com_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + # Make the acceleration zero to prevent reporting old values + body_acc_w[env_ids[i], body_ids[j]] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_body_link_velocity_to_sim( + data: wp.array2d(dtype=wp.spatial_vectorf), + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + body_link_pose_w: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_link_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_com_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), + body_link_state_w: wp.array2d(dtype=vec13f), + body_state_w: wp.array2d(dtype=vec13f), + body_com_state_w: wp.array2d(dtype=vec13f), +): + """Write body link velocity data to simulation buffers. + + This kernel writes body link velocities from the input array to the output buffers, + computes the corresponding COM velocities from the link velocities, optionally updates + the corresponding state vectors, and zeros out the body acceleration buffer. + + Args: + data: Input array of body link spatial velocities. Shape is (num_envs, num_bodies) + or (num_selected_envs, num_selected_bodies) depending on from_mask. + body_com_pos_b: Input array of body COM positions in body frame. Shape is + (num_envs, num_bodies). + body_link_pose_w: Input array of body link poses in world frame. Shape is + (num_envs, num_bodies). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_link_velocity_w: Output array where body link velocities are written. + Shape is (num_envs, num_bodies). + body_com_velocity_w: Output array where body COM velocities (derived from link) + are written. Shape is (num_envs, num_bodies). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + body_link_state_w: Output array where body link states are updated (velocity portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_state_w: Output array where body states are updated (velocity portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + body_com_state_w: Output array where body COM states are updated (velocity portion). + Shape is (num_envs, num_bodies). Can be None if not needed. + """ + i, j = wp.tid() + if from_mask: + body_link_velocity_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + if body_link_state_w: + body_link_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_link_state_w[env_ids[i], body_ids[j]], data[env_ids[i], body_ids[j]] + ) + else: + body_link_velocity_w[env_ids[i], body_ids[j]] = data[i, j] + if body_link_state_w: + body_link_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_link_state_w[env_ids[i], body_ids[j]], data[i, j] + ) + # Get the link velocity in the com frame + body_com_velocity_w[env_ids[i], body_ids[j]] = get_link_velocity_in_com_frame_func( + body_link_velocity_w[env_ids[i], body_ids[j]], + body_link_pose_w[env_ids[i], body_ids[j]], + body_com_pos_b[env_ids[i], body_ids[j]], + ) + if body_com_state_w: + body_com_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_com_state_w[env_ids[i], body_ids[j]], body_com_velocity_w[env_ids[i], body_ids[j]] + ) + if body_state_w: + body_state_w[env_ids[i], body_ids[j]] = set_state_velocities_func( + body_state_w[env_ids[i], body_ids[j]], body_com_velocity_w[env_ids[i], body_ids[j]] + ) + # Make the acceleration zero to prevent reporting old values + body_acc_w[env_ids[i], body_ids[j]] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +""" +Generic buffer-writing kernels (used by Articulation + RigidObject + RigidObjectCollection). +""" + + +@wp.kernel +def write_2d_data_to_buffer_with_indices( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + out_data: wp.array2d(dtype=wp.float32), +): + """Write 2D float data to a buffer at specified indices. + + This kernel copies float data from an input array to an output buffer at the specified + environment and joint/body indices. + + Args: + in_data: Input array containing float data. Shape is (num_selected_envs, num_selected_joints). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint/body indices to write to. Shape is (num_selected_joints,). + out_data: Output array where data is written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out_data[env_ids[i], joint_ids[j]] = in_data[i, j] + + +@wp.kernel +def write_2d_data_to_buffer_with_mask( + in_data: wp.array2d(dtype=wp.float32), + env_mask: wp.array(dtype=wp.bool), + joint_mask: wp.array(dtype=wp.bool), + out_data: wp.array2d(dtype=wp.float32), +): + """Write 2D float data to a buffer at specified indices. + + This kernel copies float data from an input array to an output buffer at the specified + environment and joint/body indices. + + Args: + in_data: Input array containing float data. Shape is (num_instances, num_joints). + env_mask: Input array of environment mask. Shape is (num_instances,). + joint_mask: Input array of joint/body mask. Shape is (num_instances, num_joints). + out_data: Output array where data is written. Shape is (num_instances, num_joints). + """ + i, j = wp.tid() + if env_mask[i] and joint_mask[j]: + out_data[i, j] = in_data[i, j] + + +@wp.kernel +def write_body_inertia_to_buffer_index( + in_data: wp.array3d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + out_data: wp.array3d(dtype=wp.float32), +): + """Write body inertia data to a buffer at specified indices. + + This kernel copies 3x3 inertia tensor data (stored as 9 floats) from an input array + to an output buffer at the specified environment and body indices. + + Args: + in_data: Input array containing inertia data. Shape is (num_selected_envs, num_selected_bodies, 9). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + out_data: Output array where inertia data is written. Shape is (num_envs, num_bodies, 9). + """ + i, j = wp.tid() + for k in range(9): + out_data[env_ids[i], body_ids[j], k] = in_data[i, j, k] + + +@wp.kernel +def write_body_inertia_to_buffer_mask( + in_data: wp.array3d(dtype=wp.float32), + env_mask: wp.array(dtype=wp.bool), + body_mask: wp.array(dtype=wp.bool), + out_data: wp.array3d(dtype=wp.float32), +): + """Write body inertia data to a buffer at specified indices. + + This kernel copies 3x3 inertia tensor data (stored as 9 floats) from an input array + to an output buffer at the specified environment and body indices. + + Args: + in_data: Input array containing inertia data. Shape is (num_selected_envs, num_selected_bodies, 9). + env_mask: Input array of environment mask. Shape is (num_selected_envs,). + body_mask: Input array of body mask. Shape is (num_selected_bodies,). + out_data: Output array where inertia data is written. Shape is (num_envs, num_bodies, 9). + """ + i, j = wp.tid() + if env_mask[i] and body_mask[j]: + for k in range(9): + out_data[i, j, k] = in_data[i, j, k] + + +@wp.kernel +def write_single_body_inertia_to_buffer( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.float32), +): + """Write body inertia data to a buffer at specified indices. + + This kernel copies 3x3 inertia tensor data (stored as 9 floats) from an input array + to an output buffer at the specified environment and body indices. + + Args: + in_data: Input array containing inertia data. Shape is (num_envs, 9) or + (num_selected_envs, 9) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where inertia data is written. Shape is (num_envs, 9). + """ + i = wp.tid() + if from_mask: + for k in range(9): + out_data[env_ids[i], k] = in_data[env_ids[i], k] + else: + for k in range(9): + out_data[env_ids[i], k] = in_data[i, k] + + +@wp.kernel +def write_body_com_position_to_buffer_index( + in_data: wp.array2d(dtype=wp.vec3f), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + out_data: wp.array2d(dtype=wp.vec3f), +): + """Write body COM position data to a buffer at specified indices. + + This kernel copies body COM position data from an input array to an output buffer at the + specified environment and body indices. + + Args: + in_data: Input array containing body COM positions. Shape is (num_selected_envs, num_selected_bodies). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + out_data: Output array where body COM positions are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + out_data[env_ids[i], body_ids[j]] = in_data[i, j] + + +@wp.kernel +def write_body_com_position_to_buffer_mask( + in_data: wp.array2d(dtype=wp.vec3f), + env_mask: wp.array(dtype=wp.bool), + body_mask: wp.array(dtype=wp.bool), + out_data: wp.array2d(dtype=wp.vec3f), +): + """Write body COM position data to a buffer at specified masks. + + This kernel copies body COM position data from an input array to an output buffer at the + specified environment and body masks. + + Args: + in_data: Input array containing body COM positions. Shape is (num_instances, num_bodies). + env_mask: Input array of environment mask. Shape is (num_instances,). + body_mask: Input array of body mask. Shape is (num_bodies). + out_data: Output array where body COM positions are written. Shape is (num_instances, num_bodies). + """ + i, j = wp.tid() + if env_mask[i] and body_mask[j]: + out_data[i, j] = in_data[i, j] + + +@wp.kernel +def split_transform_to_pos_1d( + transform: wp.array(dtype=wp.transformf), + pos: wp.array(dtype=wp.vec3f), +): + """Split a 1D transform array into a position array. + + This kernel splits a 1D transform array into a position array. + + Args: + transform: Input array of transforms. Shape is (num_envs, 7). + pos: Output array where positions are written. Shape is (num_envs, 3). + """ + i = wp.tid() + pos[i] = wp.transform_get_translation(transform[i]) + + +@wp.kernel +def split_transform_to_quat_1d( + transform: wp.array(dtype=wp.transformf), + quat: wp.array(dtype=wp.quatf), +): + """Split a 1D transform array into a quaternion array. + + This kernel splits a 1D transform array into a quaternion array. + + Args: + transform: Input array of transforms. Shape is (num_envs, 7). + quat: Output array where quaternions are written. Shape is (num_envs, 4). + """ + i = wp.tid() + quat[i] = wp.transform_get_rotation(transform[i]) + + +@wp.kernel +def split_transform_to_pos_2d( + transform: wp.array2d(dtype=wp.transformf), + pos: wp.array2d(dtype=wp.vec3f), +): + """Split a 2D transform array into a position array. + + This kernel splits a 2D transform array into a position array. + + Args: + transform: Input array of transforms. Shape is (num_envs, num_bodies, 7). + pos: Output array where positions are written. Shape is (num_envs, num_bodies, 3). + """ + i, j = wp.tid() + pos[i, j] = wp.transform_get_translation(transform[i, j]) + + +@wp.kernel +def split_transform_to_quat_2d( + transform: wp.array2d(dtype=wp.transformf), + quat: wp.array2d(dtype=wp.quatf), +): + """Split a 2D transform array into a quaternion array. + + This kernel splits a 2D transform array into a quaternion array. + + Args: + transform: Input array of transforms. Shape is (num_envs, num_bodies, 7). + quat: Output array where quaternions are written. Shape is (num_envs, num_bodies, 4). + """ + i, j = wp.tid() + quat[i, j] = wp.transform_get_rotation(transform[i, j]) + + +@wp.kernel +def split_spatial_vector_to_top_1d( + spatial_vector: wp.array(dtype=wp.spatial_vectorf), + top_part: wp.array(dtype=wp.vec3f), +): + """Split a 1D spatial vector array into a top part array. + + This kernel splits a 1D spatial vector array into a top part array. + + Args: + spatial_vector: Input array of spatial vectors. Shape is (num_envs, 6). + top_part: Output array where top parts are written. Shape is (num_envs, 3). + """ + i = wp.tid() + top_part[i] = wp.spatial_top(spatial_vector[i]) + + +@wp.kernel +def split_spatial_vector_to_bottom_1d( + spatial_vector: wp.array(dtype=wp.spatial_vectorf), + bottom_part: wp.array(dtype=wp.vec3f), +): + """Split a 1D spatial vector array into a bottom part array. + + This kernel splits a 1D spatial vector array into a bottom part array. + + Args: + spatial_vector: Input array of spatial vectors. Shape is (num_envs, 6). + bottom_part: Output array where bottom parts are written. Shape is (num_envs, 3). + """ + i = wp.tid() + bottom_part[i] = wp.spatial_bottom(spatial_vector[i]) + + +@wp.kernel +def split_spatial_vector_to_top_2d( + spatial_vector: wp.array2d(dtype=wp.spatial_vectorf), + top_part: wp.array2d(dtype=wp.vec3f), +): + """Split a 2D spatial vector array into a top part array. + + This kernel splits a 2D spatial vector array into a top part array. + + Args: + spatial_vector: Input array of spatial vectors. Shape is (num_envs, num_bodies, 6). + top_part: Output array where top parts are written. Shape is (num_envs, num_bodies, 3). + """ + i, j = wp.tid() + top_part[i, j] = wp.spatial_top(spatial_vector[i, j]) + + +@wp.kernel +def split_spatial_vector_to_bottom_2d( + spatial_vector: wp.array2d(dtype=wp.spatial_vectorf), + bottom_part: wp.array2d(dtype=wp.vec3f), +): + """Split a 2D spatial vector array into a bottom part array. + + This kernel splits a 2D spatial vector array into a bottom part array. + + Args: + spatial_vector: Input array of spatial vectors. Shape is (num_envs, num_bodies, 6). + bottom_part: Output array where bottom parts are written. Shape is (num_envs, num_bodies, 3). + """ + i, j = wp.tid() + bottom_part[i, j] = wp.spatial_bottom(spatial_vector[i, j]) + + +@wp.kernel +def make_dummy_body_com_pose_b( + body_com_pos_b: wp.array2d(dtype=wp.vec3f), + body_com_pose_b: wp.array2d(dtype=wp.transformf), +): + """Make a body COM pose from position by appending an identity quaternion. + + Needed by the ``body_com_pose_b`` property to match the base API that returns + ``wp.transformf`` (pos + quat). + + Args: + body_com_pos_b: Input array of body COM positions in body frame. Shape is (num_envs, num_bodies). + body_com_pose_b: Output array where body COM poses are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + body_com_pose_b[i, j] = wp.transformf(body_com_pos_b[i, j], wp.quatf(0.0, 0.0, 0.0, 1.0)) + + +@wp.kernel +def derive_body_acceleration_from_body_com_velocities( + body_com_vel: wp.array2d(dtype=wp.spatial_vectorf), + dt: wp.float32, + prev_body_com_vel: wp.array2d(dtype=wp.spatial_vectorf), + body_acc: wp.array2d(dtype=wp.spatial_vectorf), +): + """Derive body acceleration from body COM velocities. + + This kernel derives body acceleration from body COM velocities using finite differencing. + + Args: + body_com_vel: Input array of body COM velocities. Shape is (num_envs, num_bodies). + dt: Input time step (scalar) used for finite differencing. + prev_body_com_vel: Input/output array of previous body COM velocities. Shape is (num_envs, num_bodies). + body_acc: Output array where body accelerations are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + # Compute the acceleration + body_acc[i, j] = (body_com_vel[i, j] - prev_body_com_vel[i, j]) / dt + # Update the previous body COM velocity + prev_body_com_vel[i, j] = body_com_vel[i, j] + + +@wp.kernel +def update_wrench_array_with_force_and_torque( + forces: wp.array2d(dtype=wp.vec3f), + torques: wp.array2d(dtype=wp.vec3f), + wrench: wp.array2d(dtype=wp.spatial_vectorf), + env_ids: wp.array(dtype=wp.bool), + body_ids: wp.array(dtype=wp.bool), +): + env_index, body_index = wp.tid() + if env_ids[env_index] and body_ids[body_index]: + wrench[env_index, body_index] = update_wrench_with_force_and_torque( + forces[env_index, body_index], + torques[env_index, body_index], + ) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.pyi new file mode 100644 index 000000000000..1c96a5aa4550 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RigidObject", + "RigidObjectData", +] + +from .rigid_object import RigidObject +from .rigid_object_data import RigidObjectData diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py new file mode 100644 index 000000000000..7e31fa22b60f --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -0,0 +1,1181 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp +from newton.selection import ArticulationView +from newton.solvers import SolverNotifyFlags + +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.assets.rigid_object.base_rigid_object import BaseRigidObject +from isaaclab.physics import PhysicsEvent +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +from .rigid_object_data import RigidObjectData + +if TYPE_CHECKING: + from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg + + +class RigidObject(BaseRigidObject): + """A rigid object asset class. + + Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects + such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. + + For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid body. On playing the + simulation, the physics engine will automatically register the rigid body and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "newton" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> RigidObjectData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single rigid body. + """ + return 1 + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object.""" + return self.root_view.link_names + + @property + def root_view(self) -> ArticulationView: + """Root view for the asset. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the rigid object. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve all indices + if (env_ids is None) or (env_ids == slice(None)): + env_ids = slice(None) + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + else: + composer = self._permanent_wrench_composer + composer.compose_to_body_frame() + wp.launch( + shared_kernels.update_wrench_array_with_force_and_torque, + dim=(self.num_instances, self.num_bodies), + device=self.device, + inputs=[ + composer.out_force_b, + composer.out_torque_b, + self._data._sim_bind_body_external_wrench, + self._ALL_ENV_MASK, + self._ALL_BODY_MASK, + ], + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the rigid body based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + """ + Operations - Write to simulation. + """ + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root pose over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_link_pose_to_sim_mask(root_pose=root_pose, env_mask=env_mask) + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) + or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_com_velocity_to_sim_mask(root_velocity=root_velocity, env_mask=env_mask) + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_link_pose_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_pose, + env_ids, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is None: + env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") + + wp.launch( + shared_kernels.set_root_link_pose_to_sim_mask, + dim=root_pose.shape[0], + inputs=[ + root_pose, + env_mask, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would call + # write_root_link_pose_to_sim after this. + wp.launch( + shared_kernels.set_root_com_pose_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_pose, + self.data.body_com_pos_b, + env_ids, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is None: + env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") + wp.launch( + shared_kernels.set_root_com_pose_to_sim_mask, + dim=root_pose.shape[0], + inputs=[ + root_pose, + self.data.body_com_pos_b, + env_mask, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new state. + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids), 6) or (num_instances, 6), + or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_com_velocity_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + env_ids, + 1, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is None: + env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") + wp.launch( + shared_kernels.set_root_com_velocity_to_sim_mask, + dim=root_velocity.shape[0], + inputs=[ + root_velocity, + env_mask, + 1, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects partial data or full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (len(env_ids), 6) or (num_instances, 6), + or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. + wp.launch( + shared_kernels.set_root_link_velocity_to_sim_index, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pos_b, + self.data.root_link_pose_w, + env_ids, + 1, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is None: + env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") + wp.launch( + shared_kernels.set_root_link_velocity_to_sim_mask, + dim=root_velocity.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pos_b, + self.data.root_link_pose_w, + env_mask, + 1, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + if self.data._root_link_state_w is not None: + self.data._root_link_state_w.timestamp = -1.0 + if self.data._root_state_w is not None: + self.data._root_state_w.timestamp = -1.0 + if self.data._root_com_state_w is not None: + self.data._root_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_mask=env_mask, articulation_ids=self._root_view.articulation_ids) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_mass, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32, "masses") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + masses, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_mass, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (len(env_ids), len(body_ids), 3). + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f, "coms") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_com_position_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_com_pos_b, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (num_instances, num_bodies, 3). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f, "coms") + wp.launch( + shared_kernels.write_body_com_position_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + coms, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_com_pos_b, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_inertia_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + body_ids, + ], + outputs=[ + self.data.body_inertia, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,)) + wp.launch( + shared_kernels.write_body_inertia_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + inertias, + env_mask, + body_mask, + ], + outputs=[ + self.data.body_inertia, + ], + device=self.device, + ) + # tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" + f" located at: '{articulation_prims}' under '{template_prim_path}'. Please disable the articulation" + " root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] + # -- object view + self._root_view = ArticulationView( + SimulationManager.get_model(), + root_prim_path_expr.replace(".*", "*"), + verbose=False, + ) + + # container for data access + self._data = RigidObjectData(self.root_view, self.device) + + # Register callback to rebind simulation data after a full reset (model/state recreation). + self._physics_ready_handle = SimulationManager.register_callback( + lambda _: self._data._create_simulation_bindings(), + PhysicsEvent.PHYSICS_READY, + name=f"rigid_object_rebind_{self.cfg.prim_path}", + ) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + # Let the rigid object data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _clear_callbacks(self) -> None: + """Clears all registered callbacks, including the physics-ready rebind handle.""" + super()._clear_callbacks() + if hasattr(self, "_physics_ready_handle") and self._physics_ready_handle is not None: + self._physics_ready_handle.deregister() + self._physics_ready_handle = None + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_INDICES = wp.array(np.arange(self.num_instances, dtype=np.int32), device=self.device) + self._ALL_ENV_MASK = wp.ones((self.num_instances,), dtype=wp.bool, device=self.device) + self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) + self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.body_names = self.body_names + + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + # default state + # -- root state + # note: we cast to tuple to avoid torch/numpy type mismatch. + default_root_pose = tuple(self.cfg.init_state.pos) + tuple(self.cfg.init_state.rot) + default_root_vel = tuple(self.cfg.init_state.lin_vel) + tuple(self.cfg.init_state.ang_vel) + default_root_pose = np.tile(np.array(default_root_pose, dtype=np.float32), (self.num_instances, 1)) + default_root_vel = np.tile(np.array(default_root_vel, dtype=np.float32), (self.num_instances, 1)) + self._data.default_root_pose = wp.array(default_root_pose, dtype=wp.transformf, device=self.device) + self._data.default_root_vel = wp.array(default_root_vel, dtype=wp.spatial_vectorf, device=self.device) + + def _resolve_env_ids(self, env_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve environment indices to a warp array or tensor. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices or a tensor of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve body indices to a warp array or tensor. + + Args: + body_ids: Body indices. If None, then all indices are used. + + Returns: + A warp array of body indices or a tensor of body indices. + """ + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if (body_ids is None) or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_view = None + + def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | torch.Tensor | None = None): + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim( + self, root_state: torch.Tensor, env_ids: Sequence[int] | torch.Tensor | None = None + ): + """Deprecated, same as :meth:`write_root_com_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_com_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_com_pose_to_sim_index(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim( + self, root_state: torch.Tensor, env_ids: Sequence[int] | torch.Tensor | None = None + ): + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim_index(root_state[:, 7:], env_ids=env_ids) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py new file mode 100644 index 000000000000..0e9ecc8a41d0 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py @@ -0,0 +1,1445 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray +from isaaclab.utils.warp.utils import capture_unsafe + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +if TYPE_CHECKING: + from newton.selection import ArticulationView + + +# import logger +logger = logging.getLogger(__name__) + +_LAZY_CAPTURE_REASON = ( + "This is a lazily-computed derived property guarded by a Python timestamp check " + "that is invisible during graph replay. Use Tier 1 base data (root_link_pose_w, " + "root_com_vel_w, body_link_pose_w, body_com_vel_w) and inline the computation " + "in your warp kernel. See GRAPH_CAPTURE_MIGRATION.md." +) + + +class RigidObjectData(BaseRigidObjectData): + """Data container for a rigid object. + + This class contains the data for a rigid object in the simulation. The data includes the state of + the root rigid body and the state of all the bodies in the object. The data is stored in the simulation + world frame unless otherwise specified. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + __backend_name__: str = "newton" + """The name of the backend for the rigid object data.""" + + def __init__(self, root_view: ArticulationView, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: ArticulationView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # Convert to direction vector + gravity = wp.to_torch(SimulationManager.get_model().gravity)[0] + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = gravity_dir.repeat(self._root_view.count, 1) + forward_vec = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray(wp.from_torch(gravity_dir, dtype=wp.vec3f)) + self.FORWARD_VEC_B = ProxyArray(wp.from_torch(forward_vec, dtype=wp.vec3f)) + + self._create_simulation_bindings() + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the rigid object data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the rigid object data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._is_primed = value + + def update(self, dt: float) -> None: + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + # Trigger an update of the body com acceleration buffer at a higher frequency + # since we do finite differencing. + self.body_com_acc_w + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + """ + Defaults. + """ + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose ``[pos, quat]`` in local environment frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The position and quaternion are of the rigid body's actor frame. + """ + return self._default_root_pose_ta + + @default_root_pose.setter + def default_root_pose(self, value: wp.array) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_pose.assign(value) + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + The linear and angular velocities are of the rigid body's center of mass frame. + """ + return self._default_root_vel_ta + + @default_root_vel.setter + def default_root_vel(self, value: wp.array) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_vel.assign(value) + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + This quantity is the pose of the actor frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + return self._root_link_pose_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity and compute link velocity + wp.launch( + shared_kernels.get_root_link_vel_from_root_com_vel, + dim=self._num_instances, + inputs=[ + self.root_com_vel_w.warp, + self.root_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._root_link_vel_w.data, + ], + device=self.device, + ) + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_com_pose_w(self) -> ProxyArray: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + This quantity is the pose of the center of mass frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + wp.launch( + shared_kernels.get_root_com_pose_from_root_link_pose, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._root_com_pose_w.data, + ], + device=self.device, + ) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + return self._root_com_vel_w_ta + + """ + Body state properties. + """ + + @property + def body_mass(self) -> ProxyArray: + """Mass of all bodies in the simulation world frame. + + Shape is (num_instances, 1, 1), dtype = wp.float32. + In torch this resolves to (num_instances, 1, 1). + """ + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Inertia of all bodies in the simulation world frame. + + Shape is (num_instances, 1, 9), dtype = wp.float32. + In torch this resolves to (num_instances, 1, 9). + """ + return self._body_inertia_ta + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + return self._body_link_pose_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self.root_link_vel_w.warp.reshape((self._num_instances, 1))) + return self._body_link_vel_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self.root_com_pose_w.warp.reshape((self._num_instances, 1))) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.derive_body_acceleration_from_body_com_velocities, + dim=(self._num_instances, 1), + device=self.device, + inputs=[ + self._sim_bind_body_com_vel_w, + SimulationManager.get_dt(), + self._previous_body_com_vel, + ], + outputs=[ + self._body_com_acc_w.data, + ], + ) + # set the buffer data and timestamp + self._body_com_acc_w.timestamp = self._sim_timestamp + # update the previous velocity + return self._body_com_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the center of mass location relative to its body's link frame. + """ + return self._body_com_pos_b_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + warnings.warn( + "In Newton, body com pose always has unit quaternion. Consider using body_com_pos_b instead." + "Querying this property requires appending a unit quaternion to the position which is expensive.", + category=UserWarning, + stacklevel=2, + ) + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # set the buffer data and timestamp + wp.launch( + shared_kernels.make_dummy_body_com_pose_b, + dim=(self._num_instances, 1), + inputs=[ + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_com_pose_b.data, + ], + device=self.device, + ) + self._body_com_pose_b.timestamp = self._sim_timestamp + return self._body_com_pose_b_ta + + """ + Derived Properties. + """ + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.GRAVITY_VEC_W.warp, self.root_link_quat_w.warp], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + return self._projected_gravity_b_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + + Shape is (num_instances,), dtype = wp.float32. In torch this resolves to (num_instances,). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.root_heading_w, + dim=self._num_instances, + inputs=[self.FORWARD_VEC_B.warp, self.root_link_quat_w.warp], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + return self._heading_w_ta + + @property + @capture_unsafe(_LAZY_CAPTURE_REASON) + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._root_link_lin_vel_b is None: + self._root_link_lin_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_link_lin_vel_b_ta = ProxyArray(self._root_link_lin_vel_b.data) + if self._root_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_lin_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_link_lin_vel_b.data], + device=self.device, + ) + self._root_link_lin_vel_b.timestamp = self._sim_timestamp + return self._root_link_lin_vel_b_ta + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._root_link_ang_vel_b is None: + self._root_link_ang_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_link_ang_vel_b_ta = ProxyArray(self._root_link_ang_vel_b.data) + if self._root_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_ang_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_link_ang_vel_b.data], + device=self.device, + ) + self._root_link_ang_vel_b.timestamp = self._sim_timestamp + return self._root_link_ang_vel_b_ta + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._root_com_lin_vel_b is None: + self._root_com_lin_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_com_lin_vel_b_ta = ProxyArray(self._root_com_lin_vel_b.data) + if self._root_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_lin_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_com_lin_vel_b.data], + device=self.device, + ) + self._root_com_lin_vel_b.timestamp = self._sim_timestamp + return self._root_com_lin_vel_b_ta + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._root_com_ang_vel_b is None: + self._root_com_ang_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.vec3f, device=self.device + ) + self._root_com_ang_vel_b_ta = ProxyArray(self._root_com_ang_vel_b.data) + if self._root_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_ang_vel_w.warp, self.root_link_quat_w.warp], + outputs=[self._root_com_ang_vel_b.data], + device=self.device, + ) + self._root_com_ang_vel_b.timestamp = self._sim_timestamp + return self._root_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + self._root_link_pos_w = self._get_pos_from_transform(self._root_link_pos_w, self.root_link_pose_w.warp) + if self._root_link_pos_w_ta is None: + self._root_link_pos_w_ta = ProxyArray(self._root_link_pos_w) + return self._root_link_pos_w_ta + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + This quantity is the orientation of the actor frame of the root rigid body. + """ + self._root_link_quat_w = self._get_quat_from_transform(self._root_link_quat_w, self.root_link_pose_w.warp) + if self._root_link_quat_w_ta is None: + self._root_link_quat_w_ta = ProxyArray(self._root_link_quat_w) + return self._root_link_quat_w_ta + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root linear velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + self._root_link_lin_vel_w = self._get_top_from_spatial_vector( + self._root_link_lin_vel_w, self.root_link_vel_w.warp + ) + if self._root_link_lin_vel_w_ta is None: + self._root_link_lin_vel_w_ta = ProxyArray(self._root_link_lin_vel_w) + return self._root_link_lin_vel_w_ta + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + self._root_link_ang_vel_w = self._get_bottom_from_spatial_vector( + self._root_link_ang_vel_w, self.root_link_vel_w.warp + ) + if self._root_link_ang_vel_w_ta is None: + self._root_link_ang_vel_w_ta = ProxyArray(self._root_link_ang_vel_w) + return self._root_link_ang_vel_w_ta + + @property + def root_com_pos_w(self) -> ProxyArray: + """Root center of mass position in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the position of the center of mass frame of the root rigid body relative to the world. + """ + self._root_com_pos_w = self._get_pos_from_transform(self._root_com_pos_w, self.root_com_pose_w.warp) + if self._root_com_pos_w_ta is None: + self._root_com_pos_w_ta = ProxyArray(self._root_com_pos_w) + return self._root_com_pos_w_ta + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root center of mass orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + This quantity is the orientation of the principal axes of inertia of the root rigid body relative to the world. + """ + self._root_com_quat_w = self._get_quat_from_transform(self._root_com_quat_w, self.root_com_pose_w.warp) + if self._root_com_quat_w_ta is None: + self._root_com_quat_w_ta = ProxyArray(self._root_com_quat_w) + return self._root_com_quat_w_ta + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root center of mass linear velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + self._root_com_lin_vel_w = self._get_top_from_spatial_vector(self._root_com_lin_vel_w, self.root_com_vel_w.warp) + if self._root_com_lin_vel_w_ta is None: + self._root_com_lin_vel_w_ta = ProxyArray(self._root_com_lin_vel_w) + return self._root_com_lin_vel_w_ta + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root center of mass angular velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + self._root_com_ang_vel_w = self._get_bottom_from_spatial_vector( + self._root_com_ang_vel_w, self.root_com_vel_w.warp + ) + if self._root_com_ang_vel_w_ta is None: + self._root_com_ang_vel_w_ta = ProxyArray(self._root_com_ang_vel_w) + return self._root_com_ang_vel_w_ta + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + self._body_link_pos_w = self._get_pos_from_transform(self._body_link_pos_w, self.body_link_pose_w.warp) + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._body_link_pos_w) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + self._body_link_quat_w = self._get_quat_from_transform(self._body_link_quat_w, self.body_link_pose_w.warp) + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._body_link_quat_w) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear velocity of the rigid bodies' actor frame relative to the world. + """ + self._body_link_lin_vel_w = self._get_top_from_spatial_vector( + self._body_link_lin_vel_w, self.body_link_vel_w.warp + ) + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._body_link_lin_vel_w) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular velocity of the rigid bodies' actor frame relative to the world. + """ + self._body_link_ang_vel_w = self._get_bottom_from_spatial_vector( + self._body_link_ang_vel_w, self.body_link_vel_w.warp + ) + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._body_link_ang_vel_w) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the position of the rigid bodies' center of mass frame. + """ + self._body_com_pos_w = self._get_pos_from_transform(self._body_com_pos_w, self.body_com_pose_w.warp) + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._body_com_pos_w) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the principal axes of inertia of the rigid bodies. + """ + self._body_com_quat_w = self._get_quat_from_transform(self._body_com_quat_w, self.body_com_pose_w.warp) + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._body_com_quat_w) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + self._body_com_lin_vel_w = self._get_top_from_spatial_vector(self._body_com_lin_vel_w, self.body_com_vel_w.warp) + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._body_com_lin_vel_w) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + self._body_com_ang_vel_w = self._get_bottom_from_spatial_vector( + self._body_com_ang_vel_w, self.body_com_vel_w.warp + ) + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._body_com_ang_vel_w) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + self._body_com_lin_acc_w = self._get_top_from_spatial_vector(self._body_com_lin_acc_w, self.body_com_acc_w.warp) + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._body_com_lin_acc_w) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + self._body_com_ang_acc_w = self._get_bottom_from_spatial_vector( + self._body_com_ang_acc_w, self.body_com_acc_w.warp + ) + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._body_com_ang_acc_w) + return self._body_com_ang_acc_w_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + self._body_com_quat_b = self._get_quat_from_transform(self._body_com_quat_b, self.body_com_pose_b.warp) + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._body_com_quat_b) + return self._body_com_quat_b_ta + + def _create_simulation_bindings(self) -> None: + """Create simulation bindings for the root data. + + Direct simulation bindings are pointers to the simulation data, their data is not copied, and should + only be updated using warp kernels. Any modifications made to the bindings will be reflected in the simulation. + Hence we encourage users to carefully think about the data they modify and in which order it should be updated. + + .. caution:: This is possible if and only if the properties that we access are strided from newton and not + indexed. Newton willing this is the case all the time, but we should pay attention to this if things look off. + """ + # Short-hand for the number of instances, number of links, and number of joints. + self._num_instances = self._root_view.count + self._num_bodies = self._root_view.link_count + + # -- root properties + if self._root_view.is_fixed_base: + self._sim_bind_root_link_pose_w = self._root_view.get_root_transforms(SimulationManager.get_state_0())[ + :, 0, 0 + ] + else: + self._sim_bind_root_link_pose_w = self._root_view.get_root_transforms(SimulationManager.get_state_0())[:, 0] + self._sim_bind_root_com_vel_w = self._root_view.get_root_velocities(SimulationManager.get_state_0()) + if self._sim_bind_root_com_vel_w is not None: + if self._root_view.is_fixed_base: + self._sim_bind_root_com_vel_w = self._sim_bind_root_com_vel_w[:, 0, 0] + else: + self._sim_bind_root_com_vel_w = self._sim_bind_root_com_vel_w[:, 0] + # -- body properties + self._sim_bind_body_com_pos_b = self._root_view.get_attribute("body_com", SimulationManager.get_model())[:, 0] + self._sim_bind_body_link_pose_w = self._root_view.get_link_transforms(SimulationManager.get_state_0())[:, 0] + self._sim_bind_body_com_vel_w = self._root_view.get_link_velocities(SimulationManager.get_state_0())[:, 0] + self._sim_bind_body_mass = self._root_view.get_attribute("body_mass", SimulationManager.get_model())[:, 0] + # Newton stores body_inertia as (N, 1, 1) mat33f — the [:, 0] removes the padding dim + # giving (N, 1) mat33f. Reinterpret as (N, 1, 9) float32 via pointer aliasing. + # Each mat33f element is 9 contiguous float32 values (36 bytes), so the inner stride is 4. + # The slice may be non-contiguous in the outer dims, so we preserve those strides. + _body_inertia_raw = self._root_view.get_attribute("body_inertia", SimulationManager.get_model())[:, 0] + self._sim_bind_body_inertia = wp.array( + ptr=_body_inertia_raw.ptr, + dtype=wp.float32, + shape=(self._num_instances, 1, 9), + strides=(_body_inertia_raw.strides[0], _body_inertia_raw.strides[1], 4), + device=_body_inertia_raw.device, + copy=False, + ) + self._sim_bind_body_external_wrench = self._root_view.get_attribute("body_f", SimulationManager.get_state_0())[ + :, 0 + ] + + # Re-pin ProxyArray wrappers to the newly created sim bindings. + # On first init, _create_buffers() handles this after all buffers exist. + if hasattr(self, "_root_link_pose_w_ta"): + self._pin_proxy_arrays() + + def _create_buffers(self) -> None: + """Create buffers for the root data.""" + super()._create_buffers() + self._num_instances = self._root_view.count + # Initialize history for finite differencing. If the rigid object is fixed, the root com velocity is not + # available, so we use zeros. + if self._root_view.get_root_velocities(SimulationManager.get_state_0()) is None: + logger.warning( + "Failed to get root com velocity. If the rigid object is fixed, this is expected. " + "Setting root com velocity to zeros." + ) + self._sim_bind_root_com_vel_w = wp.zeros( + (self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._sim_bind_body_com_vel_w = wp.zeros( + (self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + # -- default root pose and velocity + self._default_root_pose = wp.zeros((self._num_instances,), dtype=wp.transformf, device=self.device) + self._default_root_vel = wp.zeros((self._num_instances,), dtype=wp.spatial_vectorf, device=self.device) + self._default_root_state = None # lazily allocated by deprecated default_root_state property + + # Initialize history for finite differencing + self._previous_body_com_vel = wp.clone(self._root_view.get_link_velocities(SimulationManager.get_state_0()))[ + :, 0 + ] + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_vel_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._root_link_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._projected_gravity_b = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.vec3f, device=self.device) + self._heading_w = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.float32, device=self.device) + self._body_link_vel_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + # -- com frame w.r.t. world frame + self._root_com_pose_w = TimestampedBuffer(shape=(self._num_instances,), dtype=wp.transformf, device=self.device) + self._root_com_vel_b = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._root_com_acc_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=wp.spatial_vectorf, device=self.device + ) + self._body_com_acc_w = TimestampedBuffer( + shape=(self._num_instances, 1), dtype=wp.spatial_vectorf, device=self.device + ) + self._body_com_pose_b = TimestampedBuffer( + shape=(self._num_instances, 1), dtype=wp.transformf, device=self.device + ) + # Empty memory pre-allocations + self._root_state_w = None + self._root_link_state_w = None + self._root_com_state_w = None + self._body_com_quat_b = None + self._root_link_lin_vel_b = None + self._root_link_ang_vel_b = None + self._root_com_lin_vel_b = None + self._root_com_ang_vel_b = None + self._root_link_pos_w = None + self._root_link_quat_w = None + self._root_link_lin_vel_w = None + self._root_link_ang_vel_w = None + self._root_com_pos_w = None + self._root_com_quat_w = None + self._root_com_lin_vel_w = None + self._root_com_ang_vel_w = None + self._body_link_pos_w = None + self._body_link_quat_w = None + self._body_link_lin_vel_w = None + self._body_link_ang_vel_w = None + self._body_com_pos_w = None + self._body_com_quat_w = None + self._body_com_lin_vel_w = None + self._body_com_ang_vel_w = None + self._body_com_lin_acc_w = None + self._body_com_ang_acc_w = None + + # Pin all ProxyArray wrappers to current buffers. + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create or rebind all pinned ProxyArray wrappers. + + Called from :meth:`_create_buffers` on first initialization and from + :meth:`_create_simulation_bindings` after a full simulation reset when + the solver recreates its internal arrays. + """ + is_rebind = hasattr(self, "_root_link_pose_w_ta") + + if is_rebind: + # Rebind sim-bound ProxyArrays to new solver arrays + self._root_link_pose_w_ta = ProxyArray(self._sim_bind_root_link_pose_w) + self._root_com_vel_w_ta = ProxyArray(self._sim_bind_root_com_vel_w) + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._sim_bind_body_inertia) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + else: + # First-time creation: pin ProxyArrays to current buffers + # Category 1: sim-bound and pre-allocated buffers + # Newton wp.array pointers are stable, so a ProxyArray wrapping them is valid forever. + self._root_link_pose_w_ta = ProxyArray(self._sim_bind_root_link_pose_w) + self._root_com_vel_w_ta = ProxyArray(self._sim_bind_root_com_vel_w) + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._sim_bind_body_inertia) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + + # Category 2: TimestampedBuffer properties + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w.data) + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w.data) + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + self._heading_w_ta = ProxyArray(self._heading_w.data) + + # -- deprecated state properties (lazy); type annotations declared once here + self._root_link_lin_vel_b_ta: ProxyArray | None = None + self._root_link_ang_vel_b_ta: ProxyArray | None = None + self._root_com_lin_vel_b_ta: ProxyArray | None = None + self._root_com_ang_vel_b_ta: ProxyArray | None = None + self._root_state_w_ta: ProxyArray | None = None + self._root_link_state_w_ta: ProxyArray | None = None + self._root_com_state_w_ta: ProxyArray | None = None + self._default_root_state_ta: ProxyArray | None = None + self._body_state_w_ta: ProxyArray | None = None + self._body_link_state_w_ta: ProxyArray | None = None + self._body_com_state_w_ta: ProxyArray | None = None + + # Invalidate lazy sliced ProxyArrays AND their backing wp.arrays so they are + # re-created from fresh data on next access. On first init the backing fields + # are already None (set by _create_buffers), so the assignments below are + # harmless no-ops. On rebind they reset stale pointers into freed transform + # memory after a sim reset. + self._root_link_pos_w_ta: ProxyArray | None = None + self._root_link_pos_w = None + self._root_link_quat_w_ta: ProxyArray | None = None + self._root_link_quat_w = None + self._root_link_lin_vel_w_ta: ProxyArray | None = None + self._root_link_lin_vel_w = None + self._root_link_ang_vel_w_ta: ProxyArray | None = None + self._root_link_ang_vel_w = None + self._root_com_pos_w_ta: ProxyArray | None = None + self._root_com_pos_w = None + self._root_com_quat_w_ta: ProxyArray | None = None + self._root_com_quat_w = None + self._root_com_lin_vel_w_ta: ProxyArray | None = None + self._root_com_lin_vel_w = None + self._root_com_ang_vel_w_ta: ProxyArray | None = None + self._root_com_ang_vel_w = None + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_pos_w = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_quat_w = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_pos_w = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_quat_w = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w = None + self._body_com_quat_b_ta: ProxyArray | None = None + self._body_com_quat_b = None + + """ + Internal helpers. + """ + + def _get_pos_from_transform(self, source: wp.array | None, transform: wp.array) -> wp.array: + """Generates a position array from a transform array. + + Args: + transform: The transform array. Shape is (N) dtype=wp.transformf. + + Returns: + The position array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if transform.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches transform.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(transform.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the position part of the transform. + if not transform.is_contiguous: + # Launch the right kernel based on the shape of the transform array. + if len(transform.shape) > 1: + wp.launch( + shared_kernels.split_transform_to_pos_2d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_transform_to_pos_1d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + return source + + def _get_quat_from_transform(self, source: wp.array | None, transform: wp.array) -> wp.array: + """Generates a quaternion array from a transform array. + + Args: + transform: The transform array. Shape is (N) dtype=wp.transformf. + + Returns: + The quaternion array. Shape is (N) dtype=wp.quatf. + """ + # Check if we already created the lazy buffer. + if source is None: + if transform.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches transform.shape since each element is quatf (already contains 4 floats) + source = wp.zeros(transform.shape, dtype=wp.quatf, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the quaternion part of the transform. + if not transform.is_contiguous: + # Launch the right kernel based on the shape of the transform array. + if len(transform.shape) > 1: + wp.launch( + shared_kernels.split_transform_to_quat_2d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_transform_to_quat_1d, + dim=transform.shape, + inputs=[transform], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + def _get_top_from_spatial_vector(self, source: wp.array | None, spatial_vector: wp.array) -> wp.array: + """Gets the top part of a spatial vector array. + + For instance the linear velocity is the top part of a velocity vector. + + Args: + spatial_vector: The spatial vector array. Shape is (N) dtype=wp.spatial_vectorf. + + Returns: + The top part of the spatial vector array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if spatial_vector.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches spatial_vector.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(spatial_vector.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the top part of the spatial vector. + if not spatial_vector.is_contiguous: + # Launch the right kernel based on the shape of the spatial_vector array. + if len(spatial_vector.shape) > 1: + wp.launch( + shared_kernels.split_spatial_vector_to_top_2d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_spatial_vector_to_top_1d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + def _get_bottom_from_spatial_vector(self, source: wp.array | None, spatial_vector: wp.array) -> wp.array: + """Gets the bottom part of a spatial vector array. + + For instance the angular velocity is the bottom part of a velocity vector. + + Args: + spatial_vector: The spatial vector array. Shape is (N) dtype=wp.spatial_vectorf. + + Returns: + The bottom part of the spatial vector array. Shape is (N) dtype=wp.vec3f. + """ + # Check if we already created the lazy buffer. + if source is None: + if spatial_vector.is_contiguous: + # Check if the array is contiguous. If so, we can just return a strided array. + # Then this update becomes a no-op. + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + else: + # If the array is not contiguous, we need to create a new array to write to. + # Shape matches spatial_vector.shape since each element is vec3f (already contains 3 floats) + source = wp.zeros(spatial_vector.shape, dtype=wp.vec3f, device=self.device) + + # If the array is not contiguous, we need to launch the kernel to get the bottom part of the spatial vector. + if not spatial_vector.is_contiguous: + # Launch the right kernel based on the shape of the spatial_vector array. + if len(spatial_vector.shape) > 1: + wp.launch( + shared_kernels.split_spatial_vector_to_bottom_2d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + else: + wp.launch( + shared_kernels.split_spatial_vector_to_bottom_1d, + dim=spatial_vector.shape, + inputs=[spatial_vector], + outputs=[source], + device=self.device, + ) + # Return the source array. (no-op if the array is contiguous.) + return source + + """ + Deprecated properties. + """ + + @property + def root_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_state_w is None: + self._root_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_state_w_ta = ProxyArray(self._root_state_w.data) + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" + warnings.warn( + "The `root_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_link_state_w is None: + self._root_link_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_link_state_w_ta = ProxyArray(self._root_link_state_w.data) + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.root_link_vel_w.warp, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_com_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_com_state_w is None: + self._root_com_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_com_state_w_ta = ProxyArray(self._root_com_state_w.data) + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w_ta + + @property + def default_root_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, 13). + """ + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_root_state is None: + self._default_root_state = wp.zeros((self._num_instances), dtype=shared_kernels.vec13f, device=self.device) + self._default_root_state_ta = ProxyArray(self._default_root_state) + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self._default_root_pose, + self._default_root_vel, + ], + outputs=[ + self._default_root_state, + ], + device=self.device, + ) + return self._default_root_state_ta + + @property + def body_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + # Access internal buffer directly to avoid cascading deprecation warnings from root_state_w + if self._root_state_w is None: + self._root_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_state_w_ta = ProxyArray(self._root_state_w.data) + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + if self._body_state_w_ta is None: + self._body_state_w_ta = ProxyArray(self._root_state_w.data.reshape((self._num_instances, 1))) + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + # Access internal buffer directly to avoid cascading deprecation warnings from root_link_state_w + if self._root_link_state_w is None: + self._root_link_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_link_state_w_ta = ProxyArray(self._root_link_state_w.data) + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w.warp, + self.root_link_vel_w.warp, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + if self._body_link_state_w_ta is None: + self._body_link_state_w_ta = ProxyArray(self._root_link_state_w.data.reshape((self._num_instances, 1))) + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_com_state_w is None: + self._root_com_state_w = TimestampedBuffer( + shape=(self._num_instances,), dtype=shared_kernels.vec13f, device=self.device + ) + self._root_com_state_w_ta = ProxyArray(self._root_com_state_w.data) + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w.warp, + self.root_com_vel_w.warp, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + if self._body_com_state_w_ta is None: + self._body_com_state_w_ta = ProxyArray(self._root_com_state_w.data.reshape((self._num_instances, 1))) + return self._body_com_state_w_ta diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.pyi new file mode 100644 index 000000000000..8b12ec95e7a2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RigidObjectCollection", + "RigidObjectCollectionData", +] + +from .rigid_object_collection import RigidObjectCollection +from .rigid_object_collection_data import RigidObjectCollectionData diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py new file mode 100644 index 000000000000..8c499d75396c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py @@ -0,0 +1,1398 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import re +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp +from newton.selection import ArticulationView +from newton.solvers import SolverNotifyFlags + +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection import BaseRigidObjectCollection +from isaaclab.physics import PhysicsEvent +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +from .rigid_object_collection_data import RigidObjectCollectionData + +if TYPE_CHECKING: + from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg + + +class RigidObjectCollection(BaseRigidObjectCollection): + """A rigid object collection class. + + This class represents a collection of rigid objects in the simulation, where the state of the + rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. + + For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the + simulation, the physics engine will automatically register the rigid bodies and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. + + Rigid objects in the collection are uniquely identified via the key of the dictionary + :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the + :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. + This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by + the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid + object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary + could contain the same rigid object multiple times, leading to ambiguity. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCollectionCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "newton" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCollectionCfg): + """Initialize the rigid object collection. + + Args: + cfg: A configuration instance. + """ + # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. + # check that the config is valid + cfg.validate() + # store inputs + self.cfg = cfg.copy() + # flag for whether the asset is initialized + self._is_initialized = False + # spawn the rigid objects + for rigid_body_cfg in self.cfg.rigid_objects.values(): + # spawn the asset + if rigid_body_cfg.spawn is not None: + rigid_body_cfg.spawn.func( + rigid_body_cfg.prim_path, + rigid_body_cfg.spawn, + translation=rigid_body_cfg.init_state.pos, + orientation=rigid_body_cfg.init_state.rot, + ) + # check that spawn was successful + matching_prims = sim_utils.find_matching_prims(rigid_body_cfg.prim_path) + if len(matching_prims) == 0: + raise RuntimeError(f"Could not find prim with path {rigid_body_cfg.prim_path}.") + # stores object names + self._body_names_list = [] + + # register various callback functions + self._register_callbacks() + self._debug_vis_handle = None + + """ + Properties + """ + + @property + def data(self) -> RigidObjectCollectionData: + return self._data + + @property + def num_instances(self) -> int: + return self._root_view.count // self.num_bodies + + @property + def num_bodies(self) -> int: + """Number of bodies in the rigid object collection.""" + return len(self.body_names) + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object collection.""" + return self._body_names_list + + @property + def root_view(self) -> ArticulationView: + """Root view for the rigid object collection. + + A single :class:`ArticulationView` matching all body types. The 2nd dimension + (matches per world) corresponds to the different body types. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset( + self, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + object_ids: slice | torch.Tensor | None = None, + env_mask: wp.array | None = None, + object_mask: wp.array | None = None, + ) -> None: + """Resets all internal buffers of selected environments and objects. + + Args: + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + object_mask: Object mask. Not used currently. + """ + # resolve all indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if object_ids is None: + object_ids = self._ALL_BODY_INDICES + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + else: + composer = self._permanent_wrench_composer + composer.compose_to_body_frame() + wp.launch( + shared_kernels.update_wrench_array_with_force_and_torque, + dim=(self.num_instances, self.num_bodies), + device=self.device, + inputs=[ + composer.out_force_b, + composer.out_torque_b, + self._wrench_buffer, + self._ALL_ENV_MASK, + self._ALL_BODY_MASK, + ], + ) + # Write the wrench buffer directly to the Newton binding (already 2D) + wp.copy(self._data._sim_bind_body_external_wrench, self._wrench_buffer) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size [s]. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str]]: + """Find bodies in the rigid body collection based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + return torch.tensor(obj_ids, device=self.device, dtype=torch.int32), obj_names + + """ + Operations - Write to simulation. + """ + + def write_body_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the body pose over selected environment and body indices into the simulation. + + The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7) + or (len(env_ids), len(body_ids)) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_body_link_pose_to_sim_index(body_poses=body_poses, env_ids=env_ids, body_ids=body_ids) + + def write_body_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set the body pose over selected environment mask into the simulation. + + The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + self.write_body_link_pose_to_sim_index( + body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the body velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_body_com_velocity_to_sim_index(body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids) + + def write_body_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set the body velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body velocities in simulation frame. + Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + self.write_body_com_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_link_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body link pose over selected environment and body indices into the simulation. + + The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body link poses in simulation frame. + Shape is (len(env_ids), len(body_ids), 7) or (num_instances, num_bodies, 7), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + # Write to consolidated buffer + wp.launch( + shared_kernels.set_body_link_pose_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_poses, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_link_pose_w, + None, # body_link_state_w + None, # body_state_w + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_body_link_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body link pose over selected environment mask into the simulation. + + The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body link poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_mask = self._ALL_ENV_MASK + env_ids = self._ALL_ENV_INDICES + self.write_body_link_pose_to_sim_index( + body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_com_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body center of mass pose over selected environment and body indices into the simulation. + + The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body center of mass poses in simulation frame. + Shape is (len(env_ids), len(body_ids), 7) or (num_instances, num_bodies, 7), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + # Write to consolidated buffers (updates both com_pose_w and link_pose_w) + wp.launch( + shared_kernels.set_body_com_pose_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_poses, + self.data.body_com_pos_b, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_com_pose_w, + self.data.body_link_pose_w, + None, # body_com_state_w + None, # body_link_state_w + None, # body_state_w + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_body_com_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body center of mass pose over selected environment mask into the simulation. + + The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_poses: Body center of mass poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_mask = self._ALL_ENV_MASK + env_ids = self._ALL_ENV_INDICES + self.write_body_com_pose_to_sim_index(body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True) + + def write_body_com_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body center of mass velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body center of mass velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) + else: + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) + # Write to consolidated buffer + wp.launch( + shared_kernels.set_body_com_velocity_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_velocities, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_com_vel_w, + self.data.body_com_acc_w, + None, # body_state_w + None, # body_com_state_w + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_vel_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_body_com_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body center of mass velocities in simulation frame. + Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_mask = self._ALL_ENV_MASK + env_ids = self._ALL_ENV_INDICES + self.write_body_com_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_link_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body link velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's frame rather than the body's center of mass. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body link velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) + else: + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) + # Access body_com_pos_b and body_link_pose_w to ensure they are current. + wp.launch( + shared_kernels.set_body_link_velocity_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_velocities, + self.data.body_com_pos_b, + self.data.body_link_pose_w, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_link_vel_w, + self.data.body_com_vel_w, + self.data.body_com_acc_w, + None, # body_link_state_w + None, # body_state_w + None, # body_com_state_w + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + SimulationManager.invalidate_fk(env_ids=env_ids, articulation_ids=self._root_view.articulation_ids) + + def write_body_link_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's frame rather than the body's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + body_velocities: Body link velocities in simulation frame. Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + + Note: + Triggers per-environment FK recomputation and solver reset (Kamino) for the affected environments. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_mask = self._ALL_ENV_MASK + env_ids = self._ALL_ENV_INDICES + self.write_body_link_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Write to consolidated buffer + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + ], + outputs=[ + self.data._sim_bind_body_mass, + ], + device=self.device, + ) + # No copy-back needed — writes go directly to Newton's state via the 2D binding + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32, "masses") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + masses, + env_mask, + body_mask, + ], + outputs=[ + self.data._sim_bind_body_mass, + ], + device=self.device, + ) + # No copy-back needed — writes go directly to Newton's state via the 2D binding + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (len(env_ids), len(body_ids), 3). + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f, "coms") + # Write to consolidated buffer + wp.launch( + shared_kernels.write_body_com_position_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + ], + outputs=[ + self.data._sim_bind_body_com_pos_b, + ], + device=self.device, + ) + # Invalidate derived buffers that depend on com position + self.data._body_com_pose_b.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass position of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + .. caution:: + Unlike the PhysX version of this method, this method does not set the center of mass orientation. + Only the position is set. This is because Newton considers the center of mass orientation to always be + aligned with the body frame. + + Args: + coms: Center of mass position of all bodies. Shape is (num_instances, num_bodies, 3). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f, "coms") + wp.launch( + shared_kernels.write_body_com_position_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + coms, + env_mask, + body_mask, + ], + outputs=[ + self.data._sim_bind_body_com_pos_b, + ], + device=self.device, + ) + # Invalidate derived buffers that depend on com position + self.data._body_com_pose_b.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Write to consolidated buffer + wp.launch( + shared_kernels.write_body_inertia_to_buffer_index, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + body_ids, + ], + outputs=[ + self.data._body_inertia, + ], + device=self.device, + ) + # No copy-back needed — writes go directly to Newton's state via the 2D binding + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + if body_mask is None: + body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,)) + wp.launch( + shared_kernels.write_body_inertia_to_buffer_mask, + dim=(env_mask.shape[0], body_mask.shape[0]), + inputs=[ + inertias, + env_mask, + body_mask, + ], + outputs=[ + self.data._body_inertia, + ], + device=self.device, + ) + # No copy-back needed — writes go directly to Newton's state via the 2D binding + # Tell the physics engine that some of the body properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # clear body names list to prevent double counting on re-initialization + self._body_names_list.clear() + root_prim_path_exprs: list[str] = [] + + for name, rigid_body_cfg in self.cfg.rigid_objects.items(): + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(rigid_body_cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{rigid_body_cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{rigid_body_cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{rigid_body_cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + # check that no rigid object has an articulation root API, which decreases simulation performance + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{rigid_body_cfg.prim_path}' in the rigid object" + f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." + " Please disable the articulation root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = rigid_body_cfg.prim_path + root_prim_path[len(template_prim_path) :] + root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) + self._body_names_list.append(name) + + # Build a single pattern that matches ALL body types by wildcarding the differing path segment + combined_pattern = self._build_combined_pattern(root_prim_path_exprs) + + # Create a single ArticulationView matching all body types. + # The 2nd dimension (matches per world) corresponds to the body types. + self._root_view = ArticulationView( + SimulationManager.get_model(), + combined_pattern, + verbose=False, + ) + + # container for data access + self._data = RigidObjectCollectionData(self._root_view, self.num_bodies, self.device) + + # Register callback to rebind simulation data after a full reset (model/state recreation). + self._physics_ready_handle = SimulationManager.register_callback( + lambda _: self._data._create_simulation_bindings(), + PhysicsEvent.PHYSICS_READY, + name=f"rigid_object_collection_rebind_{self.cfg.rigid_objects}", + ) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + # Let the rigid object collection data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_ENV_INDICES = wp.array( + np.arange(self.num_instances, dtype=np.int32), device=self.device, dtype=wp.int32 + ) + self._ALL_BODY_INDICES = wp.array( + np.arange(self.num_bodies, dtype=np.int32), device=self.device, dtype=wp.int32 + ) + self._ALL_ENV_MASK = wp.ones((self.num_instances,), dtype=wp.bool, device=self.device) + self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # Temporary 2D wrench buffer for write_data_to_sim + self._wrench_buffer = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + + # set information about rigid body into data + self._data.body_names = self.body_names + + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + # default state + # -- body state + default_body_poses = [] + default_body_vels = [] + for rigid_object_cfg in self.cfg.rigid_objects.values(): + default_body_pose = tuple(rigid_object_cfg.init_state.pos) + tuple(rigid_object_cfg.init_state.rot) + default_body_vel = tuple(rigid_object_cfg.init_state.lin_vel) + tuple(rigid_object_cfg.init_state.ang_vel) + default_body_pose = np.tile(np.array(default_body_pose, dtype=np.float32), (self.num_instances, 1)) + default_body_vel = np.tile(np.array(default_body_vel, dtype=np.float32), (self.num_instances, 1)) + default_body_poses.append(default_body_pose) + default_body_vels.append(default_body_vel) + # Stack: each has shape (num_instances, data_size) -> (num_instances, num_bodies, data_size) + default_body_poses = np.stack(default_body_poses, axis=1) + default_body_vels = np.stack(default_body_vels, axis=1) + self.data.default_body_pose = wp.array(default_body_poses, dtype=wp.transformf, device=self.device) + self.data.default_body_vel = wp.array(default_body_vels, dtype=wp.spatial_vectorf, device=self.device) + + def _resolve_env_ids(self, env_ids) -> wp.array: + """Resolve environment indices to a warp array. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_ENV_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_body_ids(self, body_ids) -> wp.array: + """Resolve body indices to a warp array. + + Args: + body_ids: Body indices. If None, then all indices are used. + + Returns: + A warp array of body indices. + """ + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if (body_ids is None) or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, slice): + return wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 + ) + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + def _resolve_env_mask(self, env_mask: wp.array | None) -> wp.array | torch.Tensor: + """Resolve environment mask to indices via torch.nonzero.""" + if env_mask is not None: + if isinstance(env_mask, wp.array): + env_mask = wp.to_torch(env_mask) + env_ids = torch.nonzero(env_mask)[:, 0].to(torch.int32) + else: + env_ids = self._ALL_ENV_INDICES + return env_ids + + def _resolve_body_mask(self, body_mask: wp.array | None) -> wp.array | torch.Tensor: + """Resolve body mask to indices via torch.nonzero.""" + if body_mask is not None: + if isinstance(body_mask, wp.array): + body_mask = wp.to_torch(body_mask) + body_ids = torch.nonzero(body_mask)[:, 0].to(torch.int32) + else: + body_ids = self._ALL_BODY_INDICES + return body_ids + + @staticmethod + def _build_combined_pattern(prim_path_exprs: list[str]) -> str: + """Build a single fnmatch pattern that matches all body types. + + Compares path segments across all expressions and wildcards the segments that differ. + For example, given:: + + ["/World/Env_*/DexCube/Cube", "/World/Env_*/DexSphere/Sphere"] + + produces ``"/World/Env_*/*/*"``. + + Args: + prim_path_exprs: List of prim path expressions, one per body type. + + Returns: + A single fnmatch pattern string. + + Raises: + ValueError: If the expressions have different numbers of path segments. + """ + if len(prim_path_exprs) == 1: + return prim_path_exprs[0] + + split_paths = [p.split("/") for p in prim_path_exprs] + lengths = {len(s) for s in split_paths} + if len(lengths) != 1: + raise ValueError( + f"Cannot build combined pattern: path expressions have different segment counts: {prim_path_exprs}" + ) + + combined_segments = [] + for segments in zip(*split_paths): + unique = set(segments) + if len(unique) == 1: + combined_segments.append(segments[0]) + else: + combined_segments.append("*") + return "/".join(combined_segments) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event) -> None: + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_view = None + + def _on_prim_deletion(self, prim_path: str) -> None: + """Invalidates and deletes the callbacks when the prim is deleted. + + Args: + prim_path: The path to the prim that is being deleted. + + .. note:: + This function is called when the prim is deleted. + """ + if prim_path == "/": + self._clear_callbacks() + return + for prim_path_expr in [obj.prim_path for obj in self.cfg.rigid_objects.values()]: + result = re.match( + pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path + ) + if result: + self._clear_callbacks() + return + + """ + Deprecated properties and methods. + """ + + @property + def root_physx_view(self): + """Deprecated property. Please use :attr:`root_view` instead.""" + warnings.warn( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_view + + def write_body_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_link_pose_to_sim_index` and + :meth:`write_body_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_link_pose_to_sim_index' and 'write_body_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_link_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) + + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_com_pose_to_sim_index` and + :meth:`write_body_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_com_pose_to_sim_index' and 'write_body_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_com_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) + + def write_body_link_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_link_pose_to_sim_index` and + :meth:`write_body_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_link_pose_to_sim_index' and 'write_body_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_link_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_link_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py new file mode 100644 index 000000000000..9fe3ec125de0 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py @@ -0,0 +1,913 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray + +from isaaclab_newton.assets import kernels as shared_kernels +from isaaclab_newton.physics import NewtonManager as SimulationManager + +if TYPE_CHECKING: + from newton.selection import ArticulationView + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectCollectionData(BaseRigidObjectCollectionData): + """Data container for a rigid object collection. + + This class contains the data for a rigid object collection in the simulation. The data includes the state of + all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. + The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + __backend_name__: str = "newton" + """The name of the backend for the rigid object collection data.""" + + def __init__(self, root_view: ArticulationView, num_bodies: int, device: str): + """Initializes the rigid object collection data. + + Args: + root_view: A single articulation view matching all body types across all environments. + The view's 2nd dimension (matches per world) corresponds to the body types. + num_bodies: The number of bodies in the collection. + device: The device used for processing. + """ + super().__init__(root_view, num_bodies, device) + self.num_bodies = num_bodies + # Store the view as a weak reference to avoid circular references + self._root_view: ArticulationView = weakref.proxy(root_view) + self.num_instances = self._root_view.count // num_bodies + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # Convert gravity to direction vector + gravity = wp.to_torch(SimulationManager.get_model().gravity)[0] + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray( + wp.from_torch(gravity_dir.repeat(self.num_instances, self.num_bodies, 1), dtype=wp.vec3f) + ) + self.FORWARD_VEC_B = ProxyArray( + wp.from_torch( + torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self.num_instances, self.num_bodies, 1), + dtype=wp.vec3f, + ) + ) + + self._create_simulation_bindings() + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the rigid object collection data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the rigid object collection data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._is_primed = value + + def update(self, dt: float) -> None: + """Updates the data for the rigid object collection. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + # Trigger an update of the body com acceleration buffer at a higher frequency + # since we do finite differencing. + self.body_com_acc_w + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + """ + Defaults. + """ + + @property + def default_body_pose(self) -> ProxyArray: + """Default body pose ``[pos, quat]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + """ + return self._default_body_pose_ta + + @default_body_pose.setter + def default_body_pose(self, value: wp.array) -> None: + """Set the default body pose. + + Args: + value: The default body pose. Shape is (num_instances, num_bodies, 7). + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._default_body_pose.assign(value) + + @property + def default_body_vel(self) -> ProxyArray: + """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. + + The linear and angular velocities are of the rigid body's center of mass frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + """ + return self._default_body_vel_ta + + @default_body_vel.setter + def default_body_vel(self, value: wp.array) -> None: + """Set the default body velocity. + + Args: + value: The default body velocity. Shape is (num_instances, num_bodies, 6). + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._default_body_vel.assign(value) + + """ + Body state properties. + """ + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_link_vel_from_body_com_vel, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_com_vel_w.warp, + self.body_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_link_vel_w.data, + ], + device=self.device, + ) + self._body_link_vel_w.timestamp = self._sim_timestamp + + return self._body_link_vel_w_ta + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_com_pose_from_body_link_pose, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_com_pose_w.data, + ], + device=self.device, + ) + self._body_com_pose_w.timestamp = self._sim_timestamp + + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.derive_body_acceleration_from_body_com_velocities, + dim=(self.num_instances, self.num_bodies), + device=self.device, + inputs=[ + self.body_com_vel_w.warp, + SimulationManager.get_dt(), + self._previous_body_com_vel, + ], + outputs=[ + self._body_com_acc_w.data, + ], + ) + self._body_com_acc_w.timestamp = self._sim_timestamp + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + warnings.warn( + "In Newton, body com pose always has unit quaternion. Consider using body_com_pos_b instead." + "Querying this property requires appending a unit quaternion to the position which is expensive.", + category=UserWarning, + stacklevel=2, + ) + if self._body_com_pose_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.make_dummy_body_com_pose_b, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_com_pos_b.warp, + ], + outputs=[ + self._body_com_pose_b.data, + ], + device=self.device, + ) + self._body_com_pose_b.timestamp = self._sim_timestamp + return self._body_com_pose_b_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the center of mass location relative to its body's link frame. + """ + return self._body_com_pos_b_ta + + @property + def body_mass(self) -> ProxyArray: + """Mass of all bodies in the simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.float32. + In torch this resolves to (num_instances, num_bodies). + """ + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Inertia of all bodies in the simulation world frame. + + Shape is (num_instances, num_bodies, 9), dtype = wp.float32. + In torch this resolves to (num_instances, num_bodies, 9). + """ + return self._body_inertia_ta + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.GRAVITY_VEC_W.warp, self.body_link_quat_w.warp], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + return self._projected_gravity_b_ta + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + + Shape is (num_instances, num_bodies), dtype = wp.float32. In torch this resolves to (num_instances, num_bodies). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.body_heading_w, + dim=(self.num_instances, self.num_bodies), + inputs=[self.FORWARD_VEC_B.warp, self.body_link_quat_w.warp], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + return self._heading_w_ta + + @property + def body_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._body_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_link_lin_vel_w.warp, self.body_link_quat_w.warp], + outputs=[self._body_link_lin_vel_b.data], + device=self.device, + ) + self._body_link_lin_vel_b.timestamp = self._sim_timestamp + return self._body_link_lin_vel_b_ta + + @property + def body_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._body_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_link_ang_vel_w.warp, self.body_link_quat_w.warp], + outputs=[self._body_link_ang_vel_b.data], + device=self.device, + ) + self._body_link_ang_vel_b.timestamp = self._sim_timestamp + return self._body_link_ang_vel_b_ta + + @property + def body_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._body_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_com_lin_vel_w.warp, self.body_link_quat_w.warp], + outputs=[self._body_com_lin_vel_b.data], + device=self.device, + ) + self._body_com_lin_vel_b.timestamp = self._sim_timestamp + return self._body_com_lin_vel_b_ta + + @property + def body_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._body_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_com_ang_vel_w.warp, self.body_link_quat_w.warp], + outputs=[self._body_com_ang_vel_b.data], + device=self.device, + ) + self._body_com_ang_vel_b.timestamp = self._sim_timestamp + return self._body_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(self.body_link_pose_w.warp)) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(self.body_link_pose_w.warp)) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the rigid bodies' actor frame relative to the world. + """ + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(self.body_link_vel_w.warp)) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the rigid bodies' actor frame relative to the world. + """ + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(self.body_link_vel_w.warp)) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the position of the rigid bodies' center of mass frame. + """ + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(self.body_com_pose_w.warp)) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the principal axes of inertia of the rigid bodies. + """ + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(self.body_com_pose_w.warp)) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(self.body_com_vel_w.warp)) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(self.body_com_vel_w.warp)) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(self.body_com_acc_w.warp)) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(self.body_com_acc_w.warp)) + return self._body_com_ang_acc_w_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._get_quat_from_transform(self.body_com_pose_b.warp)) + return self._body_com_quat_b_ta + + def _create_simulation_bindings(self) -> None: + """Create simulation bindings for the body data. + + For a rigid object collection in Newton, a single :class:`ArticulationView` matches all body + types. Its data is shaped ``(num_envs, num_bodies, ...)``, where the 2nd dimension (matches + per world) corresponds to the different body types. This gives us direct 2D bindings into + Newton's state with no scatter/gather overhead. + """ + state_0 = SimulationManager.get_state_0() + model = SimulationManager.get_model() + + # Root transforms/velocities are (num_envs, num_bodies) — direct 2D bindings + self._sim_bind_body_link_pose_w = self._root_view.get_root_transforms(state_0) + self._sim_bind_body_com_vel_w = self._root_view.get_root_velocities(state_0) + # Attributes have a trailing link dim of 1: (num_envs, num_bodies, 1) -> [:, :, 0] + self._sim_bind_body_com_pos_b = self._root_view.get_attribute("body_com", model)[:, :, 0] + self._sim_bind_body_external_wrench = self._root_view.get_attribute("body_f", state_0)[:, :, 0] + # -- Body mass: (num_envs, num_bodies, 1) float32 → squeeze to (num_envs, num_bodies) + self._sim_bind_body_mass = self._root_view.get_attribute("body_mass", model)[:, :, 0] + # -- Body inertia: (num_envs, num_bodies, 1) mat33f → squeeze, reinterpret as (N, B, 9) float32. + # Each mat33f element is 9 contiguous float32 values (36 bytes), so the inner stride is 4. + # The slice may be non-contiguous in the outer dims, so we preserve those strides. + _body_inertia_raw = self._root_view.get_attribute("body_inertia", model)[:, :, 0] + self._body_inertia = wp.array( + ptr=_body_inertia_raw.ptr, + dtype=wp.float32, + shape=(self.num_instances, self.num_bodies, 9), + strides=(_body_inertia_raw.strides[0], _body_inertia_raw.strides[1], 4), + device=_body_inertia_raw.device, + copy=False, + ) + + # Re-pin ProxyArray wrappers to the newly created sim bindings. + # On first init, _create_buffers() handles this after all buffers exist. + if hasattr(self, "_body_link_pose_w_ta"): + self._pin_proxy_arrays() + + def _create_buffers(self) -> None: + """Create buffers for computing and caching derived quantities.""" + super()._create_buffers() + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame (computed from com vel) + self._body_link_vel_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, wp.spatial_vectorf + ) + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.transformf) + # -- com frame w.r.t. world frame + self._body_com_pose_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.transformf) + self._body_com_acc_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.spatial_vectorf) + # -- combined state (these are cached as they concatenate) + self._body_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + self._body_link_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + self._body_com_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + + # -- Default state + self._default_body_pose = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.transformf, device=self.device + ) + self._default_body_vel = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + self._default_body_state = None + + # -- Derived properties + self._projected_gravity_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._heading_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.float32) + self._body_link_lin_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_link_ang_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_com_lin_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_com_ang_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + + # -- Initialize history for finite differencing + self._previous_body_com_vel = wp.clone(self._sim_bind_body_com_vel_w) + + # Pin all ProxyArray wrappers to current buffers. + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create or rebind all pinned ProxyArray wrappers. + + Called from :meth:`_create_buffers` on first initialization and from + :meth:`_create_simulation_bindings` after a full simulation reset when + the solver recreates its internal arrays. + """ + is_rebind = hasattr(self, "_body_link_pose_w_ta") + + if is_rebind: + # Rebind sim-bound ProxyArrays to new solver arrays + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._body_inertia) + else: + # First-time creation: pin ProxyArrays to current buffers + # Category 1: sim-bound and pre-allocated buffers + # Newton wp.array pointers are stable, so a ProxyArray wrapping them is valid forever. + self._body_link_pose_w_ta = ProxyArray(self._sim_bind_body_link_pose_w) + self._body_com_vel_w_ta = ProxyArray(self._sim_bind_body_com_vel_w) + self._body_com_pos_b_ta = ProxyArray(self._sim_bind_body_com_pos_b) + self._body_mass_ta = ProxyArray(self._sim_bind_body_mass) + self._body_inertia_ta = ProxyArray(self._body_inertia) + self._default_body_pose_ta = ProxyArray(self._default_body_pose) + self._default_body_vel_ta = ProxyArray(self._default_body_vel) + + # Category 2: TimestampedBuffer properties + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w.data) + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w.data) + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + self._heading_w_ta = ProxyArray(self._heading_w.data) + self._body_link_lin_vel_b_ta = ProxyArray(self._body_link_lin_vel_b.data) + self._body_link_ang_vel_b_ta = ProxyArray(self._body_link_ang_vel_b.data) + self._body_com_lin_vel_b_ta = ProxyArray(self._body_com_lin_vel_b.data) + self._body_com_ang_vel_b_ta = ProxyArray(self._body_com_ang_vel_b.data) + self._body_state_w_ta = ProxyArray(self._body_state_w.data) + self._body_link_state_w_ta = ProxyArray(self._body_link_state_w.data) + self._body_com_state_w_ta = ProxyArray(self._body_com_state_w.data) + + # -- deprecated state properties (lazy); type annotation declared once here + self._default_body_state_ta: ProxyArray | None = None + + # Invalidate lazy sliced ProxyArrays AND their backing wp.arrays so they are + # re-created from fresh data on next access. On first init the backing fields + # are already None (set by _create_buffers), so the assignments below are + # harmless no-ops. On rebind they reset stale pointers into freed transform + # memory after a sim reset. + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + self._body_com_quat_b_ta: ProxyArray | None = None + self._default_body_state_ta: ProxyArray | None = None + self._default_body_state = None + + """ + Helpers. + """ + + def _get_pos_from_transform(self, transform: wp.array) -> wp.array: + """Generates a position array from a transform array.""" + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + + def _get_quat_from_transform(self, transform: wp.array) -> wp.array: + """Generates a quaternion array from a transform array.""" + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + + def _get_lin_vel_from_spatial_vector(self, spatial_vector: wp.array) -> wp.array: + """Generates a linear velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + def _get_ang_vel_from_spatial_vector(self, spatial_vector: wp.array) -> wp.array: + """Generates an angular velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + """ + Deprecated properties. + """ + + @property + def default_body_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, num_bodies, 13). + """ + warnings.warn( + "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_body_pose and default_body_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_body_state is None: + self._default_body_state = wp.zeros( + (self.num_instances, self.num_bodies), dtype=shared_kernels.vec13f, device=self.device + ) + self._default_body_state_ta = ProxyArray(self._default_body_state) + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self._default_body_pose, + self._default_body_vel, + ], + outputs=[ + self._default_body_state, + ], + device=self.device, + ) + return self._default_body_state_ta + + @property + def body_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_com_vel_w.warp, + ], + outputs=[ + self._body_state_w.data, + ], + device=self.device, + ) + self._body_state_w.timestamp = self._sim_timestamp + + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w.warp, + self.body_link_vel_w.warp, + ], + outputs=[ + self._body_link_state_w.data, + ], + device=self.device, + ) + self._body_link_state_w.timestamp = self._sim_timestamp + + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_com_pose_w.warp, + self.body_com_vel_w.warp, + ], + outputs=[ + self._body_com_state_w.data, + ], + device=self.device, + ) + self._body_com_state_w.timestamp = self._sim_timestamp + + return self._body_com_state_w_ta diff --git a/source/isaaclab_newton/isaaclab_newton/cloner/__init__.py b/source/isaaclab_newton/isaaclab_newton/cloner/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/cloner/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/cloner/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/cloner/__init__.pyi new file mode 100644 index 000000000000..f55377295a3e --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/cloner/__init__.pyi @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "newton_physics_replicate", + "newton_visualizer_prebuild", +] + +from .newton_replicate import newton_physics_replicate, newton_visualizer_prebuild diff --git a/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py b/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py new file mode 100644 index 000000000000..34cd35de4fa2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py @@ -0,0 +1,241 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +import warp as wp +from newton import ModelBuilder, solvers +from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx + +from pxr import Usd + +from isaaclab_newton.physics import NewtonManager + + +def _build_newton_builder_from_mapping( + stage: Usd.Stage, + sources: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + up_axis: str = "Z", + simplify_meshes: bool = True, +) -> tuple[ModelBuilder, object, dict]: + """Build a Newton model builder from clone mapping inputs. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. + positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of the populated Newton model builder, stage metadata returned + by ``add_usd``, and a site index map for + :attr:`NewtonManager._cl_site_index_map`. + """ + if positions is None: + positions = torch.zeros((mapping.size(1), 3), device=mapping.device, dtype=torch.float32) + if quaternions is None: + quaternions = torch.zeros((mapping.size(1), 4), device=mapping.device, dtype=torch.float32) + quaternions[:, 3] = 1.0 + + schema_resolvers = [SchemaResolverNewton(), SchemaResolverPhysx()] + + builder = NewtonManager.create_builder(up_axis=up_axis) + stage_info = builder.add_usd( + stage, + ignore_paths=["/World/envs"] + sources, + schema_resolvers=schema_resolvers, + ) + + # The prototype is built from env_0 in absolute world coordinates. + # add_builder xforms are deltas from env_0 so positions don't get double-counted. + env0_pos = positions[0] + protos: dict[str, ModelBuilder] = {} + for src_path in sources: + p = NewtonManager.create_builder(up_axis=up_axis) + solvers.SolverMuJoCo.register_custom_attributes(p) + p.add_usd( + stage, + root_path=src_path, + load_visual_shapes=True, + skip_mesh_approximation=True, + schema_resolvers=schema_resolvers, + ) + if simplify_meshes: + p.approximate_meshes("convex_hull", keep_visual_shapes=True) + protos[src_path] = p + + # Inject registered sites into prototypes (and global sites into main builder) + global_sites, proto_sites = NewtonManager._cl_inject_sites(builder, protos) + + # Global sites: (int, None) + global_site_map: dict[str, tuple[int, None]] = {label: (idx, None) for label, idx in global_sites.items()} + + # Local sites: per-world sublists, populated in the loop below + num_worlds = mapping.size(1) + local_site_map: dict[str, list[list[int]]] = {} + + # create a separate world for each environment (heterogeneous spawning) + # Newton assigns sequential world IDs (0, 1, 2, ...), so we need to track the mapping + for col, _ in enumerate(env_ids.tolist()): + # begin a new world context (Newton assigns world ID = col) + builder.begin_world() + # add all active sources for this world + delta_pos = (positions[col] - env0_pos).tolist() + for row in torch.nonzero(mapping[:, col], as_tuple=True)[0].tolist(): + proto = protos[sources[row]] + offset = builder.shape_count + builder.add_builder( + proto, + xform=wp.transform(delta_pos, quaternions[col].tolist()), + ) + # Compute final shape indices for sites in this proto + for label, proto_shape_indices in proto_sites.get(id(proto), {}).items(): + if label not in local_site_map: + local_site_map[label] = [[] for _ in range(num_worlds)] + for proto_shape_idx in proto_shape_indices: + local_site_map[label][col].append(offset + proto_shape_idx) + # end the world context + builder.end_world() + + site_index_map = { + **global_site_map, + **{label: (None, per_world) for label, per_world in local_site_map.items()}, + } + + return builder, stage_info, site_index_map + + +def _rename_builder_labels( + builder: ModelBuilder, sources: list[str], destinations: list[str], env_ids: torch.Tensor, mapping: torch.Tensor +) -> None: + """Rename builder labels/keys from source roots to destination roots. + + Args: + builder: Newton model builder to update in-place. + sources: Source prim root paths. + destinations: Destination prim path templates. + env_ids: Environment ids corresponding to mapping columns. + mapping: Boolean source-to-environment mapping matrix. + """ + # per-source, per-world renaming (strict prefix swap), compact style preserved + for i, src_path in enumerate(sources): + src_prefix_len = len(src_path.rstrip("/")) + swap = lambda name, new_root: new_root + name[src_prefix_len:] # noqa: E731 + world_cols = torch.nonzero(mapping[i], as_tuple=True)[0].tolist() + # Map Newton world IDs (sequential) to destination paths using env_ids + world_roots = {int(env_ids[c]): destinations[i].format(int(env_ids[c])) for c in world_cols} + + for t in ("body", "joint", "shape", "articulation"): + labels = getattr(builder, f"{t}_label", None) + if labels is None: + labels = getattr(builder, f"{t}_key") + worlds_arr = getattr(builder, f"{t}_world") + for k, w in enumerate(worlds_arr): + world_id = int(w) + if world_id in world_roots and labels[k].startswith(src_path): + labels[k] = swap(labels[k], world_roots[world_id]) + + +def newton_physics_replicate( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + device: str = "cpu", + up_axis: str = "Z", + simplify_meshes: bool = True, +): + """Replicate prims into a Newton ``ModelBuilder`` using a per-source mapping. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + destinations: Destination prim path templates. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. + positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + device: Device used by the finalized Newton model builder. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of the populated Newton model builder and stage metadata. + """ + builder, stage_info, site_index_map = _build_newton_builder_from_mapping( + stage=stage, + sources=sources, + env_ids=env_ids, + mapping=mapping, + positions=positions, + quaternions=quaternions, + up_axis=up_axis, + simplify_meshes=simplify_meshes, + ) + _rename_builder_labels(builder, sources, destinations, env_ids, mapping) + NewtonManager._cl_site_index_map = site_index_map + NewtonManager.set_builder(builder) + NewtonManager._num_envs = mapping.size(1) + return builder, stage_info + + +def newton_visualizer_prebuild( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + device: str = "cpu", + up_axis: str = "Z", + simplify_meshes: bool = True, +): + """Replicate a clone plan into a finalized Newton model/state for visualization. + + Unlike :func:`newton_physics_replicate`, this path does not mutate ``NewtonManager`` and is intended + for prebuilding visualizer-only artifacts that can be consumed by scene data providers. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + destinations: Destination prim path templates. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. + positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + device: Device used by the finalized Newton model. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of finalized Newton model and state. + """ + builder, _, _site_index_map = _build_newton_builder_from_mapping( + stage=stage, + sources=sources, + env_ids=env_ids, + mapping=mapping, + positions=positions, + quaternions=quaternions, + up_axis=up_axis, + simplify_meshes=simplify_meshes, + ) + _rename_builder_labels(builder, sources, destinations, env_ids, mapping) + model = builder.finalize(device=device) + state = model.state() + return model, state diff --git a/source/isaaclab_newton/isaaclab_newton/kernels/state_kernels.py b/source/isaaclab_newton/isaaclab_newton/kernels/state_kernels.py new file mode 100644 index 000000000000..9bb7a39e3b2c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/kernels/state_kernels.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp helper functions for body-frame state computations. + +These ``@wp.func`` helpers are used by warp-first MDP terms (observations, +rewards) that need to project root-frame quantities into body frames. +""" + +import warp as wp + + +@wp.func +def rotate_vec_to_body_frame(vec_w: wp.vec3f, pose_w: wp.transformf) -> wp.vec3f: + """Rotate a world-frame vector into the body frame defined by pose_w.""" + return wp.quat_rotate_inv(wp.transform_get_rotation(pose_w), vec_w) + + +@wp.func +def body_lin_vel_from_root(pose_w: wp.transformf, vel_w: wp.spatial_vectorf) -> wp.vec3f: + """Extract body-frame linear velocity from root pose and spatial velocity.""" + return wp.quat_rotate_inv(wp.transform_get_rotation(pose_w), wp.spatial_top(vel_w)) + + +@wp.func +def body_ang_vel_from_root(pose_w: wp.transformf, vel_w: wp.spatial_vectorf) -> wp.vec3f: + """Extract body-frame angular velocity from root pose and spatial velocity.""" + return wp.quat_rotate_inv(wp.transform_get_rotation(pose_w), wp.spatial_bottom(vel_w)) diff --git a/source/isaaclab_newton/isaaclab_newton/physics/__init__.py b/source/isaaclab_newton/isaaclab_newton/physics/__init__.py new file mode 100644 index 000000000000..7254081c805a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Implementation backends for simulation interfaces.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi new file mode 100644 index 000000000000..176973cd5400 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "NewtonFeatherstoneManager", + "FeatherstoneSolverCfg", + "HydroelasticSDFCfg", + "NewtonKaminoManager", + "KaminoSolverCfg", + "NewtonMJWarpManager", + "MJWarpSolverCfg", + "NewtonCfg", + "NewtonCollisionPipelineCfg", + "NewtonManager", + "NewtonShapeCfg", + "NewtonSolverCfg", + "NewtonXPBDManager", + "XPBDSolverCfg", +] + +from .featherstone_manager import NewtonFeatherstoneManager +from .featherstone_manager_cfg import FeatherstoneSolverCfg +from .kamino_manager import NewtonKaminoManager +from .kamino_manager_cfg import KaminoSolverCfg +from .mjwarp_manager import NewtonMJWarpManager +from .mjwarp_manager_cfg import MJWarpSolverCfg +from .newton_collision_cfg import HydroelasticSDFCfg, NewtonCollisionPipelineCfg +from .newton_manager import NewtonManager +from .newton_manager_cfg import ( + NewtonCfg, + NewtonShapeCfg, + NewtonSolverCfg, +) +from .xpbd_manager import NewtonXPBDManager +from .xpbd_manager_cfg import XPBDSolverCfg diff --git a/source/isaaclab_newton/isaaclab_newton/physics/_cubric.py b/source/isaaclab_newton/isaaclab_newton/physics/_cubric.py new file mode 100644 index 000000000000..cc549d4b82be --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/_cubric.py @@ -0,0 +1,273 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Pure-Python ctypes bindings for the cubric GPU transform-hierarchy API. + +Acquires the ``omni::cubric::IAdapter`` carb interface directly from the +Carbonite framework and wraps its function-pointer methods so that Newton +can call cubric's GPU transform propagation without C++ pybind11 changes. + +The flow mirrors PhysX's ``DirectGpuHelper::updateXForms_GPU()``: + +1. ``IAdapter::create`` → allocate a cubric adapter ID +2. ``IAdapter::bindToStage`` → bind to the current Fabric stage +3. ``IAdapter::compute`` → GPU kernel: propagate world transforms +4. ``IAdapter::release`` → free the adapter + +When cubric is unavailable (e.g. CPU-only machine, plugin not loaded), the +caller falls back to the CPU ``update_world_xforms()`` path. +""" + +from __future__ import annotations + +import ctypes +import logging + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Carb Framework struct layout (CARB_ABI function-pointer offsets, x86_64) +# --------------------------------------------------------------------------- +# Counting only CARB_ABI fields from the top of ``struct Framework``: +# 0: loadPluginsEx +# 8: unloadAllPlugins +# 16: acquireInterfaceWithClient +# 24: tryAcquireInterfaceWithClient ← we use this one +_FW_OFF_TRY_ACQUIRE = 24 + +# --------------------------------------------------------------------------- +# IAdapter struct layout (from omni/cubric/IAdapter.h) +# --------------------------------------------------------------------------- +# 0: getAttribute +# 8: create(AdapterId*) +# 16: refcount +# 24: retain +# 32: release(AdapterId) +# 40: bindToStage(AdapterId, const FabricId&) +# 48: unbind +# 56: compute(AdapterId, options, dirtyMode, outFlags*) +_IA_OFF_CREATE = 8 +_IA_OFF_RELEASE = 32 +_IA_OFF_BIND = 40 +_IA_OFF_COMPUTE = 56 + +# AdapterId sentinel +_INVALID_ADAPTER_ID = ctypes.c_uint64(~0).value + +# AdapterComputeOptions flags (from IAdapter.h) +_OPT_FORCE_UPDATE = 1 << 0 # Force update, ignoring invalidation status +_OPT_FORCE_STATE_RECONSTRUCTION = 1 << 1 # Force full rebuild of internal accel structures +_OPT_SKIP_ISOLATED = 1 << 2 # Skip prims with connectivity degree 0 +_OPT_RIGID_BODY = 1 << 3 # Use PhysicsRigidBodyAPI tag for inverse propagation + +# Newton prims get tagged with PhysicsRigidBodyAPI at init time so +# cubric's eRigidBody mode can distinguish rigid-body buckets +# (Inverse: preserve world matrix written by Newton, derive local) +# from non-rigid-body buckets (Forward: propagate to children). +# eForceUpdate is ORed in to bypass the change-listener check. +_OPT_DEFAULT = _OPT_RIGID_BODY | _OPT_FORCE_UPDATE + +# AdapterDirtyMode +_DIRTY_ALL = 0 # eAll — dirty all prims in the stage +_DIRTY_COARSE = 1 # eCoarse — dirty all prims in visited buckets + + +# --------------------------------------------------------------------------- +# ctypes struct mirrors +# --------------------------------------------------------------------------- +class _Version(ctypes.Structure): + _fields_ = [("major", ctypes.c_uint32), ("minor", ctypes.c_uint32)] + + +class _InterfaceDesc(ctypes.Structure): + """``carb::InterfaceDesc`` — {const char* name, Version version}.""" + + _fields_ = [ + ("name", ctypes.c_char_p), + ("version", _Version), + ] + + +def _read_u64(addr: int) -> int: + return ctypes.c_uint64.from_address(addr).value + + +# --------------------------------------------------------------------------- +# Public API +# --------------------------------------------------------------------------- +class CubricBindings: + """Typed wrappers around the cubric ``IAdapter`` API. + + Call :meth:`initialize` once; if it returns ``True``, the four adapter + methods are available. + """ + + def __init__(self) -> None: + self._ia_ptr: int = 0 + self._create_fn = None + self._release_fn = None + self._bind_fn = None + self._compute_fn = None + + # -- lifecycle ----------------------------------------------------------- + + def initialize(self) -> bool: + """Acquire the cubric ``IAdapter`` from the carb framework.""" + # Ensure the omni.cubric extension (native carb plugin) is loaded. + try: + import omni.kit.app + + ext_mgr = omni.kit.app.get_app().get_extension_manager() + if not ext_mgr.is_extension_enabled("omni.cubric"): + ext_mgr.set_extension_enabled_immediate("omni.cubric", True) + if not ext_mgr.is_extension_enabled("omni.cubric"): + logger.warning("Failed to enable omni.cubric extension") + return False + except Exception as exc: + logger.warning("Cannot enable omni.cubric: %s", exc) + return False + + # Get Framework* via libcarb.so acquireFramework (singleton). + try: + libcarb = ctypes.CDLL("libcarb.so") + except OSError: + logger.warning("Could not load libcarb.so") + return False + + libcarb.acquireFramework.restype = ctypes.c_void_p + libcarb.acquireFramework.argtypes = [ctypes.c_char_p, _Version] + fw_ptr = libcarb.acquireFramework(b"isaaclab.cubric", _Version(0, 0)) + if not fw_ptr: + logger.warning("acquireFramework returned null") + return False + + # Read tryAcquireInterfaceWithClient fn-ptr from Framework vtable. + try_acquire_addr = _read_u64(fw_ptr + _FW_OFF_TRY_ACQUIRE) + if try_acquire_addr == 0: + logger.warning("tryAcquireInterfaceWithClient is null in Framework") + return False + + try_acquire_fn = ctypes.CFUNCTYPE( + ctypes.c_void_p, # return: void* (IAdapter*) + ctypes.c_char_p, # clientName + _InterfaceDesc, # desc (by value) + ctypes.c_char_p, # pluginName + )(try_acquire_addr) + + desc = _InterfaceDesc( + name=b"omni::cubric::IAdapter", + version=_Version(0, 1), + ) + + # Try several acquisition strategies — the required client name + # varies across Kit configurations. + ia_ptr = try_acquire_fn(b"carb.scripting-python.plugin", desc, None) + if not ia_ptr: + ia_ptr = try_acquire_fn(None, desc, None) + if not ia_ptr: + acquire_addr = _read_u64(fw_ptr + 16) # acquireInterfaceWithClient + if acquire_addr: + acquire_fn = ctypes.CFUNCTYPE( + ctypes.c_void_p, + ctypes.c_char_p, + _InterfaceDesc, + ctypes.c_char_p, + )(acquire_addr) + ia_ptr = acquire_fn(b"isaaclab.cubric", desc, None) + if not ia_ptr: + logger.warning( + "Could not acquire omni::cubric::IAdapter — " + "cubric plugin may not be registered or interface version mismatch" + ) + return False + self._ia_ptr = ia_ptr + + # Wrap the four IAdapter function pointers we need. + create_addr = _read_u64(ia_ptr + _IA_OFF_CREATE) + release_addr = _read_u64(ia_ptr + _IA_OFF_RELEASE) + bind_addr = _read_u64(ia_ptr + _IA_OFF_BIND) + compute_addr = _read_u64(ia_ptr + _IA_OFF_COMPUTE) + + if not all([create_addr, release_addr, bind_addr, compute_addr]): + logger.warning("One or more IAdapter function pointers are null") + return False + + self._create_fn = ctypes.CFUNCTYPE( + ctypes.c_bool, + ctypes.POINTER(ctypes.c_uint64), + )(create_addr) + + self._release_fn = ctypes.CFUNCTYPE( + ctypes.c_bool, + ctypes.c_uint64, + )(release_addr) + + # FabricId is uint64, passed by const-ref -> pointer on x86_64 + self._bind_fn = ctypes.CFUNCTYPE( + ctypes.c_bool, + ctypes.c_uint64, + ctypes.POINTER(ctypes.c_uint64), + )(bind_addr) + + self._compute_fn = ctypes.CFUNCTYPE( + ctypes.c_bool, + ctypes.c_uint64, # adapterId + ctypes.c_uint32, # options (AdapterComputeOptions) + ctypes.c_int32, # dirtyMode (AdapterDirtyMode) + ctypes.c_void_p, # outAccountFlags* (nullable) + )(compute_addr) + + logger.info("cubric IAdapter bindings ready") + return True + + @property + def available(self) -> bool: + return self._ia_ptr != 0 + + # -- cubric adapter methods ---------------------------------------------- + + def create_adapter(self) -> int | None: + """Create a cubric adapter. Returns an adapter ID or ``None``.""" + if not self._create_fn: + return None + adapter_id = ctypes.c_uint64(_INVALID_ADAPTER_ID) + ok = self._create_fn(ctypes.byref(adapter_id)) + if not ok or adapter_id.value == _INVALID_ADAPTER_ID: + logger.warning("IAdapter::create failed") + return None + return adapter_id.value + + def bind_to_stage(self, adapter_id: int, fabric_id: int) -> bool: + """Bind the adapter to a Fabric stage.""" + if not self._bind_fn: + return False + fid = ctypes.c_uint64(fabric_id) + ok = self._bind_fn(adapter_id, ctypes.byref(fid)) + if not ok: + logger.warning("IAdapter::bindToStage failed (adapter=%d, fabricId=%d)", adapter_id, fabric_id) + return ok + + def compute(self, adapter_id: int) -> bool: + """Run the GPU transform-hierarchy compute pass. + + Uses ``eRigidBody | eForceUpdate`` with ``eAll`` dirty mode. + ``eRigidBody`` makes cubric apply Inverse propagation on buckets + tagged with ``PhysicsRigidBodyAPI`` (keeps Newton's world transforms, + derives local) and Forward on everything else (propagates to children). + ``eForceUpdate`` bypasses the change-listener dirty check. + """ + if not self._compute_fn: + return False + flags = ctypes.c_uint32(0) + ok = self._compute_fn(adapter_id, _OPT_DEFAULT, _DIRTY_ALL, ctypes.byref(flags)) + if not ok: + logger.warning("IAdapter::compute returned false (flags=0x%x)", flags.value) + return ok + + def release_adapter(self, adapter_id: int) -> None: + """Release an adapter.""" + if not adapter_id or not self._release_fn: + return + self._release_fn(adapter_id) diff --git a/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager.py new file mode 100644 index 000000000000..a1e918990c19 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Featherstone Newton manager.""" + +from __future__ import annotations + +import inspect + +from newton import Model +from newton.solvers import SolverFeatherstone + +from .featherstone_manager_cfg import FeatherstoneSolverCfg +from .newton_manager import NewtonManager + + +class NewtonFeatherstoneManager(NewtonManager): + """:class:`NewtonManager` specialization for the Featherstone solver. + + Always uses Newton's :class:`CollisionPipeline` for contact handling. + """ + + @classmethod + def _build_solver(cls, model: Model, solver_cfg: FeatherstoneSolverCfg) -> None: + """Construct :class:`SolverFeatherstone` and populate the base-class slots. + + Featherstone always uses Newton's :class:`CollisionPipeline` and steps + with separate input/output states, so the flags are fixed. + """ + valid = set(inspect.signature(SolverFeatherstone.__init__).parameters) - {"self", "model"} + kwargs = {k: v for k, v in solver_cfg.to_dict().items() if k in valid} + NewtonManager._solver = SolverFeatherstone(model, **kwargs) + NewtonManager._use_single_state = False + NewtonManager._needs_collision_pipeline = True diff --git a/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager_cfg.py new file mode 100644 index 000000000000..fc4278b49788 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/featherstone_manager_cfg.py @@ -0,0 +1,55 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils import configclass + +from .newton_manager_cfg import NewtonSolverCfg + +if TYPE_CHECKING: + from isaaclab_newton.physics import NewtonManager + + +@configclass +class FeatherstoneSolverCfg(NewtonSolverCfg): + """A semi-implicit integrator using symplectic Euler. + + It operates on reduced (also called generalized) coordinates to simulate articulated rigid body dynamics + based on Featherstone's composite rigid body algorithm (CRBA). + + See: Featherstone, Roy. Rigid Body Dynamics Algorithms. Springer US, 2014. + + Semi-implicit time integration is a variational integrator that + preserves energy, however it is not unconditionally stable, and requires a time-step + small enough to support the required stiffness and damping forces. + + See: https://en.wikipedia.org/wiki/Semi-implicit_Euler_method + """ + + class_type: type[NewtonManager] | str = "{DIR}.featherstone_manager:NewtonFeatherstoneManager" + """Manager class for the Featherstone solver.""" + + solver_type: str = "featherstone" + """Solver type. Can be "featherstone".""" + + angular_damping: float = 0.05 + """Angular damping parameter for rigid contact simulation.""" + + update_mass_matrix_interval: int = 1 + """Frequency (in simulation steps) at which to update the mass matrix.""" + + friction_smoothing: float = 1.0 + """Friction smoothing parameter.""" + + use_tile_gemm: bool = False + """Whether to use tile-based GEMM for the mass matrix.""" + + fuse_cholesky: bool = True + """Whether to fuse the Cholesky decomposition.""" diff --git a/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager.py new file mode 100644 index 000000000000..39d0fafbc1bb --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager.py @@ -0,0 +1,158 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kamino Newton manager.""" + +from __future__ import annotations + +import logging + +import warp as wp +from newton import Model, eval_fk +from newton.solvers import SolverKamino + +from isaaclab.physics import PhysicsManager +from isaaclab.utils.timer import Timer + +from .kamino_manager_cfg import KaminoSolverCfg +from .newton_manager import NewtonManager + +logger = logging.getLogger(__name__) + + +class NewtonKaminoManager(NewtonManager): + """:class:`NewtonManager` specialization for the Kamino solver. + + Uses Newton's :class:`CollisionPipeline` unless + :attr:`KaminoSolverCfg.use_collision_detector` is ``True``, in which case + Kamino's internal collision detector handles contact generation. + """ + + @classmethod + def _forward_kamino(cls, world_mask: wp.array | None = None) -> None: + """Kamino-specific forward kinematics via ``solver.reset()``. + + Kamino's ``joint_q`` / ``joint_u`` include coordinates for **all** joints + (including free joints), so we pass Newton's full state arrays directly. + + Args: + world_mask: Per-world mask indicating which worlds to reset. + Shape ``(num_worlds,)``, dtype ``wp.int32``. If None, resets all worlds. + """ + cls._solver.reset( + state_out=cls._state_0, + joint_q=cls._state_0.joint_q, + joint_u=cls._state_0.joint_qd, + world_mask=world_mask, + ) + + @classmethod + def step(cls) -> None: + """Step the physics simulation.""" + sim = PhysicsManager._sim + if sim is None or not sim.is_playing(): + return + + # Kamino: run solver.reset() with the accumulated world mask to reinitialise + # internal state (warm-start containers, constraint multipliers) for reset worlds. + # Note: runs every step. solver.reset() with an all-False world_mask is a no-op + # (kernels check mask per-world and skip). The cost of a no-op launch is negligible + # compared to the complexity of maintaining a separate boolean guard. + cls._forward_kamino(world_mask=cls._world_reset_mask) + + # Notify solver of model changes + if cls._model_changes: + with wp.ScopedDevice(PhysicsManager._device): + for change in cls._model_changes: + cls._solver.notify_model_changed(change) + NewtonManager._model_changes = set() + + # Lazy CUDA graph capture: deferred from initialize_solver() when RTX was active. + # By the time step() is first called, RTX has fully initialized (all cudaImportExternalMemory + # calls are done) and is idle between render frames — giving us a clean capture window. + cfg = PhysicsManager._cfg + device = PhysicsManager._device + if cls._graph_capture_pending and cfg is not None and cfg.use_cuda_graph and "cuda" in device: # type: ignore[union-attr] + NewtonManager._graph_capture_pending = False + NewtonManager._graph = cls._capture_relaxed_graph(device) + if cls._graph is not None: + # Kamino: StateKamino.from_newton() lazily allocates body_f_total, + # joint_q_prev, and joint_lambdas via wp.clone/wp.zeros during the + # first step() inside graph capture. Replay once to pin those + # memory-pool addresses before any eager solver.reset() call. + wp.capture_launch(cls._graph) + logger.info("Newton CUDA graph captured (deferred relaxed mode, RTX-compatible)") + else: + logger.warning("Newton deferred CUDA graph capture failed; using eager execution") + + # Ensure body_q is up-to-date before collision detection. + # After env resets, joint_q is written but body_q (used by + # broadphase/narrowphase) is stale until FK runs. + # Only runs FK for dirtied articulations via the accumulated mask. + if cls._needs_collision_pipeline: + eval_fk(cls._model, cls._state_0.joint_q, cls._state_0.joint_qd, cls._state_0, cls._fk_reset_mask) + + # Zero both masks after consumption + NewtonManager._world_reset_mask.zero_() + NewtonManager._fk_reset_mask.zero_() + + # Step simulation (graphed or not; _graph is None when capture is disabled or failed) + if cfg is not None and cfg.use_cuda_graph and cls._graph is not None and "cuda" in device: # type: ignore[union-attr] + wp.capture_launch(cls._graph) + if cls._usdrt_stage is not None: + cls._mark_transforms_dirty() + else: + with wp.ScopedDevice(device): + cls._simulate() + + # Launch solver-specific debug logging after stepping. + cls._log_solver_debug() + + PhysicsManager._sim_time += cls._solver_dt * cls._num_substeps + + @classmethod + def _build_solver(cls, model: Model, solver_cfg: KaminoSolverCfg) -> None: + """Construct :class:`SolverKamino` and populate the base-class slots. + + Sets :attr:`NewtonManager._needs_collision_pipeline` to ``True`` only + when ``use_collision_detector=False`` (Kamino's internal detector + handles contacts otherwise). + """ + NewtonManager._solver = SolverKamino(model, solver_cfg.to_solver_config()) + NewtonManager._use_single_state = False + NewtonManager._needs_collision_pipeline = not solver_cfg.use_collision_detector + + @classmethod + def _capture_or_defer_cuda_graph(cls) -> None: + """Capture the physics CUDA graph, or defer if RTX is initializing.""" + cfg = PhysicsManager._cfg + device = PhysicsManager._device + use_cuda_graph = cfg is not None and cfg.use_cuda_graph and "cuda" in device # type: ignore[union-attr] + + with Timer(name="newton_cuda_graph", msg="CUDA graph took:"): + if not use_cuda_graph: + NewtonManager._graph = None + return + if cls._usdrt_stage is None: + # No RTX active — use standard Warp capture (cudaStreamCaptureModeGlobal). + with wp.ScopedCapture() as capture: + cls._simulate() + NewtonManager._graph = capture.graph + logger.info("Newton CUDA graph captured (standard Warp mode)") + + # TODO: streamline this with base NewtonManager + # Kamino: StateKamino.from_newton() lazily allocates body_f_total, + # joint_q_prev, and joint_lambdas via wp.clone/wp.zeros during the + # first step() inside graph capture. Replay once to pin those + # memory-pool addresses before any eager solver.reset() call. + wp.capture_launch(cls._graph) + else: + # RTX is active during initialization — cudaImportExternalMemory and other + # non-capturable RTX ops run on background CUDA streams right now. + # Defer capture to the first step() call, after RTX is fully initialized + # and idle between render frames (clean capture window). + NewtonManager._graph = None + NewtonManager._graph_capture_pending = True + logger.info("Newton CUDA graph capture deferred until first step() (RTX active)") diff --git a/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager_cfg.py new file mode 100644 index 000000000000..71c871f9d54d --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/kamino_manager_cfg.py @@ -0,0 +1,214 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils import configclass + +from .newton_manager_cfg import NewtonSolverCfg + +if TYPE_CHECKING: + from newton.solvers import SolverKamino + + from isaaclab_newton.physics import NewtonManager + + +@configclass +class KaminoSolverCfg(NewtonSolverCfg): + """Configuration for Kamino solver-related parameters. + + Kamino is a Proximal Alternating Direction Method of Multipliers (P-ADMM) based solver for + constrained multi-body dynamics. It operates in maximal coordinates and supports rigid bodies + and articulations with hard frictional contacts. + + .. note:: + + This solver is currently in **Beta**. Its API and behavior may change in future releases. + + For more information, see the `Newton Kamino documentation`_. + + .. _Newton Kamino documentation: https://newton.readthedocs.io/en/latest/ + """ + + class_type: type[NewtonManager] | str = "{DIR}.kamino_manager:NewtonKaminoManager" + """Manager class for the Kamino solver.""" + + solver_type: str = "kamino" + """Solver type. Can be "kamino".""" + + integrator: str = "euler" + """Integrator type. Can be "euler" or "moreau".""" + + use_collision_detector: bool = False + """Whether to use Kamino's internal collision detector instead of Newton's pipeline.""" + + use_fk_solver: bool = True + """Whether to enable the forward kinematics solver for state resets. + + Required for proper environment resets. The FK solver computes consistent body poses + from joint angles after state writes, which is essential for maximal-coordinate solvers. + """ + + sparse_jacobian: bool = False + """Whether to use sparse Jacobian computation.""" + + sparse_dynamics: bool = False + """Whether to use sparse dynamics computation.""" + + rotation_correction: str = "twopi" + """Rotation correction mode. Can be "twopi", "continuous", or "none".""" + + angular_velocity_damping: float = 0.0 + """Angular velocity damping factor. Valid range is [0.0, 1.0].""" + + constraints_alpha: float = 0.01 + """Baumgarte stabilization parameter for bilateral joint constraints. Valid range is [0, 1].""" + + constraints_beta: float = 0.01 + """Baumgarte stabilization parameter for unilateral joint-limit constraints. Valid range is [0, 1].""" + + constraints_gamma: float = 0.01 + """Baumgarte stabilization parameter for unilateral contact constraints. Valid range is [0, 1].""" + + constraints_delta: float = 1.0e-6 + """Contact penetration margin [m].""" + + padmm_max_iterations: int = 200 + """Maximum number of P-ADMM solver iterations.""" + + padmm_primal_tolerance: float = 1e-6 + """Primal residual convergence tolerance for P-ADMM.""" + + padmm_dual_tolerance: float = 1e-6 + """Dual residual convergence tolerance for P-ADMM.""" + + padmm_compl_tolerance: float = 1e-6 + """Complementarity residual convergence tolerance for P-ADMM.""" + + padmm_rho_0: float = 1.0 + """Initial penalty parameter for P-ADMM.""" + + padmm_use_acceleration: bool = True + """Whether to use acceleration in the P-ADMM solver.""" + + padmm_warmstart_mode: str = "containers" + """Warmstart mode for P-ADMM. Can be "none", "internal", or "containers".""" + + padmm_eta: float = 1e-5 + """Proximal regularization parameter for P-ADMM. Must be greater than zero.""" + + padmm_contact_warmstart_method: str = "key_and_position" + """Contact warm-start method for P-ADMM. + + Can be "key_and_position", "geom_pair_net_force", "geom_pair_net_wrench", + "key_and_position_with_net_force_backup", or "key_and_position_with_net_wrench_backup". + """ + + padmm_use_graph_conditionals: bool = True + """Whether to use CUDA graph conditional nodes in the P-ADMM iterative solver. + + When ``False``, replaces ``wp.capture_while`` with unrolled for-loops over max iterations. + """ + + collect_solver_info: bool = False + """Whether to collect solver convergence and performance info at each step. + + .. warning:: + + Enabling this significantly increases solver runtime and should only be used for debugging. + """ + + compute_solution_metrics: bool = False + """Whether to compute solution metrics at each step. + + .. warning:: + + Enabling this significantly increases solver runtime and should only be used for debugging. + """ + + collision_detector_pipeline: str | None = None + """Collision detection pipeline type. Can be "primitive" or "unified". + + Only used when :attr:`use_collision_detector` is ``True``. If ``None``, Newton's default + (``"unified"``) is used. + """ + + collision_detector_max_contacts_per_pair: int | None = None + """Maximum number of contacts to generate per candidate geometry pair. + + Only used when :attr:`use_collision_detector` is ``True``. If ``None``, Newton's default is used. + """ + + dynamics_preconditioning: bool = True + """Whether to use preconditioning in the constrained dynamics solver. + + Preconditioning improves convergence of the PADMM solver by rescaling the + problem. Disabling may be useful for debugging or profiling solver behavior. + """ + + def to_solver_config(self) -> SolverKamino.Config: + """Build a :class:`SolverKamino.Config` from this configuration. + + Converts the flat field layout of :class:`KaminoSolverCfg` into the + nested dataclass hierarchy expected by :class:`SolverKamino`. + + Returns: + A ``SolverKamino.Config`` instance ready for solver construction. + """ + from newton._src.solvers.kamino.config import ( + CollisionDetectorConfig, + ConstrainedDynamicsConfig, + ConstraintStabilizationConfig, + PADMMSolverConfig, + ) + from newton.solvers import SolverKamino + + # Build collision detector config if using Kamino's internal detector + collision_detector = None + if self.use_collision_detector: + cd_kwargs: dict = {} + if self.collision_detector_pipeline is not None: + cd_kwargs["pipeline"] = self.collision_detector_pipeline + if self.collision_detector_max_contacts_per_pair is not None: + cd_kwargs["max_contacts_per_pair"] = self.collision_detector_max_contacts_per_pair + collision_detector = CollisionDetectorConfig(**cd_kwargs) + + return SolverKamino.Config( + integrator=self.integrator, + use_collision_detector=self.use_collision_detector, + use_fk_solver=self.use_fk_solver, + sparse_jacobian=self.sparse_jacobian, + sparse_dynamics=self.sparse_dynamics, + rotation_correction=self.rotation_correction, + angular_velocity_damping=self.angular_velocity_damping, + collect_solver_info=self.collect_solver_info, + compute_solution_metrics=self.compute_solution_metrics, + collision_detector=collision_detector, + constraints=ConstraintStabilizationConfig( + alpha=self.constraints_alpha, + beta=self.constraints_beta, + gamma=self.constraints_gamma, + delta=self.constraints_delta, + ), + dynamics=ConstrainedDynamicsConfig( + preconditioning=self.dynamics_preconditioning, + ), + padmm=PADMMSolverConfig( + max_iterations=self.padmm_max_iterations, + primal_tolerance=self.padmm_primal_tolerance, + dual_tolerance=self.padmm_dual_tolerance, + compl_tolerance=self.padmm_compl_tolerance, + rho_0=self.padmm_rho_0, + eta=self.padmm_eta, + use_acceleration=self.padmm_use_acceleration, + use_graph_conditionals=self.padmm_use_graph_conditionals, + warmstart_mode=self.padmm_warmstart_mode, + contact_warmstart_method=self.padmm_contact_warmstart_method, + ), + ) diff --git a/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager.py new file mode 100644 index 000000000000..01cf6f1996e2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""MuJoCo Warp Newton manager.""" + +from __future__ import annotations + +import inspect +import logging + +import numpy as np +from newton import Contacts, Model +from newton.solvers import SolverMuJoCo + +from isaaclab.physics import PhysicsManager + +from .mjwarp_manager_cfg import MJWarpSolverCfg +from .newton_manager import NewtonManager + +logger = logging.getLogger(__name__) + + +class NewtonMJWarpManager(NewtonManager): + """:class:`NewtonManager` specialization for the MuJoCo Warp solver. + + Owns construction of :class:`SolverMuJoCo`, contact-buffer allocation in + both internal-MuJoCo and Newton-pipeline contact modes, and the debug + convergence logging emitted from :meth:`_log_solver_debug` when + :attr:`NewtonCfg.debug_mode` is enabled. + """ + + @classmethod + def _build_solver(cls, model: Model, solver_cfg: MJWarpSolverCfg) -> None: + """Construct :class:`SolverMuJoCo` and populate the base-class slots. + + Filters cfg fields against the solver's ``__init__`` signature so + non-constructor metadata (``solver_type``, ``class_type``) is not + forwarded. Sets :attr:`NewtonManager._needs_collision_pipeline` to + ``True`` only when ``use_mujoco_contacts=False``. + """ + valid = set(inspect.signature(SolverMuJoCo.__init__).parameters) - {"self", "model"} + kwargs = {k: v for k, v in solver_cfg.to_dict().items() if k in valid} + NewtonManager._solver = SolverMuJoCo(model, **kwargs) + NewtonManager._use_single_state = True + NewtonManager._needs_collision_pipeline = not solver_cfg.use_mujoco_contacts + + cfg = PhysicsManager._cfg + # Cross-config validation that needs both halves. + if solver_cfg.use_mujoco_contacts and cfg.collision_cfg is not None: + raise ValueError( + "NewtonCfg: collision_cfg cannot be set when " + "solver_cfg.use_mujoco_contacts=True. Either set " + "use_mujoco_contacts=False or remove collision_cfg." + ) + + @classmethod + def _initialize_contacts(cls) -> None: + """Allocate contact buffers. + + Delegates to the base implementation when Newton's + :class:`CollisionPipeline` is active. When ``use_mujoco_contacts=True`` + the solver runs MuJoCo's internal collision detection, so this method + instead pre-allocates a :class:`Contacts` buffer sized to the solver's + maximum contact count; ``solver.update_contacts`` later populates it + from MuJoCo data for contact-sensor reporting. + """ + if cls._needs_collision_pipeline: + super()._initialize_contacts() + return + if cls._solver is not None: + NewtonManager._contacts = Contacts( + rigid_contact_max=cls._solver.get_max_contact_count(), + soft_contact_max=0, + device=PhysicsManager._device, + requested_attributes=cls._model.get_requested_contact_attributes(), + ) + + @classmethod + def _log_solver_debug(cls) -> None: + """Optionally log MuJoCo solver convergence at the end of step.""" + cfg = PhysicsManager._cfg + if cfg is not None and cfg.debug_mode: # type: ignore[union-attr] + data = cls._get_solver_convergence_steps() + logger.info(f"Solver convergence data: {data}") + if data["max"] == cls._solver.mjw_model.opt.iterations: + logger.warning(f"Solver didn't converge! max_iter={data['max']}") + + @classmethod + def _get_solver_convergence_steps(cls) -> dict[str, float | int]: + """Return MuJoCo Warp solver convergence statistics. + + Reads ``mjw_data.solver_niter`` (only available on + :class:`SolverMuJoCo`) and summarizes per-environment iteration counts. + """ + niter = cls._solver.mjw_data.solver_niter.numpy() + return { + "max": np.max(niter), + "mean": np.mean(niter), + "min": np.min(niter), + "std": np.std(niter), + } diff --git a/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager_cfg.py new file mode 100644 index 000000000000..673867902c2c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/mjwarp_manager_cfg.py @@ -0,0 +1,115 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils import configclass + +from .newton_manager_cfg import NewtonSolverCfg + +if TYPE_CHECKING: + from isaaclab_newton.physics import NewtonManager + + +@configclass +class MJWarpSolverCfg(NewtonSolverCfg): + """Configuration for MuJoCo Warp solver-related parameters. + + These parameters are used to configure the MuJoCo Warp solver. For more information, see the + `MuJoCo Warp documentation`_. + + .. _MuJoCo Warp documentation: https://github.com/google-deepmind/mujoco_warp + """ + + class_type: type[NewtonManager] | str = "{DIR}.mjwarp_manager:NewtonMJWarpManager" + """Manager class for the MuJoCo Warp solver.""" + + solver_type: str = "mujoco_warp" + """Solver type. Can be "mujoco_warp".""" + + njmax: int = 300 + """Number of constraints per environment (world).""" + + nconmax: int | None = None + """Number of contact points per environment (world).""" + + iterations: int = 100 + """Number of solver iterations.""" + + ls_iterations: int = 50 + """Number of line search iterations for the solver.""" + + solver: str = "newton" + """Solver type. Can be "cg" or "newton", or their corresponding MuJoCo integer constants.""" + + integrator: str = "euler" + """Integrator type. Can be "euler", "rk4", or "implicitfast", or their corresponding MuJoCo integer constants.""" + + use_mujoco_cpu: bool = False + """Whether to use the pure MuJoCo backend instead of `mujoco_warp`.""" + + disable_contacts: bool = False + """Whether to disable contact computation in MuJoCo.""" + + default_actuator_gear: float | None = None + """Default gear ratio for all actuators.""" + + actuator_gears: dict[str, float] | None = None + """Dictionary mapping joint names to specific gear ratios, overriding the `default_actuator_gear`.""" + + update_data_interval: int = 1 + """Frequency (in simulation steps) at which to update the MuJoCo Data object from the Newton state. + + If 0, Data is never updated after initialization. + """ + + save_to_mjcf: str | None = None + """Optional path to save the generated MJCF model file. + + If None, the MJCF model is not saved. + """ + + impratio: float = 1.0 + """Frictional-to-normal constraint impedance ratio.""" + + cone: str = "pyramidal" + """The type of contact friction cone. Can be "pyramidal" or "elliptic".""" + + ccd_iterations: int = 35 + """Maximum iterations for convex collision detection (GJK/EPA). + + Increase this if you see warnings about ``opt.ccd_iterations`` needing to be increased, + which typically occurs with complex collision geometries (e.g. multi-finger hands). + """ + + ls_parallel: bool = False + """Whether to use parallel line search.""" + + use_mujoco_contacts: bool = True + """Whether to use MuJoCo's internal contact solver. + + If ``True`` (default), MuJoCo handles collision detection and contact resolution internally. + If ``False``, Newton's :class:`CollisionPipeline` is used instead. A default pipeline + (``broad_phase="explicit"``) is created automatically when :attr:`NewtonCfg.collision_cfg` + is ``None``. Set :attr:`NewtonCfg.collision_cfg` to a :class:`NewtonCollisionPipelineCfg` + to customize pipeline parameters (broad phase, contact limits, hydroelastic, etc.). + + .. note:: + Setting ``collision_cfg`` while ``use_mujoco_contacts=True`` raises + :class:`ValueError` because the two collision modes are mutually exclusive. + """ + + tolerance: float = 1e-6 + """Solver convergence tolerance for the constraint residual. + + The solver iterates until the residual drops below this threshold or + ``iterations`` is reached. Lower values give more precise constraint + satisfaction at the cost of more iterations. MuJoCo default is ``1e-8``; + Newton default is ``1e-6``. + """ diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py new file mode 100644 index 000000000000..c8b0db0b3f4a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py @@ -0,0 +1,186 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton collision pipeline.""" + +from __future__ import annotations + +from typing import Any, Literal + +from isaaclab.utils import configclass + + +@configclass +class HydroelasticSDFCfg: + """Configuration for SDF-based hydroelastic collision handling. + + Hydroelastic contacts generate distributed contact areas instead of point contacts, + providing more realistic force distribution for manipulation and compliant surfaces. + + For more details, see the `Newton Collisions Guide`_. + + .. _Newton Collisions Guide: https://newton-physics.github.io/newton/latest/concepts/collisions.html#hydroelastic-contacts + """ + + reduce_contacts: bool = True + """Whether to reduce contacts to a smaller representative set per shape pair. + + When False, all generated contacts are passed through without reduction. + + Defaults to ``True`` (same as Newton's default). + """ + + buffer_fraction: float = 1.0 + """Fraction of worst-case hydroelastic buffer allocations. Range: (0, 1]. + + Lower values reduce memory usage but may cause overflows in dense scenes. + Overflows are bounds-safe and emit warnings; increase this value when warnings appear. + + Defaults to ``1.0`` (same as Newton's default). + """ + + normal_matching: bool = True + """Whether to rotate reduced contact normals to align with aggregate force direction. + + Only active when ``reduce_contacts`` is True. + + Defaults to ``True`` (same as Newton's default). + """ + + anchor_contact: bool = False + """Whether to add an anchor contact at the center of pressure for each normal bin. + + The anchor contact helps preserve moment balance. Only active when ``reduce_contacts`` is True. + + Defaults to ``False`` (same as Newton's default). + """ + + margin_contact_area: float = 0.01 + """Contact area [m^2] used for non-penetrating contacts at the margin. + + Defaults to ``0.01`` (same as Newton's default). + """ + + output_contact_surface: bool = False + """Whether to output hydroelastic contact surface vertices for visualization. + + Defaults to ``False`` (same as Newton's default). + """ + + +@configclass +class NewtonCollisionPipelineCfg: + """Configuration for Newton collision pipeline. + + Full-featured collision pipeline with GJK/MPR narrow phase and pluggable broad phase. + When this config is set on :attr:`NewtonCfg.collision_cfg`: + + - **MJWarpSolverCfg**: Newton's collision pipeline replaces MuJoCo's internal contact solver. + - **Other solvers** (XPBD, Featherstone, etc.): Configures the collision pipeline parameters + (these solvers always use Newton's collision pipeline). + + Key features: + + - GJK/MPR algorithms for convex-convex collision detection + - Multiple broad phase options: NXN (all-pairs), SAP (sweep-and-prune), EXPLICIT (precomputed pairs) + - Mesh-mesh collision via SDF with contact reduction + - Optional hydroelastic contact model for compliant surfaces + + For more details, see the `Newton Collisions Guide`_ and `CollisionPipeline API`_. + + .. _Newton Collisions Guide: https://newton-physics.github.io/newton/latest/concepts/collisions.html + .. _CollisionPipeline API: https://newton-physics.github.io/newton/api/_generated/newton.CollisionPipeline.html + """ + + broad_phase: Literal["explicit", "nxn", "sap"] = "explicit" + """Broad phase algorithm for collision detection. + + Options: + + - ``"explicit"``: Use precomputed shape pairs from ``model.shape_contact_pairs``. + - ``"nxn"``: All-pairs brute force. Simple but O(n^2) complexity. + - ``"sap"``: Sweep-and-prune. Good for scenes with many dynamic objects. + + Defaults to ``"explicit"`` (same as Newton's default when ``broad_phase=None``). + """ + + reduce_contacts: bool = True + """Whether to reduce contacts for mesh-mesh collisions. + + When True, uses shared memory contact reduction to select representative contacts. + Improves performance and stability for meshes with many vertices. + + Defaults to ``True`` (same as Newton's default). + """ + + rigid_contact_max: int | None = None + """Maximum number of rigid contacts to allocate. + + Resolution order: + + 1. If provided, use this value. + 2. Else if ``model.rigid_contact_max > 0``, use the model value. + 3. Else estimate automatically from model shape and pair metadata. + + Defaults to ``None`` (auto-estimate, same as Newton's default). + """ + + max_triangle_pairs: int = 1_000_000 + """Maximum number of triangle pairs allocated by narrow phase for mesh and heightfield collisions. + + Increase this when scenes with large/complex meshes or heightfields report + triangle-pair overflow warnings. + + Defaults to ``1_000_000`` (same as Newton's default). + """ + + soft_contact_max: int | None = None + """Maximum number of soft contacts to allocate. + + If None, computed as ``shape_count * particle_count``. + + Defaults to ``None`` (auto-compute, same as Newton's default). + """ + + soft_contact_margin: float = 0.01 + """Margin [m] for soft contact generation. + + Defaults to ``0.01`` (same as Newton's default). + """ + + requires_grad: bool | None = None + """Whether to enable gradient computation for collision. + + If ``None``, uses ``model.requires_grad``. + + Defaults to ``None`` (same as Newton's default). + """ + + sdf_hydroelastic_config: HydroelasticSDFCfg | None = None + """Configuration for SDF-based hydroelastic collision handling. + + If ``None``, hydroelastic contacts are disabled. + If set, enables hydroelastic contacts with the specified parameters. + + Defaults to ``None`` (hydroelastic disabled, same as Newton's default). + """ + + def to_pipeline_args(self) -> dict[str, Any]: + """Build keyword arguments for :class:`newton.CollisionPipeline`. + + Converts this configuration into the dict expected by + ``CollisionPipeline.__init__``, handling nested config conversion + (e.g. :class:`HydroelasticSDFCfg` → ``HydroelasticSDF.Config``). + + Returns: + Keyword arguments suitable for ``CollisionPipeline(model, **args)``. + """ + from newton.geometry import HydroelasticSDF + + cfg_dict = self.to_dict() + hydro_cfg = cfg_dict.pop("sdf_hydroelastic_config", None) + if hydro_cfg is not None: + cfg_dict["sdf_hydroelastic_config"] = HydroelasticSDF.Config(**hydro_cfg) + return cfg_dict diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py new file mode 100644 index 000000000000..2bb2f1f37397 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py @@ -0,0 +1,1413 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton physics manager for Isaac Lab.""" + +from __future__ import annotations + +import contextlib +import ctypes +import logging +from abc import abstractmethod +from collections.abc import Callable +from typing import TYPE_CHECKING + +import warp as wp + +# Load CUDA runtime for relaxed-mode graph capture (RTX-compatible). +# cudaStreamCaptureModeRelaxed (2) allows the RTX compositor's background +# CUDA stream to keep running during capture without invalidating it. +try: + _cudart = ctypes.CDLL("libcudart.so.12") +except OSError: + try: + _cudart = ctypes.CDLL("libcudart.so") + except OSError: + _cudart = None +from newton import Axis, CollisionPipeline, Contacts, Control, Model, ModelBuilder, State, eval_fk +from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx +from newton.sensors import SensorContact as NewtonContactSensor +from newton.sensors import SensorFrameTransform +from newton.sensors import SensorIMU as NewtonSensorIMU +from newton.solvers import SolverBase, SolverNotifyFlags + +from isaaclab.physics import CallbackHandle, PhysicsEvent, PhysicsManager +from isaaclab.sim.utils.newton_model_utils import replace_newton_shape_colors +from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.utils import checked_apply +from isaaclab.utils.string import resolve_matching_names +from isaaclab.utils.timer import Timer + +from .newton_manager_cfg import NewtonCfg, NewtonShapeCfg, NewtonSolverCfg + +if TYPE_CHECKING: + from isaaclab.sim.simulation_context import SimulationContext + + from .newton_collision_cfg import NewtonCollisionPipelineCfg + +logger = logging.getLogger(__name__) + +# Tagged union for entries in _cl_site_index_map. +# _GlobalSite: (global_shape_idx, None) — body_pattern was None +# _LocalSite: (None, [[env0_idx, ...], ...]) — per-world site indices + + +@wp.kernel(enable_backward=False) +def _set_fabric_transforms( + fabric_transforms: wp.fabricarray(dtype=wp.mat44d), + newton_indices: wp.fabricarray(dtype=wp.uint32), + newton_body_q: wp.array(ndim=1, dtype=wp.transformf), +): + """Write Newton body transforms to Fabric world matrices. + + For each Fabric prim at thread ``i``, reads the Newton body transform at + ``newton_body_q[newton_indices[i]]`` and stores it as a column-major + ``mat44d`` in ``fabric_transforms[i]``. + """ + i = int(wp.tid()) + idx = int(newton_indices[i]) + transform = newton_body_q[idx] + fabric_transforms[i] = wp.transpose(wp.mat44d(wp.math.transform_to_matrix(transform))) + + +@wp.kernel(enable_backward=False) +def _or_reset_masks_from_mask( + env_mask: wp.array(dtype=wp.bool), + articulation_ids: wp.array2d(dtype=int), + world_mask: wp.array(dtype=wp.int32), + fk_mask: wp.array(dtype=wp.bool), +): + """OR env_mask into world_mask and set corresponding articulation bits in fk_mask.""" + world, arti = wp.tid() + if env_mask[world]: + world_mask[world] = wp.int32(1) + fk_mask[articulation_ids[world, arti]] = True + + +@wp.kernel(enable_backward=False) +def _scatter_reset_masks_from_ids( + env_ids: wp.array(dtype=int), + articulation_ids: wp.array2d(dtype=int), + world_mask: wp.array(dtype=wp.int32), + fk_mask: wp.array(dtype=wp.bool), +): + """Scatter-set world_mask and fk_mask from sparse env_ids.""" + i, arti = wp.tid() + world = env_ids[i] + world_mask[world] = wp.int32(1) + fk_mask[articulation_ids[world, arti]] = True + + +class NewtonManager(PhysicsManager): + """Abstract Newton physics manager for Isaac Lab. + + Class-level (singleton-like) manager that owns simulation lifecycle, model + state, contacts/collision pipeline, sensors, replication, and CUDA-graph + orchestration. + Concrete subclasses (one per solver) implement :meth:`_build_solver` and + may extend :meth:`_initialize_contacts`, :meth:`_step_solver`, + :meth:`_solver_specific_clear`, and :meth:`_log_solver_debug`. + + Subclasses are selected via :attr:`NewtonSolverCfg.class_type`, which + :meth:`NewtonCfg.__post_init__` propagates onto :attr:`NewtonCfg.class_type` + so that ``SimulationContext`` resolves the matching subclass automatically. + + Lifecycle: ``initialize() -> reset() -> step()`` (repeated) ``-> close()``. + + .. note:: + Shared state lives on :class:`NewtonManager` (the base) by design — the + framework imports ``NewtonManager`` directly and reads attributes such + as ``_model`` / ``_state_0`` / ``_builder`` from many places. Lifecycle + methods therefore assign through the explicit base class + (``NewtonManager._foo = ...``) rather than through ``cls`` so that the + canonical state remains discoverable from external readers regardless of + which subclass is active. + """ + + _solver_dt: float = 1.0 / 200.0 + _num_substeps: int = 1 + _num_envs: int | None = None + + # Newton model and state + _builder: ModelBuilder = None + _model: Model = None + _solver: SolverBase | None = None + _use_single_state: bool | None = None + """Use only one state for both input and output for solver stepping. Requires solver support.""" + _state_0: State = None + _state_1: State = None + _control: Control = None + + # Physics settings + _gravity_vector: tuple[float, float, float] = (0.0, 0.0, -9.81) + _up_axis: str = "Z" + + # Collision and contacts + _contacts: Contacts | None = None + _needs_collision_pipeline: bool = False + _collision_pipeline = None + _collision_cfg: NewtonCollisionPipelineCfg | None = None + _newton_contact_sensors: dict = {} # Maps sensor_key to NewtonContactSensor + _newton_frame_transform_sensors: list = [] # List of SensorFrameTransform + _newton_imu_sensors: list = [] # List of NewtonSensorIMU + _pending_extended_state_attributes: set[str] = set() + _pending_extended_contact_attributes: set[str] = set() + _report_contacts: bool = False + # Per-world reset masks (allocated in start_simulation, consumed in step) + _world_reset_mask: wp.array | None = None # (num_envs,) wp.int32 — for SolverKamino.reset(world_mask=...) + _fk_reset_mask: wp.array | None = None # (articulation_count,) wp.bool — for eval_fk(mask=...) + + # CUDA graphing + _graph = None + _graph_capture_pending: bool = False + + # USD/Fabric sync + _newton_stage_path = None + _usdrt_stage = None + _newton_index_attr = "newton:index" + _clone_physics_only = False + _transforms_dirty: bool = False + + # cubric GPU transform hierarchy (replaces CPU update_world_xforms) + _cubric = None + _cubric_adapter: int | None = None + _cubric_bound_fabric_id: int | None = None + + # Model changes (callbacks use unified system from PhysicsManager) + _model_changes: set[int] = set() + + # Views list for assets to register their views + _views: list = [] + + # CL: Cloning / Replication logic + # TODO: These attributes support cloning-specific logic and should be moved into a cloner class + # Pending site requests from sensors. + # Key: (body_pattern, xform_floats), Value: (label, wp.transform) + # identical (body_pattern, transform) reuses the same site. + _cl_pending_sites: dict[tuple[str | None, tuple[float, ...]], tuple[str, wp.transform]] = {} + + # Maps each site label to its resolved global or local site entry. + _GlobalSite = tuple[int, None] + _LocalSite = tuple[None, list[list[int]]] + _SiteEntry = _GlobalSite | _LocalSite + _cl_site_index_map: dict[str, _SiteEntry] = {} + + @classmethod + def initialize(cls, sim_context: SimulationContext) -> None: + """Initialize the manager with simulation context. + + Args: + sim_context: Parent simulation context. + """ + super().initialize(sim_context) + + # Newton-specific setup: get gravity from SimulationCfg (not physics manager cfg) + sim = PhysicsManager._sim + if sim is not None: + NewtonManager._gravity_vector = sim.cfg.gravity # type: ignore[union-attr] + + # USD/Fabric sync for Omniverse rendering (visualizer) or Newton+RTX (Kit cameras) + try: + requested = sim.resolve_visualizer_types() + except Exception: + requested = [] + viz_raw = sim.get_setting("/isaaclab/visualizer/types") + if isinstance(viz_raw, str): + requested = [v for part in viz_raw.split(",") for v in part.split() if v] + from isaaclab.app.settings_manager import get_settings_manager + + cameras_enabled = bool(get_settings_manager().get("/isaaclab/cameras_enabled", False)) + NewtonManager._clone_physics_only = "kit" not in requested and not cameras_enabled + + @classmethod + def reset(cls, soft: bool = False) -> None: + """Reset physics simulation. + + Args: + soft: If True, skip full reinitialization. + """ + if not soft: + cls.start_simulation() + cls.initialize_solver() + + @classmethod + def forward(cls) -> None: + """Update articulation kinematics without stepping physics. + + Runs Newton's generic forward kinematics (``eval_fk``) over **all** + articulations to compute body poses from joint coordinates. This is + the full (unmasked) FK path used during initial setup. For incremental + per-environment updates after resets, see :meth:`invalidate_fk` which + accumulates masks consumed by :meth:`step`. + """ + eval_fk(cls._model, cls._state_0.joint_q, cls._state_0.joint_qd, cls._state_0, None) + + @classmethod + def pre_render(cls) -> None: + """Flush deferred Fabric writes before cameras/visualizers read the scene.""" + cls.sync_transforms_to_usd() + + @classmethod + def sync_transforms_to_usd(cls) -> None: + """Write Newton body_q to USD Fabric world matrices for Kit viewport / RTX rendering. + + No-op when ``_usdrt_stage`` is None (i.e. Kit visualizer is not active) + or when transforms have not changed since the last sync. + + Called at render cadence by :meth:`pre_render` (via + :meth:`~isaaclab.sim.SimulationContext.render`). + Physics stepping marks transforms dirty via :meth:`_mark_transforms_dirty` + so that the expensive Fabric hierarchy update only runs once per render + frame rather than after every physics step. + + Uses ``wp.fabricarray`` directly (no ``isaacsim.physics.newton`` extension needed). + The Warp kernel reads ``state_0.body_q[newton_index[i]]`` and writes the + corresponding ``mat44d`` to ``omni:fabric:worldMatrix`` for each prim. + + When cubric is available the method mirrors PhysX's ``DirectGpuHelper`` + pattern: pause Fabric change tracking, write transforms, resume tracking, + then call ``IAdapter::compute`` on the GPU to propagate the hierarchy and + notify the Fabric Scene Delegate. Otherwise it falls back to the CPU + ``update_world_xforms()`` path. + """ + if cls._usdrt_stage is None or cls._model is None or cls._state_0 is None: + return + if not cls._transforms_dirty: + return + try: + import usdrt + + # Lazy adapter creation: deferred from initialize_solver() to avoid + # startup-ordering issues with the cubric plugin. + if cls._cubric is not None and cls._cubric.available and cls._cubric_adapter is None: + NewtonManager._cubric_adapter = cls._cubric.create_adapter() + if cls._cubric_adapter is not None: + logger.info("cubric GPU transform hierarchy enabled") + else: + logger.warning("cubric adapter creation failed; falling back to update_world_xforms()") + NewtonManager._cubric = None + + use_cubric = cls._cubric is not None and cls._cubric_adapter is not None + + fabric_hierarchy = None + if hasattr(usdrt, "hierarchy"): + fabric_hierarchy = usdrt.hierarchy.IFabricHierarchy().get_fabric_hierarchy( + cls._usdrt_stage.GetFabricId(), cls._usdrt_stage.GetStageIdAsStageId() + ) + + # Pause hierarchy change tracking BEFORE SelectPrims. + # SelectPrims with ReadWrite access calls getAttributeArrayGpu + # internally, which marks Fabric buffers dirty. If tracking is + # still active at that point the hierarchy records the change and + # Kit's updateWorldXforms will do an expensive connectivity + # rebuild every frame. PhysX avoids this via ScopedUSDRT which + # pauses tracking before any Fabric writes. + if use_cubric and fabric_hierarchy is not None: + fabric_hierarchy.track_world_xform_changes(False) + fabric_hierarchy.track_local_xform_changes(False) + + try: + selection = cls._usdrt_stage.SelectPrims( + require_attrs=[ + (usdrt.Sdf.ValueTypeNames.Matrix4d, "omni:fabric:worldMatrix", usdrt.Usd.Access.ReadWrite), + (usdrt.Sdf.ValueTypeNames.UInt, cls._newton_index_attr, usdrt.Usd.Access.Read), + ], + device=str(PhysicsManager._device), + ) + if selection.GetCount() == 0: + NewtonManager._transforms_dirty = False + return + + fabric_transforms = wp.fabricarray(selection, "omni:fabric:worldMatrix") + newton_indices = wp.fabricarray(selection, cls._newton_index_attr) + wp.launch( + _set_fabric_transforms, + dim=newton_indices.shape[0], + inputs=[fabric_transforms, newton_indices, cls._state_0.body_q], + device=PhysicsManager._device, + ) + wp.synchronize_device(PhysicsManager._device) + + NewtonManager._transforms_dirty = False + + if use_cubric and fabric_hierarchy is not None: + fabric_id = cls._usdrt_stage.GetFabricId().id + if fabric_id != cls._cubric_bound_fabric_id: + cls._cubric.bind_to_stage(cls._cubric_adapter, fabric_id) + NewtonManager._cubric_bound_fabric_id = fabric_id + cls._cubric.compute(cls._cubric_adapter) + elif fabric_hierarchy is not None: + fabric_hierarchy.update_world_xforms() + finally: + if use_cubric and fabric_hierarchy is not None: + fabric_hierarchy.track_world_xform_changes(True) + fabric_hierarchy.track_local_xform_changes(True) + except Exception: + logger.exception("[NewtonManager] sync_transforms_to_usd FAILED") + + @classmethod + def _mark_transforms_dirty(cls) -> None: + """Flag that physics state has changed and Fabric needs re-sync. + + Called by :meth:`_simulate` after stepping. The actual sync is deferred + to :meth:`sync_transforms_to_usd`, which runs at render cadence. + """ + NewtonManager._transforms_dirty = True + + @classmethod + def step(cls) -> None: + """Step the physics simulation.""" + sim = PhysicsManager._sim + if sim is None or not sim.is_playing(): + return + + # Notify solver of model changes + if cls._model_changes: + with wp.ScopedDevice(PhysicsManager._device): + for change in cls._model_changes: + cls._solver.notify_model_changed(change) + NewtonManager._model_changes = set() + + # Lazy CUDA graph capture: deferred from initialize_solver() when RTX was active. + # By the time step() is first called, RTX has fully initialized (all cudaImportExternalMemory + # calls are done) and is idle between render frames — giving us a clean capture window. + cfg = PhysicsManager._cfg + device = PhysicsManager._device + if cls._graph_capture_pending and cfg is not None and cfg.use_cuda_graph and "cuda" in device: # type: ignore[union-attr] + NewtonManager._graph_capture_pending = False + NewtonManager._graph = cls._capture_relaxed_graph(device) + if cls._graph is not None: + logger.info("Newton CUDA graph captured (deferred relaxed mode, RTX-compatible)") + else: + logger.warning("Newton deferred CUDA graph capture failed; using eager execution") + + # Ensure body_q is up-to-date before collision detection. + # After env resets, joint_q is written but body_q (used by + # broadphase/narrowphase) is stale until FK runs. + # Only runs FK for dirtied articulations via the accumulated mask. + if cls._needs_collision_pipeline: + eval_fk(cls._model, cls._state_0.joint_q, cls._state_0.joint_qd, cls._state_0, cls._fk_reset_mask) + + # Zero both masks after consumption + NewtonManager._world_reset_mask.zero_() + NewtonManager._fk_reset_mask.zero_() + + # Step simulation (graphed or not; _graph is None when capture is disabled or failed) + if cfg is not None and cfg.use_cuda_graph and cls._graph is not None and "cuda" in device: # type: ignore[union-attr] + wp.capture_launch(cls._graph) + if cls._usdrt_stage is not None: + cls._mark_transforms_dirty() + else: + with wp.ScopedDevice(device): + cls._simulate() + + # Launch solver-specific debug logging after stepping. + cls._log_solver_debug() + + PhysicsManager._sim_time += cls._solver_dt * cls._num_substeps + + @classmethod + def close(cls) -> None: + """Clean up Newton physics resources.""" + cls.clear() + super().close() + + @classmethod + def register_callback( + cls, + callback: Callable, + event: PhysicsEvent, + order: int = 0, + name: str | None = None, + wrap_weak_ref: bool = True, + ) -> CallbackHandle: + """Register a callback. Passes event to parent class.""" + return PhysicsManager.register_callback(callback, event, order, name, wrap_weak_ref) + + @classmethod + def get_physics_sim_view(cls) -> list: + """Get the list of registered views. + + Assets can append their views to this list, and sensors can access them. + Returns a list that callers can append to. + + Returns: + List of registered views (e.g., NewtonArticulationView instances). + """ + return cls._views + + @classmethod + def is_fabric_enabled(cls) -> bool: + """Check if fabric interface is enabled (not applicable for Newton).""" + return False + + @classmethod + def clear(cls): + """Clear all Newton-specific state (callbacks cleared by super().close()). + Start with solver specific cleanup.""" + cls._solver_specific_clear() + if cls._cubric is not None and cls._cubric_adapter is not None: + cls._cubric.release_adapter(cls._cubric_adapter) + NewtonManager._cubric = None + NewtonManager._cubric_adapter = None + NewtonManager._cubric_bound_fabric_id = None + NewtonManager._builder = None + NewtonManager._model = None + NewtonManager._solver = None + NewtonManager._use_single_state = None + NewtonManager._state_0 = None + NewtonManager._state_1 = None + NewtonManager._control = None + NewtonManager._contacts = None + NewtonManager._needs_collision_pipeline = False + NewtonManager._collision_pipeline = None + NewtonManager._collision_cfg = None + NewtonManager._newton_contact_sensors = {} + NewtonManager._newton_frame_transform_sensors = [] + NewtonManager._newton_imu_sensors = [] + NewtonManager._report_contacts = False + # Per-world reset masks + NewtonManager._world_reset_mask = None + NewtonManager._fk_reset_mask = None + NewtonManager._graph = None + NewtonManager._graph_capture_pending = False + NewtonManager._newton_stage_path = None + NewtonManager._usdrt_stage = None + NewtonManager._transforms_dirty = False + NewtonManager._up_axis = "Z" + NewtonManager._model_changes = set() + NewtonManager._cl_pending_sites = {} + NewtonManager._cl_site_index_map = {} + NewtonManager._pending_extended_state_attributes = set() + NewtonManager._pending_extended_contact_attributes = set() + NewtonManager._views = [] + + @classmethod + def set_builder(cls, builder: ModelBuilder) -> None: + """Set the Newton model builder.""" + NewtonManager._builder = builder + + @classmethod + def create_builder(cls, up_axis: str | None = None, **kwargs) -> ModelBuilder: + """Create a :class:`ModelBuilder` configured with default settings. + + Forwards :class:`NewtonShapeCfg` defaults onto Newton's upstream + ``ModelBuilder.default_shape_cfg`` via :func:`~isaaclab.utils.checked_apply`. + Falls back to wrapper defaults when no Newton config is active so + rough-terrain margin/gap still apply during early construction. + + Args: + up_axis: Override for the up-axis. Defaults to ``None``, which uses + the manager's ``_up_axis``. + **kwargs: Forwarded to :class:`ModelBuilder`. + + Returns: + New builder with up-axis and per-shape defaults (gap, margin) applied. + """ + builder = ModelBuilder(up_axis=up_axis or cls._up_axis, **kwargs) + # Resolve which NewtonShapeCfg to apply: user override if active config + # is NewtonCfg, else the wrapper's own defaults so callers from non-Newton + # contexts (tests, early construction) still get the rough-terrain margin. + cfg = PhysicsManager._cfg + shape_cfg = cfg.default_shape_cfg if isinstance(cfg, NewtonCfg) else NewtonShapeCfg() + checked_apply(shape_cfg, builder.default_shape_cfg) + return builder + + @classmethod + def cl_register_site(cls, body_pattern: str | None, xform: wp.transform) -> str: + """Register a site request for injection into prototypes before replication. + + Sensors call this during ``__init__``. Sites are injected into prototype + builders by :meth:`_cl_inject_sites` (called from ``newton_replicate``) + before ``add_builder``, so they replicate correctly per-world. + + Identical ``(body_pattern, transform)`` registrations share sites. + + The *body_pattern* is matched against prototype-local body labels + (e.g. ``"Robot/link.*"``) when replication is active, or against the + flat builder's body labels in the fallback path. Wildcard patterns + that match multiple bodies create one site per matched body. + + Args: + body_pattern: Regex pattern matched against body labels in the + prototype builder (e.g. ``"Robot/link0"`` or ``"Robot/finger.*"`` + for multi-body wildcards), or ``None`` for global sites + (world-origin reference, etc.). + xform: Site transform relative to body. + + Returns: + Assigned site label suffix. + """ + xform_key = tuple(xform) + key = (body_pattern, xform_key) + if key in cls._cl_pending_sites: + return cls._cl_pending_sites[key][0] + label = f"ft_{len(cls._cl_pending_sites)}" + cls._cl_pending_sites[key] = (label, xform) + return label + + @classmethod + def request_extended_state_attribute(cls, attr: str) -> None: + """Request an extended state attribute (e.g. ``"body_qdd"``). + + Sensors call this during ``__init__``, before model finalization. + Attributes are forwarded to the builder in :meth:`start_simulation` + so that subsequent ``model.state()`` calls allocate them. + + Args: + attr: State attribute name (must be in ``State.EXTENDED_ATTRIBUTES``). + """ + cls._pending_extended_state_attributes.add(attr) + + @classmethod + def request_extended_contact_attribute(cls, attr: str) -> None: + """Request an extended contact attribute (e.g. ``"force"``). + + Sensors call this during ``__init__``, before model finalization. + Attributes are forwarded to the model in :meth:`start_simulation` + so that subsequent ``Contacts`` creation includes them. + + Args: + attr: Contact attribute name. + """ + cls._pending_extended_contact_attributes.add(attr) + + @classmethod + def _cl_inject_sites( + cls, + main_builder: ModelBuilder, + proto_builders: dict[str, ModelBuilder], + ) -> tuple[dict[str, int], dict[int, dict[str, list[int]]]]: + """Inject registered sites into prototype builders before replication. + + Non-global sites are matched against prototype body labels using + :func:`resolve_matching_names` (regex). Global sites + (``body_pattern is None``) are added to *main_builder* with + ``body=-1``. + + Returns proto-local shape indices so that ``newton_replicate`` can + compute final indices during replication without a second pattern match. + + Pending requests are cleared after processing. + + Args: + main_builder: Top-level builder that receives global sites. + proto_builders: ``{src_path: ModelBuilder}`` prototype builders. + + Returns: + Tuple of ``(global_sites, proto_sites)`` where *global_sites* maps + ``{label: main_builder_shape_idx}`` and *proto_sites* maps + ``{id(proto): {label: [proto_local_shape_idx, ...]}}``. + """ + global_sites: dict[str, int] = {} + proto_sites: dict[int, dict[str, list[int]]] = {} + + for (body_pattern, _xform_key), (label, xform) in cls._cl_pending_sites.items(): + if body_pattern is None: + site_idx = main_builder.add_site(body=-1, xform=xform, label=label) + global_sites[label] = site_idx + continue + + any_matched = False + for src_prefix, proto in proto_builders.items(): + body_labels = list(proto.body_label) + matched_indices, matched_names = resolve_matching_names( + body_pattern, body_labels, raise_when_no_match=False + ) + if not matched_indices: # Pattern has no matches in this prototype + continue + + any_matched = True + proto_id = id(proto) + site_indices: list[int] = [] + for body_idx, body_name in zip(matched_indices, matched_names): + site_label = f"{body_name}/{label}" + proto_site_idx = proto.add_site(body=body_idx, xform=xform, label=site_label) + site_indices.append(proto_site_idx) + logger.debug(f"Injected site '{site_label}' into prototype") + proto_sites.setdefault(proto_id, {})[label] = site_indices + + if not any_matched: + raise ValueError( + f"Site '{label}' with body_pattern '{body_pattern}' matched no prototype bodies " + f"across {len(proto_builders)} prototype(s). " + f"Check that the pattern matches a body label in the prototype builder." + ) + + cls._cl_pending_sites.clear() + return global_sites, proto_sites + + @classmethod + def _cl_inject_sites_fallback(cls) -> None: + """Inject pending sites into the flat builder (no-replication path). + + Populates :attr:`_cl_site_index_map` with the unified per-world structure: + + - Global sites (``body_pattern is None``): ``(shape_idx, None)`` + - Local sites: ``(None, [[idx, ...]])`` — one sublist for the single world. + """ + builder = cls._builder + body_labels = list(builder.body_label) + + for (body_pattern, _xform_key), (label, xform) in cls._cl_pending_sites.items(): + if body_pattern is None: + site_idx = builder.add_site(body=-1, xform=xform, label=label) + cls._cl_site_index_map[label] = (site_idx, None) + else: + try: + matched_indices, matched_names = resolve_matching_names(body_pattern, body_labels) + except ValueError as e: + raise ValueError( + f"Site '{label}' with body_pattern '{body_pattern}' matched no bodies " + f"in the flat builder. Available body labels: {body_labels}." + ) from e + + site_indices: list[int] = [] + for body_idx in matched_indices: + site_label = f"{builder.body_label[body_idx]}/{label}" + site_idx = builder.add_site(body=body_idx, xform=xform, label=site_label) + site_indices.append(site_idx) + + # Single world (no replication): one-element outer list + cls._cl_site_index_map[label] = (None, [site_indices]) + + cls._cl_pending_sites.clear() + + @classmethod + def add_model_change(cls, change: SolverNotifyFlags) -> None: + """Register a model change to notify the solver.""" + cls._model_changes.add(change) + + @classmethod + def invalidate_fk( + cls, + env_mask: wp.array | None = None, + env_ids: wp.array | None = None, + articulation_ids: wp.array | None = None, + ) -> None: + """Mark environments as needing FK recomputation and solver reset. + + Called by asset write methods that modify joint coordinates or root + transforms. The masks are consumed in :meth:`step` before physics + stepping. + + Args: + env_mask: Boolean mask of dirtied environments. Shape ``(num_envs,)``. + Used by ``_mask`` write methods. + env_ids: Integer indices of dirtied environments. + Used by ``_index`` write methods. + articulation_ids: Mapping from ``(world, arti)`` to model articulation + index. Shape ``(world_count, count_per_world)``. Obtained from + ``ArticulationView.articulation_ids``. + """ + if cls._world_reset_mask is None or cls._fk_reset_mask is None: + return + + if articulation_ids is not None and env_mask is not None: + wp.launch( + _or_reset_masks_from_mask, + dim=articulation_ids.shape, + inputs=[env_mask, articulation_ids], + outputs=[NewtonManager._world_reset_mask, NewtonManager._fk_reset_mask], + device=PhysicsManager._device, + ) + elif articulation_ids is not None and env_ids is not None: + wp.launch( + _scatter_reset_masks_from_ids, + dim=(env_ids.shape[0], articulation_ids.shape[1]), + inputs=[env_ids, articulation_ids], + outputs=[NewtonManager._world_reset_mask, NewtonManager._fk_reset_mask], + device=PhysicsManager._device, + ) + else: + # Fallback: no topology info — mark everything dirty + NewtonManager._world_reset_mask.fill_(1) + NewtonManager._fk_reset_mask.fill_(True) + + @classmethod + def start_simulation(cls) -> None: + """Start simulation by finalizing model and initializing state. + + This function finalizes the model and initializes the simulation state. + Note: Collision pipeline is initialized later in initialize_solver() after + we determine whether the solver needs external collision detection. + """ + logger.debug(f"Builder: {cls._builder}") + + # Create builder from USD stage if not provided + if cls._builder is None: + cls.instantiate_builder_from_stage() + + logger.info("Dispatching MODEL_INIT callbacks") + cls.dispatch_event(PhysicsEvent.MODEL_INIT) + + # Inject any pending site requests (no-replication fallback path). + # In the replication path, _cl_inject_sites() already ran from newton_replicate. + cls._cl_inject_sites_fallback() + + device = PhysicsManager._device + logger.info(f"Finalizing model on device: {device}") + cls._builder.up_axis = Axis.from_string(cls._up_axis) + # Forward pending extended attribute requests to builder and clear them + if cls._pending_extended_state_attributes: + cls._builder.request_state_attributes(*cls._pending_extended_state_attributes) + NewtonManager._pending_extended_state_attributes = set() + with Timer(name="newton_finalize_builder", msg="Finalize builder took:"): + NewtonManager._model = cls._builder.finalize(device=device) + cls._model.set_gravity(cls._gravity_vector) + cls._model.num_envs = cls._num_envs + + replace_newton_shape_colors(cls._model) + + if cls._pending_extended_contact_attributes: + cls._model.request_contact_attributes(*cls._pending_extended_contact_attributes) + NewtonManager._pending_extended_contact_attributes = set() + NewtonManager._state_0 = cls._model.state() + NewtonManager._state_1 = cls._model.state() + NewtonManager._control = cls._model.control() + eval_fk(cls._model, cls._state_0.joint_q, cls._state_0.joint_qd, cls._state_0, None) + + # Allocate per-world reset masks (used by all solvers for masked FK, and by Kamino for masked reset) + NewtonManager._world_reset_mask = wp.zeros(cls._model.world_count, dtype=wp.int32, device=device) + NewtonManager._fk_reset_mask = wp.zeros(cls._model.articulation_count, dtype=wp.bool, device=device) + + logger.info("Dispatching PHYSICS_READY callbacks") + cls.dispatch_event(PhysicsEvent.PHYSICS_READY) + + # Setup USD/Fabric sync for Kit viewport rendering + if not cls._clone_physics_only: + import usdrt + + body_paths = getattr(cls._model, "body_label", None) or getattr(cls._model, "body_key", None) + if body_paths is None: + raise RuntimeError("NewtonManager: model has no body_label/body_key, skipping USD/Fabric sync for RTX.") + NewtonManager._usdrt_stage = get_current_stage(fabric=True) + for i, prim_path in enumerate(body_paths): + prim = cls._usdrt_stage.GetPrimAtPath(prim_path) + prim.CreateAttribute(cls._newton_index_attr, usdrt.Sdf.ValueTypeNames.UInt, True) + prim.GetAttribute(cls._newton_index_attr).Set(i) + # Tag with PhysicsRigidBodyAPI so cubric's eRigidBody mode + # applies Inverse propagation (preserves Newton's world + # transforms and derives local) instead of Forward. + prim.AddAppliedSchema("PhysicsRigidBodyAPI") + xformable_prim = usdrt.Rt.Xformable(prim) + if not xformable_prim.HasWorldXform(): + xformable_prim.SetWorldXformFromUsd() + + cls._mark_transforms_dirty() + cls.sync_transforms_to_usd() + + @classmethod + def instantiate_builder_from_stage(cls): + """Create builder from USD stage. + + Detects env Xforms (e.g. ``/World/Env_0``, ``/World/Env_1``) and builds + each as a separate Newton world via ``begin_world``/``end_world``. + Falls back to a flat ``add_usd`` when no env Xforms are found. + + """ + import re + + from pxr import UsdGeom + + stage = get_current_stage() + up_axis = UsdGeom.GetStageUpAxis(stage) + + # Scan /World children for env-like Xforms (Env_0, env_1, ...) + env_pattern = re.compile(r"^[Ee]nv_(\d+)$") + world_prim = stage.GetPrimAtPath("/World") + env_paths: list[tuple[int, str]] = [] + if world_prim and world_prim.IsValid(): + for child in world_prim.GetChildren(): + m = env_pattern.match(child.GetName()) + if m: + env_paths.append((int(m.group(1)), child.GetPath().pathString)) + env_paths.sort(key=lambda x: x[0]) + + builder = ModelBuilder(up_axis=up_axis) + + schema_resolvers = [SchemaResolverNewton(), SchemaResolverPhysx()] + + if not env_paths: + # No env Xforms — flat loading + builder.add_usd(stage, schema_resolvers=schema_resolvers) + else: + # Load everything except the env subtrees (ground plane, lights, etc.) + ignore_paths = [path for _, path in env_paths] + builder.add_usd(stage, ignore_paths=ignore_paths, schema_resolvers=schema_resolvers) + + # Build a prototype from the first env (all envs assumed identical) + _, proto_path = env_paths[0] + proto = ModelBuilder(up_axis=up_axis) + proto.add_usd( + stage, + root_path=proto_path, + schema_resolvers=schema_resolvers, + ) + + # Inject registered sites into the proto before replication + global_sites, proto_sites = cls._cl_inject_sites(builder, {proto_path: proto}) + global_site_map: dict[str, tuple[int, None]] = {label: (idx, None) for label, idx in global_sites.items()} + num_worlds = len(env_paths) + local_site_map: dict[str, list[list[int]]] = {} + site_entries = proto_sites.get(id(proto), {}) + + # Add each env as a separate Newton world + xform_cache = UsdGeom.XformCache() + for col, (_, env_path) in enumerate(env_paths): + builder.begin_world() + offset = builder.shape_count + world_xform = xform_cache.GetLocalToWorldTransform(stage.GetPrimAtPath(env_path)) + translation = world_xform.ExtractTranslation() + rotation = world_xform.ExtractRotationQuat() + pos = (translation[0], translation[1], translation[2]) + quat = ( + rotation.GetImaginary()[0], + rotation.GetImaginary()[1], + rotation.GetImaginary()[2], + rotation.GetReal(), + ) + builder.add_builder(proto, xform=wp.transform(pos, quat)) + for label, proto_shape_indices in site_entries.items(): + if label not in local_site_map: + local_site_map[label] = [[] for _ in range(num_worlds)] + for proto_shape_idx in proto_shape_indices: + local_site_map[label][col].append(offset + proto_shape_idx) + builder.end_world() + + NewtonManager._cl_site_index_map = { + **global_site_map, + **{label: (None, per_world) for label, per_world in local_site_map.items()}, + } + NewtonManager._num_envs = len(env_paths) + + cls.set_builder(builder) + + @classmethod + def _initialize_contacts(cls) -> None: + """Initialize contacts using Newton's :class:`CollisionPipeline`. + + This default implementation handles solvers that rely on Newton's + unified collision pipeline (XPBD, Featherstone, and MuJoCo with + ``use_mujoco_contacts=False``). Solver subclasses with internal + contact handling (e.g. :class:`NewtonMJWarpManager` when + ``use_mujoco_contacts=True``) override this method to allocate a + :class:`Contacts` object sized to the solver's internal contact buffer. + """ + if not cls._needs_collision_pipeline: + return + if cls._collision_pipeline is None: + if cls._collision_cfg is not None: + NewtonManager._collision_pipeline = CollisionPipeline( + cls._model, **cls._collision_cfg.to_pipeline_args() + ) + else: + NewtonManager._collision_pipeline = CollisionPipeline(cls._model, broad_phase="explicit") + if cls._contacts is None: + NewtonManager._contacts = cls._collision_pipeline.contacts() + + # ----- Solver construction (subclass contract) ------------------------ + + @classmethod + @abstractmethod + def _build_solver(cls, model: Model, solver_cfg: NewtonSolverCfg) -> None: + """Construct the solver this manager owns and assign it onto the base class. + + Subclasses must populate the canonical :class:`NewtonManager` slots: + + * :attr:`NewtonManager._solver` — the constructed :class:`SolverBase` + instance. + * :attr:`NewtonManager._use_single_state` — ``True`` if the solver + steps in-place on a single :class:`State` (e.g. MuJoCo); ``False`` + if it needs separate input/output states (e.g. XPBD, Featherstone, + Kamino). + * :attr:`NewtonManager._needs_collision_pipeline` — ``True`` if the + manager owns Newton's :class:`CollisionPipeline` for contact + generation; ``False`` if the solver runs internal collision + detection (MuJoCo internal contacts, Kamino with its own detector). + + Writing through ``NewtonManager._foo`` (rather than ``cls._foo``) + keeps the canonical state visible to external readers regardless of + which subclass is active. + + Args: + model: Finalized Newton model the solver should run on. + solver_cfg: The manager-specific :class:`NewtonSolverCfg` + subclass (i.e. the inner ``cfg.solver_cfg``, not the outer + :class:`NewtonCfg`). + """ + raise NotImplementedError("NewtonManager subclasses must implement _build_solver()") + + @classmethod + def _step_solver(cls, state_0: State, state_1: State, control: Control, substep_dt: float) -> None: + """Run one solver substep. + + Default invokes :attr:`_solver` once. Subclasses can override to + batch multiple solvers within a single substep. + """ + # Only solvers that consume Newton collision-pipeline contacts receive ``_contacts`` here. Solvers with + # internal contact handling receive ``None`` even when ``_contacts`` is allocated for later reporting via + # ``solver.update_contacts`` (e.g. MuJoCo with ``use_mujoco_contacts=True``). + contacts = cls._contacts if cls._needs_collision_pipeline else None + cls._solver.step(state_0, state_1, control, contacts, substep_dt) + + @classmethod + def _solver_specific_clear(cls) -> None: + """Solver-specific cleanup hook called from :meth:`clear`. + + Default no-op. Subclasses override to release sub-solver references + or other solver-specific resources. + """ + + @classmethod + def _log_solver_debug(cls) -> None: + """Solver-specific debug logging after stepping. + + Default no-op. Subclasses override to log solver-specific debug info + (e.g. constraint violations, contact forces, etc.) after stepping. + """ + + # ----- Lifecycle orchestration ---------------------------------------- + + @classmethod + def initialize_solver(cls) -> None: + """Initialize the solver and collision pipeline. + + Thin orchestrator: delegates solver construction to + :meth:`_build_solver` (overridden by each solver subclass), allocates + the collision pipeline (when applicable) via + :meth:`_initialize_contacts`, then sets up cubric bindings and either + captures the CUDA graph immediately or defers capture until the + first :meth:`step` call (RTX-active path). + + .. warning:: + When using a CUDA-enabled device, the simulation is graphed. + This means the function steps the simulation once to capture the + graph, so it should only be called after everything else in the + simulation is initialized. + """ + cfg = PhysicsManager._cfg + if cfg is None: + return + + with Timer(name="newton_initialize_solver", msg="Initialize solver took:"): + NewtonManager._num_substeps = cfg.num_substeps # type: ignore[union-attr] + NewtonManager._solver_dt = cls.get_physics_dt() / cls._num_substeps + NewtonManager._collision_cfg = cfg.collision_cfg # type: ignore[union-attr] + + cls._build_solver( + cls._model, + cfg.solver_cfg, # type: ignore[union-attr] + ) + if NewtonManager._solver is None: + raise RuntimeError( + f"{cls.__name__}._build_solver did not assign NewtonManager._solver. " + "Subclasses of NewtonManager must populate NewtonManager._solver, " + "NewtonManager._use_single_state, and NewtonManager._needs_collision_pipeline." + ) + + cls._initialize_contacts() + + if cls._usdrt_stage is not None: + cls._setup_cubric_bindings() + + device = PhysicsManager._device + use_cuda_graph = cfg.use_cuda_graph and "cuda" in device # type: ignore[union-attr] + if use_cuda_graph: + cls._capture_or_defer_cuda_graph() + else: + NewtonManager._graph = None + + @classmethod + def _setup_cubric_bindings(cls) -> None: + """Initialize cubric ctypes bindings when the Kit viewport is active. + + Adapter creation itself is deferred to the first + :meth:`sync_transforms_to_usd` call to avoid startup-ordering issues + with the cubric plugin. + """ + from isaaclab_newton.physics._cubric import CubricBindings + + bindings = CubricBindings() + if bindings.initialize(): + NewtonManager._cubric = bindings + logger.info("cubric bindings ready (adapter deferred to first render)") + else: + NewtonManager._cubric = None + logger.warning("cubric bindings init failed; falling back to update_world_xforms()") + + @classmethod + def _capture_or_defer_cuda_graph(cls) -> None: + """Capture the physics CUDA graph, or defer if RTX is initializing.""" + with Timer(name="newton_cuda_graph", msg="CUDA graph took:"): + if cls._usdrt_stage is None: + # No RTX active — use standard Warp capture (cudaStreamCaptureModeGlobal). + with wp.ScopedCapture() as capture: + cls._simulate() + NewtonManager._graph = capture.graph + logger.info("Newton CUDA graph captured (standard Warp mode)") + else: + # RTX is active during initialization — cudaImportExternalMemory and other + # non-capturable RTX ops run on background CUDA streams right now. + # Defer capture to the first step() call, after RTX is fully initialized + # and idle between render frames (clean capture window). + NewtonManager._graph = None + NewtonManager._graph_capture_pending = True + logger.info("Newton CUDA graph capture deferred until first step() (RTX active)") + + @classmethod + def _capture_relaxed_graph(cls, device: str): + """Capture Newton physics (only) as a CUDA graph, RTX-compatible. + + Uses a hybrid approach to work around two conflicting requirements: + + 1. RTX background threads use CUDA's legacy stream (stream 0) for async operations + like ``cudaImportExternalMemory``. A standard ``wp.ScopedCapture()`` uses + ``cudaStreamCaptureModeThreadLocal`` on Warp's default stream (a blocking stream). + A blocking stream synchronises implicitly with legacy stream 0, so RTX ops inside + the capture window fail with error 906. + + 2. ``mujoco_warp`` calls ``wp.capture_while`` inside ``solver.solve()``. + ``wp.capture_while`` checks ``device.captures`` (populated by ``wp.capture_begin``) + to decide whether to insert a conditional graph node (graph-capture path) or to run + eagerly with ``wp.synchronize_stream`` (non-capture path). Without an entry in + ``device.captures``, it synchronises the capturing stream — which raises "Cannot + synchronize stream while graph capture is active". + + Solution: + + - Create a **non-blocking** stream (``cudaStreamNonBlocking = 0x01``): no implicit sync + with legacy stream 0, so RTX background threads are unaffected (avoids error 906). + - Start the capture externally via ``cudaStreamBeginCapture`` with + ``cudaStreamCaptureModeRelaxed`` so no other CUDA activity is disrupted. + - Call ``wp.capture_begin(external=True, stream=fresh_stream)``: + this registers the capture in Warp's ``device.captures`` *without* calling + ``cudaStreamBeginCapture`` (already done) and *without* changing device-wide memory + pool attributes (avoids error 900 in RTX's ``cudaMallocAsync``). + - Run ``_simulate_physics_only()`` inside ``ScopedStream(fresh_stream)``: + kernels dispatch to ``fresh_stream`` and are captured; ``wp.capture_while`` finds the + active capture and inserts a conditional graph node instead of synchronising. + - Call ``wp.capture_end(stream=fresh_stream)`` to finalise the Warp-level capture. + - Call ``cudaStreamEndCapture`` to close the CUDA stream capture and get the graph. + + Warmup run pre-allocates all solver scratch buffers so no ``cudaMalloc`` occurs during + capture. ``sync_transforms_to_usd`` (which calls ``wp.synchronize_device``) is + excluded from the capture and runs eagerly in ``step()`` after ``wp.capture_launch``. + + Returns a ``wp.Graph`` on success, or ``None`` on failure. + """ + if _cudart is None: + logger.warning("libcudart not available; cannot use relaxed graph capture") + return None + + # Warmup: pre-allocate all solver scratch buffers so the capture window has + # no new cudaMalloc calls (which are forbidden inside graph capture). + with wp.ScopedDevice(device): + cls._simulate_physics_only() + wp.synchronize_stream(wp.get_stream(device)) + + # Create a non-blocking stream (cudaStreamNonBlocking = 0x01). + raw_handle = ctypes.c_void_p() + ret = _cudart.cudaStreamCreateWithFlags(ctypes.byref(raw_handle), ctypes.c_uint(0x01)) + if ret != 0: + logger.warning("cudaStreamCreateWithFlags(NonBlocking) failed (code %d)", ret) + return None + fresh_handle = raw_handle.value + fresh_stream = wp.Stream(device, cuda_stream=fresh_handle, owner=False) + + # Start capture in relaxed mode BEFORE entering ScopedStream. + ret = _cudart.cudaStreamBeginCapture(ctypes.c_void_p(fresh_handle), ctypes.c_int(2)) + if ret != 0: + _cudart.cudaStreamDestroy(ctypes.c_void_p(fresh_handle)) + logger.warning("cudaStreamBeginCapture(relaxed) failed (code %d)", ret) + return None + + try: + wp.capture_begin(stream=fresh_stream, external=True) + except Exception as exc: + raw_graph = ctypes.c_void_p() + _cudart.cudaStreamEndCapture(ctypes.c_void_p(fresh_handle), ctypes.byref(raw_graph)) + if raw_graph.value: + _cudart.cudaGraphDestroy(raw_graph) + _cudart.cudaStreamDestroy(ctypes.c_void_p(fresh_handle)) + logger.warning("wp.capture_begin(external=True) failed: %s", exc) + return None + + err_during_capture = None + with wp.ScopedStream(fresh_stream, sync_enter=False): + try: + cls._simulate_physics_only() + except Exception as exc: + err_during_capture = exc + + if err_during_capture is None: + try: + graph = wp.capture_end(stream=fresh_stream) + except Exception as exc: + err_during_capture = exc + graph = None + else: + with contextlib.suppress(Exception): + wp.capture_end(stream=fresh_stream) + graph = None + + raw_graph = ctypes.c_void_p() + end_ret = _cudart.cudaStreamEndCapture(ctypes.c_void_p(fresh_handle), ctypes.byref(raw_graph)) + _cudart.cudaStreamDestroy(ctypes.c_void_p(fresh_handle)) + + if err_during_capture is not None: + if raw_graph.value: + _cudart.cudaGraphDestroy(raw_graph) + logger.warning("Newton graph capture aborted during simulate: %s", err_during_capture) + return None + + if end_ret != 0 or not raw_graph.value: + logger.warning("cudaStreamEndCapture failed (code %d)", end_ret) + return None + + # Patch the Warp Graph object with the raw CUDA graph handle obtained + # from our external cudaStreamEndCapture. wp.capture_end(external=True) + # returns a Graph with a stale handle; we overwrite it so that + # wp.capture_launch() replays the correct graph. + # NOTE: This relies on Warp internals (Graph.graph / Graph.graph_exec). + # Setting graph_exec = None triggers lazy cudaGraphInstantiate on + # the next capture_launch. Replace with public API when available. + graph.graph = raw_graph + graph.graph_exec = None + return graph + + @classmethod + def _simulate_physics_only(cls) -> None: + """Run one physics step without Fabric/USD sync — safe for CUDA graph capture. + + Used by :meth:`_capture_relaxed_graph` to capture only the pure physics kernels. + ``sync_transforms_to_usd`` is excluded because it calls ``wp.synchronize_device`` + (forbidden inside graph capture) and ``wp.fabricarray`` (device-wide allocation). + The caller (``step()``) is responsible for calling ``sync_transforms_to_usd()`` + eagerly after ``wp.capture_launch``. + """ + if cls._needs_collision_pipeline: + cls._collision_pipeline.collide(cls._state_0, cls._contacts) + + if cls._use_single_state: + for _ in range(cls._num_substeps): + cls._step_solver(cls._state_0, cls._state_0, cls._control, cls._solver_dt) + cls._state_0.clear_forces() + else: + cfg = PhysicsManager._cfg + need_copy_on_last_substep = (cfg is not None and cfg.use_cuda_graph) and cls._num_substeps % 2 == 1 # type: ignore[union-attr] + for i in range(cls._num_substeps): + cls._step_solver(cls._state_0, cls._state_1, cls._control, cls._solver_dt) + if need_copy_on_last_substep and i == cls._num_substeps - 1: + cls._state_0.assign(cls._state_1) + else: + NewtonManager._state_0, NewtonManager._state_1 = cls._state_1, cls._state_0 + cls._state_0.clear_forces() + + # Update frame transform sensors + if cls._newton_frame_transform_sensors: + for sensor in cls._newton_frame_transform_sensors: + sensor.update(cls._state_0) + + # Update IMU sensors + if cls._newton_imu_sensors: + for sensor in cls._newton_imu_sensors: + sensor.update(cls._state_0) + + # Populate contacts for contact sensors + if cls._report_contacts: + eval_contacts = cls._contacts + cls._solver.update_contacts(eval_contacts, cls._state_0) + for sensor in cls._newton_contact_sensors.values(): + sensor.update(cls._state_0, eval_contacts) + + @classmethod + def _simulate(cls) -> None: + """Run one simulation step with substeps and USD sync. + + Delegates physics work to :meth:`_simulate_physics_only` and then + marks transforms dirty for the next render-cadence sync. + """ + cls._simulate_physics_only() + + if cls._usdrt_stage is not None: + cls._mark_transforms_dirty() + + @classmethod + def get_solver_convergence_steps(cls) -> dict[str, float | int]: + """Get the solver convergence steps. Needs to be implemented in solver-specific managers.""" + if hasattr(cls, "_get_solver_convergence_steps"): + return cls._get_solver_convergence_steps() + else: + raise NotImplementedError("NewtonManager subclasses must implement _get_solver_convergence_steps()") + + # State accessors (used extensively by articulation/rigid object data) + @classmethod + def get_model(cls) -> Model: + """Get the Newton model.""" + return cls._model + + @classmethod + def get_state_0(cls) -> State: + """Get the current state.""" + return cls._state_0 + + @classmethod + def get_state_1(cls) -> State: + """Get the next state.""" + return cls._state_1 + + @classmethod + def get_control(cls) -> Control: + """Get the control object.""" + return cls._control + + @classmethod + def get_dt(cls) -> float: + """Get the physics timestep. Alias for get_physics_dt().""" + return cls.get_physics_dt() + + @classmethod + def get_solver_dt(cls) -> float: + """Get the solver substep timestep.""" + return cls._solver_dt + + @classmethod + def add_contact_sensor( + cls, + body_names_expr: str | list[str] | None = None, + shape_names_expr: str | list[str] | None = None, + contact_partners_body_expr: str | list[str] | None = None, + contact_partners_shape_expr: str | list[str] | None = None, + verbose: bool = False, + ) -> tuple[str | list[str] | None, str | list[str] | None, str | list[str] | None, str | list[str] | None]: + """Add a contact sensor for reporting contacts between bodies/shapes. + + Converts Isaac Lab pattern conventions (``.*`` regex, full USD paths) to + fnmatch globs and delegates to :class:`newton.sensors.SensorContact`. + + Args: + body_names_expr: Expression for body names to sense. + shape_names_expr: Expression for shape names to sense. + contact_partners_body_expr: Expression for contact partner body names. + contact_partners_shape_expr: Expression for contact partner shape names. + verbose: Print verbose information. + """ + if body_names_expr is None and shape_names_expr is None: + raise ValueError("At least one of body_names_expr or shape_names_expr must be provided") + if body_names_expr is not None and shape_names_expr is not None: + raise ValueError("Only one of body_names_expr or shape_names_expr must be provided") + if contact_partners_body_expr is not None and contact_partners_shape_expr is not None: + raise ValueError("Only one of contact_partners_body_expr or contact_partners_shape_expr must be provided") + + sensor_target = body_names_expr or shape_names_expr + partner_filter = contact_partners_body_expr or contact_partners_shape_expr or "all bodies/shapes" + logger.info(f"Adding contact sensor for {sensor_target} with filter {partner_filter}") + + def _hashable_key(x): + return tuple(x) if isinstance(x, list) else x + + def _to_fnmatch(expr: str | list[str] | None) -> str | list[str] | None: + """Convert Isaac Lab regex expressions (``.*``) to fnmatch glob (``*``).""" + if expr is None: + return None + if isinstance(expr, str): + return expr.replace(".*", "*") + return [p.replace(".*", "*") for p in expr] + + def _normalize_for_labels(expr: str | list[str] | None, labels: list[str]) -> str | list[str] | None: + """Strip leading path components from *expr* when labels are bare names. + + Model labels may be full USD paths (``/World/envs/env_0/Robot/base``) or bare + names (``base``). When the labels are bare names but the user expression + contains slashes, we strip everything up to the last ``/``. + """ + if expr is None or not labels: + return expr + label_has_paths = any("/" in lbl for lbl in labels) + items = [expr] if isinstance(expr, str) else list(expr) + expr_uses_paths = any("/" in p for p in items) + if label_has_paths or not expr_uses_paths: + return expr + normalized = [p.rsplit("/", 1)[-1] for p in items] + return normalized[0] if isinstance(expr, str) else normalized + + sensor_key = ( + _hashable_key(body_names_expr), + _hashable_key(shape_names_expr), + _hashable_key(contact_partners_body_expr), + _hashable_key(contact_partners_shape_expr), + ) + + body_labels = list(cls._model.body_label) + shape_labels = list(cls._model.shape_label) + + with Timer(name="newton_contact_sensor", msg="Contact sensor construction took:"): + sensor = NewtonContactSensor( + cls._model, + sensing_obj_bodies=_normalize_for_labels(_to_fnmatch(body_names_expr), body_labels), + sensing_obj_shapes=_normalize_for_labels(_to_fnmatch(shape_names_expr), shape_labels), + counterpart_bodies=_normalize_for_labels(_to_fnmatch(contact_partners_body_expr), body_labels), + counterpart_shapes=_normalize_for_labels(_to_fnmatch(contact_partners_shape_expr), shape_labels), + include_total=True, + verbose=verbose, + ) + + cls._newton_contact_sensors[sensor_key] = sensor + NewtonManager._report_contacts = True + + if cls._solver is not None and cls._contacts is not None and cls._contacts.force is None: + cls._initialize_contacts() + + return sensor_key + + @classmethod + def add_frame_transform_sensor(cls, shapes: list[int], reference_sites: list[int]) -> int: + """Add a frame transform sensor for measuring relative transforms. + + Creates a :class:`SensorFrameTransform` from pre-resolved shape and reference + site indices, appends it to the internal list, and returns its index. + + Args: + shapes: Ordered list of shape indices to measure. + reference_sites: 1:1 list of reference site indices (same length as shapes). + + Returns: + Index of the newly created sensor in :attr:`_newton_frame_transform_sensors`. + """ + sensor = SensorFrameTransform( + cls._model, + shapes=shapes, + reference_sites=reference_sites, + ) + idx = len(cls._newton_frame_transform_sensors) + cls._newton_frame_transform_sensors.append(sensor) + logger.info(f"Added frame transform sensor (index={idx}, shapes={len(shapes)})") + return idx + + @classmethod + def add_imu_sensor(cls, sites: list[int]) -> int: + """Add an IMU sensor for measuring acceleration and angular velocity at sites. + + Creates a ``newton.sensors.SensorIMU`` from pre-resolved site indices, + appends it to the internal list, and returns its index. + + Args: + sites: Ordered list of site indices (one per environment). + + Returns: + Index of the newly created sensor in the internal IMU sensor list. + """ + if cls._model is None: + raise RuntimeError("add_imu_sensor called before model finalization (start_simulation).") + sensor = NewtonSensorIMU( + cls._model, + sites=sites, + request_state_attributes=False, # Already requested via NewtonManager + ) + idx = len(cls._newton_imu_sensors) + cls._newton_imu_sensors.append(sensor) + logger.info(f"Added IMU sensor (index={idx}, sites={len(sites)})") + return idx diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py new file mode 100644 index 000000000000..85e4061ab911 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py @@ -0,0 +1,151 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.physics import PhysicsCfg +from isaaclab.utils import configclass + +from .newton_collision_cfg import NewtonCollisionPipelineCfg + +if TYPE_CHECKING: + from isaaclab_newton.physics import NewtonManager + + +@configclass +class NewtonSolverCfg: + """Configuration for Newton solver-related parameters. + + These parameters are used to configure the Newton solver. For more information, see the `Newton documentation`_. + + Subclasses set :attr:`class_type` to their matching :class:`NewtonManager` + subclass; :class:`NewtonCfg` propagates that to its own + :attr:`NewtonCfg.class_type` in :meth:`NewtonCfg.__post_init__` so that + ``SimulationContext`` resolves the correct manager via the existing + dispatch path. + + .. _Newton documentation: https://newton.readthedocs.io/en/latest/ + """ + + class_type: type[NewtonManager] | str = "{DIR}.newton_manager:NewtonManager" + """Manager class for this solver. + + Default points at the abstract :class:`NewtonManager`; concrete subclasses + override it. + """ + + solver_type: str = "None" + """Solver type metadata (deprecated). + + .. deprecated:: + Manager dispatch is now driven by :attr:`class_type`; this field is + retained as metadata for logging and debugging only. Do not branch on + ``solver_type`` in new code. + """ + + +@configclass +class NewtonShapeCfg: + """Default per-shape collision properties applied to all shapes in a Newton scene. + + Mirrors Newton's :attr:`ModelBuilder.default_shape_cfg`. Only fields Isaac + Lab actually overrides are declared here; unspecified fields keep Newton's + upstream default. The struct is forwarded onto Newton's upstream + ``ShapeConfig`` via :func:`~isaaclab.utils.checked_apply` at builder + construction. + """ + + margin: float = 0.0 + """Default per-shape collision margin [m]. + + A nonzero margin (e.g. ``0.01``) is required for stable contact on + triangle-mesh terrain — without it, lightweight robots fail to learn + rough-terrain locomotion on Newton. Newton's upstream default is ``0.0``. + """ + + gap: float = 0.01 + """Default per-shape contact gap [m]. Newton's upstream default is ``None``.""" + + +@configclass +class NewtonCfg(PhysicsCfg): + """Configuration for Newton physics manager. + + This configuration includes Newton-specific simulation settings and solver configuration. + + The active :class:`NewtonManager` subclass is determined by + :attr:`solver_cfg.class_type`, which :meth:`__post_init__` propagates to + :attr:`class_type` so that ``SimulationContext`` resolves the right + manager subclass automatically. User code keeps the existing two-level + shape ``NewtonCfg(solver_cfg=...)`` and does not need to set + :attr:`class_type` explicitly. + """ + + class_type: type[NewtonManager] | str | None = None + """The class type of the :class:`NewtonManager`. + + Auto-set in :meth:`__post_init__` from :attr:`solver_cfg.class_type`. + Users normally do not set this directly. + """ + + num_substeps: int = 1 + """Number of substeps to use for the solver.""" + + debug_mode: bool = False + """Whether to enable debug mode for the solver.""" + + use_cuda_graph: bool = True + """Whether to use CUDA graphing when simulating. + + If set to False, the simulation performance will be severely degraded. + """ + + solver_cfg: NewtonSolverCfg | None = None + """Solver configuration. If None (default), MJWarpSolverCfg is used by default.""" + + collision_cfg: NewtonCollisionPipelineCfg | None = None + """Newton collision pipeline configuration. + + Controls how Newton's :class:`CollisionPipeline` is configured when it is active. + The pipeline is active when the solver delegates collision detection to Newton: + + - :class:`MJWarpSolverCfg` with ``use_mujoco_contacts=False``, + - :class:`KaminoSolverCfg` with ``use_collision_detector=False``, + - :class:`XPBDSolverCfg` (always), + - :class:`FeatherstoneSolverCfg` (always). + + If ``None`` (default), a pipeline with ``broad_phase="explicit"`` is created + automatically. Set this to a :class:`NewtonCollisionPipelineCfg` to customize + parameters such as broad phase algorithm, contact limits, or hydroelastic mode. + + .. note:: + Setting this while ``MJWarpSolverCfg.use_mujoco_contacts=True`` raises + :class:`ValueError`. When ``KaminoSolverCfg.use_collision_detector=True``, + the field is ignored because Kamino's internal detector handles contacts. + """ + + default_shape_cfg: NewtonShapeCfg = NewtonShapeCfg() + """Default per-shape collision properties applied to every shape in the scene. + + Forwarded to Newton's :attr:`ModelBuilder.default_shape_cfg` at builder + construction via :func:`~isaaclab.utils.checked_apply`. See + :class:`NewtonShapeCfg` for the declared fields. + """ + + def __post_init__(self): + # NewtonCfg.class_type is auto-derived from solver_cfg.class_type. + # Refuse a user-set value: setting both is ambiguous and was + # previously silently overwritten. + if self.class_type is not None: + raise TypeError("Cannot manually set NewtonCfg.class_type; it is auto-derived from solver_cfg.class_type.") + if self.solver_cfg is None: + from .mjwarp_manager_cfg import MJWarpSolverCfg + + self.solver_cfg = MJWarpSolverCfg() + self.class_type = self.solver_cfg.class_type diff --git a/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager.py new file mode 100644 index 000000000000..40022ef7e0d2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""XPBD Newton manager.""" + +from __future__ import annotations + +import inspect + +from newton import Model +from newton.solvers import SolverXPBD + +from .newton_manager import NewtonManager +from .xpbd_manager_cfg import XPBDSolverCfg + + +class NewtonXPBDManager(NewtonManager): + """:class:`NewtonManager` specialization for the XPBD solver. + + Always uses Newton's :class:`CollisionPipeline` for contact handling. + """ + + @classmethod + def _build_solver(cls, model: Model, solver_cfg: XPBDSolverCfg) -> None: + """Construct :class:`SolverXPBD` and populate the base-class slots. + + XPBD always uses Newton's :class:`CollisionPipeline` and steps with + separate input/output states, so the flags are fixed. + """ + valid = set(inspect.signature(SolverXPBD.__init__).parameters) - {"self", "model"} + kwargs = {k: v for k, v in solver_cfg.to_dict().items() if k in valid} + NewtonManager._solver = SolverXPBD(model, **kwargs) + NewtonManager._use_single_state = False + NewtonManager._needs_collision_pipeline = True diff --git a/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager_cfg.py new file mode 100644 index 000000000000..0555bd95a412 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/xpbd_manager_cfg.py @@ -0,0 +1,72 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils import configclass + +from .newton_manager_cfg import NewtonSolverCfg + +if TYPE_CHECKING: + from isaaclab_newton.physics import NewtonManager + + +@configclass +class XPBDSolverCfg(NewtonSolverCfg): + """An implicit integrator using eXtended Position-Based Dynamics (XPBD) for rigid and soft body simulation. + + References: + - Miles Macklin, Matthias Müller, and Nuttapong Chentanez. 2016. XPBD: position-based simulation of compliant + constrained dynamics. In Proceedings of the 9th International Conference on Motion in Games (MIG '16). + Association for Computing Machinery, New York, NY, USA, 49-54. https://doi.org/10.1145/2994258.2994272 + - Matthias Müller, Miles Macklin, Nuttapong Chentanez, Stefan Jeschke, and Tae-Yong Kim. 2020. Detailed rigid + body simulation with extended position based dynamics. In Proceedings of the ACM SIGGRAPH/Eurographics + Symposium on Computer Animation (SCA '20). Eurographics Association, Goslar, DEU, + Article 10, 1-12. https://doi.org/10.1111/cgf.14105 + + """ + + class_type: type[NewtonManager] | str = "{DIR}.xpbd_manager:NewtonXPBDManager" + """Manager class for the XPBD solver.""" + + solver_type: str = "xpbd" + """Solver type. Can be "xpbd".""" + + iterations: int = 2 + """Number of solver iterations.""" + + soft_body_relaxation: float = 0.9 + """Relaxation parameter for soft body simulation.""" + + soft_contact_relaxation: float = 0.9 + """Relaxation parameter for soft contact simulation.""" + + joint_linear_relaxation: float = 0.7 + """Relaxation parameter for joint linear simulation.""" + + joint_angular_relaxation: float = 0.4 + """Relaxation parameter for joint angular simulation.""" + + joint_linear_compliance: float = 0.0 + """Compliance parameter for joint linear simulation.""" + + joint_angular_compliance: float = 0.0 + """Compliance parameter for joint angular simulation.""" + + rigid_contact_relaxation: float = 0.8 + """Relaxation parameter for rigid contact simulation.""" + + rigid_contact_con_weighting: bool = True + """Whether to use contact constraint weighting for rigid contact simulation.""" + + angular_damping: float = 0.0 + """Angular damping parameter for rigid contact simulation.""" + + enable_restitution: bool = False + """Whether to enable restitution for rigid contact simulation.""" diff --git a/source/isaaclab_newton/isaaclab_newton/renderers/__init__.py b/source/isaaclab_newton/isaaclab_newton/renderers/__init__.py new file mode 100644 index 000000000000..72e88917ca53 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/renderers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for Newton renderer backends (Newton Warp).""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/renderers/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/renderers/__init__.pyi new file mode 100644 index 000000000000..6afa56720b99 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/renderers/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "NewtonWarpRenderer", + "NewtonWarpRendererCfg", +] + +from .newton_warp_renderer import NewtonWarpRenderer +from .newton_warp_renderer_cfg import NewtonWarpRendererCfg diff --git a/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py new file mode 100644 index 000000000000..a02d820f2951 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py @@ -0,0 +1,254 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton Warp renderer for tiled camera rendering.""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +import newton +import torch +import warp as wp + +from isaaclab.renderers import BaseRenderer, RenderBufferKind, RenderBufferSpec +from isaaclab.renderers.camera_render_spec import CameraRenderSpec +from isaaclab.sim import SimulationContext +from isaaclab.utils.math import convert_camera_frame_orientation_convention + +from .newton_warp_renderer_cfg import NewtonWarpRendererCfg + +if TYPE_CHECKING: + from isaaclab.physics import BaseSceneDataProvider + from isaaclab.sensors.camera.camera_data import CameraData + +logger = logging.getLogger(__name__) + + +class RenderData: + # Back-compat alias for callers of ``RenderData.OutputNames``. + OutputNames = RenderBufferKind + + @dataclass + class CameraOutputs: + color_image: wp.array(dtype=wp.uint32, ndim=4) = None + albedo_image: wp.array(dtype=wp.uint32, ndim=4) = None + depth_image: wp.array(dtype=wp.float32, ndim=4) = None + normals_image: wp.array(dtype=wp.vec3f, ndim=4) = None + instance_segmentation_image: wp.array(dtype=wp.uint32, ndim=4) = None + + def __init__(self, newton_sensor: newton.sensors.SensorTiledCamera, spec: CameraRenderSpec): + self.newton_sensor = newton_sensor + + self.num_cameras = 1 + + self.camera_rays: wp.array(dtype=wp.vec3f, ndim=4) = None + self.camera_transforms: wp.array(dtype=wp.transformf, ndim=2) = None + self.outputs = RenderData.CameraOutputs() + self.width = getattr(spec.cfg, "width", 100) + self.height = getattr(spec.cfg, "height", 100) + + def set_outputs(self, output_data: dict[str, torch.Tensor]): + for output_name, tensor_data in output_data.items(): + if output_name == RenderBufferKind.RGBA: + self.outputs.color_image = self._from_torch(tensor_data, dtype=wp.uint32) + elif output_name == RenderBufferKind.ALBEDO: + self.outputs.albedo_image = self._from_torch(tensor_data, dtype=wp.uint32) + elif output_name == RenderBufferKind.DEPTH: + self.outputs.depth_image = self._from_torch(tensor_data, dtype=wp.float32) + elif output_name == RenderBufferKind.NORMALS: + self.outputs.normals_image = self._from_torch(tensor_data, dtype=wp.vec3f) + elif output_name == RenderBufferKind.INSTANCE_SEGMENTATION_FAST: + self.outputs.instance_segmentation_image = self._from_torch(tensor_data, dtype=wp.uint32) + elif output_name == RenderBufferKind.RGB: + pass + else: + logger.warning(f"NewtonWarpRenderer - output type {output_name} is not yet supported") + + def get_output(self, output_name: str) -> wp.array: + if output_name == RenderBufferKind.RGBA: + return self.outputs.color_image + elif output_name == RenderBufferKind.ALBEDO: + return self.outputs.albedo_image + elif output_name == RenderBufferKind.DEPTH: + return self.outputs.depth_image + elif output_name == RenderBufferKind.NORMALS: + return self.outputs.normals_image + elif output_name == RenderBufferKind.INSTANCE_SEGMENTATION_FAST: + return self.outputs.instance_segmentation_image + return None + + def update(self, positions: torch.Tensor, orientations: torch.Tensor, intrinsics: torch.Tensor): + converted_orientations = convert_camera_frame_orientation_convention( + orientations, origin="world", target="opengl" + ) + + self.camera_transforms = wp.empty( + (1, self.newton_sensor.model.world_count), dtype=wp.transformf, device=self.newton_sensor.model.device + ) + wp.launch( + RenderData._update_transforms, + self.newton_sensor.model.world_count, + [positions, converted_orientations, self.camera_transforms], + device=self.newton_sensor.model.device, + ) + + if self.camera_rays is None: + first_focal_length = intrinsics[:, 1, 1][0:1] + fov_radians_all = 2.0 * torch.atan(self.height / (2.0 * first_focal_length)) + + self.camera_rays = self.newton_sensor.utils.compute_pinhole_camera_rays( + self.width, self.height, wp.from_torch(fov_radians_all, dtype=wp.float32) + ) + + def _from_torch(self, tensor: torch.Tensor, dtype) -> wp.array: + proxy_array = wp.from_torch(tensor) + if tensor.is_contiguous(): + return wp.array( + ptr=proxy_array.ptr, + dtype=dtype, + shape=(self.newton_sensor.model.world_count, self.num_cameras, self.height, self.width), + device=proxy_array.device, + copy=False, + ) + + logger.warning("NewtonWarpRenderer - torch output array is non-contiguous") + return wp.zeros( + (self.newton_sensor.model.world_count, self.num_cameras, self.height, self.width), + dtype=dtype, + device=proxy_array.device, + ) + + @wp.kernel + def _update_transforms( + positions: wp.array(dtype=wp.vec3f), + orientations: wp.array(dtype=wp.quatf), + output: wp.array(dtype=wp.transformf, ndim=2), + ): + tid = wp.tid() + output[0, tid] = wp.transformf(positions[tid], orientations[tid]) + + +class NewtonWarpRenderer(BaseRenderer): + """Newton Warp backend for tiled camera rendering.""" + + RenderData = RenderData + + def __init__(self, cfg: NewtonWarpRendererCfg): + from isaaclab.physics.scene_data_requirements import ( + aggregate_requirements, + requirement_for_renderer_type, + ) + + self.cfg = cfg + sim = SimulationContext.instance() + current_req = sim.get_scene_data_requirements() + renderer_req = requirement_for_renderer_type("newton_warp") + merged = aggregate_requirements([current_req, renderer_req]) + if merged != current_req: + sim.update_scene_data_requirements(merged) + + newton_model = self.get_scene_data_provider().get_newton_model() + if newton_model is None: + raise RuntimeError( + "NewtonWarpRenderer requires a Newton model but the scene data provider returned None. " + "This usually means the Newton model failed to build from the USD stage " + "(e.g., unsupported PhysX schemas such as tendons). " + "Check the log for earlier Newton model build errors." + ) + + self.newton_sensor = newton.sensors.SensorTiledCamera( + newton_model, + config=newton.sensors.SensorTiledCamera.RenderConfig( + enable_textures=cfg.enable_textures, + enable_shadows=cfg.enable_shadows, + enable_ambient_lighting=cfg.enable_ambient_lighting, + enable_backface_culling=cfg.enable_backface_culling, + max_distance=cfg.max_distance, + ), + ) + + if cfg.create_default_light: + self.newton_sensor.utils.create_default_light(enable_shadows=cfg.enable_shadows) + + def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: + """Publish the per-output layout this Newton Warp backend writes. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.supported_output_types`.""" + seg_spec = ( + RenderBufferSpec(4, torch.uint8) + if self.cfg.colorize_instance_segmentation + else RenderBufferSpec(1, torch.int32) + ) + return { + RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.ALBEDO: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), + RenderBufferKind.NORMALS: RenderBufferSpec(3, torch.float32), + RenderBufferKind.INSTANCE_SEGMENTATION_FAST: seg_spec, + } + + def prepare_stage(self, stage: Any, num_envs: int) -> None: + """No-op for Newton Warp - uses Newton scene directly without stage export. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.prepare_stage`.""" + pass + + def create_render_data(self, spec: CameraRenderSpec) -> RenderData: + """Create render data for the Newton tiled camera. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.create_render_data`.""" + return RenderData(self.newton_sensor, spec) + + def set_outputs(self, render_data: RenderData, output_data: dict[str, torch.Tensor]): + """Store output buffers. See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.set_outputs`.""" + render_data.set_outputs(output_data) + + def update_transforms(self): + """Sync Newton scene state before rendering. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.update_transforms`.""" + SimulationContext.instance().update_scene_data_provider(True) + + def update_camera( + self, render_data: RenderData, positions: torch.Tensor, orientations: torch.Tensor, intrinsics: torch.Tensor + ): + """Update camera poses and intrinsics. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.update_camera`.""" + render_data.update(positions, orientations, intrinsics) + + def render(self, render_data: RenderData): + """Render and write to output buffers. See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.render`.""" + self.newton_sensor.update( + self.get_scene_data_provider().get_newton_state(), + render_data.camera_transforms, + render_data.camera_rays, + color_image=render_data.outputs.color_image, + albedo_image=render_data.outputs.albedo_image, + depth_image=render_data.outputs.depth_image, + normal_image=render_data.outputs.normals_image, + shape_index_image=render_data.outputs.instance_segmentation_image, + # ARGB 93% gray to improve visibility of dark objects and align with RTX renderer background + clear_data=newton.sensors.SensorTiledCamera.ClearData(clear_color=0xFFEEEEEE), + ) + + def read_output(self, render_data: RenderData, camera_data: CameraData) -> None: + """Copy rendered outputs to the camera data buffers. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.read_output`.""" + for output_name in camera_data.output: + if output_name == "rgb": + continue + image_data = render_data.get_output(output_name) + if image_data is not None: + output_data = camera_data.output[output_name] + if image_data.ptr != output_data.data_ptr(): + wp.copy(wp.from_torch(output_data), image_data) + + def cleanup(self, render_data: RenderData | None): + """Release resources. No-op for Newton Warp. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.cleanup`.""" + pass + + def get_scene_data_provider(self) -> BaseSceneDataProvider: + return SimulationContext.instance().initialize_scene_data_provider() diff --git a/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer_cfg.py b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer_cfg.py new file mode 100644 index 000000000000..96db9ca41fd1 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer_cfg.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton Warp Renderer.""" + +from isaaclab.renderers.renderer_cfg import RendererCfg +from isaaclab.utils import configclass + + +@configclass +class NewtonWarpRendererCfg(RendererCfg): + """Configuration for Newton Warp Renderer.""" + + renderer_type: str = "newton_warp" + """Type identifier for Newton Warp renderer.""" + + enable_textures: bool = True + """Enable texture-mapped rendering for meshes.""" + + enable_shadows: bool = False + """Enable shadow rays for directional lights.""" + + enable_ambient_lighting: bool = True + """Enable ambient lighting for the scene.""" + + enable_backface_culling: bool = True + """Cull back-facing triangles.""" + + max_distance: float = 1000.0 + """Maximum ray distance [m].""" + + create_default_light: bool = True + """Create a default directional light source in the scene.""" + + colorize_instance_segmentation: bool = True + """Expose ``instance_segmentation_fast`` as ``(N, H, W, 4) uint8`` if True, else ``(N, H, W, 1) int32``.""" diff --git a/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.py b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.py new file mode 100644 index 000000000000..cf0f30853ead --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton scene data provider backends.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.pyi new file mode 100644 index 000000000000..3cb204031738 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "NewtonSceneDataProvider", +] + +from .newton_scene_data_provider import NewtonSceneDataProvider diff --git a/source/isaaclab_newton/isaaclab_newton/scene_data_providers/newton_scene_data_provider.py b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/newton_scene_data_provider.py new file mode 100644 index 000000000000..ba19f4e7c63a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/scene_data_providers/newton_scene_data_provider.py @@ -0,0 +1,306 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Scene data provider for Newton physics backend.""" + +from __future__ import annotations + +import logging +import re +from collections import deque +from typing import Any + +from pxr import UsdGeom + +from isaaclab.physics.base_scene_data_provider import BaseSceneDataProvider + +logger = logging.getLogger(__name__) + +_ENV_ID_RE = re.compile(r"/World/envs/env_(\d+)") + + +class NewtonSceneDataProvider(BaseSceneDataProvider): + """Scene data provider for Newton physics backend. + + Provides access to Newton model, state, and USD stage for visualizers and renderers. + Unlike PhysxSceneDataProvider which must build its own Newton model from USD and sync + PhysX transforms into it, this provider delegates directly to NewtonManager since the + Newton backend already owns the authoritative model and state. + """ + + def __init__(self, stage, simulation_context) -> None: + """Initialize the Newton scene data provider. + + Args: + stage: USD stage handle. + simulation_context: Active simulation context. + """ + self._simulation_context = simulation_context + self._stage = stage + self._metadata = {"physics_backend": "newton"} + self._num_envs: int | None = None + self._warned_once: set[str] = set() + + # Determine if usd stage sync is required for selected renderers and visualizers + requirements = self._simulation_context.get_scene_data_requirements() + self._needs_usd_sync = bool(requirements.requires_usd_stage) + + def _warn_once(self, key: str, message: str, *args) -> None: + """Emit a warning once per unique key. + + Args: + key: Unique warning key. + message: Warning message format string. + *args: Optional formatting arguments. + """ + if key in self._warned_once: + return + self._warned_once.add(key) + logger.warning(message, *args) + + # ---- Environment discovery --------------------------------------------------------------- + + def get_num_envs(self) -> int: + """Return discovered environment count. + + Returns: + Number of environments discovered from stage prim paths. + """ + if self._num_envs is not None and self._num_envs > 0: + return self._num_envs + discovered = self._determine_num_envs_in_scene() + if discovered > 0: + self._num_envs = discovered + return discovered + return 0 + + def _determine_num_envs_in_scene(self) -> int: + """Infer environment count from ``/World/envs/env_`` prim names. + + Returns: + Number of environments inferred from the stage. + """ + if self._stage is None: + return 0 + max_env_id = -1 + env_name_re = re.compile(r"^env_(\d+)$") + envs_root = self._stage.GetPrimAtPath("/World/envs") + if envs_root.IsValid(): + for child in envs_root.GetChildren(): + match = env_name_re.match(child.GetName()) + if match: + max_env_id = max(max_env_id, int(match.group(1))) + return max_env_id + 1 if max_env_id >= 0 else 0 + + # ---- Core provider API ------------------------------------------------------------------- + + def update(self) -> None: + """Sync Newton body transforms to USD Fabric when a Kit viewport is active. + + Called at render cadence by :meth:`~isaaclab.sim.SimulationContext.update_scene_data_provider`, + after forward kinematics have been evaluated. Only calls + :meth:`~isaaclab_newton.physics.NewtonManager.sync_transforms_to_usd` when a Kit + (or other USD-based) visualizer is in use. When both sim and rendering backend + are Newton (or Rerun), the sync is skipped to avoid unnecessary slowdown. + """ + if not self._needs_usd_sync: + return + try: + from isaaclab_newton.physics import NewtonManager + + NewtonManager.sync_transforms_to_usd() + except Exception: + pass + + def get_newton_model(self) -> Any | None: + """Return Newton model from ``NewtonManager``. + + Returns: + Newton model object, or ``None`` when unavailable. + """ + from isaaclab_newton.physics import NewtonManager + + return NewtonManager.get_model() + + def get_newton_state(self) -> Any | None: + """Return Newton state from NewtonManager. + + Returns: + The current Newton state (state_0) from NewtonManager. + """ + from isaaclab_newton.physics import NewtonManager + + return NewtonManager.get_state_0() + + def get_model(self) -> Any | None: + """Alias for :meth:`get_newton_model` for visualizer compatibility. + + Returns: + Newton model object, or ``None`` when unavailable. + """ + return self.get_newton_model() + + def get_state(self) -> Any | None: + """Alias for :meth:`get_newton_state` for visualizer compatibility.""" + return self.get_newton_state() + + def get_usd_stage(self) -> Any | None: + """Return the USD stage handle. + + Returns: + USD stage object, or ``None`` when unavailable. + """ + if self._stage is not None: + return self._stage + return getattr(self._simulation_context, "stage", None) + + def get_metadata(self) -> dict[str, Any]: + """Return provider metadata. + + Returns: + Metadata dictionary with backend and synchronization information. + """ + out = dict(self._metadata) + out["num_envs"] = self.get_num_envs() + out["needs_usd_sync"] = self._needs_usd_sync + return out + + def get_transforms(self) -> dict[str, Any] | None: + """Return body transforms from Newton state. + + Reads body_q from the authoritative Newton state and splits it into + positions (vec3) and orientations (quaternion xyzw). + + Returns: + Dictionary containing positions and orientations, or ``None`` when unavailable. + """ + try: + import warp as wp + + from isaaclab_newton.physics import NewtonManager + + state = NewtonManager.get_state_0() + if state is None or state.body_q is None: + return None + + body_q_t = wp.to_torch(state.body_q) + positions = body_q_t[:, :3] + orientations = body_q_t[:, 3:7] + return {"positions": positions, "orientations": orientations} + except Exception as exc: + self._warn_once( + "get-transforms-failed", + "[NewtonSceneDataProvider] get_transforms() failed: %s", + exc, + ) + return None + + def get_velocities(self) -> dict[str, Any] | None: + """Return body velocities from Newton state. + + Returns: + Dictionary containing linear and angular velocities, or ``None`` when unavailable. + """ + try: + import warp as wp + + from isaaclab_newton.physics import NewtonManager + + state = NewtonManager.get_state_0() + if state is None: + return None + + body_qd = getattr(state, "body_qd", None) + if body_qd is None: + return None + + body_qd_t = wp.to_torch(body_qd) + linear = body_qd_t[:, :3] + angular = body_qd_t[:, 3:6] + return {"linear": linear, "angular": angular, "source": "newton"} + except Exception as exc: + self._warn_once( + "get-velocities-failed", + "[NewtonSceneDataProvider] get_velocities() failed: %s", + exc, + ) + return None + + def get_contacts(self) -> dict[str, Any] | None: + """Return contact data for Newton provider. + + Returns: + ``None`` because contacts are not currently implemented in this provider. + """ + return None + + def get_camera_transforms(self) -> dict[str, Any] | None: + """Return per-camera, per-environment transforms. + + Returns: + Dictionary containing camera order, positions, orientations, and environment count, + or ``None`` when unavailable. + """ + if self._stage is None: + return None + + import isaaclab.sim as isaaclab_sim + + env_pattern = re.compile(r"(?P/World/envs/env_)(?P\d+)(?P/.*)") + shared_paths: list[str] = [] + instances: dict[str, list[tuple[int, str]]] = {} + num_envs = -1 + + stage_prims = deque([self._stage.GetPseudoRoot()]) + while stage_prims: + prim = stage_prims.popleft() + prim_path = prim.GetPath().pathString + + world_id = 0 + template_path = prim_path + if match := env_pattern.match(prim_path): + world_id = int(match.group("id")) + template_path = match.group("root") + "%d" + match.group("path") + if world_id > num_envs: + num_envs = world_id + + imageable = UsdGeom.Imageable(prim) + if imageable and imageable.ComputeVisibility() == UsdGeom.Tokens.invisible: + continue + + if prim.IsA(UsdGeom.Camera): + if template_path not in instances: + instances[template_path] = [] + instances[template_path].append((world_id, prim_path)) + if template_path not in shared_paths: + shared_paths.append(template_path) + + if hasattr(UsdGeom, "TraverseInstanceProxies"): + child_prims = prim.GetFilteredChildren(UsdGeom.TraverseInstanceProxies()) + else: + child_prims = prim.GetChildren() + if child_prims: + stage_prims.extend(child_prims) + + num_envs += 1 + positions: list[list[list[float] | None]] = [] + orientations: list[list[list[float] | None]] = [] + + for template_path in shared_paths: + per_world_pos: list[list[float] | None] = [None] * num_envs + per_world_ori: list[list[float] | None] = [None] * num_envs + for world_id, prim_path in instances.get(template_path, []): + if world_id < 0 or world_id >= num_envs: + continue + prim = self._stage.GetPrimAtPath(prim_path) + if not prim.IsValid(): + continue + pos, ori = isaaclab_sim.resolve_prim_pose(prim) + per_world_pos[world_id] = [float(pos[0]), float(pos[1]), float(pos[2])] + per_world_ori[world_id] = [float(ori[0]), float(ori[1]), float(ori[2]), float(ori[3])] + positions.append(per_world_pos) + orientations.append(per_world_ori) + + return {"order": shared_paths, "positions": positions, "orientations": orientations, "num_envs": num_envs} diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/__init__.py new file mode 100644 index 000000000000..cd81f5a3c2f2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing Newton-specific sensor implementations.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/__init__.pyi new file mode 100644 index 000000000000..e536b281952b --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ContactSensor", + "ContactSensorData", + "ContactSensorCfg", + "FrameTransformer", + "FrameTransformerData", + "Imu", + "ImuData", + "JointWrenchSensor", + "JointWrenchSensorData", + "Pva", + "PvaData", +] + +from .contact_sensor import ContactSensor, ContactSensorData, ContactSensorCfg +from .frame_transformer import FrameTransformer, FrameTransformerData +from .imu import Imu, ImuData +from .joint_wrench import JointWrenchSensor, JointWrenchSensorData +from .pva import Pva, PvaData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.py new file mode 100644 index 000000000000..6225c0602968 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for contact sensor based on :class:`newton.SensorContact`.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.pyi new file mode 100644 index 000000000000..fd936d53b0c0 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ContactSensor", + "ContactSensorCfg", + "ContactSensorData", +] + +from .contact_sensor import ContactSensor +from .contact_sensor_cfg import ContactSensorCfg +from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor.py new file mode 100644 index 000000000000..02850e9cc903 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor.py @@ -0,0 +1,479 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +from __future__ import annotations + +import logging +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp +from newton.sensors import SensorContact as NewtonContactSensor + +import isaaclab.utils.string as string_utils +from isaaclab.sensors.contact_sensor.base_contact_sensor import BaseContactSensor +from isaaclab.utils.warp import ProxyArray + +from isaaclab_newton.physics import NewtonManager + +from .contact_sensor_data import ContactSensorData +from .contact_sensor_kernels import ( + compute_first_transition_kernel, + copy_from_newton_kernel, + reset_contact_sensor_kernel, + update_contact_sensor_kernel, +) + +if TYPE_CHECKING: + from isaaclab.sensors.contact_sensor.contact_sensor_cfg import ContactSensorCfg as BaseContactSensorCfg + + from .contact_sensor_cfg import ContactSensorCfg + +logger = logging.getLogger(__name__) + + +class ContactSensor(BaseContactSensor): + """A contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body or shape in the world frame. + + The sensor can be configured to report the contact forces on a set of sensors (bodies or shapes) + against specific filter objects using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is + useful when you want to report the contact forces between the sensors and a specific set of objects + in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + + .. _Newton SensorContact: https://newton-physics.github.io/newton/api/_generated/newton.sensors.SensorContact.html + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + def __init__(self, cfg: BaseContactSensorCfg | ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + from isaaclab.sensors.contact_sensor.contact_sensor_cfg import ContactSensorCfg as BaseContactSensorCfg + + from .contact_sensor_cfg import ContactSensorCfg + + if isinstance(cfg, ContactSensorCfg): + pass + elif isinstance(cfg, BaseContactSensorCfg): + cfg = ContactSensorCfg.from_base_cfg(cfg) + else: + raise TypeError(f"Invalid config: {cfg}") + + super().__init__(cfg) + + # Create empty variables for storing output data + self._data: ContactSensorData = ContactSensorData() + # Defaults used before full initialization completes. + self._num_sensors: int = 0 + self._sensor_names: list[str] = [] + self._filter_object_names: list[str] = [] + self._num_filter_objects: int = 0 + self._init_error: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self.num_sensors}\n" + f"\tsensor names : {self.sensor_names}\n" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int | None: + return self._num_sensors + + @property + def data(self) -> ContactSensorData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_sensors(self) -> int: + """Number of sensors (bodies or shapes with contact sensing attached).""" + return self._num_sensors + + @property + def sensor_names(self) -> list[str] | None: + """Ordered names of sensors (shapes or bodies with contact sensing attached).""" + return self._sensor_names + + @property + def filter_object_names(self) -> list[str] | None: + """Ordered names of filter objects (counterparts) for contact filtering.""" + return self._filter_object_names + + @property + def num_filter_objects(self) -> int: + """Number of filter objects (counterparts) for contact filtering.""" + return self._num_filter_objects + + @property + def contact_view(self) -> NewtonContactSensor: + """View for the contact forces captured (Newton).""" + return NewtonManager._newton_contact_sensors[self._sensor_key] + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + # reset the timers and counters + super().reset(env_ids, env_mask) + + # Resolve env_mask (same logic as base class) + if env_ids is None and env_mask is None: + env_mask = wp.full(self._num_envs, True, dtype=wp.bool, device=self._device) + elif env_mask is None: + if isinstance(env_ids, torch.Tensor): + env_ids_torch = env_ids.to(device=self._device, dtype=torch.long).reshape(-1) + mask_torch = torch.zeros(self._num_envs, dtype=torch.bool, device=self._device) + if env_ids_torch.numel() > 0: + mask_torch[env_ids_torch] = True + env_mask = wp.from_torch(mask_torch, dtype=wp.bool) + elif isinstance(env_ids, wp.array): + env_ids_np = np.asarray(env_ids.numpy(), dtype=np.int64).reshape(-1) + mask_np = np.zeros(self._num_envs, dtype=np.bool_) + if env_ids_np.size > 0: + mask_np[env_ids_np] = True + env_mask = wp.array(mask_np, dtype=wp.bool, device=self._device) + else: + env_ids_np = np.asarray(env_ids, dtype=np.int64).reshape(-1) + mask_np = np.zeros(self._num_envs, dtype=np.bool_) + if env_ids_np.size > 0: + mask_np[env_ids_np] = True + env_mask = wp.array(mask_np, dtype=wp.bool, device=self._device) + + # Compute num_filter_objects + num_filter_objects = self._num_filter_objects + + # Reset contact sensor buffers via kernel + wp.launch( + reset_contact_sensor_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[ + self.cfg.history_length, + num_filter_objects, + env_mask, + self._data._net_forces_w, + self._data._net_forces_w_history, + self._data._force_matrix_w, + ], + outputs=[ + self._data._current_air_time, + self._data._last_air_time, + self._data._current_contact_time, + self._data._last_contact_time, + ], + device=self._device, + ) + + def find_sensors(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find sensors based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the sensor names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple containing the sensor indices and names. + """ + sensor_names = self.sensor_names + if not sensor_names: + if self._init_error is not None: + raise ValueError(f"ContactSensor initialization failed: {self._init_error}") + raise ValueError( + "ContactSensor metadata is unavailable. Expected sensor names to be populated during" + " PHYSICS_READY initialization." + ) + return string_utils.resolve_matching_names(name_keys, sensor_names, preserve_order) + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if sensors that have established contact within the last :attr:`dt` seconds. + + This function checks if the sensors have established contact within the last :attr:`dt` seconds + by comparing the current contact time with the given time period. If the contact time is less + than the given time period, then the sensors are considered to be in contact. + + Note: + The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other + words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true + if the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contact was established. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A float array (1.0/0.0) indicating the sensors that have established contact within the + last :attr:`dt` seconds. Shape is (N, S), where N is the number of environments and S is + the number of sensors. The returned array is a shared internal buffer; it is invalidated + by the next call to :meth:`compute_first_contact` or :meth:`compute_first_air`. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track contact time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + wp.launch( + compute_first_transition_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[float(dt + abs_tol), self._data._current_contact_time], + outputs=[self._data._first_transition], + device=self._device, + ) + return self._data._first_transition_ta + + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if sensors that have broken contact within the last :attr:`dt` seconds. + + This function checks if the sensors have broken contact within the last :attr:`dt` seconds + by comparing the current air time with the given time period. If the air time is less + than the given time period, then the sensors are considered to not be in contact. + + Note: + It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, + :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if + the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contract is broken. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A float array (1.0/0.0) indicating the sensors that have broken contact within the last + :attr:`dt` seconds. Shape is (N, S), where N is the number of environments and S is the + number of sensors. The returned array is a shared internal buffer; it is invalidated by + the next call to :meth:`compute_first_contact` or :meth:`compute_first_air`. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track contact time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + + wp.launch( + compute_first_transition_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[float(dt + abs_tol), self._data._current_air_time], + outputs=[self._data._first_transition], + device=self._device, + ) + return self._data._first_transition_ta + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor-related handles and internal buffers.""" + super()._initialize_impl() + + if self.cfg.force_threshold is None: + self.cfg.force_threshold = 0.0 + + self._generate_force_matrix = bool(self.cfg.filter_prim_paths_expr or self.cfg.filter_shape_prim_expr) + + try: + self._sensor_key = NewtonManager.add_contact_sensor( + body_names_expr=self.cfg.prim_path if not self.cfg.sensor_shape_prim_expr else None, + shape_names_expr=self.cfg.sensor_shape_prim_expr or None, + contact_partners_body_expr=self.cfg.filter_prim_paths_expr or None, + contact_partners_shape_expr=self.cfg.filter_shape_prim_expr or None, + ) + + self._create_buffers() + self._init_error = None + except Exception as err: + self._init_error = ( + f"failed to initialize contact sensor for prim path '{self.cfg.prim_path}'" + f" with sensor shape expr '{self.cfg.sensor_shape_prim_expr}': {err}" + ) + raise RuntimeError(self._init_error) from err + + def _create_buffers(self): + # Get Newton sensor shape: (n_sensors * n_envs, n_counterparts) + newton_shape = self.contact_view.shape + + # resolve the true count of sensors + self._num_sensors = newton_shape[0] // self._num_envs + + # Check that number of sensors is an integer + if newton_shape[0] % self._num_envs != 0: + raise RuntimeError( + "Number of sensors is not an integer multiple of the number of environments. Received:" + f" {newton_shape[0]} sensors across {self._num_envs} environments." + ) + if self._num_sensors == 0: + raise RuntimeError( + "Contact sensor matched zero sensing objects. This usually indicates a prim-path pattern mismatch" + f" for expression '{self.cfg.prim_path}'." + ) + logger.info(f"Contact sensor initialized with {self._num_sensors} sensors.") + + # Assume homogeneous envs, i.e. all envs have the same number of sensors + # Only get the names for the first env. Expected structure: /World/envs/env_.*/... + body_labels = self._get_model_labels("body") + shape_labels = self._get_model_labels("shape") + + def get_name(idx, kind): + kind_name = getattr(kind, "name", None) + kind_value = getattr(kind, "value", kind) + if kind_name == "BODY" or kind_value == 2: + return body_labels[idx].split("/")[-1] + if kind_name == "SHAPE" or kind_value == 1: + return shape_labels[idx].split("/")[-1] + return "MATCH_ANY" + + flat_sensing = [obj for world_objs in self.contact_view.sensing_objs for obj in world_objs] + self._sensor_names = [get_name(idx, kind) for idx, kind in flat_sensing] + # Assumes the environments are processed in order. + self._sensor_names = self._sensor_names[: self._num_sensors] + flat_counterparts = [obj for world_objs in self.contact_view.counterparts for obj in world_objs] + self._filter_object_names = [get_name(idx, kind) for idx, kind in flat_counterparts] + + # Number of filter objects (counterparts minus the total column) + self._num_filter_objects = max(newton_shape[1] - 1, 0) + + # Store reshaped Newton net_force view for copying data + # Newton net_force shape: (n_sensors * n_envs, n_counterparts) + # Reshaped to: (n_envs, n_sensors, n_counterparts) + self._newton_forces_view = self.contact_view.net_force.reshape((self._num_envs, self._num_sensors, -1)) + + # prepare data buffers + logger.info( + f"Creating buffers for contact sensor data with num_envs: {self._num_envs}, num_sensors:" + f" {self._num_sensors}, num_filter_objects: {self._num_filter_objects}, history_length:" + f" {self.cfg.history_length}, generate_force_matrix: {self._generate_force_matrix}, track_air_time:" + f" {self.cfg.track_air_time}, track_pose: {self.cfg.track_pose}, device: {self._device}" + ) + self._data.create_buffers( + self._num_envs, + self._num_sensors, + self._num_filter_objects, + self.cfg.history_length, + self._generate_force_matrix, + self.cfg.track_air_time, + self.cfg.track_pose, + self._device, + ) + + def _get_model_labels(self, kind: str) -> list[str]: + """Return Newton model labels in a version-compatible way.""" + model = NewtonManager._model + primary = f"{kind}_label" + fallback = f"{kind}_key" + labels = getattr(model, primary, None) + if labels is None: + labels = getattr(model, fallback, None) + if labels is None: + raise RuntimeError(f"Newton model does not expose '{primary}' or '{fallback}'.") + return list(labels) + + def _update_buffers_impl(self, env_mask: wp.array): + """Fills the buffers of the sensor data. + + Args: + env_mask: Mask of the environments to update. None: update all environments. + """ + # Copy data from Newton into owned buffers (respecting env_mask) + # Launch with 3D for coalescing: dim=(num_envs, num_sensors, max(num_filter_objects, 1)) + wp.launch( + copy_from_newton_kernel, + dim=(self._num_envs, self._num_sensors, max(self._num_filter_objects, 1)), + inputs=[ + env_mask, + self._newton_forces_view, + ], + outputs=[ + self._data._net_forces_w, + self._data._force_matrix_w, + ], + device=self._device, + ) + + # Update history and air/contact time tracking + wp.launch( + update_contact_sensor_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[ + self.cfg.history_length, + self.cfg.force_threshold, + env_mask, + self._data._net_forces_w, + self._timestamp, + self._timestamp_last_update, + self._data._net_forces_w_history, + self._data._current_air_time, + self._data._current_contact_time, + self._data._last_air_time, + self._data._last_contact_time, + ], + device=self._device, + ) + + # FIXME: Re-enable this when we have a non-physx rigid body view? + # (tracked in https://github.com/newton-physics/newton/issues/1489) + # obtain the pose of the sensor origin + # if self.cfg.track_pose: + # pose = self.body_physx_view.get_transforms().view(-1, self._num_sensors, 7)[env_ids] + # pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") + # self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + return + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + # TODO: invalidate NewtonManager if necessary + + """ + Renamed + """ + + @property + def body_names(self) -> list[str] | None: + warnings.warn( + "ContactSensor.body_names is deprecated; use ContactSensor.sensor_names instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.sensor_names diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_cfg.py b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_cfg.py new file mode 100644 index 000000000000..196c5d0349e7 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_cfg.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from typing import TYPE_CHECKING + +from isaaclab.sensors.contact_sensor.contact_sensor_cfg import ContactSensorCfg as BaseContactSensorCfg +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from .contact_sensor import ContactSensor + + +@configclass +class ContactSensorCfg(BaseContactSensorCfg): + """Configuration for the Newton contact sensor with shape-level support. + + Extends :class:`ContactSensorCfg` with shape-level fields for finer-grained + contact reporting. A body is a rigid body; a shape is an individual collision geometry + attached to a body. A single body can have multiple shapes. + + Sensing objects (what to measure forces on): + + - :attr:`sensor_body_prim_expr` — read-only alias for :attr:`prim_path` (body-level sensing). + - :attr:`sensor_shape_prim_expr` — optional shape-level sensing. If set, takes + precedence over :attr:`prim_path`. + + Filter partners (what to measure forces against): + + - :attr:`filter_prim_paths_expr` — body-level filter (inherited from :class:`ContactSensorCfg`). + - :attr:`filter_shape_prim_expr` — shape-level filter. + + An instance can be created from an existing :class:`ContactSensorCfg` via + :meth:`from_base_cfg`. + """ + + class_type: type["ContactSensor"] | str = "{DIR}.contact_sensor:ContactSensor" + + @property + def sensor_body_prim_expr(self) -> str: + """Read-only alias for :attr:`prim_path`.""" + return self.prim_path + + sensor_shape_prim_expr: list[str] = [] + """List of shape prim path expressions for shape-level contact sensing. + Defaults to empty, meaning sensing is at the body level (via :attr:`prim_path`). + + Mutually exclusive with body-level sensing: if non-empty, :attr:`prim_path` is ignored + for the sensing objects and these shape expressions are used instead. + + .. note:: + Expressions can contain the environment namespace regex ``{ENV_REGEX_NS}``, which + is replaced with the environment namespace. + + Example: ``{ENV_REGEX_NS}/Robot/fingertip_.*`` becomes ``/World/envs/env_.*/Robot/fingertip_.*``. + """ + + filter_shape_prim_expr: list[str] = [] + """List of shape prim path expressions to filter contacts against at the shape level. + Defaults to empty, meaning filter partners are resolved at the body level only + (via :attr:`ContactSensorCfg.filter_prim_paths_expr`). + + If provided, the force matrix reports per-shape contact forces between the sensing + primitives and the filter shapes. + + Mutually exclusive with :attr:`ContactSensorCfg.filter_prim_paths_expr`; only one + must be set. + + .. note:: + Expressions can contain the environment namespace regex ``{ENV_REGEX_NS}``, which + is replaced with the environment namespace. + + Example: ``{ENV_REGEX_NS}/Object`` becomes ``/World/envs/env_.*/Object``. + """ + + def __post_init__(self): + if self.track_contact_points: + warnings.warn( + "ContactSensorCfg: 'track_contact_points' is not supported by the Newton backend. Ignoring.", + stacklevel=2, + ) + self.track_contact_points = False + + if self.max_contact_data_count_per_prim is not None: + warnings.warn( + "ContactSensorCfg: 'max_contact_data_count_per_prim' is not supported by the Newton backend. Ignoring.", + stacklevel=2, + ) + self.max_contact_data_count_per_prim = None + + if self.track_friction_forces: + warnings.warn( + "ContactSensorCfg: 'track_friction_forces' is not supported by the Newton backend. Ignoring.", + stacklevel=2, + ) + self.track_friction_forces = False + + @classmethod + def from_base_cfg(cls, base_cfg: BaseContactSensorCfg, **kwargs) -> "ContactSensorCfg": + """Creates a :class:`ContactSensorCfg` from an existing :class:`ContactSensorCfg`. + + Args: + base_cfg: The base contact sensor configuration to copy from. + **kwargs: Newton-specific fields, e.g. ``filter_shape_prim_expr=["fingertip_.*"]``. + + Returns: + A new :class:`ContactSensorCfg` instance. + + Raises: + ValueError: If ``class_type`` is passed in keyword arguments. + """ + if "class_type" in kwargs: + raise ValueError("Cannot override 'class_type' via from_base_cfg.") + base_fields = { + field: getattr(base_cfg, field) for field in base_cfg.__dataclass_fields__ if field != "class_type" + } + return cls(**base_fields, **kwargs) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_data.py new file mode 100644 index 000000000000..8ed00b91eab8 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_data.py @@ -0,0 +1,299 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# needed to import for allowing type-hinting: torch.Tensor | None +from __future__ import annotations + +import logging + +import warp as wp + +from isaaclab.sensors.contact_sensor.base_contact_sensor_data import BaseContactSensorData +from isaaclab.utils.warp import ProxyArray + +logger = logging.getLogger(__name__) + + +class ContactSensorData(BaseContactSensorData): + """Data container for the contact reporting sensor.""" + + _pos_w: wp.array | None + _quat_w: wp.array | None + + _net_forces_w: wp.array | None + _net_forces_w_history: wp.array | None + _force_matrix_w: wp.array | None + _force_matrix_w_history: wp.array | None + _last_air_time: wp.array | None + _current_air_time: wp.array | None + _last_contact_time: wp.array | None + _current_contact_time: wp.array | None + _first_transition: wp.array | None + + @property + def pose_w(self) -> ProxyArray | None: + """Not supported by Newton contact sensor.""" + raise NotImplementedError("pose_w is not supported by the Newton contact sensor.") + + @property + def pos_w(self) -> ProxyArray | None: + """Position of the sensor origin in world frame. + + `wp.vec3f` array whose shape is (N,) where N is the number of sensors. Note, that when casted to as a + `torch.Tensor`, the shape will be (N, 3). + + Note: + If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. + """ + if self._pos_w is None: + return None + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray | None: + """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. + + `wp.quatf` whose shape is (N,) where N is the number of sensors. Note, that when casted to as a `torch.Tensor`, + the shape will be (N, 4). + + Note: + If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. + """ + if self._quat_w is None: + return None + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def net_forces_w(self) -> ProxyArray | None: + """The net (total) contact forces in world frame. + + `wp.vec3f` array whose shape is (N, S) where N is the number of environments and S is the number of sensors. + Note, that when casted to as a `torch.Tensor`, the shape will be (N, S, 3). + + Note: + This quantity is the sum of the contact forces acting on each sensor. It must not be confused + with the total contact forces acting on the sensors (which also includes the tangential forces). + """ + if self._net_forces_w is None: + return None + return self._net_forces_w_ta + + @property + def net_forces_w_history(self) -> ProxyArray | None: + """The net (total) contact forces in world frame. + + `wp.vec3f` array whose shape is (N, T, S) where N is the number of environments, T is the configured history + length and S is the number of sensors. Note, that when casted to as a `torch.Tensor`, the shape will be + (N, T, S, 3). + + In the history dimension, the first index is the most recent and the last index is the oldest. + + Note: + This quantity is the sum of the contact forces acting on each sensor. It must not be confused + with the total contact forces acting on the sensors (which also includes the tangential forces). + """ + if self._net_forces_w_history is None: + return None + return self._net_forces_w_history_ta + + @property + def force_matrix_w(self) -> ProxyArray | None: + """The contact forces between sensors and filter objects in world frame. + + `wp.vec3f` array whose shape is (N, S, F) where N is the number of environments, S is number of sensors + and F is the number of filter objects. Note, that when casted to as a `torch.Tensor`, the shape will be + (N, S, F, 3). + + Note: + If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. + """ + if self._force_matrix_w is None: + return None + if self._force_matrix_w_ta is None: + self._force_matrix_w_ta = ProxyArray(self._force_matrix_w) + return self._force_matrix_w_ta + + @property + def force_matrix_w_history(self) -> ProxyArray | None: + """The contact forces between sensors and filter objects in world frame. + + `wp.vec3f` array whose shape is (N, T, S, F) where N is the number of environments, T is the configured history + length, S is number of sensors and F is the number of filter objects. Note, that when casted to as a + `torch.Tensor`, the shape will be (N, T, S, F, 3). + + In the history dimension, the first index is the most recent and the last index is the oldest. + + Note: + If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. + """ + if self._force_matrix_w_history is None: + return None + if self._force_matrix_w_history_ta is None: + self._force_matrix_w_history_ta = ProxyArray(self._force_matrix_w_history) + return self._force_matrix_w_history_ta + + @property + def contact_pos_w(self) -> ProxyArray | None: + """Not supported by Newton contact sensor.""" + raise NotImplementedError("contact_pos_w is not supported by the Newton contact sensor.") + + @property + def friction_forces_w(self) -> ProxyArray | None: + """Not supported by Newton contact sensor.""" + raise NotImplementedError("friction_forces_w is not supported by the Newton contact sensor.") + + @property + def last_air_time(self) -> ProxyArray | None: + """Time spent (in s) in the air before the last contact. + + `wp.float32` array whose shape is (N, S) where N is the number of environments and S is the number of sensors. + Note, that when casted to as a `torch.Tensor`, the shape will be (N, S). + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + if self._last_air_time is None: + return None + return self._last_air_time_ta + + @property + def current_air_time(self) -> ProxyArray | None: + """Time spent (in s) in the air since the last detach. + + `wp.float32` array whose shape is (N, S) where N is the number of environments and S is the number of sensors. + Note, that when casted to as a `torch.Tensor`, the shape will be (N, S). + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + if self._current_air_time is None: + return None + return self._current_air_time_ta + + @property + def last_contact_time(self) -> ProxyArray | None: + """Time spent (in s) in contact before the last detach. + + `wp.float32` array whose shape is (N, S) where N is the number of environments and S is the number of sensors. + Note, that when casted to as a `torch.Tensor`, the shape will be (N, S). + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + if self._last_contact_time is None: + return None + return self._last_contact_time_ta + + @property + def current_contact_time(self) -> ProxyArray | None: + """Time spent (in s) in contact since the last contact. + + `wp.float32` array whose shape is (N, S) where N is the number of environments and S is the number of sensors. + Note, that when casted to as a `torch.Tensor`, the shape will be (N, S). + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + if self._current_contact_time is None: + return None + return self._current_contact_time_ta + + def create_buffers( + self, + num_envs: int, + num_sensors: int, + num_filter_objects: int, + history_length: int, + generate_force_matrix: bool, + track_air_time: bool, + track_pose: bool, + device: str, + ) -> None: + """Creates the buffers for the contact sensor data. + + Args: + num_envs: The number of environments. + num_sensors: The number of sensors. + num_filter_objects: The number of filter objects (counterparts). + history_length: The history length. + generate_force_matrix: Whether to generate the force matrix. + track_air_time: Whether to track the air time. + track_pose: Whether to track the pose. + device: The device to use. + """ + logger.info( + f"Creating buffers for contact sensor data with num_envs: {num_envs}, num_sensors: {num_sensors}," + f" num_filter_objects: {num_filter_objects}, history_length: {history_length}, generate_force_matrix:" + f" {generate_force_matrix}, track_air_time: {track_air_time}, track_pose: {track_pose}, device: {device}" + ) + # Track pose if requested + if track_pose: + self._pose = wp.zeros((num_envs,), dtype=wp.transformf, device=device) + pos_scalars = wp.array(self._pose, dtype=wp.float32, device=device, copy=False) + self._pos_w = wp.array(pos_scalars[:, :3], dtype=wp.vec3f, device=device, copy=False) + self._quat_w = wp.array(pos_scalars[:, 3:], dtype=wp.quatf, device=device, copy=False) + else: + self._pose = None + self._pos_w = None + self._quat_w = None + + # Create owned buffer for net (total) forces - shape: (num_envs, num_sensors) + self._net_forces_w = wp.zeros((num_envs, num_sensors), dtype=wp.vec3f, device=device) + # Track net forces history if requested + if history_length > 0: + self._net_forces_w_history = wp.zeros( + (num_envs, history_length, num_sensors), dtype=wp.vec3f, device=device + ) + self._force_matrix_w_history = None # TODO: implement force matrix history if needed + else: + self._net_forces_w_history = None + self._force_matrix_w_history = None + + # Create owned buffer for force matrix - shape: (num_envs, num_sensors, num_filter_objects) + # None if no filter objects configured + if num_filter_objects > 0: + self._force_matrix_w = wp.zeros((num_envs, num_sensors, num_filter_objects), dtype=wp.vec3f, device=device) + else: + self._force_matrix_w = None + + # Track air time if requested + if track_air_time: + self._last_air_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._current_air_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._last_contact_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._current_contact_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._first_transition = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._first_transition_ta = ProxyArray(self._first_transition) + else: + self._last_air_time = None + self._current_air_time = None + self._last_contact_time = None + self._current_contact_time = None + self._first_transition = None + self._first_transition_ta = None + + # -- Pin ProxyArray instances for pre-allocated buffers + self._net_forces_w_ta = ProxyArray(self._net_forces_w) + self._net_forces_w_history_ta = ( + ProxyArray(self._net_forces_w_history) if self._net_forces_w_history is not None else None + ) + # -- Lazy ProxyArray instances for nullable buffers (pinned on first access) + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._force_matrix_w_ta: ProxyArray | None = None + self._force_matrix_w_history_ta: ProxyArray | None = None + # -- Pin ProxyArray instances for air/contact time buffers (eagerly when allocated) + self._last_air_time_ta = ProxyArray(self._last_air_time) if self._last_air_time is not None else None + self._current_air_time_ta = ProxyArray(self._current_air_time) if self._current_air_time is not None else None + self._last_contact_time_ta = ( + ProxyArray(self._last_contact_time) if self._last_contact_time is not None else None + ) + self._current_contact_time_ta = ( + ProxyArray(self._current_contact_time) if self._current_contact_time is not None else None + ) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_kernels.py b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_kernels.py new file mode 100644 index 000000000000..324f0106682f --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/contact_sensor/contact_sensor_kernels.py @@ -0,0 +1,161 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +import warp as wp + + +@wp.kernel +def copy_from_newton_kernel( + # in + env_mask: wp.array(dtype=wp.bool), + newton_forces: wp.array3d(dtype=wp.vec3f), # (n_envs, n_sensors, n_counterparts) + # outputs + net_force_total: wp.array2d(dtype=wp.vec3f), # (n_envs, n_sensors) + force_matrix: wp.array3d(dtype=wp.vec3f), # (n_envs, n_sensors, n_filter_objects) or None +): + """Copy contact force data from Newton sensor into owned buffers. + + Launch with dim=(num_envs, num_sensors, max(num_filter_objects, 1)) for coalescing. + When num_filter_objects == 0, trailing dim is 1 and only total is copied. + """ + env, sensor, f_idx = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + # Copy total force (column 0) - only thread with f_idx == 0 does this + if f_idx == 0: + net_force_total[env, sensor] = newton_forces[env, sensor, 0] + + # Copy per-filter-object forces (columns 1+) + # Guard with `if force_matrix:` to handle None case (no filter objects) + if force_matrix: + force_matrix[env, sensor, f_idx] = newton_forces[env, sensor, f_idx + 1] + + +@wp.kernel +def reset_contact_sensor_kernel( + # in + history_length: int, + num_filter_objects: int, + env_mask: wp.array(dtype=wp.bool), + # in-out + net_forces_w: wp.array2d(dtype=wp.vec3f), + net_forces_w_history: wp.array3d(dtype=wp.vec3f), + force_matrix_w: wp.array3d(dtype=wp.vec3f), + # outputs + current_air_time: wp.array2d(dtype=wp.float32), + last_air_time: wp.array2d(dtype=wp.float32), + current_contact_time: wp.array2d(dtype=wp.float32), + last_contact_time: wp.array2d(dtype=wp.float32), +): + """Reset the contact sensor data for specified environments. + + Launch with dim=(num_envs, num_sensors). + """ + env, sensor = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + # Reset net forces + net_forces_w[env, sensor] = wp.vec3f(0.0) + + # Reset history + if net_forces_w_history: + for i in range(history_length): + net_forces_w_history[env, i, sensor] = wp.vec3f(0.0) + + # Reset force matrix (guard for None case) + if force_matrix_w: + for f in range(num_filter_objects): + force_matrix_w[env, sensor, f] = wp.vec3f(0.0) + + # Reset air/contact time tracking + if current_air_time: + current_air_time[env, sensor] = 0.0 + last_air_time[env, sensor] = 0.0 + current_contact_time[env, sensor] = 0.0 + last_contact_time[env, sensor] = 0.0 + + +@wp.kernel +def update_contact_sensor_kernel( + # in + history_length: int, + contact_force_threshold: wp.float32, + env_mask: wp.array(dtype=wp.bool), + net_forces: wp.array2d(dtype=wp.vec3f), + timestamp: wp.array(dtype=wp.float32), + timestamp_last_update: wp.array(dtype=wp.float32), + # in-out + net_forces_history: wp.array3d(dtype=wp.vec3f), + current_air_time: wp.array2d(dtype=wp.float32), + current_contact_time: wp.array2d(dtype=wp.float32), + # out + last_air_time: wp.array2d(dtype=wp.float32), + last_contact_time: wp.array2d(dtype=wp.float32), +): + """Update the contact sensor data (history and air/contact time tracking). + + Launch with dim=(num_envs, num_sensors). + """ + env, sensor = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + # Update history + if net_forces_history: + for i in range(history_length - 1, 0, -1): + net_forces_history[env, i, sensor] = net_forces_history[env, i - 1, sensor] + net_forces_history[env, 0, sensor] = net_forces[env, sensor] + + # Update air/contact time tracking + if current_air_time: + elapsed_time = timestamp[env] - timestamp_last_update[env] + in_contact = wp.length_sq(net_forces[env, sensor]) > contact_force_threshold * contact_force_threshold + + cat = current_air_time[env, sensor] + cct = current_contact_time[env, sensor] + is_first_contact = in_contact and (cat > 0.0) + is_first_detached = not in_contact and (cct > 0.0) + + if is_first_contact: + last_air_time[env, sensor] = cat + elapsed_time + elif is_first_detached: + last_contact_time[env, sensor] = cct + elapsed_time + + current_contact_time[env, sensor] = wp.where(in_contact, cct + elapsed_time, 0.0) + current_air_time[env, sensor] = wp.where(in_contact, 0.0, cat + elapsed_time) + + +@wp.kernel +def compute_first_transition_kernel( + # in + threshold: wp.float32, + time: wp.array2d(dtype=wp.float32), + # out + result: wp.array2d(dtype=wp.float32), +): + """Compute boolean mask (as float) for sensors whose time is in (0, threshold). + + Used by both compute_first_contact (with current_contact_time) and + compute_first_air (with current_air_time). + + Launch with dim=(num_envs, num_sensors). + """ + env, sensor = wp.tid() + t = time[env, sensor] + if t > 0.0 and t < threshold: + result[env, sensor] = 1.0 + else: + result[env, sensor] = 0.0 diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.py new file mode 100644 index 000000000000..fceb54d9d89b --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for frame transformer sensor based on Newton physics engine.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.pyi new file mode 100644 index 000000000000..1bd63d0390d3 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "FrameTransformer", + "FrameTransformerData", +] + +from .frame_transformer import FrameTransformer +from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 000000000000..e9150b63174b --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,354 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.sensors.frame_transformer.base_frame_transformer import BaseFrameTransformer + +from isaaclab_newton.physics import NewtonManager + +from .frame_transformer_data import FrameTransformerData +from .frame_transformer_kernels import compose_target_world_kernel, copy_from_newton_kernel + +if TYPE_CHECKING: + from isaaclab.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg + +logger = logging.getLogger(__name__) + + +class FrameTransformer(BaseFrameTransformer): + """Newton frame transformer wrapping :class:`newton.sensors.SensorFrameTransform`. + + Creates per-env sites for the source and all target frames, backed by a single + :class:`SensorFrameTransform` with 1:1 shape/reference pairs: + + * Entry 0 per env — source site measured w.r.t. a world-origin site. + * Entries 1..M per env — target sites measured w.r.t. source site. + + Flat sensor output is indexed with stride ``1 + num_targets``: + ``[i * stride]`` is the source world transform, ``[i * stride + 1 + j]`` + is target *j* relative to source in env *i*. + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "newton" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer. + + Registers site requests via :meth:`NewtonManager.cl_register_site` for + the source frame, each target frame, and a shared world-origin reference. + Sites are injected into prototype builders by ``newton_replicate`` before + replication, so they end up correctly in each world. + + Args: + cfg: Configuration parameters. + """ + # initialize base class (registers PHYSICS_READY callback for _initialize_impl) + super().__init__(cfg) + + self._data: FrameTransformerData = FrameTransformerData() + self._newton_transforms = None + self._stride: int = 0 + + self._sensor_index: int | None = None + self._source_frame_body_name: str = cfg.prim_path.rsplit("/", 1)[-1] + + # Register world-origin reference site + self._world_origin_label = NewtonManager.cl_register_site(None, wp.transform()) + + # Register source site + source_offset = wp.transform(cfg.source_frame_offset.pos, cfg.source_frame_offset.rot) + self._source_label = NewtonManager.cl_register_site(cfg.prim_path, source_offset) + + # Register target sites + self._target_labels: list[str] = [] + self._target_frame_body_names: list[str] = [] + self._num_targets: int = 0 + + for target_frame in cfg.target_frames: + target_offset = wp.transform(target_frame.offset.pos, target_frame.offset.rot) + label = NewtonManager.cl_register_site(target_frame.prim_path, target_offset) + + self._target_labels.append(label) + body_name = target_frame.prim_path.rsplit("/", 1)[-1] + self._target_frame_body_names.append(target_frame.name or body_name) + self._num_targets += 1 + + # Set target frame names for base class find_bodies() and data container + self._target_frame_names = [t.name or t.prim_path.rsplit("/", 1)[-1] for t in cfg.target_frames] + self._data._target_frame_names = self._target_frame_names + + logger.info( + f"FrameTransformer '{cfg.prim_path}': source='{self._source_frame_body_name}', " + f"{self._num_targets} target(s) registered" + ) + + """ + Properties + """ + + @property + def data(self) -> FrameTransformerData: + # update sensors if needed + self._update_outdated_buffers() + return self._data + + @property + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked.""" + return len(self._target_frame_body_names) + + @property + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked.""" + return self._target_frame_body_names + + """ + Implementation + """ + + def _initialize_impl(self): + """PHYSICS_READY callback: resolves site indices and creates the SensorFrameTransform.""" + super()._initialize_impl() + + num_envs = self._num_envs + site_map = NewtonManager._cl_site_index_map + + # Resolve and validate per-env site indices + assert self._world_origin_label in site_map + world_origin_idx, _ = site_map[self._world_origin_label] + source_indices, target_per_world = self._validate_site_map( + self._source_label, + self.cfg.prim_path, + self._target_labels, + [t.prim_path for t in self.cfg.target_frames], + site_map, + num_envs, + ) + + # Expand targets and build sensor index lists + expanded_names, target_indices_per_target, shapes_list, references_list = self._build_sensor_index_lists( + source_indices, + target_per_world, + self._target_frame_body_names, + NewtonManager._builder.shape_label, + world_origin_idx, + num_envs, + ) + + # Update instance state with expanded values + self._num_targets = len(target_indices_per_target) + self._target_frame_names = expanded_names + self._target_frame_body_names = expanded_names + self._data._target_frame_names = expanded_names + + # Create SensorFrameTransform via NewtonManager + self._sensor_index = NewtonManager.add_frame_transform_sensor(shapes_list, references_list) + + # Store reference to Newton sensor's flat transforms array + sensor = NewtonManager._newton_frame_transform_sensors[self._sensor_index] + self._newton_transforms = sensor.transforms + self._stride = 1 + self._num_targets + + # Allocate owned buffers + self._data._create_buffers(num_envs, self._num_targets, self._device) + + logger.info( + f"FrameTransformer initialized: {num_envs} envs, " + f"{self._num_targets} targets, sensor_index={self._sensor_index}" + ) + + @staticmethod + def _validate_site_map( + source_label: str, + source_prim_path: str, + target_labels: list[str], + target_prim_paths: list[str], + site_map: dict, + num_envs: int, + ) -> tuple[list[int], list[list[list[int]]]]: + """Validate per-env site counts and return resolved index arrays. + + Args: + source_label: Site label for the source frame. + source_prim_path: Config prim path used in error messages. + target_labels: Site labels for each target frame (in order). + target_prim_paths: Config prim paths used in error messages. + site_map: ``NewtonManager._cl_site_index_map``. + num_envs: Expected number of environments. + + Returns: + ``(source_indices, target_per_world)`` where ``source_indices[e]`` is the + single source site index for env ``e``, and ``target_per_world[t][e]`` is + the list of site indices for target ``t`` in env ``e``. + + Raises: + ValueError: If the source has the wrong world count, or any env has a + count other than 1. If any target has zero matches, non-uniform + counts across envs, or a world-count mismatch. + """ + assert source_label in site_map, ( + f"FrameTransformer source '{source_prim_path}' (site label '{source_label}') " + "not found in NewtonManager._cl_site_index_map." + ) + _, source_per_world = site_map[source_label] + if len(source_per_world) != num_envs: + raise ValueError( + f"FrameTransformer source '{source_prim_path}' has {len(source_per_world)} " + f"world entries in the site map, expected {num_envs}." + ) + for env_idx, world_sites in enumerate(source_per_world): + if len(world_sites) != 1: + raise ValueError( + f"FrameTransformer source pattern '{source_prim_path}' matched " + f"{len(world_sites)} bodies in env {env_idx}, expected exactly 1. " + f"Source patterns must resolve to a single rigid body per environment." + ) + source_indices: list[int] = [w[0] for w in source_per_world] + + target_per_world: list[list[list[int]]] = [] + for tgt_idx, label in enumerate(target_labels): + assert label in site_map, ( + f"FrameTransformer target '{target_prim_paths[tgt_idx]}' (site label '{label}') " + "not found in NewtonManager._cl_site_index_map." + ) + _, per_world = site_map[label] + if len(per_world) != num_envs: + raise ValueError( + f"FrameTransformer target '{target_prim_paths[tgt_idx]}' has " + f"{len(per_world)} world entries, expected {num_envs}." + ) + lengths = [len(w) for w in per_world] + if len(set(lengths)) != 1: + raise ValueError( + f"FrameTransformer target pattern '{target_prim_paths[tgt_idx]}' matched " + f"different numbers of bodies across envs: {lengths}. " + f"All environments must have the same number of matches." + ) + if lengths[0] == 0: + raise ValueError( + f"FrameTransformer target pattern '{target_prim_paths[tgt_idx]}' " + f"matched no bodies in any environment." + ) + target_per_world.append(per_world) + + return source_indices, target_per_world + + @staticmethod + def _build_sensor_index_lists( + source_indices: list[int], + target_per_world: list[list[list[int]]], + target_frame_body_names: list[str], + shape_labels: list[str], + world_origin_idx: int, + num_envs: int, + ) -> tuple[list[str], list[list[int]], list[int], list[int]]: + """Expand per-world target sublists and build sensor index lists. + + Args: + source_indices: Per-env source site indices, length ``num_envs``. + target_per_world: Per-target-config, per-world, per-body site indices. + Shape: ``[num_target_cfgs][num_envs][n_bodies_per_env]``. + target_frame_body_names: Config-level name for each target config entry. + shape_labels: ``builder.shape_label`` — maps shape index to its label string. + Site labels have the form ``"{body_name}/{site_label}"``; the body name + is extracted for wildcard expansion. + world_origin_idx: Global world-origin site index. + num_envs: Number of environments. + + Returns: + ``(expanded_names, target_indices_per_target, shapes_list, references_list)`` + where ``expanded_names[k]`` is the resolved name for expanded target ``k``, + ``target_indices_per_target[k][e]`` is the site index for expanded target ``k`` + in env ``e``, ``shapes_list`` and ``references_list`` are 1:1 sensor inputs. + """ + target_indices_per_target: list[list[int]] = [] + expanded_names: list[str] = [] + + for tgt_idx, per_world in enumerate(target_per_world): + n_bodies = len(per_world[0]) # uniform across envs (validated) + for k in range(n_bodies): + per_env = [per_world[env_idx][k] for env_idx in range(num_envs)] + target_indices_per_target.append(per_env) + # For wildcards (n_bodies > 1), derive the bare body name from the + # site label ("{body_path}/{site_label}") using env 0. + if n_bodies > 1: + site_idx = per_world[0][k] + expanded_names.append(shape_labels[site_idx].rsplit("/", 2)[-2]) + else: + expanded_names.append(target_frame_body_names[tgt_idx]) + + num_targets = len(target_indices_per_target) + shapes_list: list[int] = [] + references_list: list[int] = [] + + for env_idx in range(num_envs): + source_idx = source_indices[env_idx] + shapes_list.append(source_idx) + references_list.append(world_origin_idx) + for tgt_idx in range(num_targets): + target_idx = target_indices_per_target[tgt_idx][env_idx] + shapes_list.append(target_idx) + references_list.append(source_idx) + + return expanded_names, target_indices_per_target, shapes_list, references_list + + def _update_buffers_impl(self, env_mask: wp.array): + """Copies transforms from Newton sensor into owned buffers.""" + if self._newton_transforms is None: + raise RuntimeError(f"FrameTransformer '{self.cfg.prim_path}': sensor is not initialized") + wp.launch( + copy_from_newton_kernel, + dim=(self._num_envs, 1 + self._num_targets), + inputs=[env_mask, self._newton_transforms, self._stride], + outputs=[self._data._source_transforms, self._data._target_transforms], + device=self._device, + ) + + # Compose target world transforms: source_world * target_relative + if self._num_targets > 0: + wp.launch( + compose_target_world_kernel, + dim=(self._num_envs, self._num_targets), + inputs=[env_mask, self._data._source_transforms, self._data._target_transforms], + outputs=[self._data._target_transforms_w], + device=self._device, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Clears references to the native sensor and re-registers sites. + + ``NewtonManager.close()`` calls ``clear()`` before dispatching ``STOP``, + so ``_cl_pending_sites`` is already empty when this callback fires. + Re-registering here ensures sites survive a close/reinit cycle. + """ + super()._invalidate_initialize_callback(event) + self._newton_transforms = None + self._sensor_index = None + + # Re-register sites so a subsequent start_simulation picks them up. + self._world_origin_label = NewtonManager.cl_register_site(None, wp.transform()) + + source_offset = wp.transform(self.cfg.source_frame_offset.pos, self.cfg.source_frame_offset.rot) + self._source_label = NewtonManager.cl_register_site(self.cfg.prim_path, source_offset) + + self._target_labels = [] + for target_frame in self.cfg.target_frames: + target_offset = wp.transform(target_frame.offset.pos, target_frame.offset.rot) + label = NewtonManager.cl_register_site(target_frame.prim_path, target_offset) + self._target_labels.append(label) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 000000000000..2b5f4f48006b --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,96 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.frame_transformer.base_frame_transformer_data import BaseFrameTransformerData +from isaaclab.utils.warp import ProxyArray +from isaaclab.utils.warp.math_ops import transform_to_vec_quat + + +class FrameTransformerData(BaseFrameTransformerData): + """Data container for the Newton frame transformer sensor. + + Transform buffers are populated from the Newton sensor via + :func:`copy_from_newton_kernel`. + """ + + _source_transforms: wp.array | None + """Source world transforms — ``(num_envs,)`` array of ``wp.transformf``.""" + + _target_transforms: wp.array | None + """Target-relative transforms — ``(num_envs, num_targets)`` array of ``wp.transformf``.""" + + _target_transforms_w: wp.array | None + """Target world transforms — ``(num_envs, num_targets)`` array of ``wp.transformf``.""" + + def __init__(self): + self._target_frame_names: list[str] | None = None + self._source_transforms = None + self._target_transforms = None + self._target_transforms_w = None + + def _create_buffers(self, num_envs: int, num_targets: int, device: str): + """Allocates transform buffers and zero-copy views.""" + self._source_transforms = wp.zeros(num_envs, dtype=wp.transformf, device=device) + self._target_transforms = wp.zeros((num_envs, num_targets), dtype=wp.transformf, device=device) + self._target_transforms_w = wp.zeros((num_envs, num_targets), dtype=wp.transformf, device=device) + + # Zero-copy views — auto-reflect kernel writes to underlying transforms + self._source_pos_w, self._source_quat_w = transform_to_vec_quat(self._source_transforms) + self._target_pos_source, self._target_quat_source = transform_to_vec_quat(self._target_transforms) + self._target_pos_w, self._target_quat_w = transform_to_vec_quat(self._target_transforms_w) + + @property + def target_frame_names(self) -> list[str]: + """Names of the target frames.""" + return self._target_frame_names + + @property + def target_pose_source(self) -> ProxyArray: + """Target poses relative to source frame, shape ``(num_envs, num_targets)``, dtype ``wp.transformf``.""" + return ProxyArray(self._target_transforms) + + @property + def target_pos_source(self) -> ProxyArray: + """Position of target frames relative to source frame [m], shape ``(num_envs, num_targets, 3)``.""" + return ProxyArray(self._target_pos_source) + + @property + def target_quat_source(self) -> ProxyArray: + """Orientation of target frames relative to source frame (xyzw), shape ``(num_envs, num_targets, 4)``.""" + return ProxyArray(self._target_quat_source) + + @property + def target_pose_w(self) -> ProxyArray: + """Target poses in world frame, shape ``(num_envs, num_targets)``, dtype ``wp.transformf``.""" + return ProxyArray(self._target_transforms_w) + + @property + def target_pos_w(self) -> ProxyArray: + """Position of target frames in world frame [m], shape ``(num_envs, num_targets, 3)``.""" + return ProxyArray(self._target_pos_w) + + @property + def target_quat_w(self) -> ProxyArray: + """Orientation of target frames in world frame (xyzw), shape ``(num_envs, num_targets, 4)``.""" + return ProxyArray(self._target_quat_w) + + @property + def source_pose_w(self) -> ProxyArray: + """Source pose in world frame, shape ``(num_envs,)``, dtype ``wp.transformf``.""" + return ProxyArray(self._source_transforms) + + @property + def source_pos_w(self) -> ProxyArray: + """Position of source frame in world frame [m], shape ``(num_envs, 3)``.""" + return ProxyArray(self._source_pos_w) + + @property + def source_quat_w(self) -> ProxyArray: + """Orientation of source frame in world frame (xyzw), shape ``(num_envs, 4)``.""" + return ProxyArray(self._source_quat_w) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_kernels.py b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_kernels.py new file mode 100644 index 000000000000..26004b8a1d7c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/frame_transformer/frame_transformer_kernels.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +import warp as wp + + +@wp.kernel +def copy_from_newton_kernel( + # in + env_mask: wp.array(dtype=wp.bool), + newton_transforms: wp.array(dtype=wp.transformf), # flat (num_envs * stride,) + stride: int, + # outputs + source_transforms: wp.array(dtype=wp.transformf), # (num_envs,) + target_transforms: wp.array2d(dtype=wp.transformf), # (num_envs, num_targets) +): + """Copy frame transform data from Newton sensor into owned buffers. + + Deinterleaves the flat strided Newton output into separate source and target buffers. + Launch with dim=(num_envs, 1 + num_targets). + """ + env, idx = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + t = newton_transforms[env * stride + idx] + + if idx == 0: + source_transforms[env] = t + else: + target_transforms[env, idx - 1] = t + + +@wp.kernel +def compose_target_world_kernel( + # in + env_mask: wp.array(dtype=wp.bool), + source_transforms: wp.array(dtype=wp.transformf), # (num_envs,) + target_transforms: wp.array2d(dtype=wp.transformf), # (num_envs, num_targets) + # outputs + target_transforms_w: wp.array2d(dtype=wp.transformf), # (num_envs, num_targets) +): + """Compute target world transforms: source_world * target_relative. + + Launch with dim=(num_envs, num_targets). + """ + env, tgt = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + target_transforms_w[env, tgt] = source_transforms[env] * target_transforms[env, tgt] diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.py new file mode 100644 index 000000000000..4d0042be743c --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for the Newton IMU sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.pyi new file mode 100644 index 000000000000..b5d6077112f3 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/imu/__init__.pyi @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = ["Imu", "ImuData"] + +from .imu import Imu +from .imu_data import ImuData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu.py b/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu.py new file mode 100644 index 000000000000..1c25f4550932 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu.py @@ -0,0 +1,187 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.sensors.imu import BaseImu + +from isaaclab_newton.physics import NewtonManager + +from .imu_data import ImuData +from .kernels import imu_copy_kernel, imu_reset_kernel + +if TYPE_CHECKING: + from newton.sensors import SensorIMU as NewtonSensorIMU + + from isaaclab.sensors.imu import ImuCfg + +logger = logging.getLogger(__name__) + + +class Imu(BaseImu): + """Newton Inertial Measurement Unit (IMU) sensor. + + Wrapper around ``newton.sensors.SensorIMU`` providing angular velocity + (gyroscope) and linear acceleration (accelerometer) in the sensor's + body frame. + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "newton" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the Newton IMU sensor. + + Registers a site request and the ``body_qdd`` state attribute with + :class:`NewtonManager`. The site is injected into prototype builders + before replication so it ends up in each world. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + self._data = ImuData() + self._sensor_index: int | None = None + self._newton_sensor: NewtonSensorIMU | None = None + + offset_xform = wp.transform(cfg.offset.pos, cfg.offset.rot) + self._site_label: str = NewtonManager.cl_register_site(cfg.prim_path, offset_xform) + NewtonManager.request_extended_state_attribute("body_qdd") + + logger.info(f"IMU '{cfg.prim_path}': site registered (label='{self._site_label}')") + + def __str__(self) -> str: + """String representation of the sensor instance.""" + return ( + f"IMU sensor @ '{self.cfg.prim_path}': \n" + f"\tbackend : newton\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._num_envs}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> ImuData: + """The IMU sensor data.""" + self._update_outdated_buffers() + return self._data + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + """Reset the sensor for the given environments. + + Zeroes out angular velocity and linear acceleration buffers for the + specified environments. + + Args: + env_ids: Environment indices to reset. Defaults to all environments. + env_mask: Boolean mask of environments to reset. Mutually exclusive with *env_ids*. + """ + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + super().reset(None, env_mask) + wp.launch( + imu_reset_kernel, + dim=self._num_envs, + inputs=[env_mask, self._data._lin_acc_b, self._data._ang_vel_b], + device=self._device, + ) + + """ + Implementation + """ + + def _initialize_impl(self): + """PHYSICS_READY callback: resolves site indices and creates the native SensorIMU.""" + super()._initialize_impl() + + site_map = NewtonManager._cl_site_index_map + num_envs = self._num_envs + + if self._site_label not in site_map: + raise ValueError( + f"IMU '{self.cfg.prim_path}': site label '{self._site_label}' " + "not found in NewtonManager._cl_site_index_map." + ) + + global_idx, per_world = site_map[self._site_label] + + if per_world is None: + # Global site (body=-1, i.e. world frame): replicate across envs. + site_indices = [global_idx] * num_envs + else: + if len(per_world) != num_envs: + raise ValueError( + f"IMU '{self.cfg.prim_path}': site has {len(per_world)} world entries, expected {num_envs}." + ) + + site_indices: list[int] = [] + for env_idx, world_sites in enumerate(per_world): + if len(world_sites) != 1: + raise ValueError( + f"IMU '{self.cfg.prim_path}': pattern matched {len(world_sites)} " + f"bodies in env {env_idx}, expected exactly 1." + ) + site_indices.append(world_sites[0]) + + self._sensor_index = NewtonManager.add_imu_sensor(site_indices) + self._newton_sensor = NewtonManager._newton_imu_sensors[self._sensor_index] + + self._data.create_buffers(num_envs=num_envs, device=self._device) + + logger.info(f"IMU initialized: {num_envs} envs, sensor_index={self._sensor_index}") + + def _update_buffers_impl(self, env_mask: wp.array): + """Copies accelerometer/gyroscope data from native Newton sensor into owned buffers.""" + if self._newton_sensor is None: + raise RuntimeError( + f"IMU '{self.cfg.prim_path}': sensor not initialized. " + "Access sensor data only after sim.reset() has been called." + ) + wp.launch( + imu_copy_kernel, + dim=self._num_envs, + inputs=[env_mask, self._newton_sensor.accelerometer, self._newton_sensor.gyroscope], + outputs=[self._data._lin_acc_b, self._data._ang_vel_b], + device=self._device, + ) + + def _invalidate_initialize_callback(self, event): + """Clears references to the native Newton sensor and re-registers site/attributes. + + ``NewtonManager.close()`` calls ``clear()`` before dispatching ``STOP``, + so ``_cl_pending_sites`` and ``_pending_extended_state_attributes`` are + already empty when this callback fires. Re-registering here ensures the + site and ``body_qdd`` attribute survive a close/reinit cycle. + """ + super()._invalidate_initialize_callback(event) + self._newton_sensor = None + self._sensor_index = None + + # Zero out data buffers so stale data is not served between STOP and reinit. + if self._data._ang_vel_b is not None: + self._data._ang_vel_b.zero_() + if self._data._lin_acc_b is not None: + self._data._lin_acc_b.zero_() + + # Re-register so a subsequent start_simulation picks them up. + offset_xform = wp.transform(self.cfg.offset.pos, self.cfg.offset.rot) + self._site_label = NewtonManager.cl_register_site(self.cfg.prim_path, offset_xform) + NewtonManager.request_extended_state_attribute("body_qdd") diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu_data.py b/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu_data.py new file mode 100644 index 000000000000..f288df131a2f --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/imu/imu_data.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.imu import BaseImuData +from isaaclab.utils.warp import ProxyArray + + +class ImuData(BaseImuData): + """Data container for the Newton IMU sensor.""" + + def __init__(self): + self._ang_vel_b: wp.array | None = None + self._lin_acc_b: wp.array | None = None + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None + + @property + def ang_vel_b(self) -> ProxyArray | None: + """IMU frame angular velocity relative to the world expressed in IMU frame [rad/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._ang_vel_b is None: + return None + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray | None: + """Linear acceleration (proper) in the IMU frame [m/s^2]. + + Zero in freefall, +g upward at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._lin_acc_b is None: + return None + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for array storage. + """ + self._ang_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._lin_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._ang_vel_b_ta = None + self._lin_acc_b_ta = None diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/imu/kernels.py b/source/isaaclab_newton/isaaclab_newton/sensors/imu/kernels.py new file mode 100644 index 000000000000..7c4716fd55c9 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/imu/kernels.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def imu_copy_kernel( + env_mask: wp.array(dtype=wp.bool), + accelerometer: wp.array(dtype=wp.vec3f), + gyroscope: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + out_lin_acc_b[idx] = accelerometer[idx] + out_ang_vel_b[idx] = gyroscope[idx] + + +@wp.kernel +def imu_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + out_lin_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_ang_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.py new file mode 100644 index 000000000000..fb10acc86182 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for the Newton joint-wrench sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.pyi new file mode 100644 index 000000000000..b2bcd3582d44 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/__init__.pyi @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = ["JointWrenchSensor", "JointWrenchSensorData"] + +from .joint_wrench_sensor import JointWrenchSensor +from .joint_wrench_sensor_data import JointWrenchSensorData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor.py b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor.py new file mode 100644 index 000000000000..bd53751d6a5f --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor.py @@ -0,0 +1,247 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp +from newton import JointType +from newton.selection import ArticulationView + +from pxr import UsdPhysics + +from isaaclab.sensors.joint_wrench import BaseJointWrenchSensor +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims + +from isaaclab_newton.physics import NewtonManager + +from .joint_wrench_sensor_data import JointWrenchSensorData +from .kernels import joint_wrench_reset_kernel, joint_wrench_to_incoming_joint_frame_kernel + +if TYPE_CHECKING: + from isaaclab.sensors.joint_wrench import JointWrenchSensorCfg + +logger = logging.getLogger(__name__) + + +class JointWrenchSensor(BaseJointWrenchSensor): + """Newton joint reaction wrench sensor. + + Reads Newton's ``body_parent_f`` (world-frame wrench at child COM) and + converts each entry to the ``INCOMING_JOINT_FRAME`` convention + (child-side joint frame, child-side joint anchor as reference point) + before storing it in per-joint force / torque buffers. + + :attr:`~isaaclab.sensors.SensorBaseCfg.prim_path` must point at either + the articulation root prim or a parent prim containing a single + articulation root in every environment. ``FREE`` and ``FIXED`` joints are + excluded — neither has a meaningful joint anchor. + """ + + cfg: JointWrenchSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "newton" + """The name of the backend for the joint wrench sensor.""" + + def __init__(self, cfg: JointWrenchSensorCfg): + """Initialize the Newton joint-wrench sensor. + + Requests the ``body_parent_f`` extended state attribute from :class:`NewtonManager` so the + model builder allocates it during simulation startup. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + self._data = JointWrenchSensorData() + self._root_view: ArticulationView | None = None + self._sim_bind_body_parent_f: wp.array | None = None + self._sim_bind_body_q: wp.array | None = None + self._sim_bind_body_com: wp.array | None = None + self._sim_bind_joint_X_c: wp.array | None = None + self._joint_child: wp.array | None = None + self._num_joints: int = 0 + + NewtonManager.request_extended_state_attribute("body_parent_f") + + def __str__(self) -> str: + """String representation of the sensor instance.""" + return ( + f"Joint wrench sensor @ '{self.cfg.prim_path}': \n" + f"\tbackend : newton\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of joints : {self._num_joints}\n" + ) + + """ + Properties + """ + + @property + def body_names(self) -> list[str]: + """Ordered names of the bodies whose incoming joint wrench is reported.""" + return self._data._body_names + + @property + def data(self) -> JointWrenchSensorData: + """The joint-wrench sensor data.""" + self._update_outdated_buffers() + return self._data + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the sensor buffers for the given environments. + + Args: + env_ids: the environment ids to reset. + env_mask: the mask used to reset the environments. Shape is (num_envs).""" + if self._data._force is None or self._data._torque is None: + return + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + super().reset(None, env_mask) + wp.launch( + joint_wrench_reset_kernel, + dim=(self._num_envs, self._num_joints), + inputs=[env_mask, self._data._force, self._data._torque], + device=self._device, + ) + + """ + Implementation + """ + + def _initialize_impl(self) -> None: + """PHYSICS_READY callback: builds the articulation view and binds model / state arrays.""" + super()._initialize_impl() + + model = NewtonManager.get_model() + state_0 = NewtonManager.get_state_0() + + root_prim_path_expr = self._resolve_articulation_root_prim_path() + self._root_view = ArticulationView( + model, + root_prim_path_expr.replace(".*", "*"), + verbose=False, + exclude_joint_types=[JointType.FREE, JointType.FIXED], + ) + self._num_joints = self._root_view.joint_count + if self._num_joints == 0: + raise RuntimeError( + "Joint wrench sensor matched zero reportable joints (all joints are FREE or FIXED)." + f" Check the articulation at '{root_prim_path_expr}'." + ) + + try: + body_parent_f = self._root_view.get_attribute("body_parent_f", state_0) + except AttributeError as err: + raise RuntimeError( + f"Joint wrench sensor '{self.cfg.prim_path}': Newton state does not expose" + " 'body_parent_f'. Construct the sensor before sim startup so the extended-state" + " request is forwarded to the model builder." + ) from err + + self._sim_bind_body_parent_f = body_parent_f[:, 0] + self._sim_bind_body_q = self._root_view.get_link_transforms(state_0)[:, 0] + self._sim_bind_body_com = self._root_view.get_attribute("body_com", model)[:, 0] + self._sim_bind_joint_X_c = self._root_view.get_attribute("joint_X_c", model)[:, 0] + + # joint_child is per-articulation; topology is identical across envs, + # so we take the first-env mapping as the 1-D kernel input. + joint_child_full = self._root_view.get_attribute("joint_child", model)[:, 0] + joint_child_np = joint_child_full.numpy()[0] + if not all(0 <= b < self._sim_bind_body_parent_f.shape[1] for b in joint_child_np): + raise RuntimeError(f"joint_child contains out-of-range body indices for '{self.cfg.prim_path}'") + self._joint_child = wp.array(joint_child_np, dtype=wp.int32, device=self._device) + + link_names = list(self._root_view.link_names) + self._data._body_names = [link_names[int(b)] for b in joint_child_np] + + self._data.create_buffers(num_envs=self._num_envs, num_joints=self._num_joints, device=self._device) + + logger.info(f"Joint wrench sensor initialized: {self._num_envs} envs, {self._num_joints} joints") + + def _resolve_articulation_root_prim_path(self) -> str: + """Resolve the articulation root prim path expression from the configured asset prim path.""" + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + first_env_root_prims = get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + and prim.GetAttribute("physxArticulation:articulationEnabled").Get() is not False, + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + return self.cfg.prim_path + root_prim_path_relative_to_prim_path + + def _update_buffers_impl(self, env_mask: wp.array) -> None: + """Convert Newton's body_parent_f into INCOMING_JOINT_FRAME force and torque buffers. + + Args: + env_mask: A mask containing which environments need to be updated. Shape is (num_envs) + """ + if self._sim_bind_body_parent_f is None: + raise RuntimeError( + f"Joint wrench sensor '{self.cfg.prim_path}': not initialized." + " Access sensor data only after sim.reset() has been called." + ) + wp.launch( + joint_wrench_to_incoming_joint_frame_kernel, + dim=(self._num_envs, self._num_joints), + inputs=[ + env_mask, + self._sim_bind_body_parent_f, + self._sim_bind_body_q, + self._sim_bind_body_com, + self._sim_bind_joint_X_c, + self._joint_child, + ], + outputs=[self._data._force, self._data._torque], + device=self._device, + ) + + def _invalidate_initialize_callback(self, event) -> None: + """Drop view, cached sizes, and buffers; re-register the extended-state request. + + Args: + event: An invalidate event. + """ + super()._invalidate_initialize_callback(event) + self._root_view = None + self._sim_bind_body_parent_f = None + self._sim_bind_body_q = None + self._sim_bind_body_com = None + self._sim_bind_joint_X_c = None + self._joint_child = None + self._num_joints = 0 + self._data._force = None + self._data._torque = None + self._data._body_names = [] + self._data._force_ta = None + self._data._torque_ta = None + NewtonManager.request_extended_state_attribute("body_parent_f") diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor_data.py b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor_data.py new file mode 100644 index 000000000000..76c5a565bdfb --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/joint_wrench_sensor_data.py @@ -0,0 +1,67 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.joint_wrench import BaseJointWrenchSensorData +from isaaclab.utils.warp import ProxyArray + + +class JointWrenchSensorData(BaseJointWrenchSensorData): + """Data container for the Newton joint-wrench sensor.""" + + def __init__(self): + self._force: wp.array | None = None + self._torque: wp.array | None = None + self._body_names: list[str] = [] + self._force_ta: ProxyArray | None = None + self._torque_ta: ProxyArray | None = None + + @property + def force(self) -> ProxyArray | None: + """Linear component of the joint reaction wrench [N]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + if self._force is None: + return None + if self._force_ta is None: + self._force_ta = ProxyArray(self._force) + return self._force_ta + + @property + def torque(self) -> ProxyArray | None: + """Angular component of the joint reaction wrench [N·m]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + if self._torque is None: + return None + if self._torque_ta is None: + self._torque_ta = ProxyArray(self._torque) + return self._torque_ta + + def create_buffers(self, num_envs: int, num_joints: int, device: str) -> None: + """Allocate internal buffers. + + Args: + num_envs: Number of environments. + num_joints: Number of reported joints (excludes FREE and FIXED joint types). + device: Device for array storage. + """ + self._force = wp.zeros((num_envs, num_joints), dtype=wp.vec3f, device=device) + self._torque = wp.zeros((num_envs, num_joints), dtype=wp.vec3f, device=device) + self._force_ta = None + self._torque_ta = None diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/kernels.py b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/kernels.py new file mode 100644 index 000000000000..64a0b4f0dda2 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/joint_wrench/kernels.py @@ -0,0 +1,80 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def joint_wrench_to_incoming_joint_frame_kernel( + env_mask: wp.array(dtype=wp.bool), + body_parent_f: wp.array(dtype=wp.spatial_vectorf, ndim=2), + body_q: wp.array(dtype=wp.transformf, ndim=2), + body_com: wp.array(dtype=wp.vec3f, ndim=2), + joint_X_c: wp.array(dtype=wp.transformf, ndim=2), + joint_child: wp.array(dtype=wp.int32), + out_force: wp.array(dtype=wp.vec3f, ndim=2), + out_torque: wp.array(dtype=wp.vec3f, ndim=2), +): + """Convert Newton's ``body_parent_f`` to the INCOMING_JOINT_FRAME convention. + + Newton reports ``body_parent_f[env, body]`` as a spatial wrench in world frame, referenced at + the child body's centre of mass. The output is that same wrench re-expressed in the child-side + joint frame and with the child-side joint anchor as the reference point — matching what a 6-axis + force/torque sensor mounted at the joint would measure. + + Args: + env_mask: Boolean mask selecting which environments to update. + body_parent_f: Newton state — world-frame spatial wrench at child COM ``(num_envs, num_bodies)``. + body_q: Newton state — child link transforms in world frame ``(num_envs, num_bodies)``. + body_com: Newton model — COM offset in link-local frame ``(num_envs, num_bodies)``. + joint_X_c: Newton model — child-side joint frame relative to child link ``(num_envs, num_joints)``. + joint_child: Newton model — body index of each joint's child link ``(num_joints,)``. + out_force: Output force in joint frame [N] ``(num_envs, num_joints)``. + out_torque: Output torque in joint frame [N·m] ``(num_envs, num_joints)``. + """ + env, j = wp.tid() + if not env_mask[env]: + return + + body_idx = joint_child[j] + + # Source wrench in world frame. Newton's body_parent_f stores (force, torque-about-COM). + src = body_parent_f[env, body_idx] + f_world = wp.spatial_top(src) + tau_world_com = wp.spatial_bottom(src) + + # Child link transform in world and COM offset in link frame. + link_xform = body_q[env, body_idx] + link_quat = wp.transform_get_rotation(link_xform) + link_pos = wp.transform_get_translation(link_xform) + com_world = link_pos + wp.quat_rotate(link_quat, body_com[env, body_idx]) + + # Child-side joint frame in world = body link pose composed with joint_X_c. + joint_xform_world = link_xform * joint_X_c[env, j] + anchor_world = wp.transform_get_translation(joint_xform_world) + joint_quat_world = wp.transform_get_rotation(joint_xform_world) + + # Shift torque reference from COM to joint anchor: + # tau_anchor = tau_com + (com - anchor) x f = tau_com + r_anchor_to_com x f. + r_anchor_to_com = com_world - anchor_world + tau_world_anchor = tau_world_com + wp.cross(r_anchor_to_com, f_world) + + # Rotate both components into the child-side joint frame. + out_force[env, j] = wp.quat_rotate_inv(joint_quat_world, f_world) + out_torque[env, j] = wp.quat_rotate_inv(joint_quat_world, tau_world_anchor) + + +@wp.kernel +def joint_wrench_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_force: wp.array(dtype=wp.vec3f, ndim=2), + out_torque: wp.array(dtype=wp.vec3f, ndim=2), +): + """Zero force / torque entries for the environments selected by ``env_mask``.""" + env, joint = wp.tid() + if not env_mask[env]: + return + out_force[env, joint] = wp.vec3f(0.0, 0.0, 0.0) + out_torque[env, joint] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.py b/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.py new file mode 100644 index 000000000000..257e5b627f83 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for Newton PVA sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.pyi new file mode 100644 index 000000000000..bba750977ae1 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/pva/__init__.pyi @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = ["Pva", "PvaData"] + +from .pva import Pva +from .pva_data import PvaData diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/pva/kernels.py b/source/isaaclab_newton/isaaclab_newton/sensors/pva/kernels.py new file mode 100644 index 000000000000..8ea779284058 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/pva/kernels.py @@ -0,0 +1,108 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def pva_update_kernel( + # indexing + env_mask: wp.array(dtype=wp.bool), + site_indices: wp.array(dtype=int), + # model arrays + shape_body: wp.array(dtype=int), + shape_transform: wp.array(dtype=wp.transform), + body_com: wp.array(dtype=wp.vec3), + gravity: wp.array(dtype=wp.vec3), + body_world: wp.array(dtype=wp.int32), + # state arrays + body_q: wp.array(dtype=wp.transform), + body_qd: wp.array(dtype=wp.spatial_vector), + body_qdd: wp.array(dtype=wp.spatial_vector), + # outputs + out_pose_w: wp.array(dtype=wp.transformf), + out_pos_w: wp.array(dtype=wp.vec3f), + out_quat_w: wp.array(dtype=wp.quatf), + out_projected_gravity_b: wp.array(dtype=wp.vec3f), + out_lin_vel_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_acc_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + site_idx = site_indices[idx] + body_idx = shape_body[site_idx] + site_xform = shape_transform[site_idx] + + # 1. World-frame pose at sensor site + body_xform = body_q[body_idx] + body_quat = wp.transform_get_rotation(body_xform) + sensor_pos = wp.transform_get_translation(body_xform) + wp.quat_rotate(body_quat, site_xform.p) + sensor_quat = body_quat * site_xform.q + + # 2. Projected gravity (normalized to unit vector) + world_idx = body_world[body_idx] + g = gravity[wp.max(world_idx, 0)] + projected_gravity_b = wp.quat_rotate_inv(sensor_quat, wp.normalize(g)) + + # 3. Velocity at sensor site: v_site = v_com + omega x r + # r = lever arm from body COM to sensor site, in world frame. + # body_qd stores the spatial velocity at the COM, so the linear + # velocity at the site requires the cross-product correction. + r = wp.quat_rotate(body_quat, site_xform.p - body_com[body_idx]) + ang_vel_w = wp.spatial_bottom(body_qd[body_idx]) + lin_vel_w = wp.spatial_top(body_qd[body_idx]) + wp.cross(ang_vel_w, r) + + lin_vel_b = wp.quat_rotate_inv(sensor_quat, lin_vel_w) + ang_vel_b = wp.quat_rotate_inv(sensor_quat, ang_vel_w) + + # 4. Acceleration at sensor site via rigid-body transport (from body_qdd): + # a_site = a_com + alpha x r + omega x (omega x r) + ang_acc_w = wp.spatial_bottom(body_qdd[body_idx]) + lin_acc_w = ( + wp.spatial_top(body_qdd[body_idx]) + wp.cross(ang_acc_w, r) + wp.cross(ang_vel_w, wp.cross(ang_vel_w, r)) + ) + + lin_acc_b = wp.quat_rotate_inv(sensor_quat, lin_acc_w) + ang_acc_b = wp.quat_rotate_inv(sensor_quat, ang_acc_w) + + # 5. Write outputs + out_pose_w[idx] = wp.transform(sensor_pos, sensor_quat) + out_pos_w[idx] = sensor_pos + out_quat_w[idx] = sensor_quat + out_projected_gravity_b[idx] = projected_gravity_b + out_lin_vel_b[idx] = lin_vel_b + out_ang_vel_b[idx] = ang_vel_b + out_lin_acc_b[idx] = lin_acc_b + out_ang_acc_b[idx] = ang_acc_b + + +@wp.kernel +def pva_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_pose_w: wp.array(dtype=wp.transformf), + out_pos_w: wp.array(dtype=wp.vec3f), + out_quat_w: wp.array(dtype=wp.quatf), + out_projected_gravity_b: wp.array(dtype=wp.vec3f), + out_lin_vel_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_acc_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + out_pose_w[idx] = wp.transform(wp.vec3f(0.0, 0.0, 0.0), wp.quatf(0.0, 0.0, 0.0, 1.0)) + out_pos_w[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_quat_w[idx] = wp.quatf(0.0, 0.0, 0.0, 1.0) + out_projected_gravity_b[idx] = wp.vec3f(0.0, 0.0, -1.0) + out_lin_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_ang_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_lin_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_ang_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva.py b/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva.py new file mode 100644 index 000000000000..c000e17c437a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva.py @@ -0,0 +1,256 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from pxr import UsdGeom + +import isaaclab.utils.math as math_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.pva import BasePva + +from isaaclab_newton.physics import NewtonManager + +from .kernels import pva_reset_kernel, pva_update_kernel +from .pva_data import PvaData + +if TYPE_CHECKING: + from isaaclab.sensors.pva import PvaCfg + +logger = logging.getLogger(__name__) + + +class Pva(BasePva): + """Newton Pose Velocity Acceleration (PVA) sensor. + + Reads body transforms, velocities, and accelerations directly from + Newton's simulation state (``body_q``, ``body_qd``, ``body_qdd``) to + provide world-frame pose and body-frame velocities/accelerations. + """ + + cfg: PvaCfg + """The configuration parameters.""" + + __backend_name__: str = "newton" + """The name of the backend for the PVA sensor.""" + + def __init__(self, cfg: PvaCfg): + """Initializes the Newton PVA sensor. + + Registers a site request and the ``body_qdd`` state attribute with + :class:`NewtonManager`. The site is injected into prototype builders + before replication so it ends up in each world. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + self._data = PvaData() + self._site_indices: wp.array | None = None + self._newton_model = None + + offset_xform = wp.transform(cfg.offset.pos, cfg.offset.rot) + self._site_label = NewtonManager.cl_register_site(cfg.prim_path, offset_xform) + NewtonManager.request_extended_state_attribute("body_qdd") + + logger.info(f"Pva '{cfg.prim_path}': site registered (label='{self._site_label}')") + + def __str__(self) -> str: + """String representation of the sensor instance.""" + return ( + f"Pva sensor @ '{self.cfg.prim_path}': \n" + f"\tbackend : newton\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._num_envs}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> PvaData: + """The PVA sensor data.""" + self._update_outdated_buffers() + return self._data + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + """Reset the sensor for the given environments. + + Zeroes out all PVA buffers for the specified environments. + + Args: + env_ids: Environment indices to reset. Defaults to all environments. + env_mask: Boolean mask of environments to reset. Mutually exclusive with *env_ids*. + """ + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + super().reset(None, env_mask) + wp.launch( + pva_reset_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + self._data._pose_w, + self._data._pos_w, + self._data._quat_w, + self._data._projected_gravity_b, + self._data._lin_vel_b, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._data._ang_acc_b, + ], + device=self._device, + ) + + """ + Implementation + """ + + def _initialize_impl(self): + """PHYSICS_READY callback: resolves site indices and stores model reference.""" + super()._initialize_impl() + + site_map = NewtonManager._cl_site_index_map + num_envs = self._num_envs + + if self._site_label not in site_map: + raise ValueError( + f"Pva '{self.cfg.prim_path}': site label '{self._site_label}' " + "not found in NewtonManager._cl_site_index_map." + ) + + global_idx, per_world = site_map[self._site_label] + + if per_world is None: + site_indices = [global_idx] * num_envs + else: + if len(per_world) != num_envs: + raise ValueError( + f"Pva '{self.cfg.prim_path}': site has {len(per_world)} world entries, expected {num_envs}." + ) + + site_indices: list[int] = [] + for env_idx, world_sites in enumerate(per_world): + if len(world_sites) != 1: + raise ValueError( + f"Pva '{self.cfg.prim_path}': pattern matched {len(world_sites)} " + f"bodies in env {env_idx}, expected exactly 1." + ) + site_indices.append(world_sites[0]) + + self._site_indices = wp.array(site_indices, dtype=int, device=self._device) + self._newton_model = NewtonManager._model + + self._data.create_buffers(num_envs=num_envs, device=self._device) + + logger.info(f"Pva initialized: {num_envs} envs") + + def _update_buffers_impl(self, env_mask: wp.array): + """Reads Newton body state and computes all PVA quantities.""" + if self._newton_model is None: + raise RuntimeError( + f"Pva '{self.cfg.prim_path}': sensor not initialized. " + "Access sensor data only after sim.reset() has been called." + ) + state = NewtonManager._state_0 + + wp.launch( + pva_update_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + self._site_indices, + self._newton_model.shape_body, + self._newton_model.shape_transform, + self._newton_model.body_com, + self._newton_model.gravity, + self._newton_model.body_world, + state.body_q, + state.body_qd, + state.body_qdd, + ], + outputs=[ + self._data._pose_w, + self._data._pos_w, + self._data._quat_w, + self._data._projected_gravity_b, + self._data._lin_vel_b, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._data._ang_acc_b, + ], + device=self._device, + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + if debug_vis: + if not hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + self.acceleration_visualizer.set_visibility(True) + else: + if hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + if self._newton_model is None: + return + # base position (offset upward for visibility) + base_pos_w = self._data.pos_w.torch.clone() + base_pos_w[:, 2] += 0.5 + # arrow scale + default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale + arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.torch.shape[0], 1) + # arrow direction from acceleration + up_axis = UsdGeom.GetStageUpAxis(self.stage) + pos_w_torch = self._data.pos_w.torch + quat_w_torch = self._data.quat_w.torch + lin_acc_b_torch = self._data.lin_acc_b.torch + quat_opengl = math_utils.quat_from_matrix( + math_utils.create_rotation_matrix_from_view( + pos_w_torch, + pos_w_torch + math_utils.quat_apply(quat_w_torch, lin_acc_b_torch), + up_axis=up_axis, + device=self._device, + ) + ) + quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") + self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) + + def _invalidate_initialize_callback(self, event): + """Clears references for re-initialization and re-registers with NewtonManager.""" + super()._invalidate_initialize_callback(event) + self._newton_model = None + self._site_indices = None + + # Zero out data buffers so stale data is not served between STOP and reinit. + for buf in [ + self._data._pose_w, + self._data._pos_w, + self._data._quat_w, + self._data._projected_gravity_b, + self._data._lin_vel_b, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._data._ang_acc_b, + ]: + if buf is not None: + buf.zero_() + + # Re-register so a subsequent start_simulation picks them up. + offset_xform = wp.transform(self.cfg.offset.pos, self.cfg.offset.rot) + self._site_label = NewtonManager.cl_register_site(self.cfg.prim_path, offset_xform) + NewtonManager.request_extended_state_attribute("body_qdd") diff --git a/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva_data.py b/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva_data.py new file mode 100644 index 000000000000..5b75a0087a26 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sensors/pva/pva_data.py @@ -0,0 +1,175 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.pva import BasePvaData +from isaaclab.utils.warp import ProxyArray + + +class PvaData(BasePvaData): + """Data container for the Newton PVA sensor.""" + + def __init__(self): + self._pose_w: wp.array | None = None + self._pos_w: wp.array | None = None + self._quat_w: wp.array | None = None + self._projected_gravity_b: wp.array | None = None + self._lin_vel_b: wp.array | None = None + self._ang_vel_b: wp.array | None = None + self._lin_acc_b: wp.array | None = None + self._ang_acc_b: wp.array | None = None + # ProxyArray caches + self._pose_w_ta: ProxyArray | None = None + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._projected_gravity_b_ta: ProxyArray | None = None + self._lin_vel_b_ta: ProxyArray | None = None + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None + self._ang_acc_b_ta: ProxyArray | None = None + + @property + def pose_w(self) -> ProxyArray | None: + """Pose of the sensor origin in world frame [m, unitless]. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The pose is provided in (x, y, z, qx, qy, qz, qw) format. + + ``None`` before the simulation is initialized. + """ + if self._pose_w is None: + return None + if self._pose_w_ta is None: + self._pose_w_ta = ProxyArray(self._pose_w) + return self._pose_w_ta + + @property + def pos_w(self) -> ProxyArray | None: + """Position of the sensor origin in world frame [m]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._pos_w is None: + return None + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray | None: + """Orientation of the sensor origin in world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + The orientation is provided in (x, y, z, w) format. + + ``None`` before the simulation is initialized. + """ + if self._quat_w is None: + return None + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def projected_gravity_b(self) -> ProxyArray | None: + """Gravity direction unit vector projected on the PVA frame [unitless]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._projected_gravity_b is None: + return None + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b) + return self._projected_gravity_b_ta + + @property + def lin_vel_b(self) -> ProxyArray | None: + """PVA frame linear velocity relative to the world expressed in PVA frame [m/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._lin_vel_b is None: + return None + if self._lin_vel_b_ta is None: + self._lin_vel_b_ta = ProxyArray(self._lin_vel_b) + return self._lin_vel_b_ta + + @property + def ang_vel_b(self) -> ProxyArray | None: + """PVA frame angular velocity relative to the world expressed in PVA frame [rad/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._ang_vel_b is None: + return None + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray | None: + """Linear acceleration (coordinate) in the PVA frame [m/s^2]. + + Equal to -g in freefall, zero at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._lin_acc_b is None: + return None + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + @property + def ang_acc_b(self) -> ProxyArray | None: + """PVA frame angular acceleration relative to the world expressed in PVA frame [rad/s^2]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + ``None`` before the simulation is initialized. + """ + if self._ang_acc_b is None: + return None + if self._ang_acc_b_ta is None: + self._ang_acc_b_ta = ProxyArray(self._ang_acc_b) + return self._ang_acc_b_ta + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for array storage. + """ + self._pose_w = wp.zeros(num_envs, dtype=wp.transformf, device=device) + self._pos_w = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._quat_w = wp.zeros(num_envs, dtype=wp.quatf, device=device) + self._projected_gravity_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._lin_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._ang_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._lin_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._ang_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + # Reset ProxyArray caches + self._pose_w_ta = None + self._pos_w_ta = None + self._quat_w_ta = None + self._projected_gravity_b_ta = None + self._lin_vel_b_ta = None + self._ang_vel_b_ta = None + self._lin_acc_b_ta = None + self._ang_acc_b_ta = None diff --git a/source/isaaclab_newton/isaaclab_newton/sim/__init__.py b/source/isaaclab_newton/isaaclab_newton/sim/__init__.py new file mode 100644 index 000000000000..b4646aabbd0a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton simulation utilities.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sim/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sim/__init__.pyi new file mode 100644 index 000000000000..aac4c8327ccb --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "views", +] + +from . import views diff --git a/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py new file mode 100644 index 000000000000..44e8303bcedf --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton simulation views.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.pyi new file mode 100644 index 000000000000..433dfc1e8b6a --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "NewtonSiteFrameView", +] + +from .newton_site_frame_view import NewtonSiteFrameView diff --git a/source/isaaclab_newton/isaaclab_newton/sim/views/newton_site_frame_view.py b/source/isaaclab_newton/isaaclab_newton/sim/views/newton_site_frame_view.py new file mode 100644 index 000000000000..c7758f0cbe42 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/views/newton_site_frame_view.py @@ -0,0 +1,951 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton-backed FrameView — Warp-native, GPU-resident pose queries.""" + +from __future__ import annotations + +import logging + +import warp as wp + +from pxr import Gf, Usd, UsdGeom + +import isaaclab.sim as sim_utils +from isaaclab.physics import PhysicsEvent +from isaaclab.sim.views.base_frame_view import BaseFrameView +from isaaclab.utils.warp import ProxyArray + +from isaaclab_newton.physics.newton_manager import NewtonManager + +logger = logging.getLogger(__name__) + +WORLD_BODY_INDEX = -1 + + +# ------------------------------------------------------------------ +# Warp kernels +# ------------------------------------------------------------------ + + +@wp.kernel +def _compute_site_world_transforms( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + site_local: wp.array(dtype=wp.transformf), + out_pos: wp.array(dtype=wp.vec3f), + out_quat: wp.array(dtype=wp.vec4f), +): + """Compute world-space transforms for every site in the view. + + For each site *i*, computes ``world = body_q[site_body[i]] * site_local[i]`` + and splits the result into position and quaternion outputs. When + ``site_body[i] == -1`` the site is world-attached and ``site_local[i]`` is + returned directly. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + A value of ``-1`` indicates a world-attached site. + site_local: Per-site local offset relative to its parent body, shape ``[num_sites]``. + out_pos: Output world positions [m], shape ``[num_sites]``. + out_quat: Output world orientations as ``(qx, qy, qz, qw)``, shape ``[num_sites]``. + """ + i = wp.tid() + bid = site_body[i] + if bid == -1: + world = site_local[i] + else: + world = wp.transform_multiply(body_q[bid], site_local[i]) + out_pos[i] = wp.transform_get_translation(world) + q = wp.transform_get_rotation(world) + out_quat[i] = wp.vec4f(q[0], q[1], q[2], q[3]) + + +@wp.kernel +def _compute_site_world_transforms_indexed( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + site_local: wp.array(dtype=wp.transformf), + indices: wp.array(dtype=wp.int32), + out_pos: wp.array(dtype=wp.vec3f), + out_quat: wp.array(dtype=wp.vec4f), +): + """Indexed variant of :func:`_compute_site_world_transforms`. + + Only computes world transforms for the subset of sites selected by + ``indices``. Thread *i* reads ``indices[i]`` to obtain the site index, + then writes the result to ``out_pos[i]`` / ``out_quat[i]``. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + site_local: Per-site local offset relative to its parent body, shape ``[num_sites]``. + indices: Site indices to query, shape ``[M]``. + out_pos: Output world positions [m], shape ``[M]``. + out_quat: Output world orientations as ``(qx, qy, qz, qw)``, shape ``[M]``. + """ + i = wp.tid() + si = indices[i] + bid = site_body[si] + if bid == -1: + world = site_local[si] + else: + world = wp.transform_multiply(body_q[bid], site_local[si]) + out_pos[i] = wp.transform_get_translation(world) + q = wp.transform_get_rotation(world) + out_quat[i] = wp.vec4f(q[0], q[1], q[2], q[3]) + + +@wp.kernel +def _gather_scales( + shape_scale: wp.array(dtype=wp.vec3f), + shape_body: wp.array(dtype=wp.int32), + site_body: wp.array(dtype=wp.int32), + num_shapes: wp.int32, + out_scales: wp.array(dtype=wp.vec3f), +): + """Gather per-site scales from collision shapes on the same body. + + For each site *i*, linearly scans all shapes to find the first one whose + ``shape_body`` matches ``site_body[i]`` and copies its scale. Falls back + to ``(1, 1, 1)`` if no shape is found on that body. + + Args: + shape_scale: Per-shape scale vectors from the Newton model, shape ``[num_shapes]``. + shape_body: Per-shape parent body index, shape ``[num_shapes]``. + site_body: Per-site body index, shape ``[num_sites]``. + num_shapes: Total number of shapes in the model. + out_scales: Output scale per site, shape ``[num_sites]``. + """ + i = wp.tid() + bid = site_body[i] + found = int(0) + for s in range(num_shapes): + if shape_body[s] == bid and found == 0: + out_scales[i] = shape_scale[s] + found = 1 + if found == 0: + out_scales[i] = wp.vec3f(1.0, 1.0, 1.0) + + +@wp.kernel +def _gather_scales_indexed( + shape_scale: wp.array(dtype=wp.vec3f), + shape_body: wp.array(dtype=wp.int32), + site_body: wp.array(dtype=wp.int32), + indices: wp.array(dtype=wp.int32), + num_shapes: wp.int32, + out_scales: wp.array(dtype=wp.vec3f), +): + """Indexed variant of :func:`_gather_scales`. + + Args: + shape_scale: Per-shape scale vectors from the Newton model, shape ``[num_shapes]``. + shape_body: Per-shape parent body index, shape ``[num_shapes]``. + site_body: Per-site body index, shape ``[num_sites]``. + indices: Site indices to query, shape ``[M]``. + num_shapes: Total number of shapes in the model. + out_scales: Output scale per queried site, shape ``[M]``. + """ + i = wp.tid() + si = indices[i] + bid = site_body[si] + found = int(0) + for s in range(num_shapes): + if shape_body[s] == bid and found == 0: + out_scales[i] = shape_scale[s] + found = 1 + if found == 0: + out_scales[i] = wp.vec3f(1.0, 1.0, 1.0) + + +@wp.kernel +def _scatter_scales( + site_body: wp.array(dtype=wp.int32), + new_scales: wp.array(dtype=wp.vec3f), + shape_body: wp.array(dtype=wp.int32), + num_shapes: wp.int32, + shape_scale: wp.array(dtype=wp.vec3f), +): + """Scatter per-site scales to all collision shapes on the same body. + + For each site *i*, writes ``new_scales[i]`` to every shape whose + ``shape_body`` matches ``site_body[i]``. Multiple shapes on the same + body all receive the same scale. + + Args: + site_body: Per-site body index, shape ``[num_sites]``. + new_scales: New scale to apply per site, shape ``[num_sites]``. + shape_body: Per-shape parent body index, shape ``[num_shapes]``. + num_shapes: Total number of shapes in the model. + shape_scale: Per-shape scale vectors to write into (modified in-place), + shape ``[num_shapes]``. + """ + i = wp.tid() + bid = site_body[i] + for s in range(num_shapes): + if shape_body[s] == bid: + shape_scale[s] = new_scales[i] + + +@wp.kernel +def _scatter_scales_indexed( + site_body: wp.array(dtype=wp.int32), + indices: wp.array(dtype=wp.int32), + new_scales: wp.array(dtype=wp.vec3f), + shape_body: wp.array(dtype=wp.int32), + num_shapes: wp.int32, + shape_scale: wp.array(dtype=wp.vec3f), +): + """Indexed variant of :func:`_scatter_scales`. + + Args: + site_body: Per-site body index, shape ``[num_sites]``. + indices: Site indices to update, shape ``[M]``. + new_scales: New scale to apply per selected site, shape ``[M]``. + shape_body: Per-shape parent body index, shape ``[num_shapes]``. + num_shapes: Total number of shapes in the model. + shape_scale: Per-shape scale vectors to write into (modified in-place), + shape ``[num_shapes]``. + """ + i = wp.tid() + si = indices[i] + bid = site_body[si] + for s in range(num_shapes): + if shape_body[s] == bid: + shape_scale[s] = new_scales[i] + + +# ------------------------------------------------------------------ +# World-pose site_local write kernels +# ------------------------------------------------------------------ + + +@wp.kernel +def _write_site_local_from_world_poses( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + world_pos: wp.array(dtype=wp.vec3f), + world_quat: wp.array(dtype=wp.vec4f), + site_local: wp.array(dtype=wp.transformf), +): + """Update site local offsets so that the sites reach desired world poses. + + For each site *i*, computes + ``site_local[i] = inv(body_q[site_body[i]]) * desired_world`` so that + a subsequent ``body_q[bid] * site_local[i]`` yields the requested world + pose. For world-attached sites (``site_body[i] == -1``) the desired world + transform is written directly into ``site_local[i]``. + + Does **not** modify ``body_q``. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + world_pos: Desired world positions [m], shape ``[num_sites]``. + world_quat: Desired world orientations as ``(qx, qy, qz, qw)``, shape ``[num_sites]``. + site_local: Per-site local offset (modified in-place), shape ``[num_sites]``. + """ + i = wp.tid() + w_pos = world_pos[i] + w_q = world_quat[i] + desired_world = wp.transform(w_pos, wp.quatf(w_q[0], w_q[1], w_q[2], w_q[3])) + + bid = site_body[i] + if bid == -1: + site_local[i] = desired_world + else: + site_local[i] = wp.transform_multiply(wp.transform_inverse(body_q[bid]), desired_world) + + +@wp.kernel +def _write_site_local_from_world_poses_indexed( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + indices: wp.array(dtype=wp.int32), + world_pos: wp.array(dtype=wp.vec3f), + world_quat: wp.array(dtype=wp.vec4f), + site_local: wp.array(dtype=wp.transformf), +): + """Indexed variant of :func:`_write_site_local_from_world_poses`. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + indices: Site indices to update, shape ``[M]``. + world_pos: Desired world positions [m], shape ``[M]``. + world_quat: Desired world orientations as ``(qx, qy, qz, qw)``, shape ``[M]``. + site_local: Per-site local offset (modified in-place), shape ``[num_sites]``. + """ + i = wp.tid() + si = indices[i] + w_pos = world_pos[i] + w_q = world_quat[i] + desired_world = wp.transform(w_pos, wp.quatf(w_q[0], w_q[1], w_q[2], w_q[3])) + + bid = site_body[si] + if bid == -1: + site_local[si] = desired_world + else: + site_local[si] = wp.transform_multiply(wp.transform_inverse(body_q[bid]), desired_world) + + +# ------------------------------------------------------------------ +# Local-pose Warp kernels +# ------------------------------------------------------------------ + + +@wp.kernel +def _compute_site_local_transforms( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + site_local: wp.array(dtype=wp.transformf), + parent_site_body: wp.array(dtype=wp.int32), + parent_site_local: wp.array(dtype=wp.transformf), + out_pos: wp.array(dtype=wp.vec3f), + out_quat: wp.array(dtype=wp.vec4f), +): + """Compute parent-relative transforms for every site in the view. + + For each site *i*, computes the world pose of both the site and its USD + parent, then returns ``inv(parent_world) * prim_world``. When + ``site_body[i] == -1`` the site is world-attached and ``site_local[i]`` + is used as the world transform directly. The same convention applies to + the parent arrays. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + site_local: Per-site local offset relative to its parent body, shape ``[num_sites]``. + parent_site_body: Per-site USD-parent body index, shape ``[num_sites]``. + parent_site_local: Per-site USD-parent local offset, shape ``[num_sites]``. + out_pos: Output parent-relative positions [m], shape ``[num_sites]``. + out_quat: Output parent-relative orientations as ``(qx, qy, qz, qw)``, + shape ``[num_sites]``. + """ + i = wp.tid() + prim_bid = site_body[i] + if prim_bid == -1: + prim_world = site_local[i] + else: + prim_world = wp.transform_multiply(body_q[prim_bid], site_local[i]) + + parent_bid = parent_site_body[i] + if parent_bid == -1: + parent_world = parent_site_local[i] + else: + parent_world = wp.transform_multiply(body_q[parent_bid], parent_site_local[i]) + + local_tf = wp.transform_multiply(wp.transform_inverse(parent_world), prim_world) + out_pos[i] = wp.transform_get_translation(local_tf) + q = wp.transform_get_rotation(local_tf) + out_quat[i] = wp.vec4f(q[0], q[1], q[2], q[3]) + + +@wp.kernel +def _compute_site_local_transforms_indexed( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + site_local: wp.array(dtype=wp.transformf), + parent_site_body: wp.array(dtype=wp.int32), + parent_site_local: wp.array(dtype=wp.transformf), + indices: wp.array(dtype=wp.int32), + out_pos: wp.array(dtype=wp.vec3f), + out_quat: wp.array(dtype=wp.vec4f), +): + """Indexed variant of :func:`_compute_site_local_transforms`. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + site_local: Per-site local offset relative to its parent body, shape ``[num_sites]``. + parent_site_body: Per-site USD-parent body index, shape ``[num_sites]``. + parent_site_local: Per-site USD-parent local offset, shape ``[num_sites]``. + indices: Site indices to query, shape ``[M]``. + out_pos: Output parent-relative positions [m], shape ``[M]``. + out_quat: Output parent-relative orientations as ``(qx, qy, qz, qw)``, + shape ``[M]``. + """ + i = wp.tid() + si = indices[i] + prim_bid = site_body[si] + if prim_bid == -1: + prim_world = site_local[si] + else: + prim_world = wp.transform_multiply(body_q[prim_bid], site_local[si]) + + parent_bid = parent_site_body[si] + if parent_bid == -1: + parent_world = parent_site_local[si] + else: + parent_world = wp.transform_multiply(body_q[parent_bid], parent_site_local[si]) + + local_tf = wp.transform_multiply(wp.transform_inverse(parent_world), prim_world) + out_pos[i] = wp.transform_get_translation(local_tf) + q = wp.transform_get_rotation(local_tf) + out_quat[i] = wp.vec4f(q[0], q[1], q[2], q[3]) + + +@wp.kernel +def _write_site_local_from_local_poses( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + parent_site_body: wp.array(dtype=wp.int32), + parent_site_local: wp.array(dtype=wp.transformf), + local_pos: wp.array(dtype=wp.vec3f), + local_quat: wp.array(dtype=wp.vec4f), + site_local: wp.array(dtype=wp.transformf), +): + """Update site local offsets so that sites reach desired parent-relative poses. + + For each site *i*, reconstructs the desired world pose as + ``parent_world * desired_local``, then solves for the body-relative offset: + ``site_local[i] = inv(body_q[bid]) * desired_world``. For world-attached + sites (``site_body[i] == -1``) the world transform is written directly. + + Does **not** modify ``body_q``. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + parent_site_body: Per-site USD-parent body index, shape ``[num_sites]``. + parent_site_local: Per-site USD-parent local offset, shape ``[num_sites]``. + local_pos: Desired parent-relative positions [m], shape ``[num_sites]``. + local_quat: Desired parent-relative orientations as ``(qx, qy, qz, qw)``, + shape ``[num_sites]``. + site_local: Per-site local offset (modified in-place), shape ``[num_sites]``. + """ + i = wp.tid() + parent_bid = parent_site_body[i] + if parent_bid == -1: + parent_world = parent_site_local[i] + else: + parent_world = wp.transform_multiply(body_q[parent_bid], parent_site_local[i]) + + l_pos = local_pos[i] + l_q = local_quat[i] + local_tf = wp.transform(l_pos, wp.quatf(l_q[0], l_q[1], l_q[2], l_q[3])) + desired_world = wp.transform_multiply(parent_world, local_tf) + + bid = site_body[i] + if bid == -1: + site_local[i] = desired_world + else: + site_local[i] = wp.transform_multiply(wp.transform_inverse(body_q[bid]), desired_world) + + +@wp.kernel +def _write_site_local_from_local_poses_indexed( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + parent_site_body: wp.array(dtype=wp.int32), + parent_site_local: wp.array(dtype=wp.transformf), + indices: wp.array(dtype=wp.int32), + local_pos: wp.array(dtype=wp.vec3f), + local_quat: wp.array(dtype=wp.vec4f), + site_local: wp.array(dtype=wp.transformf), +): + """Indexed variant of :func:`_write_site_local_from_local_poses`. + + Args: + body_q: Rigid-body world transforms from the Newton state, shape ``[num_bodies]``. + site_body: Per-site body index (flat model-level), shape ``[num_sites]``. + parent_site_body: Per-site USD-parent body index, shape ``[num_sites]``. + parent_site_local: Per-site USD-parent local offset, shape ``[num_sites]``. + indices: Site indices to update, shape ``[M]``. + local_pos: Desired parent-relative positions [m], shape ``[M]``. + local_quat: Desired parent-relative orientations as ``(qx, qy, qz, qw)``, + shape ``[M]``. + site_local: Per-site local offset (modified in-place), shape ``[num_sites]``. + """ + i = wp.tid() + si = indices[i] + parent_bid = parent_site_body[si] + if parent_bid == -1: + parent_world = parent_site_local[si] + else: + parent_world = wp.transform_multiply(body_q[parent_bid], parent_site_local[si]) + + l_pos = local_pos[i] + l_q = local_quat[i] + local_tf = wp.transform(l_pos, wp.quatf(l_q[0], l_q[1], l_q[2], l_q[3])) + desired_world = wp.transform_multiply(parent_world, local_tf) + + bid = site_body[si] + if bid == -1: + site_local[si] = desired_world + else: + site_local[si] = wp.transform_multiply(wp.transform_inverse(body_q[bid]), desired_world) + + +# ------------------------------------------------------------------ +# View class +# ------------------------------------------------------------------ + + +class NewtonSiteFrameView(BaseFrameView): + """Batched prim view for non-physics prims tracked as sites on Newton bodies. + + Each matched USD prim must be a **non-physics** prim (camera, sensor, + Xform marker, etc.) that sits as a child of a Newton rigid body in the + USD hierarchy. The prim path must **not** resolve directly to a physics + body or collision shape -- those are owned by Newton and should be + accessed through :class:`~isaaclab_newton.assets.Articulation` or + :class:`~isaaclab_newton.assets.RigidObject` instead. + + At init time each prim is resolved to a ``(body_index, site_local)`` + pair via ancestor walk: the nearest ancestor that appears in + ``model.body_label`` becomes the attachment body, and the relative USD + transform becomes the site offset. If no body ancestor exists the prim + is attached to the world frame (``body_index = -1``). + + World poses are computed on GPU as + ``body_q[body_index] * site_local`` via a Warp kernel. Both + ``set_world_poses`` and ``set_local_poses`` update ``site_local`` -- + neither touches ``body_q``. + + Pose getters return :class:`~isaaclab.utils.warp.ProxyArray`. Setters accept ``wp.array``. + + Raises: + ValueError: If any matched prim resolves to a Newton physics body + or collision shape. + """ + + def __init__(self, prim_path: str, device: str = "cpu", stage: Usd.Stage | None = None, **kwargs): + """Initialize the Newton site-based frame view. + + Resolves all USD prims matching ``prim_path`` and, for each one, walks + the USD ancestor hierarchy to find the nearest Newton rigid body. The + relative transform between the prim and its ancestor body becomes the + site's local offset. + + If the Newton model is already finalized the view initializes + immediately; otherwise initialization is deferred to a + :attr:`PhysicsEvent.PHYSICS_READY` callback. + + Args: + prim_path: USD prim path pattern (may contain regex). + device: Warp device for GPU arrays (e.g. ``"cuda:0"``). + stage: USD stage to search. Defaults to the current stage. + **kwargs: Unused; accepted for interface compatibility with other + :class:`~isaaclab.sim.views.BaseFrameView` backends. + """ + self._prim_path = prim_path + self._device = device + + stage = sim_utils.get_current_stage() if stage is None else stage + self._prims: list[Usd.Prim] = sim_utils.find_matching_prims(prim_path, stage=stage) + + model = NewtonManager.get_model() + if model is not None: + self._initialize_impl(model) + else: + self._physics_ready_handle = NewtonManager.register_callback( + self._on_physics_ready, PhysicsEvent.PHYSICS_READY, name=f"site_view_{prim_path}" + ) + + def _on_physics_ready(self, _event) -> None: + """Callback invoked when the Newton model becomes available.""" + self._initialize_impl(NewtonManager.get_model()) + + def _initialize_impl(self, model) -> None: + """Resolve USD prims to Newton body indices and allocate GPU buffers.""" + body_labels = list(model.body_label) + body_label_set = set(body_labels) + body_label_to_idx = {path: idx for idx, path in enumerate(body_labels)} + shape_label_set = set(model.shape_label) + + xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) + + site_bodies: list[int] = [] + site_locals: list[list[float]] = [] + parent_bodies: list[int] = [] + parent_locals: list[list[float]] = [] + + identity_xform = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] + resolve_cache: dict[str, tuple[int, list[float]]] = {} + + for prim in self._prims: + pp = prim.GetPath().pathString + if pp in body_label_set: + raise ValueError( + f"FrameView prim '{pp}' is a Newton physics body. " + "FrameView should only be used for non-physics prims (cameras, sensors, Xform markers). " + "Use Articulation or RigidObject APIs to control physics bodies." + ) + if pp in shape_label_set: + raise ValueError( + f"FrameView prim '{pp}' is a Newton collision shape. " + "FrameView should only be used for non-physics prims (cameras, sensors, Xform markers). " + "Use Articulation or RigidObject APIs to control collision shapes." + ) + + body_idx, local_xform = self._resolve_ancestor_body(prim, body_label_to_idx, xform_cache) + site_bodies.append(body_idx) + site_locals.append(local_xform) + + parent = prim.GetParent() + if not parent or not parent.IsValid() or parent.GetPath().pathString == "/": + parent_bodies.append(WORLD_BODY_INDEX) + parent_locals.append(identity_xform) + else: + parent_path = parent.GetPath().pathString + if parent_path in resolve_cache: + pb_idx, pb_local = resolve_cache[parent_path] + elif parent_path in body_label_to_idx: + pb_idx = body_label_to_idx[parent_path] + pb_local = identity_xform + resolve_cache[parent_path] = (pb_idx, pb_local) + else: + pb_idx, pb_local = self._resolve_ancestor_body(parent, body_label_to_idx, xform_cache) + resolve_cache[parent_path] = (pb_idx, pb_local) + parent_bodies.append(pb_idx) + parent_locals.append(pb_local) + + device = self._device + self._site_body = wp.array(site_bodies, dtype=wp.int32, device=device) + self._site_local = wp.array( + [wp.transform(*x) for x in site_locals], + dtype=wp.transformf, + device=device, + ) + self._parent_site_body = wp.array(parent_bodies, dtype=wp.int32, device=device) + self._parent_site_local = wp.array( + [wp.transform(*x) for x in parent_locals], + dtype=wp.transformf, + device=device, + ) + + self._pos_buf = wp.zeros(self.count, dtype=wp.vec3f, device=device) + self._quat_buf = wp.zeros(self.count, dtype=wp.vec4f, device=device) + self._local_pos_buf = wp.zeros(self.count, dtype=wp.vec3f, device=device) + self._local_quat_buf = wp.zeros(self.count, dtype=wp.vec4f, device=device) + self._pos_ta = ProxyArray(self._pos_buf) + self._quat_ta = ProxyArray(self._quat_buf) + self._local_pos_ta = ProxyArray(self._local_pos_buf) + self._local_quat_ta = ProxyArray(self._local_quat_buf) + + @staticmethod + def _resolve_ancestor_body( + prim: Usd.Prim, + body_label_to_idx: dict[str, int], + xform_cache: UsdGeom.XformCache, + ) -> tuple[int, list[float]]: + """Walk USD ancestors to find the nearest Newton body and compute the relative local transform. + + Args: + prim: The USD prim to resolve. + body_label_to_idx: Dict mapping body prim paths to their Newton body indices. + xform_cache: USD xform cache for efficient transform lookups. + + Returns: + A tuple ``(body_index, local_xform_7)`` where *local_xform_7* is + ``[tx, ty, tz, qx, qy, qz, qw]``. If no body ancestor exists, + ``body_index`` is :data:`WORLD_BODY_INDEX` and the local transform + is the prim's world transform. + """ + prim_world_tf = xform_cache.GetLocalToWorldTransform(prim) + prim_world_tf.Orthonormalize() + + ancestor = prim.GetParent() + while ancestor and ancestor.IsValid() and ancestor.GetPath().pathString != "/": + ancestor_path = ancestor.GetPath().pathString + body_idx = body_label_to_idx.get(ancestor_path) + if body_idx is not None: + ancestor_world_tf = xform_cache.GetLocalToWorldTransform(ancestor) + ancestor_world_tf.Orthonormalize() + local_tf = prim_world_tf * ancestor_world_tf.GetInverse() + return body_idx, _gf_matrix_to_xform7(local_tf) + ancestor = ancestor.GetParent() + + return WORLD_BODY_INDEX, _gf_matrix_to_xform7(prim_world_tf) + + @property + def prims(self) -> list: + """List of USD prims being managed by this view.""" + return self._prims + + @property + def count(self) -> int: + """Number of prims in this view.""" + return len(self._prims) + + @property + def device(self) -> str: + """Device where arrays are allocated (cpu or cuda).""" + return self._device + + # ------------------------------------------------------------------ + # World poses + # ------------------------------------------------------------------ + + def get_world_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get world-space positions and orientations. + + Args: + indices: Subset of sites to query. ``None`` means all sites. + + Returns: + A tuple ``(positions, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + state = NewtonManager.get_state_0() + + if indices is not None: + n = len(indices) + pos_buf = wp.zeros(n, dtype=wp.vec3f, device=self._device) + quat_buf = wp.zeros(n, dtype=wp.vec4f, device=self._device) + wp.launch( + _compute_site_world_transforms_indexed, + dim=n, + inputs=[state.body_q, self._site_body, self._site_local, indices], + outputs=[pos_buf, quat_buf], + device=self._device, + ) + return ProxyArray(pos_buf), ProxyArray(quat_buf) + + wp.launch( + _compute_site_world_transforms, + dim=self.count, + inputs=[state.body_q, self._site_body, self._site_local], + outputs=[self._pos_buf, self._quat_buf], + device=self._device, + ) + return self._pos_ta, self._quat_ta + + def set_world_poses( + self, + positions: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set world-space positions and/or orientations. + + Updates the internal ``site_local`` offsets so that + ``body_q[body] * new_site_local`` yields the desired world pose. + Does **not** modify ``body_q``. + + Args: + positions: Desired world positions ``(M, 3)``. ``None`` leaves + positions unchanged. + orientations: Desired world quaternions ``(M, 4)`` as + ``(qx, qy, qz, qw)``. ``None`` leaves orientations unchanged. + indices: Subset of sites to update. ``None`` means all sites. + """ + if positions is None and orientations is None: + return + + state = NewtonManager.get_state_0() + + if positions is None or orientations is None: + cur_pos_ta, cur_quat_ta = self.get_world_poses(indices) + if positions is None: + positions = cur_pos_ta.warp + if orientations is None: + orientations = cur_quat_ta.warp + + if indices is not None: + wp.launch( + _write_site_local_from_world_poses_indexed, + dim=len(indices), + inputs=[state.body_q, self._site_body, indices, positions, orientations, self._site_local], + device=self._device, + ) + else: + wp.launch( + _write_site_local_from_world_poses, + dim=self.count, + inputs=[state.body_q, self._site_body, positions, orientations, self._site_local], + device=self._device, + ) + + # ------------------------------------------------------------------ + # Local poses (parent-relative) + # ------------------------------------------------------------------ + + def get_local_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + """Get parent-relative positions and orientations. + + Computes ``inv(parent_world) * prim_world`` for each site. + + Args: + indices: Subset of sites to query. ``None`` means all sites. + + Returns: + A tuple ``(translations, orientations)`` of :class:`~isaaclab.utils.warp.ProxyArray` + wrappers. Use ``.warp`` for the underlying ``wp.array`` or ``.torch`` for a + cached zero-copy ``torch.Tensor`` view. + """ + state = NewtonManager.get_state_0() + + if indices is not None: + n = len(indices) + pos_buf = wp.zeros(n, dtype=wp.vec3f, device=self._device) + quat_buf = wp.zeros(n, dtype=wp.vec4f, device=self._device) + wp.launch( + _compute_site_local_transforms_indexed, + dim=n, + inputs=[ + state.body_q, + self._site_body, + self._site_local, + self._parent_site_body, + self._parent_site_local, + indices, + ], + outputs=[pos_buf, quat_buf], + device=self._device, + ) + return ProxyArray(pos_buf), ProxyArray(quat_buf) + + wp.launch( + _compute_site_local_transforms, + dim=self.count, + inputs=[ + state.body_q, + self._site_body, + self._site_local, + self._parent_site_body, + self._parent_site_local, + ], + outputs=[self._local_pos_buf, self._local_quat_buf], + device=self._device, + ) + return self._local_pos_ta, self._local_quat_ta + + def set_local_poses( + self, + translations: wp.array | None = None, + orientations: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set parent-relative translations and/or orientations. + + Updates the internal ``site_local`` offsets so that + ``inv(parent_world) * (body_q[bid] * site_local)`` yields the desired + local pose. Does **not** modify ``body_q``. + + Args: + translations: Desired parent-relative translations ``(M, 3)``. + ``None`` leaves translations unchanged. + orientations: Desired parent-relative quaternions ``(M, 4)`` as + ``(qx, qy, qz, qw)``. ``None`` leaves orientations unchanged. + indices: Subset of sites to update. ``None`` means all sites. + """ + if translations is None and orientations is None: + return + + state = NewtonManager.get_state_0() + + if translations is None or orientations is None: + cur_pos_ta, cur_quat_ta = self.get_local_poses(indices) + if translations is None: + translations = cur_pos_ta.warp + if orientations is None: + orientations = cur_quat_ta.warp + + if indices is not None: + wp.launch( + _write_site_local_from_local_poses_indexed, + dim=len(indices), + inputs=[ + state.body_q, + self._site_body, + self._parent_site_body, + self._parent_site_local, + indices, + translations, + orientations, + self._site_local, + ], + device=self._device, + ) + else: + wp.launch( + _write_site_local_from_local_poses, + dim=self.count, + inputs=[ + state.body_q, + self._site_body, + self._parent_site_body, + self._parent_site_local, + translations, + orientations, + self._site_local, + ], + device=self._device, + ) + + # ------------------------------------------------------------------ + # Scales + # ------------------------------------------------------------------ + + def get_scales(self, indices: wp.array | None = None) -> wp.array: + """Get per-site scales by reading from the first collision shape on the same body. + + Args: + indices: Subset of sites to query. ``None`` means all sites. + + Returns: + A ``wp.array`` of shape ``(M, 3)``. + """ + model = NewtonManager.get_model() + num_shapes = model.shape_count + + if indices is not None: + n = len(indices) + out = wp.zeros(n, dtype=wp.vec3f, device=self._device) + wp.launch( + _gather_scales_indexed, + dim=n, + inputs=[model.shape_scale, model.shape_body, self._site_body, indices, num_shapes], + outputs=[out], + device=self._device, + ) + else: + out = wp.zeros(self.count, dtype=wp.vec3f, device=self._device) + wp.launch( + _gather_scales, + dim=self.count, + inputs=[model.shape_scale, model.shape_body, self._site_body, num_shapes], + outputs=[out], + device=self._device, + ) + return out + + def set_scales(self, scales: wp.array, indices: wp.array | None = None) -> None: + """Set per-site scales by writing to all collision shapes on the same body. + + Args: + scales: New scales ``(M, 3)`` as ``wp.array``. + indices: Subset of sites to update. ``None`` means all sites. + """ + model = NewtonManager.get_model() + num_shapes = model.shape_count + + if indices is not None: + wp.launch( + _scatter_scales_indexed, + dim=len(indices), + inputs=[self._site_body, indices, scales, model.shape_body, num_shapes, model.shape_scale], + device=self._device, + ) + else: + wp.launch( + _scatter_scales, + dim=self.count, + inputs=[self._site_body, scales, model.shape_body, num_shapes, model.shape_scale], + device=self._device, + ) + + +def _gf_matrix_to_xform7(mat: Gf.Matrix4d) -> list[float]: + """Convert a ``Gf.Matrix4d`` to ``[tx, ty, tz, qx, qy, qz, qw]``.""" + t = mat.ExtractTranslation() + q = mat.ExtractRotationQuat() + imag = q.GetImaginary() + return [float(t[0]), float(t[1]), float(t[2]), float(imag[0]), float(imag[1]), float(imag[2]), float(q.GetReal())] diff --git a/source/isaaclab/isaaclab/assets/utils/__init__.py b/source/isaaclab_newton/isaaclab_newton/test/__init__.py similarity index 100% rename from source/isaaclab/isaaclab/assets/utils/__init__.py rename to source/isaaclab_newton/isaaclab_newton/test/__init__.py diff --git a/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/__init__.py b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/__init__.py new file mode 100644 index 000000000000..bc4409447df3 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock interfaces for Newton simulation views. + +This module provides mock implementations of Newton simulation components for unit testing +without requiring an actual simulation environment. +""" + +from .factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, +) +from .mock_newton import ( + MockNewtonContactSensor, + MockNewtonModel, + MockWrenchComposer, + create_mock_newton_manager, +) +from .views import MockNewtonArticulationView + +__all__ = [ + # Views + "MockNewtonArticulationView", + # Other mocks + "MockNewtonModel", + "MockWrenchComposer", + "MockNewtonContactSensor", + # Factory functions + "create_mock_articulation_view", + "create_mock_quadruped_view", + "create_mock_humanoid_view", + "create_mock_newton_manager", +] diff --git a/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/factories.py b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/factories.py new file mode 100644 index 000000000000..9a188456a8a7 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/factories.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating mock Newton views.""" + +from __future__ import annotations + +from .views import MockNewtonArticulationView + + +def create_mock_articulation_view( + count: int = 1, + num_joints: int = 1, + num_bodies: int = 2, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", +) -> MockNewtonArticulationView: + """Create a mock Newton articulation view. + + Args: + count: Number of articulation instances. + num_joints: Number of degrees of freedom (joints). + num_bodies: Number of bodies (links). + joint_names: Names of the joints. Defaults to auto-generated names. + body_names: Names of the bodies. Defaults to auto-generated names. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for array allocation. + + Returns: + A MockNewtonArticulationView instance. + """ + return MockNewtonArticulationView( + num_instances=count, + num_bodies=num_bodies, + num_joints=num_joints, + device=device, + is_fixed_base=is_fixed_base, + joint_names=joint_names, + body_names=body_names, + ) + + +# -- Pre-configured factories -- + + +def create_mock_quadruped_view( + count: int = 1, + device: str = "cpu", +) -> MockNewtonArticulationView: + """Create a mock articulation view configured for a quadruped robot. + + Configuration: + - 12 DOFs (3 per leg x 4 legs: hip, thigh, calf) + - 13 links (base + 3 per leg) + - Floating base + + Args: + count: Number of articulation instances. + device: Device for array allocation. + + Returns: + A MockNewtonArticulationView configured for quadruped. + """ + joint_names = [ + "FL_hip_joint", + "FL_thigh_joint", + "FL_calf_joint", + "FR_hip_joint", + "FR_thigh_joint", + "FR_calf_joint", + "RL_hip_joint", + "RL_thigh_joint", + "RL_calf_joint", + "RR_hip_joint", + "RR_thigh_joint", + "RR_calf_joint", + ] + body_names = [ + "base", + "FL_hip", + "FL_thigh", + "FL_calf", + "FR_hip", + "FR_thigh", + "FR_calf", + "RL_hip", + "RL_thigh", + "RL_calf", + "RR_hip", + "RR_thigh", + "RR_calf", + ] + return MockNewtonArticulationView( + num_instances=count, + num_bodies=13, + num_joints=12, + device=device, + is_fixed_base=False, + joint_names=joint_names, + body_names=body_names, + ) + + +def create_mock_humanoid_view( + count: int = 1, + device: str = "cpu", +) -> MockNewtonArticulationView: + """Create a mock articulation view configured for a humanoid robot. + + Configuration: + - 21 DOFs (typical humanoid configuration) + - 22 links + - Floating base + + Args: + count: Number of articulation instances. + device: Device for array allocation. + + Returns: + A MockNewtonArticulationView configured for humanoid. + """ + joint_names = [ + # Torso + "torso_joint", + # Left arm + "left_shoulder_pitch", + "left_shoulder_roll", + "left_shoulder_yaw", + "left_elbow", + # Right arm + "right_shoulder_pitch", + "right_shoulder_roll", + "right_shoulder_yaw", + "right_elbow", + # Left leg + "left_hip_yaw", + "left_hip_roll", + "left_hip_pitch", + "left_knee", + "left_ankle_pitch", + "left_ankle_roll", + # Right leg + "right_hip_yaw", + "right_hip_roll", + "right_hip_pitch", + "right_knee", + "right_ankle_pitch", + "right_ankle_roll", + ] + body_names = [ + "pelvis", + "torso", + # Left arm + "left_shoulder", + "left_upper_arm", + "left_lower_arm", + "left_hand", + # Right arm + "right_shoulder", + "right_upper_arm", + "right_lower_arm", + "right_hand", + # Left leg + "left_hip", + "left_upper_leg", + "left_lower_leg", + "left_ankle", + "left_foot", + # Right leg + "right_hip", + "right_upper_leg", + "right_lower_leg", + "right_ankle", + "right_foot", + # Head + "neck", + "head", + ] + return MockNewtonArticulationView( + num_instances=count, + num_bodies=22, + num_joints=21, + device=device, + is_fixed_base=False, + joint_names=joint_names, + body_names=body_names, + ) diff --git a/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/mock_newton.py b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/mock_newton.py new file mode 100644 index 000000000000..f97ebd733eeb --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/mock_newton.py @@ -0,0 +1,182 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared mock interfaces for testing and benchmarking Newton-based asset classes. + +This module provides mock implementations of Newton simulation components that can +be used to test ArticulationData, RigidObjectData, and related classes without +requiring an actual simulation environment. +""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import warp as wp + + +class MockNewtonModel: + """Mock Newton model that provides gravity.""" + + def __init__(self, gravity: tuple[float, float, float] = (0.0, 0.0, -9.81), device: str = "cpu"): + self._gravity = wp.array([gravity], dtype=wp.vec3f, device=device) + + @property + def gravity(self): + return self._gravity + + +class MockWrenchComposer: + """Mock WrenchComposer for testing without full simulation infrastructure. + + This class mimics the interface of :class:`isaaclab.utils.wrench_composer.WrenchComposer` + and can be used to test Articulation and RigidObject classes. + """ + + def __init__(self, asset): + """Initialize the mock wrench composer. + + Args: + asset: The asset (Articulation or RigidObject) to compose wrenches for. + """ + self.num_envs = asset.num_instances + self.num_bodies = asset.num_bodies + self.device = asset.device + self._active = False + + # Create buffers matching the real WrenchComposer + self._composed_force_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._composed_torque_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._ALL_ENV_MASK_WP = wp.ones((self.num_envs,), dtype=wp.bool, device=self.device) + self._ALL_BODY_MASK_WP = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + + @property + def active(self) -> bool: + """Whether the wrench composer is active.""" + return self._active + + @property + def composed_force(self) -> wp.array: + """Composed force at the body's com frame.""" + return self._composed_force_b + + @property + def composed_torque(self) -> wp.array: + """Composed torque at the body's com frame.""" + return self._composed_torque_b + + def set_forces_and_torques( + self, + forces=None, + torques=None, + positions=None, + body_ids=None, + env_ids=None, + body_mask=None, + env_mask=None, + is_global: bool = False, + ): + """Mock set_forces_and_torques - just marks as active.""" + self._active = True + + def add_forces_and_torques( + self, + forces=None, + torques=None, + positions=None, + body_ids=None, + env_ids=None, + body_mask=None, + env_mask=None, + is_global: bool = False, + ): + """Mock add_forces_and_torques - just marks as active.""" + self._active = True + + def reset(self, env_ids=None, env_mask=None): + """Reset the composed force and torque.""" + self._composed_force_b.zero_() + self._composed_torque_b.zero_() + self._active = False + + +def create_mock_newton_manager( + patch_path: str, + gravity: tuple[float, float, float] = (0.0, 0.0, -9.81), +): + """Create a mock NewtonManager for testing. + + Args: + patch_path: The module path to patch + (e.g., "isaaclab_newton.assets.articulation.articulation_data.NewtonManager"). + gravity: Gravity vector to use for the mock model. + + Returns: + A context manager that patches the NewtonManager. + """ + mock_model = MockNewtonModel(gravity) + mock_state = MagicMock() + mock_control = MagicMock() + + return patch( + patch_path, + **{ + "get_model.return_value": mock_model, + "get_state_0.return_value": mock_state, + "get_control.return_value": mock_control, + "get_dt.return_value": 0.01, + }, + ) + + +class MockNewtonContactSensor: + """Mock Newton contact sensor for testing without full simulation infrastructure. + + This class mimics the interface of Newton's SensorContact and can be used to test + ContactSensor classes without requiring an actual simulation environment. + """ + + def __init__( + self, + num_sensing_objs: int, + num_counterparts: int = 1, + device: str = "cuda:0", + ): + """Initialize the mock contact sensor. + + Args: + num_sensing_objs: Number of sensing objects (e.g., bodies or shapes). + num_counterparts: Number of counterparts per sensing object. + device: Device to use. + """ + self.device = device + self.shape: tuple[int, int] = (num_sensing_objs, num_counterparts) + self.sensing_objs: list[list[tuple[int, int]]] = [[(i, 1) for i in range(num_sensing_objs)]] + self.counterparts: list[list[tuple[int, int]]] = [[(i, 1) for i in range(num_counterparts)]] + self.reading_indices: list[list[int]] = [list(range(num_counterparts)) for _ in range(num_sensing_objs)] + + # Net force array (n_sensing_objs, n_counterparts) of vec3 + self._net_force = wp.zeros(num_sensing_objs * num_counterparts, dtype=wp.vec3, device=device) + self.net_force = self._net_force.reshape(self.shape) + + def eval(self, contacts): + """Mock eval - does nothing since forces are set directly via set_mock_data.""" + pass + + def get_total_force(self) -> wp.array: + """Get the total net force measured by the contact sensor.""" + return self.net_force + + def set_mock_data(self, net_force: wp.array | None = None): + """Set mock contact force data. + + Args: + net_force: Force data shaped (num_sensing_objs, num_counterparts) of vec3. + If None, zeros the force data. + """ + if net_force is None: + self._net_force.zero_() + else: + self._net_force.assign(net_force) diff --git a/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/__init__.py b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/__init__.py new file mode 100644 index 000000000000..721f887ba475 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock Newton views.""" + +from .mock_articulation_view import MockNewtonArticulationView, MockNewtonCollectionView + +__all__ = [ + "MockNewtonArticulationView", + "MockNewtonCollectionView", +] diff --git a/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/mock_articulation_view.py new file mode 100644 index 000000000000..26761937667e --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/test/mock_interfaces/views/mock_articulation_view.py @@ -0,0 +1,677 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of Newton ArticulationView using structured Warp types.""" + +from __future__ import annotations + +import numpy as np +import warp as wp + + +class MockNewtonCollectionView: + """Mock Newton ArticulationView for rigid object collection testing. + + This class mimics the interface of a single ``ArticulationView`` whose combined + fnmatch pattern matches **B** body types per world. Its data is shaped + ``(N, B, ...)`` rather than the ``(N, 1, ...)`` convention used for single + articulations or single rigid bodies. + + Data Shapes: + - root_transforms: ``(N, B)`` dtype=wp.transformf + - root_velocities: ``(N, B)`` dtype=wp.spatial_vectorf + - body_com: ``(N, B, 1)`` dtype=wp.vec3f + - body_mass: ``(N, B, 1)`` dtype=wp.float32 + - body_inertia: ``(N, B, 1)`` dtype=wp.mat33f + - body_f: ``(N, B, 1)`` dtype=wp.spatial_vectorf + + Where N = num_envs, B = num_bodies (body types in the collection). + ``count`` returns ``N * B`` so that the data class can compute + ``num_instances = count // num_bodies``. + """ + + def __init__( + self, + num_envs: int = 2, + num_bodies: int = 3, + device: str = "cpu", + body_names: list[str] | None = None, + ): + self._num_envs = num_envs + self._num_bodies = num_bodies + self._device = device + self._noop_setters = False + + self._body_names = body_names if body_names is not None else [f"object_{i}" for i in range(num_bodies)] + + # Internal state (lazily initialised) + self._root_transforms: wp.array | None = None + self._root_velocities: wp.array | None = None + self._articulation_ids: wp.array | None = None + self._attributes: dict[str, wp.array | None] = { + "body_com": None, + "body_mass": None, + "body_inertia": None, + "body_f": None, + } + + # -- Properties -------------------------------------------------------- + + @property + def count(self) -> int: + """Total matched entities (``N * B``).""" + return self._num_envs * self._num_bodies + + @property + def body_names(self) -> list[str]: + return self._body_names + + @property + def articulation_ids(self) -> wp.array: + """Mapping from ``(world, arti)`` to model articulation index. Shape ``(N, B)`` dtype=int.""" + if self._articulation_ids is None: + ids_np = np.arange(self._num_envs * self._num_bodies, dtype=np.int32).reshape( + self._num_envs, self._num_bodies + ) + self._articulation_ids = wp.array(ids_np, dtype=int, device=self._device) + return self._articulation_ids + + # -- Lazy init helpers ------------------------------------------------- + + def _ensure_root_transforms(self) -> wp.array: + if self._root_transforms is None: + self._root_transforms = wp.zeros( + (self._num_envs, self._num_bodies), dtype=wp.transformf, device=self._device + ) + return self._root_transforms + + def _ensure_root_velocities(self) -> wp.array: + if self._root_velocities is None: + self._root_velocities = wp.zeros( + (self._num_envs, self._num_bodies), dtype=wp.spatial_vectorf, device=self._device + ) + return self._root_velocities + + def _ensure_attribute(self, name: str) -> wp.array: + if self._attributes[name] is None: + self._attributes[name] = self._create_default_attribute(name) + return self._attributes[name] + + def _create_default_attribute(self, name: str) -> wp.array: + N, B = self._num_envs, self._num_bodies + dev = self._device + if name == "body_com": + return wp.zeros((N, B, 1), dtype=wp.vec3f, device=dev) + elif name == "body_mass": + return wp.zeros((N, B, 1), dtype=wp.float32, device=dev) + elif name == "body_inertia": + return wp.zeros((N, B, 1), dtype=wp.mat33f, device=dev) + elif name == "body_f": + return wp.zeros((N, B, 1), dtype=wp.spatial_vectorf, device=dev) + else: + raise KeyError(f"Unknown attribute: {name}") + + # -- Getters ----------------------------------------------------------- + + def get_root_transforms(self, state) -> wp.array: + return self._ensure_root_transforms() + + def get_root_velocities(self, state) -> wp.array: + return self._ensure_root_velocities() + + def get_attribute(self, name: str, model_or_state) -> wp.array: + return self._ensure_attribute(name) + + # -- Setters ----------------------------------------------------------- + + def set_root_transforms(self, state, transforms: wp.array) -> None: + if self._noop_setters: + return + self._ensure_root_transforms().assign(transforms) + + def set_root_velocities(self, state, velocities: wp.array) -> None: + if self._noop_setters: + return + self._ensure_root_velocities().assign(velocities) + + # -- Mock data injection ----------------------------------------------- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for testing.""" + N, B = self._num_envs, self._num_bodies + dev = self._device + + # Root transforms + root_tf_np = np.random.randn(N, B, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + self._root_transforms = wp.array(root_tf_np, dtype=wp.transformf, device=dev) + + # Root velocities + root_vel_np = np.random.randn(N, B, 6).astype(np.float32) + self._root_velocities = wp.array(root_vel_np, dtype=wp.spatial_vectorf, device=dev) + + # Attributes (all have trailing link dim of 1) + self._attributes["body_com"] = wp.array( + np.random.randn(N, B, 1, 3).astype(np.float32), dtype=wp.vec3f, device=dev + ) + self._attributes["body_mass"] = wp.array( + (np.random.rand(N, B, 1) * 10 + 0.1).astype(np.float32), dtype=wp.float32, device=dev + ) + self._attributes["body_inertia"] = wp.array( + np.random.randn(N, B, 1, 9).astype(np.float32), dtype=wp.mat33f, device=dev + ) + self._attributes["body_f"] = wp.array( + np.random.randn(N, B, 1, 6).astype(np.float32), dtype=wp.spatial_vectorf, device=dev + ) + + +class MockNewtonArticulationView: + """Mock Newton ArticulationView for unit testing without simulation. + + This class mimics the interface that ``ArticulationData`` and ``RigidObjectData`` + expect from Newton's selection API. It can be used for both articulation and + rigid object testing since Newton has no separate rigid body view. + + Data Shapes (structured Warp types with ``(N, 1, ...)`` convention): + - root_transforms: ``(N, 1)`` dtype=wp.transformf for floating base, + ``(N, 1, 1)`` for fixed base + - root_velocities: ``(N, 1)`` dtype=wp.spatial_vectorf (None for fixed base) + - link_transforms: ``(N, 1, L)`` dtype=wp.transformf + - link_velocities: ``(N, 1, L)`` dtype=wp.spatial_vectorf (None for fixed base) + - dof_positions: ``(N, 1, J)`` dtype=wp.float32 + - dof_velocities: ``(N, 1, J)`` dtype=wp.float32 + - body_com: ``(N, 1, L)`` dtype=wp.vec3f + - body_mass: ``(N, 1, L)`` dtype=wp.float32 + - body_inertia: ``(N, 1, L)`` dtype=wp.mat33f + + Where N = count, L = link_count, J = joint_dof_count + + Note: + Newton's selection API uses structured Warp types (``wp.transformf``, + ``wp.spatial_vectorf``, ``wp.vec3f``, ``wp.mat33f``) natively, unlike PhysX + which uses flat float32 arrays. + """ + + def __init__( + self, + num_instances: int = 1, + num_bodies: int = 2, + num_joints: int = 1, + device: str = "cpu", + is_fixed_base: bool = False, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + ): + """Initialize the mock Newton articulation view. + + Args: + num_instances: Number of articulation instances. + num_bodies: Number of bodies (links). + num_joints: Number of joints (DOFs). + device: Device for array allocation ("cpu" or "cuda:N"). + is_fixed_base: Whether the articulation has a fixed base. + joint_names: Names of joints. Defaults to ["joint_0", ...]. + body_names: Names of bodies. Defaults to ["body_0", ...]. + """ + self._count = num_instances + self._link_count = num_bodies + self._joint_dof_count = num_joints + self._device = device + self._is_fixed_base = is_fixed_base + self._noop_setters = False + + # Set joint and body names + self._joint_dof_names = joint_names if joint_names is not None else [f"joint_{i}" for i in range(num_joints)] + self._body_names = body_names if body_names is not None else [f"body_{i}" for i in range(num_bodies)] + + # Internal state (lazily initialized) + self._root_transforms: wp.array | None = None + self._root_velocities: wp.array | None = None + self._link_transforms: wp.array | None = None + self._link_velocities: wp.array | None = None + self._dof_positions: wp.array | None = None + self._dof_velocities: wp.array | None = None + self._articulation_ids: wp.array | None = None + + # Attributes dict (lazily initialized) + self._attributes: dict[str, wp.array | None] = { + "body_com": None, + "body_mass": None, + "body_inertia": None, + "joint_limit_lower": None, + "joint_limit_upper": None, + "joint_target_ke": None, + "joint_target_kd": None, + "joint_armature": None, + "joint_friction": None, + "joint_velocity_limit": None, + "joint_effort_limit": None, + "body_f": None, + "joint_f": None, + "joint_target_pos": None, + "joint_target_vel": None, + "joint_limit_ke": None, + "joint_limit_kd": None, + } + + # -- Properties -- + + @property + def count(self) -> int: + """Number of articulation instances.""" + return self._count + + @property + def link_count(self) -> int: + """Number of links (bodies) per instance.""" + return self._link_count + + @property + def joint_dof_count(self) -> int: + """Number of DOFs (joints) per instance.""" + return self._joint_dof_count + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation has a fixed base.""" + return self._is_fixed_base + + @property + def joint_dof_names(self) -> list[str]: + """Names of the DOFs.""" + return self._joint_dof_names + + @property + def body_names(self) -> list[str]: + """Names of the bodies.""" + return self._body_names + + @property + def link_names(self) -> list[str]: + """Alias for body_names (Newton calls bodies 'links').""" + return self._body_names + + @property + def articulation_ids(self) -> wp.array: + """Mapping from ``(world, arti)`` to model articulation index. Shape ``(N, 1)`` dtype=int.""" + if self._articulation_ids is None: + ids_np = np.arange(self._count, dtype=np.int32).reshape(self._count, 1) + self._articulation_ids = wp.array(ids_np, dtype=int, device=self._device) + return self._articulation_ids + + # -- Lazy Initialization Helpers -- + + def _ensure_root_transforms(self) -> wp.array: + """Lazily create root transforms with identity quaternions.""" + if self._root_transforms is None: + if self._is_fixed_base: + self._root_transforms = wp.zeros((self._count, 1, 1), dtype=wp.transformf, device=self._device) + else: + self._root_transforms = wp.zeros((self._count, 1), dtype=wp.transformf, device=self._device) + return self._root_transforms + + def _ensure_root_velocities(self) -> wp.array | None: + """Lazily create root velocities (None for fixed base).""" + if self._is_fixed_base: + return None + if self._root_velocities is None: + self._root_velocities = wp.zeros((self._count, 1), dtype=wp.spatial_vectorf, device=self._device) + return self._root_velocities + + def _ensure_link_transforms(self) -> wp.array: + """Lazily create link transforms.""" + if self._link_transforms is None: + self._link_transforms = wp.zeros( + (self._count, 1, self._link_count), dtype=wp.transformf, device=self._device + ) + return self._link_transforms + + def _ensure_link_velocities(self) -> wp.array | None: + """Lazily create link velocities (None for fixed base).""" + if self._is_fixed_base: + return None + if self._link_velocities is None: + self._link_velocities = wp.zeros( + (self._count, 1, self._link_count), dtype=wp.spatial_vectorf, device=self._device + ) + return self._link_velocities + + def _ensure_dof_positions(self) -> wp.array: + """Lazily create DOF positions.""" + if self._dof_positions is None: + self._dof_positions = wp.zeros( + (self._count, 1, self._joint_dof_count), dtype=wp.float32, device=self._device + ) + return self._dof_positions + + def _ensure_dof_velocities(self) -> wp.array: + """Lazily create DOF velocities.""" + if self._dof_velocities is None: + self._dof_velocities = wp.zeros( + (self._count, 1, self._joint_dof_count), dtype=wp.float32, device=self._device + ) + return self._dof_velocities + + def _ensure_attribute(self, name: str) -> wp.array: + """Lazily create an attribute array.""" + if self._attributes[name] is None: + self._attributes[name] = self._create_default_attribute(name) + return self._attributes[name] + + def _create_default_attribute(self, name: str) -> wp.array: + """Create a default attribute array based on name.""" + N, L, J = self._count, self._link_count, self._joint_dof_count + dev = self._device + + if name == "body_com": + return wp.zeros((N, 1, L), dtype=wp.vec3f, device=dev) + elif name == "body_mass": + return wp.zeros((N, 1, L), dtype=wp.float32, device=dev) + elif name == "body_inertia": + return wp.zeros((N, 1, L), dtype=wp.mat33f, device=dev) + elif name == "body_f": + return wp.zeros((N, 1, L), dtype=wp.spatial_vectorf, device=dev) + elif name in ( + "joint_limit_lower", + "joint_limit_upper", + "joint_target_ke", + "joint_target_kd", + "joint_armature", + "joint_friction", + "joint_velocity_limit", + "joint_effort_limit", + "joint_f", + "joint_target_pos", + "joint_target_vel", + "joint_limit_ke", + "joint_limit_kd", + ): + return wp.zeros((N, 1, J), dtype=wp.float32, device=dev) + else: + raise KeyError(f"Unknown attribute: {name}") + + # -- Root Getters -- + + def get_root_transforms(self, state) -> wp.array: + """Get world transforms of root links. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array with dtype=wp.transformf. Shape ``(N, 1)`` for floating base, + ``(N, 1, 1)`` for fixed base. + """ + return self._ensure_root_transforms() + + def get_root_velocities(self, state) -> wp.array | None: + """Get velocities of root links. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array of shape ``(N, 1)`` with dtype=wp.spatial_vectorf, + or None for fixed base. + """ + return self._ensure_root_velocities() + + # -- Link Getters -- + + def get_link_transforms(self, state) -> wp.array: + """Get world transforms of all links. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array of shape ``(N, 1, L)`` with dtype=wp.transformf. + """ + return self._ensure_link_transforms() + + def get_link_velocities(self, state) -> wp.array | None: + """Get velocities of all links. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array of shape ``(N, 1, L)`` with dtype=wp.spatial_vectorf, + or None for fixed base. + """ + return self._ensure_link_velocities() + + # -- DOF Getters -- + + def get_dof_positions(self, state) -> wp.array: + """Get positions of all DOFs. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array of shape ``(N, 1, J)`` with dtype=wp.float32. + """ + return self._ensure_dof_positions() + + def get_dof_velocities(self, state) -> wp.array: + """Get velocities of all DOFs. + + Args: + state: Newton state object (unused in mock). + + Returns: + Warp array of shape ``(N, 1, J)`` with dtype=wp.float32. + """ + return self._ensure_dof_velocities() + + # -- Attribute Getter -- + + def get_attribute(self, name: str, model_or_state) -> wp.array: + """Get a named attribute array. + + Args: + name: Attribute name (e.g. "body_mass", "joint_target_ke"). + model_or_state: Newton model or state object (unused in mock). + + Returns: + Warp array for the requested attribute. + + Raises: + KeyError: If the attribute name is unknown. + """ + return self._ensure_attribute(name) + + # -- Root Setters -- + + def set_root_transforms(self, state, transforms: wp.array) -> None: + """Set world transforms of root links. + + Args: + state: Newton state object (unused in mock). + transforms: Warp array with dtype=wp.transformf matching root shape. + + Raises: + ValueError: If the transforms shape does not match the expected root shape. + """ + if self._noop_setters: + return + expected = self._ensure_root_transforms() + if transforms.shape != expected.shape: + raise ValueError(f"Root transforms shape mismatch: expected {expected.shape}, got {transforms.shape}") + expected.assign(transforms) + + def set_root_velocities(self, state, velocities: wp.array) -> None: + """Set velocities of root links. + + Args: + state: Newton state object (unused in mock). + velocities: Warp array of shape ``(N, 1)`` with dtype=wp.spatial_vectorf. + """ + if self._noop_setters: + return + vel = self._ensure_root_velocities() + if vel is not None: + vel.assign(velocities) + + # -- Mock Setters (direct test data injection) -- + + def set_mock_root_transforms(self, transforms: wp.array) -> None: + """Set mock root transform data directly for testing. + + Args: + transforms: Warp array with dtype=wp.transformf. + """ + self._root_transforms = transforms + + def set_mock_root_velocities(self, velocities: wp.array) -> None: + """Set mock root velocity data directly for testing. + + Args: + velocities: Warp array with dtype=wp.spatial_vectorf. + """ + self._root_velocities = velocities + + def set_mock_link_transforms(self, transforms: wp.array) -> None: + """Set mock link transform data directly for testing. + + Args: + transforms: Warp array of shape ``(N, 1, L)`` with dtype=wp.transformf. + """ + self._link_transforms = transforms + + def set_mock_link_velocities(self, velocities: wp.array) -> None: + """Set mock link velocity data directly for testing. + + Args: + velocities: Warp array of shape ``(N, 1, L)`` with dtype=wp.spatial_vectorf. + """ + self._link_velocities = velocities + + def set_mock_dof_positions(self, positions: wp.array) -> None: + """Set mock DOF position data directly for testing. + + Args: + positions: Warp array of shape ``(N, 1, J)`` with dtype=wp.float32. + """ + self._dof_positions = positions + + def set_mock_dof_velocities(self, velocities: wp.array) -> None: + """Set mock DOF velocity data directly for testing. + + Args: + velocities: Warp array of shape ``(N, 1, J)`` with dtype=wp.float32. + """ + self._dof_velocities = velocities + + def set_mock_masses(self, masses: wp.array) -> None: + """Set mock body mass data directly for testing. + + Args: + masses: Warp array of shape ``(N, 1, L)`` with dtype=wp.float32. + """ + self._attributes["body_mass"] = masses + + def set_mock_coms(self, coms: wp.array) -> None: + """Set mock body center-of-mass data directly for testing. + + Args: + coms: Warp array of shape ``(N, 1, L)`` with dtype=wp.vec3f. + """ + self._attributes["body_com"] = coms + + def set_mock_inertias(self, inertias: wp.array) -> None: + """Set mock body inertia data directly for testing. + + Args: + inertias: Warp array of shape ``(N, 1, L)`` with dtype=wp.mat33f. + """ + self._attributes["body_inertia"] = inertias + + # -- Benchmark Utilities -- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + Uses numpy for random data generation (matching PhysX mock pattern). + """ + N = self._count + L = self._link_count + J = self._joint_dof_count + dev = self._device + + # Root transforms + if self._is_fixed_base: + root_tf_np = np.random.randn(N, 1, 1, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + self._root_transforms = wp.array(root_tf_np, dtype=wp.transformf, device=dev) + else: + root_tf_np = np.random.randn(N, 1, 7).astype(np.float32) + root_tf_np[..., 3:7] /= np.linalg.norm(root_tf_np[..., 3:7], axis=-1, keepdims=True) + self._root_transforms = wp.array(root_tf_np, dtype=wp.transformf, device=dev) + + # Root velocities (floating base only) + root_vel_np = np.random.randn(N, 1, 6).astype(np.float32) + self._root_velocities = wp.array(root_vel_np, dtype=wp.spatial_vectorf, device=dev) + + # Link transforms + link_tf_np = np.random.randn(N, 1, L, 7).astype(np.float32) + link_tf_np[..., 3:7] /= np.linalg.norm(link_tf_np[..., 3:7], axis=-1, keepdims=True) + self._link_transforms = wp.array(link_tf_np, dtype=wp.transformf, device=dev) + + # Link velocities (floating base only) + if not self._is_fixed_base: + link_vel_np = np.random.randn(N, 1, L, 6).astype(np.float32) + self._link_velocities = wp.array(link_vel_np, dtype=wp.spatial_vectorf, device=dev) + + # DOF state + self._dof_positions = wp.array(np.random.randn(N, 1, J).astype(np.float32), dtype=wp.float32, device=dev) + self._dof_velocities = wp.array(np.random.randn(N, 1, J).astype(np.float32), dtype=wp.float32, device=dev) + + # Body properties + self._attributes["body_com"] = wp.array( + np.random.randn(N, 1, L, 3).astype(np.float32), + dtype=wp.vec3f, + device=dev, + ) + self._attributes["body_mass"] = wp.array( + (np.random.rand(N, 1, L) * 10 + 0.1).astype(np.float32), + dtype=wp.float32, + device=dev, + ) + self._attributes["body_inertia"] = wp.array( + np.random.randn(N, 1, L, 9).astype(np.float32), + dtype=wp.mat33f, + device=dev, + ) + + # Joint properties + for attr_name in ( + "joint_limit_lower", + "joint_limit_upper", + "joint_target_ke", + "joint_target_kd", + "joint_armature", + "joint_friction", + "joint_velocity_limit", + "joint_effort_limit", + "joint_f", + "joint_target_pos", + "joint_target_vel", + "joint_limit_ke", + "joint_limit_kd", + ): + self._attributes[attr_name] = wp.array( + np.random.randn(N, 1, J).astype(np.float32), + dtype=wp.float32, + device=dev, + ) + + # Body forces + self._attributes["body_f"] = wp.array( + np.random.randn(N, 1, L, 6).astype(np.float32), + dtype=wp.spatial_vectorf, + device=dev, + ) diff --git a/source/isaaclab_newton/isaaclab_newton/video_recording/__init__.py b/source/isaaclab_newton/isaaclab_newton/video_recording/__init__.py new file mode 100644 index 000000000000..1d5cb96e0ef3 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/video_recording/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton GL perspective video recording.""" + +from .newton_gl_perspective_video import NewtonGlPerspectiveVideo +from .newton_gl_perspective_video_cfg import NewtonGlPerspectiveVideoCfg + +__all__ = ["NewtonGlPerspectiveVideo", "NewtonGlPerspectiveVideoCfg"] diff --git a/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video.py b/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video.py new file mode 100644 index 000000000000..9c440a657981 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video.py @@ -0,0 +1,128 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton GL perspective RGB capture via headless ``newton.viewer.ViewerGL``.""" + +from __future__ import annotations + +import logging +import math +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from .newton_gl_perspective_video_cfg import NewtonGlPerspectiveVideoCfg + +logger = logging.getLogger(__name__) + + +class NewtonGlPerspectiveVideo: + """Lazy-initialised ViewerGL; one RGB frame per :meth:`render_rgb_array` call.""" + + def __init__(self, cfg: NewtonGlPerspectiveVideoCfg): + self.cfg = cfg + self._viewer = None + self._init_attempted = False + + def _ensure_viewer(self) -> None: + if self._init_attempted: + return + self._init_attempted = True + from isaaclab.sim import SimulationContext + + sdp = SimulationContext.instance().initialize_scene_data_provider() + model = sdp.get_newton_model() + if model is None: + raise RuntimeError( + "Newton GL perspective video requires a Newton model on the scene data provider. " + "Do not use --video for this setup." + ) + + import pyglet + + pyglet.options["headless"] = True + from newton.viewer import ViewerGL + + w, h = self.cfg.window_width, self.cfg.window_height + viewer = ViewerGL(width=w, height=h, headless=True) + viewer.set_model(model) + viewer.set_world_offsets((0.0, 0.0, 0.0)) + viewer.up_axis = 2 + + aspect = w / h + h_fov = math.radians(self.cfg.horiz_fov_deg) + v_fov_deg = math.degrees(2.0 * math.atan(math.tan(h_fov / 2.0) / aspect)) + viewer.camera.fov = v_fov_deg + + self._viewer = viewer + self._apply_camera(self.cfg.eye, self.cfg.lookat) + logger.info("[NewtonGlPerspectiveVideo] ViewerGL ready (%dx%d).", w, h) + + def _apply_camera( + self, + position: tuple[float, float, float], + target: tuple[float, float, float], + ) -> None: + """Point the recorder's ViewerGL at ``position`` looking toward ``target``.""" + if self._viewer is None: + return + import warp as wp + + ex, ey, ez = position + lx, ly, lz = target + dx, dy, dz = lx - ex, ly - ey, lz - ez + length = math.sqrt(dx**2 + dy**2 + dz**2) + if length > 1e-9: + dx, dy, dz = dx / length, dy / length, dz / length + pitch = math.degrees(math.asin(max(-1.0, min(1.0, dz)))) + yaw = math.degrees(math.atan2(dy, dx)) + self._viewer.set_camera(pos=wp.vec3(ex, ey, ez), pitch=pitch, yaw=yaw) + + def update_camera( + self, + position: tuple[float, float, float], + target: tuple[float, float, float], + ) -> None: + """Update the recorder camera to match ``position`` / ``target``. + + Safe to call before the first :meth:`render_rgb_array` (the viewer is + created lazily; the values will be applied immediately after creation). + When the viewer is already live the camera is repositioned in-place so + the next frame reflects the new viewpoint. + + Args: + position: Camera eye position ``(x, y, z)``. + target: Camera look-at target ``(x, y, z)``. + """ + self._ensure_viewer() + self._apply_camera(position, target) + + def render_rgb_array(self) -> np.ndarray: + """Return one RGB frame from the Newton GL viewer. Raises on failure.""" + self._ensure_viewer() + from isaaclab.sim import SimulationContext + + sim = SimulationContext.instance() + sdp = sim.initialize_scene_data_provider() + state = sdp.get_newton_state() + dt = sim.get_physics_dt() + + viewer = self._viewer + viewer.begin_frame(dt) + viewer.log_state(state) + viewer.end_frame() + return viewer.get_frame().numpy() + + +def create_newton_gl_perspective_video(cfg: NewtonGlPerspectiveVideoCfg) -> NewtonGlPerspectiveVideo: + """Instantiate the Newton GL perspective capture from ``cfg.class_type``.""" + ct = cfg.class_type + if isinstance(ct, type): + return ct(cfg) + from isaaclab.utils.string import string_to_callable + + cls = string_to_callable(str(ct)) + return cls(cfg) diff --git a/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video_cfg.py b/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video_cfg.py new file mode 100644 index 000000000000..6408b2ac0e53 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/video_recording/newton_gl_perspective_video_cfg.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton GL perspective RGB capture (headless ViewerGL).""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from isaaclab.utils import configclass + +if TYPE_CHECKING: + pass + + +@configclass +class NewtonGlPerspectiveVideoCfg: + """Settings for capturing a perspective RGB frame via ``newton.viewer.ViewerGL``.""" + + class_type: type[Any] | str = "isaaclab_newton.video_recording.newton_gl_perspective_video:NewtonGlPerspectiveVideo" + """Implementation class; default is + :class:`~isaaclab_newton.video_recording.newton_gl_perspective_video.NewtonGlPerspectiveVideo`.""" + + window_width: int = 1280 + """Viewer width in pixels.""" + + window_height: int = 720 + """Viewer height in pixels.""" + + eye: tuple[float, float, float] = (7.5, 7.5, 7.5) + """Camera position in world space (metres).""" + + lookat: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Camera look-at target in world space (metres).""" + + horiz_fov_deg: float = 60.0 + """Horizontal FOV assumed for Kit ``/OmniverseKit_Persp``; converted to vertical FOV for GL viewer.""" diff --git a/source/isaaclab_newton/isaaclab_newton/video_recording/recording_hooks.py b/source/isaaclab_newton/isaaclab_newton/video_recording/recording_hooks.py new file mode 100644 index 000000000000..7efcae7b5500 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/video_recording/recording_hooks.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Hooks for Newton-based video recording after visualizers have stepped.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from isaaclab.sim import SimulationContext + + +def recording_followup_after_visualizers(sim: SimulationContext) -> None: + """Newton extension hook: recording pipeline after visualizers have stepped. + + Called from :func:`isaaclab.envs.utils.recording_hooks.run_recording_hooks_after_visualizers`. + Wire **Newton GL** / Newton-specific video capture here (e.g. perspective video, + frame sync with ``NewtonVisualizer``). Stay lightweight and no-op when Newton + recording is inactive. + + The Isaac Sim / RTX path (``omni.kit.app`` pump for Replicator ``rgb_array``) lives in + :mod:`isaaclab_physx.renderers.isaac_rtx_renderer_utils` — not here. + + Args: + sim: Active simulation context. + """ + _ = sim # Reserved until Newton GL video paths are hooked up. diff --git a/source/isaaclab_newton/pyproject.toml b/source/isaaclab_newton/pyproject.toml new file mode 100644 index 000000000000..31dce8d230ec --- /dev/null +++ b/source/isaaclab_newton/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools<82.0.0", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_newton/setup.py b/source/isaaclab_newton/setup.py new file mode 100644 index 000000000000..2e0b87f17543 --- /dev/null +++ b/source/isaaclab_newton/setup.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_newton' python package.""" + +import os +import shutil + +import toml +from setuptools import setup +from setuptools.command.build_py import build_py as _build_py + + +class build_py(_build_py): + """Custom build command that bundles config/extension.toml into the package. + + This ensures the toml is available when installed as a regular (non-editable) + wheel, e.g. when pulled in as a dependency via a file:// URL. + """ + + def run(self): + super().run() + src = os.path.join(EXTENSION_PATH, "config", "extension.toml") + dst_dir = os.path.join(self.build_lib, "isaaclab_newton", "config") + os.makedirs(dst_dir, exist_ok=True) + shutil.copy(src, os.path.join(dst_dir, "extension.toml")) + + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +INSTALL_REQUIRES = [] + +EXTRAS_REQUIRE = { + "all": [ + "prettytable==3.3.0", + "mujoco==3.6.0", + "mujoco-warp==3.6.0", + "PyOpenGL-accelerate==3.1.10", + "newton @ git+https://github.com/newton-physics/newton.git@a27277ed49d6f307b8a1e4c394be7e1d14965a62", + ], +} + +# Installation operation +setup( + name="isaaclab_newton", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + license="BSD-3-Clause", + include_package_data=True, + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", + install_requires=INSTALL_REQUIRES, + extras_require=EXTRAS_REQUIRE, + packages=[ + "isaaclab_newton", + "isaaclab_newton.assets", + "isaaclab_newton.assets.articulation", + "isaaclab_newton.assets.rigid_object", + "isaaclab_newton.assets.rigid_object_collection", + "isaaclab_newton.cloner", + "isaaclab_newton.physics", + "isaaclab_newton.renderers", + "isaaclab_newton.scene_data_providers", + "isaaclab_newton.sensors", + "isaaclab_newton.sensors.contact_sensor", + "isaaclab_newton.sensors.frame_transformer", + "isaaclab_newton.test", + "isaaclab_newton.test.mock_interfaces", + "isaaclab_newton.test.mock_interfaces.views", + ], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, + cmdclass={"build_py": build_py}, +) diff --git a/source/isaaclab_newton/test/assets/test_articulation.py b/source/isaaclab_newton/test/assets/test_articulation.py new file mode 100644 index 000000000000..a5eacf95045e --- /dev/null +++ b/source/isaaclab_newton/test/assets/test_articulation.py @@ -0,0 +1,2476 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +HEADLESS = True + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import sys + +import pytest +import torch +import warp as wp +from isaaclab_newton.assets import Articulation +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics import NewtonManager as SimulationManager +from newton.solvers import SolverNotifyFlags + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.actuators import ActuatorBase, IdealPDActuatorCfg, ImplicitActuatorCfg +from isaaclab.assets import ArticulationCfg +from isaaclab.envs.mdp.terminations import joint_effort_out_of_limit +from isaaclab.managers import SceneEntityCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab.utils.version import get_isaac_sim_version, has_kit + +## +# Pre-defined configs +## +from isaaclab_assets import ANYMAL_C_CFG, FRANKA_PANDA_CFG # isort:skip +# , SHADOW_HAND_CFG # isort:skip + +SIM_CFGs = { + "humanoid": SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=80, + nconmax=25, + ls_iterations=20, + ls_parallel=False, + cone="pyramidal", + update_data_interval=2, + integrator="implicitfast", + impratio=1, + ), + num_substeps=2, + debug_mode=False, + ), + ), + "anymal": SimulationCfg( + dt=1 / 200, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=70, + nconmax=70, + ls_iterations=40, + cone="elliptic", + impratio=100, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=2, + debug_mode=True, + ), + ), + "panda": SimulationCfg( + dt=1 / 120, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=20, + nconmax=20, + ls_iterations=20, + cone="pyramidal", + impratio=1, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ), + ), + "single_joint_implicit": SimulationCfg( + dt=1 / 120, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=20, + nconmax=20, + ls_iterations=20, + cone="pyramidal", + impratio=1, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ), + ), + "single_joint_explicit": SimulationCfg( + dt=1 / 120, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=20, + nconmax=20, + ls_iterations=20, + cone="pyramidal", + impratio=1, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ), + ), + "shadow_hand": SimulationCfg( + dt=1 / 120, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=70, + nconmax=70, + ls_iterations=40, + cone="elliptic", + impratio=100, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=2, + debug_mode=True, + ), + ), +} + + +def generate_articulation_cfg( + articulation_type: str, + stiffness: float | None = 10.0, + damping: float | None = 2.0, + velocity_limit: float | None = None, + effort_limit: float | None = None, + velocity_limit_sim: float | None = None, + effort_limit_sim: float | None = None, +) -> ArticulationCfg: + """Generate an articulation configuration. + + Args: + articulation_type: Type of articulation to generate. + It should be one of: "humanoid", "panda", "anymal", "shadow_hand", "single_joint_implicit", + "single_joint_explicit". + stiffness: Stiffness value for the articulation's actuators. Only currently used for "humanoid". + Defaults to 10.0. + damping: Damping value for the articulation's actuators. Only currently used for "humanoid". + Defaults to 2.0. + velocity_limit: Velocity limit for the actuators. Only currently used for "single_joint_implicit" + and "single_joint_explicit". + effort_limit: Effort limit for the actuators. Only currently used for "single_joint_implicit" + and "single_joint_explicit". + velocity_limit_sim: Velocity limit for the actuators (set into the simulation). + Only currently used for "single_joint_implicit" and "single_joint_explicit". + effort_limit_sim: Effort limit for the actuators (set into the simulation). + Only currently used for "single_joint_implicit" and "single_joint_explicit". + + Returns: + The articulation configuration for the requested articulation type. + + """ + if articulation_type == "humanoid": + articulation_cfg = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/Humanoid/humanoid_instanceable.usd" + ), + init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.34)), + actuators={"body": ImplicitActuatorCfg(joint_names_expr=[".*"], stiffness=stiffness, damping=damping)}, + ) + elif articulation_type == "panda": + articulation_cfg = FRANKA_PANDA_CFG + elif articulation_type == "anymal": + articulation_cfg = ANYMAL_C_CFG + elif articulation_type == "shadow_hand": + pytest.skip("Shadow hand is not supported in Newton") + # articulation_cfg = SHADOW_HAND_CFG + elif articulation_type == "single_joint_implicit": + articulation_cfg = ArticulationCfg( + # we set 80.0 default for max force because default in USD is 10e10 which makes testing annoying. + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd", + joint_drive_props=sim_utils.JointDrivePropertiesCfg(max_effort=80.0, max_velocity=5.0), + ), + actuators={ + "joint": ImplicitActuatorCfg( + joint_names_expr=[".*"], + effort_limit_sim=effort_limit_sim, + velocity_limit_sim=velocity_limit_sim, + effort_limit=effort_limit, + velocity_limit=velocity_limit, + stiffness=2000.0, + damping=100.0, + ), + }, + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.0), + joint_pos=({"RevoluteJoint": 1.5708}), + rot=(0.7071081, 0, 0, 0.7071055), + ), + ) + elif articulation_type == "single_joint_explicit": + # we set 80.0 default for max force because default in USD is 10e10 which makes testing annoying. + articulation_cfg = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd", + joint_drive_props=sim_utils.JointDrivePropertiesCfg(max_effort=80.0, max_velocity=5.0), + ), + actuators={ + "joint": IdealPDActuatorCfg( + joint_names_expr=[".*"], + effort_limit_sim=effort_limit_sim, + velocity_limit_sim=velocity_limit_sim, + effort_limit=effort_limit, + velocity_limit=velocity_limit, + stiffness=0.0, + damping=10.0, + ), + }, + ) + elif articulation_type == "spatial_tendon_test_asset": + # we set 80.0 default for max force because default in USD is 10e10 which makes testing annoying. + articulation_cfg = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/IsaacLab/Tests/spatial_tendons.usd", + ), + actuators={ + "joint": ImplicitActuatorCfg( + joint_names_expr=[".*"], + stiffness=2000.0, + damping=100.0, + ), + }, + ) + else: + raise ValueError( + f"Invalid articulation type: {articulation_type}, valid options are 'humanoid', 'panda', 'anymal'," + " 'shadow_hand', 'single_joint_implicit', 'single_joint_explicit' or 'spatial_tendon_test_asset'." + ) + + return articulation_cfg + + +def fix_reversed_joints(stage): + """Fix reversed joints on the USD stage. + + Some USD assets have joints where physics:body0 is the child and physics:body1 is the parent, + which is the opposite of what Newton expects. This function detects reversed joints by building + a graph of body connections and identifying the root body (the one attached to world via a joint + with a missing body target). Any joint where body0 is closer to the root than body1 is swapped. + """ + from pxr import UsdPhysics + + # First pass: find root bodies (bodies with a joint that has only one target, i.e. attached to world) + root_bodies: set[str] = set() + joints_to_check = [] + for prim in stage.Traverse(): + if not prim.IsA(UsdPhysics.Joint): + continue + body0_targets = prim.GetRelationship("physics:body0").GetTargets() + body1_targets = prim.GetRelationship("physics:body1").GetTargets() + if body0_targets and not body1_targets: + root_bodies.add(str(body0_targets[0])) + elif body1_targets and not body0_targets: + root_bodies.add(str(body1_targets[0])) + elif body0_targets and body1_targets: + joints_to_check.append(prim) + + if not root_bodies: + return + + # Second pass: for each joint with two bodies, ensure body0 is the parent (closer to root) + for prim in joints_to_check: + body0_rel = prim.GetRelationship("physics:body0") + body1_rel = prim.GetRelationship("physics:body1") + body0_path = str(body0_rel.GetTargets()[0]) + body1_path = str(body1_rel.GetTargets()[0]) + + # Determine if we need to swap: body1 is root or ancestor → need to swap + body1_is_parent = body1_path in root_bodies or body0_path.startswith(body1_path + "/") + body0_is_parent = body0_path in root_bodies or body1_path.startswith(body0_path + "/") + + if body0_is_parent or not body1_is_parent: + continue # already correct or ambiguous + + # Swap body0 and body1 + body0_rel.SetTargets(body1_rel.GetTargets()) + body1_rel.SetTargets([body0_path]) + + # Swap local transforms + for attr_suffix in ("localPos", "localRot"): + attr0 = prim.GetAttribute(f"physics:{attr_suffix}0") + attr1 = prim.GetAttribute(f"physics:{attr_suffix}1") + val0, val1 = attr0.Get(), attr1.Get() + if val0 is not None and val1 is not None: + attr0.Set(val1) + attr1.Set(val0) + + +_REVERSED_JOINT_USD_FILES = {"revolute_articulation.usd"} +"""USD filenames with known reversed joint body0/body1 ordering.""" + + +def generate_articulation( + articulation_cfg: ArticulationCfg, num_articulations: int, device: str +) -> tuple[Articulation, torch.tensor]: + """Generate an articulation from a configuration. + + Handles the creation of the articulation, the environment prims and the articulation's environment + translations + + Args: + articulation_cfg: Articulation configuration. + num_articulations: Number of articulations to generate. + device: Device to use for the tensors. + + Returns: + The articulation and environment translations. + + """ + # Generate translations of 2.5 m in x for each articulation + translations = torch.zeros(num_articulations, 3, device=device) + translations[:, 0] = torch.arange(num_articulations) * 2.5 + + # Create Top-level Xforms, one for each articulation + for i in range(num_articulations): + sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=translations[i][:3]) + articulation = Articulation(articulation_cfg.replace(prim_path="/World/Env_.*/Robot")) + + # Fix reversed joints for known-broken USD assets (body0/body1 swapped) + usd_path = getattr(articulation_cfg.spawn, "usd_path", "") + if any(name in usd_path for name in _REVERSED_JOINT_USD_FILES): + import omni.usd + + fix_reversed_joints(omni.usd.get_context().get_stage()) + + return articulation, translations + + +@pytest.fixture +def sim(request): + """Create simulation context with the specified device.""" + device = request.getfixturevalue("device") + if "gravity_enabled" in request.fixturenames: + gravity_enabled = request.getfixturevalue("gravity_enabled") + else: + gravity_enabled = True # default to gravity enabled + if "add_ground_plane" in request.fixturenames: + add_ground_plane = request.getfixturevalue("add_ground_plane") + else: + add_ground_plane = False # default to no ground plane + articulation_type = request.getfixturevalue("articulation_type") + sim_cfg = SIM_CFGs[articulation_type] + sim_cfg.device = device + with build_simulation_context( + device=device, + auto_add_lighting=True, + gravity_enabled=gravity_enabled, + add_ground_plane=add_ground_plane, + sim_cfg=sim_cfg, + ) as sim: + sim._app_control_on_stop_handle = None + yield sim + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_initialization_floating_base_non_root(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test initialization for a floating-base with articulation root on a rigid body. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is not fixed base + 3. All buffers have correct shapes + 4. The articulation can be simulated + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type, stiffness=0.0, damping=0.0) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + + # Check if articulation is initialized + assert articulation.is_initialized + # Check that is fixed base + assert not articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 21) + + # -- actuator type + for actuator_name, actuator in articulation.actuators.items(): + is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) + assert actuator.is_implicit_model == is_implicit_model_cfg + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_initialization_floating_base(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test initialization for a floating-base with articulation root on provided prim path. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is not fixed base + 3. All buffers have correct shapes + 4. The articulation can be simulated + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type, stiffness=0.0, damping=0.0) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that floating base + assert not articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 12) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) + + # -- actuator type + for actuator_name, actuator in articulation.actuators.items(): + is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) + assert actuator.is_implicit_model == is_implicit_model_cfg + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_initialization_fixed_base(sim, num_articulations, device, articulation_type): + """Test initialization for fixed base. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is fixed base + 3. All buffers have correct shapes + 4. The articulation maintains its default state + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that fixed base + assert articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 9) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) + + # -- actuator type + for actuator_name, actuator in articulation.actuators.items(): + is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) + assert actuator.is_implicit_model == is_implicit_model_cfg + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + # check that the root is at the correct state - its default state as it is fixed base + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations + + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["single_joint_implicit"]) +def test_initialization_fixed_base_single_joint(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test initialization for fixed base articulation with a single joint. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is fixed base + 3. All buffers have correct shapes + 4. The articulation maintains its default state + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that fixed base + assert articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 1) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) + + # -- actuator type + for actuator_name, actuator in articulation.actuators.items(): + is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) + assert actuator.is_implicit_model == is_implicit_model_cfg + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + # check that the root is at the correct state - its default state as it is fixed base + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations + + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["shadow_hand"]) +def test_initialization_hand_with_tendons(sim, num_articulations, device, articulation_type): + """Test initialization for fixed base articulated hand with tendons. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is fixed base + 3. All buffers have correct shapes + 4. The articulation can be simulated + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that fixed base + assert articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 24) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) + + # -- actuator type + for actuator_name, actuator in articulation.actuators.items(): + is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) + assert actuator.is_implicit_model == is_implicit_model_cfg + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_initialization_floating_base_made_fixed_base( + sim, num_articulations, device, add_ground_plane, articulation_type +): + """Test initialization for a floating-base articulation made fixed-base using schema properties. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is fixed base after modification + 3. All buffers have correct shapes + 4. The articulation maintains its default state + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type).copy() + # Fix root link by making it kinematic + articulation_cfg.spawn.articulation_props.fix_root_link = True + articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that is fixed base + assert articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 12) + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + # check that the root is at the correct state - its default state as it is fixed base + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations + + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_initialization_fixed_base_made_floating_base( + sim, num_articulations, device, add_ground_plane, articulation_type +): + """Test initialization for fixed base made floating-base using schema properties. + + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation is floating base after modification + 3. All buffers have correct shapes + 4. The articulation can be simulated + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + # Unfix root link by making it non-kinematic + articulation_cfg.spawn.articulation_props.fix_root_link = False + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that is floating base + assert not articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 9) + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_out_of_range_default_joint_pos(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test that the default joint position from configuration is out of range. + + This test verifies that: + 1. The articulation fails to initialize when joint positions are out of range + 2. The error is properly handled + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + # Create articulation + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type).copy() + articulation_cfg.init_state.joint_pos = { + "panda_joint1": 10.0, + "panda_joint[2, 4]": -20.0, + } + + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + with pytest.raises(ValueError): + sim.reset() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_out_of_range_default_joint_vel(sim, device, articulation_type): + """Test that the default joint velocity from configuration is out of range. + + This test verifies that: + 1. The articulation fails to initialize when joint velocities are out of range + 2. The error is properly handled + """ + articulation_cfg = FRANKA_PANDA_CFG.replace(prim_path="/World/Robot") + articulation_cfg.init_state.joint_vel = { + "panda_joint1": 100.0, + "panda_joint[2, 4]": -60.0, + } + articulation = Articulation(articulation_cfg) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + with pytest.raises(ValueError): + sim.reset() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_joint_pos_limits(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test write_joint_limits_to_sim API and when default pos falls outside of the new limits. + + This test verifies that: + 1. Joint limits can be set correctly + 2. Default positions are preserved when setting new limits + 3. Joint limits can be set with indexing + 4. Invalid joint positions are properly handled + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + # Create articulation + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device) + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + + # Get current default joint pos + default_joint_pos = articulation._data.default_joint_pos.torch.clone() + + # Set new joint limits + limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) + limits[..., 0] = (torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0) * -1.0 + limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0 + articulation.write_joint_position_limit_to_sim_index(limits=limits) + + # Check new limits are in place + torch.testing.assert_close(articulation._data.joint_pos_limits.torch, limits) + torch.testing.assert_close(articulation._data.default_joint_pos.torch, default_joint_pos) + + # Set new joint limits with indexing + env_ids = torch.arange(1, device=device, dtype=torch.int32) + joint_ids = torch.arange(2, device=device, dtype=torch.int32) + limits = torch.zeros(env_ids.shape[0], joint_ids.shape[0], 2, device=device) + limits[..., 0] = (torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) + 5.0) * -1.0 + limits[..., 1] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) + 5.0 + articulation.write_joint_position_limit_to_sim_index(limits=limits, env_ids=env_ids, joint_ids=joint_ids) + + # Check new limits are in place + torch.testing.assert_close(articulation._data.joint_pos_limits.torch[env_ids][:, joint_ids], limits) + torch.testing.assert_close(articulation._data.default_joint_pos.torch, default_joint_pos) + + # Set new joint limits that invalidate default joint pos + limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) + limits[..., 0] = torch.rand(num_articulations, articulation.num_joints, device=device) * -0.1 + limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) * 0.1 + articulation.write_joint_position_limit_to_sim_index(limits=limits) + + # Check if all values are within the bounds + default_joint_pos_torch = articulation._data.default_joint_pos.torch + within_bounds = (default_joint_pos_torch >= limits[..., 0]) & (default_joint_pos_torch <= limits[..., 1]) + assert torch.all(within_bounds) + + # Set new joint limits that invalidate default joint pos with indexing + limits = torch.zeros(env_ids.shape[0], joint_ids.shape[0], 2, device=device) + limits[..., 0] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) * -0.1 + limits[..., 1] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) * 0.1 + articulation.write_joint_position_limit_to_sim_index(limits=limits, env_ids=env_ids, joint_ids=joint_ids) + + # Check if all values are within the bounds + default_joint_pos_torch = articulation._data.default_joint_pos.torch + within_bounds = (default_joint_pos_torch[env_ids][:, joint_ids] >= limits[..., 0]) & ( + default_joint_pos_torch[env_ids][:, joint_ids] <= limits[..., 1] + ) + assert torch.all(within_bounds) + + +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_joint_effort_limits(sim, num_articulations, device, add_ground_plane, articulation_type): + """Validate joint effort limits via joint_effort_out_of_limit().""" + # Create articulation + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device) + + # Minimal env wrapper exposing scene["robot"] + class _Env: + def __init__(self, art): + self.scene = {"robot": art} + + env = _Env(articulation) + robot_all = SceneEntityCfg(name="robot") + + sim.reset() + assert articulation.is_initialized + + # Case A: no clipping → should NOT terminate + articulation._data.computed_torque.torch.zero_() + articulation._data.applied_torque.torch.zero_() + out = joint_effort_out_of_limit(env, robot_all) # [N] + assert torch.all(~out) + + # Case B: simulate clipping → should terminate + articulation._data.computed_torque.torch.fill_(100.0) # pretend controller commanded 100 + articulation._data.applied_torque.torch.fill_(50.0) # pretend actuator clipped to 50 + out = joint_effort_out_of_limit(env, robot_all) # [N] + assert torch.all(out) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_external_force_buffer(sim, num_articulations, device, articulation_type): + """Test if external force buffer correctly updates in the force value is zero case. + + This test verifies that: + 1. External forces can be applied correctly + 2. Force buffers are updated properly + 3. Zero forces are handled correctly + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # play the simulator + sim.reset() + + # find bodies to apply the force + body_ids, _ = articulation.find_bodies("base") + + # reset root state + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) + + # reset dof state + joint_pos, joint_vel = ( + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, + ) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) + + # reset articulation + articulation.reset() + + # perform simulation + for step in range(5): + # initiate force tensor + external_wrench_b = torch.zeros(articulation.num_instances, len(body_ids), 6, device=sim.device) + + if step == 0 or step == 3: + # set a non-zero force + force = 1 + else: + # set a zero force + force = 0 + + # set force value + external_wrench_b[:, :, 0] = force + external_wrench_b[:, :, 3] = force + + # apply force + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + body_ids=body_ids, + ) + + # check if the articulation's force and torque buffers are correctly updated + for i in range(num_articulations): + assert articulation.permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert articulation.permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force + + # Check if the instantaneous wrench is correctly added to the permanent wrench + articulation.instantaneous_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + body_ids=body_ids, + ) + + # apply action to the articulation + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) + articulation.write_data_to_sim() + + # perform step + sim.step() + + # update buffers + articulation.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_external_force_on_single_body(sim, num_articulations, device, articulation_type): + """Test application of external force on the base of the articulation. + + This test verifies that: + 1. External forces can be applied to specific bodies + 2. The forces affect the articulation's motion correctly + 3. The articulation responds to the forces as expected + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, _ = articulation.find_bodies("base") + # Sample a large force + external_wrench_b = torch.zeros(articulation.num_instances, len(body_ids), 6, device=sim.device) + external_wrench_b[..., 1] = 100.0 + + # Now we are ready! + for _ in range(5): + # reset root state + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) + # reset dof state + joint_pos, joint_vel = ( + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, + ) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) + # reset articulation + articulation.reset() + # apply force + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids + ) + # perform simulation + for _ in range(100): + # apply action to the articulation + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) + articulation.write_data_to_sim() + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + # check condition that the articulations have fallen down + for i in range(num_articulations): + assert articulation.data.root_pos_w.torch[i, 2].item() < 0.2 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_external_force_on_single_body_at_position(sim, num_articulations, device, articulation_type): + """Test application of external force on the base of the articulation at a given position. + + This test verifies that: + 1. External forces can be applied to specific bodies at a given position + 2. External forces can be applied to specific bodies in the global frame + 3. External forces are calculated and composed correctly + 4. The forces affect the articulation's motion correctly + 5. The articulation responds to the forces as expected + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, _ = articulation.find_bodies("base") + # Sample a large force + external_wrench_b = torch.zeros(articulation.num_instances, len(body_ids), 6, device=sim.device) + external_wrench_b[..., 2] = 100.0 + external_wrench_positions_b = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + external_wrench_positions_b[..., 1] = 1.0 + + desired_force = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + desired_force[..., 2] = 200.0 + desired_torque = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + desired_torque[..., 0] = 200.0 + + # Now we are ready! + for i in range(5): + # reset root state + root_pose = articulation.data.default_root_pose.torch.clone() + root_pose[0, 0] = 2.5 # space them apart by 2.5m + + articulation.write_root_pose_to_sim_index(root_pose=root_pose) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) + # reset dof state + joint_pos, joint_vel = ( + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, + ) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) + # reset articulation + articulation.reset() + # apply force + is_global = False + + if i % 2 == 0: + body_com_pos_w = articulation.data.body_com_pos_w.torch[:, body_ids, :3] + # is_global = True + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + external_wrench_positions_b += body_com_pos_w + else: + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + articulation.permanent_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + # perform simulation + for _ in range(100): + # apply action to the articulation + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) + articulation.write_data_to_sim() + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + # check condition that the articulations have fallen down + for i in range(num_articulations): + assert articulation.data.root_pos_w.torch[i, 2].item() < 0.2 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_external_force_on_multiple_bodies(sim, num_articulations, device, articulation_type): + """Test application of external force on the legs of the articulation. + + This test verifies that: + 1. External forces can be applied to multiple bodies + 2. The forces affect the articulation's motion correctly + 3. The articulation responds to the forces as expected + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, _ = articulation.find_bodies(".*_SHANK") + # Sample a large force + external_wrench_b = torch.zeros(articulation.num_instances, len(body_ids), 6, device=sim.device) + external_wrench_b[..., 1] = 200.0 + + # Now we are ready! + for _ in range(5): + # reset root state + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) + # reset dof state + joint_pos, joint_vel = ( + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, + ) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) + # reset articulation + articulation.reset() + # apply force + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids + ) + # perform simulation + for _ in range(100): + # apply action to the articulation + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) + articulation.write_data_to_sim() + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + # check condition + for i in range(num_articulations): + # since there is a moment applied on the articulation, the articulation should rotate + assert articulation.data.root_ang_vel_w.torch[i, 2].item() > 0.1 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_external_force_on_multiple_bodies_at_position(sim, num_articulations, device, articulation_type): + """Test application of external force on the legs of the articulation at a given position. + + This test verifies that: + 1. External forces can be applied to multiple bodies at a given position + 2. External forces can be applied to multiple bodies in the global frame + 3. External forces are calculated and composed correctly + 4. The forces affect the articulation's motion correctly + 5. The articulation responds to the forces as expected + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, _ = articulation.find_bodies(".*_SHANK") + # Sample a large force + external_wrench_b = torch.zeros(articulation.num_instances, len(body_ids), 6, device=sim.device) + external_wrench_b[..., 2] = 100.0 + external_wrench_positions_b = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + external_wrench_positions_b[..., 1] = 1.0 + + desired_force = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + desired_force[..., 2] = 200.0 + desired_torque = torch.zeros(articulation.num_instances, len(body_ids), 3, device=sim.device) + desired_torque[..., 0] = 200.0 + + # Now we are ready! + for i in range(5): + # reset root state + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) + # reset dof state + joint_pos, joint_vel = ( + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, + ) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) + # reset articulation + articulation.reset() + + is_global = False + if i % 2 == 0: + body_com_pos_w = articulation.data.body_com_pos_w.torch[:, body_ids, :3] + is_global = True + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + external_wrench_positions_b += body_com_pos_w + else: + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + + # apply force + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + articulation.permanent_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + # perform simulation + for _ in range(100): + # apply action to the articulation + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) + articulation.write_data_to_sim() + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + # check condition + for i in range(num_articulations): + # since there is a moment applied on the articulation, the articulation should rotate + assert torch.abs(articulation.data.root_ang_vel_w.torch[i, 2]).item() > 0.1 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_loading_gains_from_usd(sim, num_articulations, device, articulation_type): + """Test that gains are loaded from USD file if actuator model has them as None. + + This test verifies that: + 1. Gains are loaded correctly from USD file + 2. Default gains are applied when not specified + 3. The gains match the expected values + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type, stiffness=None, damping=None) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) + + # Play sim + sim.reset() + + # Expected gains + # -- Stiffness values + expected_stiffness = { + ".*_waist.*": 20.0, + ".*_upper_arm.*": 10.0, + "pelvis": 10.0, + ".*_lower_arm": 2.0, + ".*_thigh:0": 10.0, + ".*_thigh:1": 20.0, + ".*_thigh:2": 10.0, + ".*_shin": 5.0, + ".*_foot.*": 2.0, + } + indices_list, _, values_list = string_utils.resolve_matching_names_values( + expected_stiffness, articulation.joint_names + ) + expected_stiffness = torch.zeros(articulation.num_instances, articulation.num_joints, device=articulation.device) + expected_stiffness[:, indices_list] = torch.tensor(values_list, device=articulation.device) + # -- Damping values + expected_damping = { + ".*_waist.*": 5.0, + ".*_upper_arm.*": 5.0, + "pelvis": 5.0, + ".*_lower_arm": 1.0, + ".*_thigh:0": 5.0, + ".*_thigh:1": 5.0, + ".*_thigh:2": 5.0, + ".*_shin": 0.1, + ".*_foot.*": 1.0, + } + indices_list, _, values_list = string_utils.resolve_matching_names_values( + expected_damping, articulation.joint_names + ) + expected_damping = torch.zeros_like(expected_stiffness) + expected_damping[:, indices_list] = torch.tensor(values_list, device=articulation.device) + + # Check that gains are loaded from USD file + torch.testing.assert_close(articulation.actuators["body"].stiffness, expected_stiffness) + torch.testing.assert_close(articulation.actuators["body"].damping, expected_damping) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_setting_gains_from_cfg(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test that gains are loaded from the configuration correctly. + + This test verifies that: + 1. Gains are loaded correctly from configuration + 2. The gains match the expected values + 3. The gains are applied correctly to the actuators + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=sim.device + ) + + # Play sim + sim.reset() + + # Expected gains + expected_stiffness = torch.full( + (articulation.num_instances, articulation.num_joints), 10.0, device=articulation.device + ) + expected_damping = torch.full_like(expected_stiffness, 2.0) + + # Check that gains are loaded from USD file + torch.testing.assert_close(articulation.actuators["body"].stiffness, expected_stiffness) + torch.testing.assert_close(articulation.actuators["body"].damping, expected_damping) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_setting_gains_from_cfg_dict(sim, num_articulations, device, articulation_type): + """Test that gains are loaded from the configuration dictionary correctly. + + This test verifies that: + 1. Gains are loaded correctly from configuration dictionary + 2. The gains match the expected values + 3. The gains are applied correctly to the actuators + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + """ + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=sim.device + ) + # Play sim + sim.reset() + + # Expected gains + expected_stiffness = torch.full( + (articulation.num_instances, articulation.num_joints), 10.0, device=articulation.device + ) + expected_damping = torch.full_like(expected_stiffness, 2.0) + + # Check that gains are loaded from USD file + torch.testing.assert_close(articulation.actuators["body"].stiffness, expected_stiffness) + torch.testing.assert_close(articulation.actuators["body"].damping, expected_damping) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("vel_limit_sim", [1e5, None]) +@pytest.mark.parametrize("vel_limit", [1e2, None]) +@pytest.mark.parametrize("add_ground_plane", [False]) +@pytest.mark.parametrize("articulation_type", ["single_joint_implicit"]) +def test_setting_velocity_limit_implicit( + sim, num_articulations, device, vel_limit_sim, vel_limit, add_ground_plane, articulation_type +): + """Test setting of velocity limit for implicit actuators. + + This test verifies that: + 1. Velocity limits can be set correctly for implicit actuators + 2. The limits are applied correctly to the simulation + 3. The limits are handled correctly when both sim and non-sim limits are set + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + vel_limit_sim: The velocity limit to set in simulation + vel_limit: The velocity limit to set in actuator + """ + # create simulation + articulation_cfg = generate_articulation_cfg( + articulation_type=articulation_type, + velocity_limit_sim=vel_limit_sim, + velocity_limit=vel_limit, + ) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, + num_articulations=num_articulations, + device=device, + ) + # Play sim + if vel_limit_sim is not None and vel_limit is not None: + with pytest.raises(ValueError): + sim.reset() + return + sim.reset() + + # read the values set into the simulation + newton_vel_limit = wp.to_torch( + articulation.root_view.get_attribute("joint_velocity_limit", SimulationManager.get_model()) + ).to(device)[:, 0, :] + # check data buffer + torch.testing.assert_close(articulation.data.joint_velocity_limits.torch, newton_vel_limit) + # check actuator has simulation velocity limit + torch.testing.assert_close(articulation.actuators["joint"].velocity_limit_sim, newton_vel_limit) + # check that both values match for velocity limit + torch.testing.assert_close( + articulation.actuators["joint"].velocity_limit_sim, + articulation.actuators["joint"].velocity_limit, + ) + + if vel_limit_sim is None: + # Case 2: both velocity limit and velocity limit sim are not set + # This is the case where the velocity limit keeps its USD default value + # Case 3: velocity limit sim is not set but velocity limit is set + # For backwards compatibility, we do not set velocity limit to simulation + # Thus, both default to USD default value. + limit = articulation_cfg.spawn.joint_drive_props.max_velocity + else: + # Case 4: only velocity limit sim is set + # In this case, the velocity limit is set to the USD value + limit = vel_limit_sim + + # check max velocity is what we set + expected_velocity_limit = torch.full_like(newton_vel_limit, limit) + torch.testing.assert_close(newton_vel_limit, expected_velocity_limit) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("vel_limit_sim", [1e5, None]) +@pytest.mark.parametrize("vel_limit", [1e2, None]) +@pytest.mark.parametrize("articulation_type", ["single_joint_explicit"]) +def test_setting_velocity_limit_explicit(sim, num_articulations, device, vel_limit_sim, vel_limit, articulation_type): + """Test setting of velocity limit for explicit actuators.""" + articulation_cfg = generate_articulation_cfg( + articulation_type=articulation_type, + velocity_limit_sim=vel_limit_sim, + velocity_limit=vel_limit, + ) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, + num_articulations=num_articulations, + device=device, + ) + # Play sim + sim.reset() + + # collect limit init values + newton_vel_limit = wp.to_torch( + articulation.root_view.get_attribute("joint_velocity_limit", SimulationManager.get_model()) + ).to(device)[:, 0, :] + actuator_vel_limit = articulation.actuators["joint"].velocity_limit + actuator_vel_limit_sim = articulation.actuators["joint"].velocity_limit_sim + + # check data buffer for joint_velocity_limits_sim + torch.testing.assert_close(articulation.data.joint_velocity_limits.torch, newton_vel_limit) + # check actuator velocity_limit_sim is set to physx + torch.testing.assert_close(actuator_vel_limit_sim, newton_vel_limit) + + if vel_limit is not None: + expected_actuator_vel_limit = torch.full( + (articulation.num_instances, articulation.num_joints), + vel_limit, + device=articulation.device, + ) + # check actuator is set + torch.testing.assert_close(actuator_vel_limit, expected_actuator_vel_limit) + # check physx is not velocity_limit + assert not torch.allclose(actuator_vel_limit, newton_vel_limit) + else: + # check actuator velocity_limit is the same as the PhysX default + torch.testing.assert_close(actuator_vel_limit, newton_vel_limit) + + # simulation velocity limit is set to USD value unless user overrides + if vel_limit_sim is not None: + limit = vel_limit_sim + else: + limit = articulation_cfg.spawn.joint_drive_props.max_velocity + # check physx is set to expected value + expected_vel_limit = torch.full_like(newton_vel_limit, limit) + torch.testing.assert_close(newton_vel_limit, expected_vel_limit) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("effort_limit_sim", [1e5, None]) +@pytest.mark.parametrize("effort_limit", [1e2, 80.0, None]) +@pytest.mark.parametrize("articulation_type", ["single_joint_implicit"]) +def test_setting_effort_limit_implicit( + sim, num_articulations, device, effort_limit_sim, effort_limit, articulation_type +): + """Test setting of effort limit for implicit actuators. + + This test verifies the effort limit resolution logic for actuator models implemented in :class:`ActuatorBase`: + - Case 1: If USD value == actuator config value: values match correctly + - Case 2: If USD value != actuator config value: actuator config value is used + - Case 3: If actuator config value is None: USD value is used as default + """ + articulation_cfg = generate_articulation_cfg( + articulation_type=articulation_type, + effort_limit_sim=effort_limit_sim, + effort_limit=effort_limit, + ) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, + num_articulations=num_articulations, + device=device, + ) + # Play sim + if effort_limit_sim is not None and effort_limit is not None: + with pytest.raises(ValueError): + sim.reset() + return + sim.reset() + + # obtain the physx effort limits + newton_effort_limit = wp.to_torch( + articulation.root_view.get_attribute("joint_effort_limit", SimulationManager.get_model()) + ).to(device)[:, 0, :] + + # check that the two are equivalent + torch.testing.assert_close( + articulation.actuators["joint"].effort_limit_sim, + articulation.actuators["joint"].effort_limit, + ) + torch.testing.assert_close(articulation.actuators["joint"].effort_limit_sim, newton_effort_limit) + + # decide the limit based on what is set + if effort_limit_sim is None and effort_limit is None: + limit = articulation_cfg.spawn.joint_drive_props.max_effort + elif effort_limit_sim is not None and effort_limit is None: + limit = effort_limit_sim + elif effort_limit_sim is None and effort_limit is not None: + limit = effort_limit + + # check that the max force is what we set + expected_effort_limit = torch.full_like(newton_effort_limit, limit) + torch.testing.assert_close(newton_effort_limit, expected_effort_limit) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("effort_limit_sim", [1e5, None]) +@pytest.mark.parametrize("effort_limit", [80.0, 1e2, None]) +@pytest.mark.parametrize("articulation_type", ["single_joint_explicit"]) +def test_setting_effort_limit_explicit( + sim, num_articulations, device, effort_limit_sim, effort_limit, articulation_type +): + """Test setting of effort limit for explicit actuators. + + This test verifies the effort limit resolution logic for actuator models implemented in :class:`ActuatorBase`: + - Case 1: If USD value == actuator config value: values match correctly + - Case 2: If USD value != actuator config value: actuator config value is used + - Case 3: If actuator config value is None: USD value is used as default + + """ + + articulation_cfg = generate_articulation_cfg( + articulation_type=articulation_type, + effort_limit_sim=effort_limit_sim, + effort_limit=effort_limit, + ) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, + num_articulations=num_articulations, + device=device, + ) + # Play sim + sim.reset() + + # usd default effort limit is set to 80 + usd_default_effort_limit = 80.0 + + # collect limit init values + newton_effort_limit = wp.to_torch( + articulation.root_view.get_attribute("joint_effort_limit", SimulationManager.get_model()) + ).to(device)[:, 0, :] + actuator_effort_limit = articulation.actuators["joint"].effort_limit + actuator_effort_limit_sim = articulation.actuators["joint"].effort_limit_sim + + # check actuator effort_limit_sim is set to physx + torch.testing.assert_close(actuator_effort_limit_sim, newton_effort_limit) + + if effort_limit is not None: + expected_actuator_effort_limit = torch.full_like(actuator_effort_limit, effort_limit) + # check actuator is set + torch.testing.assert_close(actuator_effort_limit, expected_actuator_effort_limit) + + # check physx effort limit does not match the one explicit actuator has + assert not (torch.allclose(actuator_effort_limit, newton_effort_limit)) + else: + # When effort_limit is None, actuator should use USD default values + expected_actuator_effort_limit = torch.full_like(newton_effort_limit, usd_default_effort_limit) + torch.testing.assert_close(actuator_effort_limit, expected_actuator_effort_limit) + + # when using explicit actuators, the limits are set to high unless user overrides + if effort_limit_sim is not None: + limit = effort_limit_sim + else: + limit = ActuatorBase._DEFAULT_MAX_EFFORT_SIM # type: ignore + # check physx internal value matches the expected sim value + expected_effort_limit = torch.full_like(newton_effort_limit, limit) + torch.testing.assert_close(actuator_effort_limit_sim, expected_effort_limit) + torch.testing.assert_close(newton_effort_limit, expected_effort_limit) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_reset(sim, num_articulations, device, articulation_type): + """Test that reset method works properly.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + # Now we are ready! + # reset articulation + articulation.reset() + + # Reset should zero external forces and torques + assert not articulation._instantaneous_wrench_composer.active + assert not articulation._permanent_wrench_composer.active + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque.torch) == 0 + + if num_articulations > 1: + num_bodies = articulation.num_bodies + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=torch.ones((num_articulations, num_bodies, 3), device=device), + torques=torch.ones((num_articulations, num_bodies, 3), device=device), + ) + articulation.instantaneous_wrench_composer.add_forces_and_torques_index( + forces=torch.ones((num_articulations, num_bodies, 3), device=device), + torques=torch.ones((num_articulations, num_bodies, 3), device=device), + ) + articulation.reset(env_ids=torch.tensor([0], device=device)) + assert articulation._instantaneous_wrench_composer.active + assert articulation._permanent_wrench_composer.active + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque.torch) == num_bodies * 3 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_apply_joint_command(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test applying of joint position target functions correctly for a robotic arm.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + for _ in range(100): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + # reset dof state + joint_pos = articulation.data.default_joint_pos.torch.clone() + joint_pos[:, 3] = 0.0 + + # apply action to the articulation + articulation.set_joint_position_target_index(target=joint_pos) + articulation.write_data_to_sim() + + for _ in range(100): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + # Check that current joint position is not the same as default joint position, meaning + # the articulation moved. We can't check that it reached its desired joint position as the gains + # are not properly tuned + assert not torch.allclose(articulation.data.joint_pos.torch, joint_pos) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +@pytest.mark.parametrize("articulation_type", ["single_joint_implicit"]) +def test_body_root_state(sim, num_articulations, device, with_offset, articulation_type): + """Test for reading the `body_state_w` property. + + This test verifies that: + 1. Body states can be read correctly + 2. States are correct with and without offsets + 3. States are consistent across different devices + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + with_offset: Whether to test with offset + """ + sim._app_control_on_stop_handle = None + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10, "Possible reference leak for articulation" + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized, "Articulation is not initialized" + # Check that fixed base + assert articulation.is_fixed_base, "Articulation is not a fixed base" + + # Resolve body indices by name (ordering may differ across physics backends) + root_idx = articulation.body_names.index("CenterPivot") + arm_idx = articulation.body_names.index("Arm") + + # change center of mass offset from link frame + if with_offset: + offset = [0.5, 0.0, 0.0] + else: + offset = [0.0, 0.0, 0.0] + + # create com offsets — apply offset to the Arm body + num_bodies = articulation.num_bodies + com = wp.to_torch(articulation.root_view.get_attribute("body_com", SimulationManager.get_model())) + link_offset = [1.0, 0.0, 0.0] # the offset from CenterPivot to Arm frames + new_com = torch.tensor(offset, device=device).repeat(num_articulations, 1, 1) + com[:, 0, arm_idx, :] = new_com.squeeze(-2) + articulation.root_view.set_attribute("body_com", SimulationManager.get_model(), wp.from_torch(com, dtype=wp.vec3f)) + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check they are set + torch.testing.assert_close( + wp.to_torch(articulation.root_view.get_attribute("body_com", SimulationManager.get_model())), com + ) + + for i in range(50): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + # get state properties + root_link_pose_w = articulation.data.root_link_pose_w.torch + root_link_vel_w = articulation.data.root_link_vel_w.torch + root_com_pose_w = articulation.data.root_com_pose_w.torch + root_com_vel_w = articulation.data.root_com_vel_w.torch + body_link_pose_w = articulation.data.body_link_pose_w.torch + body_link_vel_w = articulation.data.body_link_vel_w.torch + body_com_pose_w = articulation.data.body_com_pose_w.torch + body_com_vel_w = articulation.data.body_com_vel_w.torch + + if with_offset: + # get joint state + joint_pos = articulation.data.joint_pos.torch.unsqueeze(-1) + joint_vel = articulation.data.joint_vel.torch.unsqueeze(-1) + + # LINK state + # angular velocity should be the same for both COM and link frames + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) + + # lin_vel arm + lin_vel_gt = torch.zeros(num_articulations, num_bodies, 3, device=device) + vx = -(link_offset[0]) * joint_vel * torch.sin(joint_pos) + vy = torch.zeros(num_articulations, 1, 1, device=device) + vz = (link_offset[0]) * joint_vel * torch.cos(joint_pos) + lin_vel_gt[:, arm_idx, :] = torch.cat([vx, vy, vz], dim=-1).squeeze(-2) + + # linear velocity of root link should be zero + torch.testing.assert_close(lin_vel_gt[:, root_idx, :], root_link_vel_w[..., :3], atol=1e-3, rtol=1e-1) + # linear velocity of pendulum link should be + torch.testing.assert_close(lin_vel_gt, body_link_vel_w[..., :3], atol=1e-3, rtol=1e-1) + + # ang_vel + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) + + # COM state + # position and orientation shouldn't match for the _state_com_w but everything else will + pos_gt = torch.zeros(num_articulations, num_bodies, 3, device=device) + px = (link_offset[0] + offset[0]) * torch.cos(joint_pos) + py = torch.zeros(num_articulations, 1, 1, device=device) + pz = (link_offset[0] + offset[0]) * torch.sin(joint_pos) + pos_gt[:, arm_idx, :] = torch.cat([px, py, pz], dim=-1).squeeze(-2) + pos_gt += env_pos.unsqueeze(-2).repeat(1, num_bodies, 1) + torch.testing.assert_close(pos_gt[:, root_idx, :], root_com_pose_w[..., :3], atol=1e-3, rtol=1e-1) + torch.testing.assert_close(pos_gt, body_com_pose_w[..., :3], atol=1e-3, rtol=1e-1) + + # orientation + com_quat_b = articulation.data.body_com_quat_b.torch + com_quat_w = math_utils.quat_mul(body_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, body_com_pose_w[..., 3:]) + torch.testing.assert_close(com_quat_w[:, root_idx, :], root_com_pose_w[..., 3:]) + + # angular velocity should be the same for both COM and link frames + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) + else: + # single joint center of masses are at link frames so they will be the same + torch.testing.assert_close(root_link_pose_w, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_com_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +@pytest.mark.parametrize("state_location", ["com", "link"]) +@pytest.mark.parametrize("gravity_enabled", [False]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_write_root_state( + sim, num_articulations, device, with_offset, state_location, gravity_enabled, articulation_type +): + """Test the setters for root_state using both the link frame and center of mass as reference frame. + + This test verifies that: + 1. Root states can be written correctly + 2. States are correct with and without offsets + 3. States can be written for both COM and link frames + 4. States are consistent across different devices + + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + with_offset: Whether to test with offset + state_location: Whether to test COM or link frame + """ + sim._app_control_on_stop_handle = None + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) + env_idx = torch.tensor([x for x in range(num_articulations)], device=device, dtype=torch.int32) + + # Play sim + sim.reset() + + # Resolve root body index by name (ordering may differ across physics backends) + root_idx = articulation.find_bodies("base")[0][0] + + # change center of mass offset from link frame + if with_offset: + offset = torch.tensor([1.0, 0.0, 0.0]).repeat(num_articulations, 1, 1) + else: + offset = torch.tensor([0.0, 0.0, 0.0]).repeat(num_articulations, 1, 1) + + # create com offsets + com = wp.to_torch(articulation.root_view.get_attribute("body_com", SimulationManager.get_model())) + new_com = offset + com[:, 0, root_idx, :] = new_com.squeeze(-2) + articulation.root_view.set_attribute("body_com", SimulationManager.get_model(), wp.from_torch(com, dtype=wp.vec3f)) + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check they are set + torch.testing.assert_close( + wp.to_torch(articulation.root_view.get_attribute("body_com", SimulationManager.get_model())), com + ) + + rand_state = torch.zeros(num_articulations, 13, device=device) + rand_state[..., :7] = articulation.data.default_root_pose.torch + rand_state[..., :3] += env_pos + # make quaternion a unit vector + rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) + + env_idx = env_idx.to(device) + for i in range(10): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + if state_location == "com": + if i % 2 == 0: + articulation.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + articulation.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + else: + articulation.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + articulation.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) + elif state_location == "link": + if i % 2 == 0: + articulation.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + articulation.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + else: + articulation.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + articulation.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) + + if state_location == "com": + torch.testing.assert_close(rand_state[..., :7], articulation.data.root_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], articulation.data.root_com_vel_w.torch) + elif state_location == "link": + torch.testing.assert_close(rand_state[..., :7], articulation.data.root_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], articulation.data.root_link_vel_w.torch) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_setting_articulation_root_prim_path(sim, device, articulation_type): + """Test that the articulation root prim path can be set explicitly.""" + sim._app_control_on_stop_handle = None + # Create articulation + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation_cfg.articulation_root_prim_path = "/torso" + articulation, _ = generate_articulation(articulation_cfg, 1, device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation._is_initialized + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["humanoid"]) +def test_setting_invalid_articulation_root_prim_path(sim, device, articulation_type): + """Test that the articulation root prim path can be set explicitly.""" + sim._app_control_on_stop_handle = None + # Create articulation + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation_cfg.articulation_root_prim_path = "/non_existing_prim_path" + articulation, _ = generate_articulation(articulation_cfg, 1, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + with pytest.raises((RuntimeError, KeyError)): + sim.reset() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("gravity_enabled", [False]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_write_joint_state_data_consistency(sim, num_articulations, device, gravity_enabled, articulation_type): + """Test the setters for root_state using both the link frame and center of mass as reference frame. + + This test verifies that after write_joint_state_to_sim operations: + 1. state, com_state, link_state value consistency + 2. body_pose, link + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + sim._app_control_on_stop_handle = None + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) + env_idx = torch.tensor([x for x in range(num_articulations)]) + + # Play sim + sim.reset() + + limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) + limits[..., 0] = (torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0) * -1.0 + limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0 + articulation.write_joint_position_limit_to_sim_index(limits=limits) + + from torch.distributions import Uniform + + joint_pos_limits = articulation.data.joint_pos_limits.torch + joint_vel_limits = articulation.data.joint_vel_limits.torch + pos_dist = Uniform(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) + vel_dist = Uniform(-joint_vel_limits, joint_vel_limits) + + original_body_link_pose_w = articulation.data.body_link_pose_w.torch.clone() + original_body_com_vel_w = articulation.data.body_com_vel_w.torch.clone() + + rand_joint_pos = pos_dist.sample() + rand_joint_vel = vel_dist.sample() + + articulation.write_joint_position_to_sim_index(position=rand_joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=rand_joint_vel) + # make sure valued updated + body_link_pose_w = articulation.data.body_link_pose_w.torch + body_com_vel_w = articulation.data.body_com_vel_w.torch + original_body_states = torch.cat([original_body_link_pose_w, original_body_com_vel_w], dim=-1) + body_state_w = torch.cat([body_link_pose_w, body_com_vel_w], dim=-1) + assert torch.count_nonzero(original_body_states[:, 1:] != body_state_w[:, 1:]) > ( + len(original_body_states[:, 1:]) / 2 + ) + # validate body - link consistency + body_link_vel_w = articulation.data.body_link_vel_w.torch + torch.testing.assert_close(body_link_pose_w, articulation.data.body_link_pose_w.torch) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking + # if velocity update is triggered, which can be determined by comparing angular velocity + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) + + # validate link - com conistency + body_com_pos_b = articulation.data.body_com_pos_b.torch + body_com_quat_b = articulation.data.body_com_quat_b.torch + expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:].view(-1, 4), + body_com_pos_b.view(-1, 3), + body_com_quat_b.view(-1, 4), + ) + body_com_pos_w = articulation.data.body_com_pos_w.torch + body_com_quat_w = articulation.data.body_com_quat_w.torch + torch.testing.assert_close(expected_com_pos.view(len(env_idx), -1, 3), body_com_pos_w) + torch.testing.assert_close(expected_com_quat.view(len(env_idx), -1, 4), body_com_quat_w) + + # validate body - com consistency + body_com_lin_vel_w = articulation.data.body_com_lin_vel_w.torch + body_com_ang_vel_w = articulation.data.body_com_ang_vel_w.torch + torch.testing.assert_close(body_com_vel_w[..., :3], body_com_lin_vel_w) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_com_ang_vel_w) + + # validate pos_w, quat_w, pos_b, quat_b is consistent with pose_w and pose_b + expected_com_pose_w = torch.cat((body_com_pos_w, body_com_quat_w), dim=2) + expected_com_pose_b = torch.cat((body_com_pos_b, body_com_quat_b), dim=2) + body_pos_w = articulation.data.body_pos_w.torch + body_quat_w = articulation.data.body_quat_w.torch + expected_body_pose_w = torch.cat((body_pos_w, body_quat_w), dim=2) + body_link_pos_w = articulation.data.body_link_pos_w.torch + body_link_quat_w = articulation.data.body_link_quat_w.torch + expected_body_link_pose_w = torch.cat((body_link_pos_w, body_link_quat_w), dim=2) + body_com_pose_w = articulation.data.body_com_pose_w.torch + body_com_pose_b = articulation.data.body_com_pose_b.torch + body_pose_w = articulation.data.body_pose_w.torch + body_link_pose_w_fresh = articulation.data.body_link_pose_w.torch + torch.testing.assert_close(body_com_pose_w, expected_com_pose_w) + torch.testing.assert_close(body_com_pose_b, expected_com_pose_b) + torch.testing.assert_close(body_pose_w, expected_body_pose_w) + torch.testing.assert_close(body_link_pose_w_fresh, expected_body_link_pose_w) + + # validate pose_w is consistent with individual properties + body_vel_w = articulation.data.body_vel_w.torch + body_com_vel_w_fresh = articulation.data.body_com_vel_w.torch + torch.testing.assert_close(body_pose_w, body_link_pose_w) + torch.testing.assert_close(body_vel_w, body_com_vel_w) + torch.testing.assert_close(body_link_pose_w_fresh, body_link_pose_w) + torch.testing.assert_close(body_com_pose_w, articulation.data.body_com_pose_w.torch) + torch.testing.assert_close(body_vel_w, body_com_vel_w_fresh) + + +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["shadow_hand"]) +@pytest.mark.skip(reason="Spatial tendons are not supported in Newton yet.") +def test_spatial_tendons(sim, num_articulations, device, articulation_type): + """Test spatial tendons apis. + This test verifies that: + 1. The articulation is properly initialized + 2. The articulation has spatial tendons + 3. All buffers have correct shapes + 4. The articulation can be simulated + Args: + sim: The simulation fixture + num_articulations: Number of articulations to test + device: The device to run the simulation on + """ + # skip test if Isaac Sim version is less than 5.0 + if has_kit() and get_isaac_sim_version().major < 5: + pytest.skip("Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later.") + return + articulation_cfg = generate_articulation_cfg(articulation_type="spatial_tendon_test_asset") + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 + + # Play sim + sim.reset() + # Check if articulation is initialized + assert articulation.is_initialized + # Check that fixed base + assert articulation.is_fixed_base + # Check buffers that exists and have correct shapes + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 3) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.num_spatial_tendons == 1 + + articulation.set_spatial_tendon_stiffness_index(stiffness=10.0) + articulation.set_spatial_tendon_limit_stiffness_index(limit_stiffness=10.0) + articulation.set_spatial_tendon_damping_index(damping=10.0) + articulation.set_spatial_tendon_offset_index(offset=10.0) + + # Simulate physics + for _ in range(10): + # perform rendering + sim.step() + # update articulation + articulation.update(sim.cfg.dt) + + +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test applying of joint position target functions correctly for a robotic arm.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + for _ in range(100): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + # apply action to the articulation + friction = torch.rand(num_articulations, articulation.num_joints, device=device) + + # The static friction must be set first to be sure the dynamic friction is not greater than static + # when both are set. + articulation.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=friction, + ) + articulation.write_data_to_sim() + + for _ in range(100): + # perform step + sim.step() + # update buffers + articulation.update(sim.cfg.dt) + + joint_friction_coeff_sim = wp.to_torch( + articulation.root_view.get_attribute("joint_friction", SimulationManager.get_model()) + )[:, 0, :] + assert torch.allclose(joint_friction_coeff_sim, friction) + + # Reset simulator to ensure a clean state for the alternative API path + sim.reset() + + # Warm up a few steps to populate buffers + for _ in range(100): + sim.step() + articulation.update(sim.cfg.dt) + + # New random coefficients + friction_2 = torch.rand(num_articulations, articulation.num_joints, device=device) + + # Use the combined setter to write all three at once + articulation.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=friction_2, + ) + articulation.write_data_to_sim() + + # Step to let sim ingest new params and refresh data buffers + for _ in range(100): + sim.step() + articulation.update(sim.cfg.dt) + + joint_friction_coeff_sim_2 = wp.to_torch( + articulation.root_view.get_attribute("joint_friction", SimulationManager.get_model()) + )[:, 0, :] + + # Validate values propagated + assert torch.allclose(joint_friction_coeff_sim_2, friction_2) + + +@pytest.mark.parametrize("num_articulations", [2]) +@pytest.mark.parametrize("device", ["cuda:0"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_body_q_consistent_after_root_write(num_articulations, device, articulation_type): + """Test that body_q is fresh when collide() runs after a root pose write. + + Regression test for a NaN bug where collide() used stale body_q after env + reset because eval_fk was not called between write_root_pose and collide. + + Uses ``use_mujoco_contacts=False`` so the Newton collision pipeline is + active, then patches ``_simulate_physics_only`` to capture body_q at + the moment collide() is called and asserts it matches joint_q. + """ + from unittest.mock import patch + + sim_cfg = SimulationCfg( + dt=1 / 200, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=70, + nconmax=70, + integrator="implicitfast", + use_mujoco_contacts=False, + ), + num_substeps=1, + use_cuda_graph=False, + ), + ) + with build_simulation_context(sim_cfg=sim_cfg, device=device) as sim: + sim._app_control_on_stop_handle = None + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) + + sim.reset() + + model = SimulationManager.get_model() + jc_starts = model.joint_coord_world_start.numpy() + body_starts = model.body_world_start.numpy() + + for _ in range(5): + sim.step() + articulation.update(sim.cfg.dt) + + # Teleport env 0 by 10m (simulating a reset) + new_pose = articulation.data.default_root_pose.torch.clone() + new_pose[0, 0] += 10.0 + new_pose[0, 1] += 5.0 + articulation.write_root_pose_to_sim_index( + root_pose=new_pose[0:1], + env_ids=torch.tensor([0], device=device, dtype=torch.int32), + ) + + # Patch _simulate_physics_only to capture body_q before collide runs + captured = {} + original_simulate = SimulationManager._simulate_physics_only.__func__ + + @classmethod # type: ignore[misc] + def _patched_simulate(cls): + if cls._needs_collision_pipeline: + bq = wp.to_torch(cls._state_0.body_q) + jq = wp.to_torch(cls._state_0.joint_q) + b0 = int(body_starts[0]) + jc0 = int(jc_starts[0]) + captured["bq_root"] = bq[b0, :3].clone() + captured["jq_root"] = jq[jc0 : jc0 + 3].clone() + original_simulate(cls) + + with patch.object(SimulationManager, "_simulate_physics_only", _patched_simulate): + sim.step() + articulation.update(sim.cfg.dt) + + assert captured, "collision pipeline did not run — _needs_collision_pipeline is False" + + bq_root = captured["bq_root"] + jq_root = captured["jq_root"] + diff = (jq_root - bq_root).abs().max().item() + assert diff < 0.01, ( + f"body_q was stale when collide() ran: diff={diff:.4f}m, jq={jq_root.tolist()}, bq={bq_root.tolist()}" + ) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +def test_set_material_properties(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test getting and setting material properties (friction/restitution) via view-level APIs.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + # Get friction/restitution bindings via view-level API + model = SimulationManager.get_model() + friction_binding = articulation._root_view.get_attribute("shape_material_mu", model)[:, 0] + restitution_binding = articulation._root_view.get_attribute("shape_material_restitution", model)[:, 0] + num_shapes = friction_binding.shape[1] + + # Test 1: Set all shapes via in-place writes to the warp binding + friction = torch.empty(num_articulations, num_shapes, device=device).uniform_(0.4, 0.8) + restitution = torch.empty(num_articulations, num_shapes, device=device).uniform_(0.0, 0.2) + + wp.to_torch(friction_binding)[:] = friction + wp.to_torch(restitution_binding)[:] = restitution + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + # Simulate physics + sim.step() + articulation.update(sim.cfg.dt) + + # Verify by reading back from the binding + mu = wp.to_torch(friction_binding) + restitution_check = wp.to_torch(restitution_binding) + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) + + # Test 2: Set subset of shapes (only shape 0) + if num_shapes > 1: + subset_friction = torch.empty(num_articulations, device=device).uniform_(0.1, 0.2) + subset_restitution = torch.empty(num_articulations, device=device).uniform_(0.5, 0.6) + + wp.to_torch(friction_binding)[:, 0] = subset_friction + wp.to_torch(restitution_binding)[:, 0] = subset_restitution + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + sim.step() + articulation.update(sim.cfg.dt) + + # Check only the subset was updated + mu_updated = wp.to_torch(friction_binding) + restitution_updated = wp.to_torch(restitution_binding) + torch.testing.assert_close(mu_updated[:, 0], subset_friction) + torch.testing.assert_close(restitution_updated[:, 0], subset_restitution) + + +@pytest.mark.parametrize("num_articulations", [2]) +@pytest.mark.parametrize("device", ["cuda:0"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_randomize_rigid_body_com(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test that randomize_rigid_body_com modifies CoM and affects simulation dynamics.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + sim.reset() + assert articulation.is_initialized + + original_com = articulation.data.body_com_pos_b.torch.clone() + + com_offset = torch.zeros(num_articulations, articulation.num_bodies, 3, device=device) + com_offset[..., 0] = 0.5 + new_com = original_com + com_offset + env_ids = torch.arange(num_articulations, device=device, dtype=torch.int32) + articulation.set_coms_index(coms=new_com, env_ids=env_ids) + + updated_com = articulation.data.body_com_pos_b.torch + torch.testing.assert_close(updated_com, new_com, atol=1e-5, rtol=1e-5) + + +@pytest.mark.parametrize("num_articulations", [2]) +@pytest.mark.parametrize("device", ["cuda:0"]) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_randomize_rigid_body_collider_offsets(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test that Newton collider offset randomization (shape_margin, shape_gap) takes effect.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) + + sim.reset() + assert articulation.is_initialized + + model = SimulationManager.get_model() + original_margin = wp.to_torch(articulation.root_view.get_attribute("shape_margin", model)).clone() + original_gap = wp.to_torch(articulation.root_view.get_attribute("shape_gap", model)).clone() + + new_margin = original_margin.clone() + new_margin[:, 0] += 0.01 + articulation.root_view.set_attribute("shape_margin", model, wp.from_torch(new_margin, dtype=wp.float32)) + + new_gap = original_gap.clone() + new_gap[:, 0] += 0.005 + articulation.root_view.set_attribute("shape_gap", model, wp.from_torch(new_gap, dtype=wp.float32)) + + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.SHAPE_PROPERTIES) + + updated_margin = wp.to_torch(articulation.root_view.get_attribute("shape_margin", model)) + updated_gap = wp.to_torch(articulation.root_view.get_attribute("shape_gap", model)) + torch.testing.assert_close(updated_margin, new_margin) + torch.testing.assert_close(updated_gap, new_gap) + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab_newton/test/assets/test_rigid_object.py b/source/isaaclab_newton/test/assets/test_rigid_object.py new file mode 100644 index 000000000000..3b8e3aa9bf85 --- /dev/null +++ b/source/isaaclab_newton/test/assets/test_rigid_object.py @@ -0,0 +1,1261 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import sys +from typing import Literal + +import pytest +import torch +import warp as wp +from flaky import flaky +from isaaclab_newton.assets import RigidObject +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics import NewtonManager as SimulationManager +from newton.solvers import SolverNotifyFlags + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.sim.spawners import materials +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.math import ( + combine_frame_transforms, + default_orientation, + quat_apply_inverse, + quat_inv, + quat_mul, + quat_rotate, + random_orientation, +) + +NEWTON_SIM_CFG = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + ), +) + + +def _newton_sim_context(device, gravity_enabled=True, dt=None, **kwargs): + """Helper to create a Newton simulation context with the correct device. + + When sim_cfg is provided to build_simulation_context, the device, gravity_enabled, and dt + kwargs are ignored. This helper applies them to the shared NEWTON_SIM_CFG before calling. + """ + NEWTON_SIM_CFG.device = device + NEWTON_SIM_CFG.gravity = (0.0, 0.0, -9.81) if gravity_enabled else (0.0, 0.0, 0.0) + if dt is not None: + NEWTON_SIM_CFG.dt = dt + return build_simulation_context(device=device, sim_cfg=NEWTON_SIM_CFG, **kwargs) + + +def generate_cubes_scene( + num_cubes: int = 1, + height=1.0, + api: Literal["none", "rigid_body", "articulation_root"] = "rigid_body", + kinematic_enabled: bool = False, + device: str = "cuda:0", +) -> tuple[RigidObject, torch.Tensor]: + """Generate a scene with the provided number of cubes. + + Args: + num_cubes: Number of cubes to generate. + height: Height of the cubes. + api: The type of API that the cubes should have. + kinematic_enabled: Whether the cubes are kinematic. + device: Device to use for the simulation. + + Returns: + A tuple containing the rigid object representing the cubes and the origins of the cubes. + + """ + origins = torch.tensor([(i * 1.0, 0, height) for i in range(num_cubes)]).to(device) + # Create Top-level Xforms, one for each cube + for i, origin in enumerate(origins): + sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=origin) + + # Resolve spawn configuration + if api == "none": + # since no rigid body properties defined, this is just a static collider + spawn_cfg = sim_utils.CuboidCfg( + size=(0.1, 0.1, 0.1), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + elif api == "rigid_body": + spawn_cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=kinematic_enabled), + ) + elif api == "articulation_root": + spawn_cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Tests/RigidObject/Cube/dex_cube_instanceable_with_articulation_root.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=kinematic_enabled), + ) + else: + raise ValueError(f"Unknown api: {api}") + + # Create rigid object + cube_object_cfg = RigidObjectCfg( + prim_path="/World/Env_.*/Object", + spawn=spawn_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, height)), + ) + cube_object = RigidObject(cfg=cube_object_cfg) + + return cube_object, origins + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization(num_cubes, device): + """Test initialization for prim with rigid body API at the provided prim path.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 + + # Play sim + sim.reset() + + # Check if object is initialized + assert cube_object.is_initialized + assert len(cube_object.body_names) == 1 + + # Check buffers that exists and have correct shapes + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_quat_w.torch.shape == (num_cubes, 4) + assert cube_object.data.body_mass.torch.shape == (num_cubes, 1) + assert cube_object.data.body_inertia.torch.shape == (num_cubes, 1, 9) + + # Simulate physics + for _ in range(2): + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.skip(reason="Newton does not support kinematic rigid bodies") +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization_with_kinematic_enabled(num_cubes, device): + """Test that initialization for prim with kinematic flag enabled.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, origins = generate_cubes_scene(num_cubes=num_cubes, kinematic_enabled=True, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 + + # Play sim + sim.reset() + + # Check if object is initialized + assert cube_object.is_initialized + assert len(cube_object.body_names) == 1 + + # Check buffers that exists and have correct shapes + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_quat_w.torch.shape == (num_cubes, 4) + + # Simulate physics + for _ in range(2): + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + # check that the object is kinematic + default_root_pose = cube_object.data.default_root_pose.torch.clone() + default_root_vel = cube_object.data.default_root_vel.torch.clone() + default_root_pose[:, :3] += origins + torch.testing.assert_close(cube_object.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(cube_object.data.root_com_vel_w.torch, default_root_vel) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization_with_no_rigid_body(num_cubes, device): + """Test that initialization fails when no rigid body is found at the provided prim path.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, api="none", device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 + + # Play sim + with pytest.raises(RuntimeError): + sim.reset() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization_with_articulation_root(num_cubes, device): + """Test that initialization fails when an articulation root is found at the provided prim path.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, api="articulation_root", device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 + + # Play sim + with pytest.raises(RuntimeError): + sim.reset() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_buffer(device): + """Test if external force buffer correctly updates in the force value is zero case. + + In this test, we apply a non-zero force, then a zero force, then finally a non-zero force + to an object. We check if the force buffer is properly updated at each step. + """ + + # Generate cubes scene + with _newton_sim_context(device, add_ground_plane=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, origins = generate_cubes_scene(num_cubes=1, device=device) + + # play the simulator + sim.reset() + + # find bodies to apply the force + body_ids, body_names = cube_object.find_bodies(".*") + + # reset object + cube_object.reset() + + # perform simulation + for step in range(5): + # initiate force tensor + external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device) + + if step == 0 or step == 3: + # set a non-zero force + force = 1 + else: + # set a zero force + force = 0 + + # set force value + external_wrench_b[:, :, 0] = force + external_wrench_b[:, :, 3] = force + + # apply force + cube_object.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + body_ids=body_ids, + ) + + # check if the cube's force and torque buffers are correctly updated + for i in range(cube_object.num_instances): + assert cube_object._permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert cube_object._permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force + + # Check if the instantaneous wrench is correctly added to the permanent wrench + cube_object.permanent_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + body_ids=body_ids, + ) + + # apply action to the object + cube_object.write_data_to_sim() + + # perform step + sim.step() + + # update buffers + cube_object.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [2, 4]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_on_single_body(num_cubes, device): + """Test application of external force on the base of the object. + + In this test, we apply a force equal to the weight of an object on the base of + one of the objects. We check that the object does not move. For the other object, + we do not apply any force and check that it falls down. + + We validate that this works when we apply the force in the global frame and in the local frame. + """ + # Generate cubes scene + with _newton_sim_context(device, add_ground_plane=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, origins = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, body_names = cube_object.find_bodies(".*") + + # Sample a force equal to the weight of the object + external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device) + # Every 2nd cube should have a force applied to it + external_wrench_b[0::2, :, 2] = 9.81 * cube_object.data.body_mass.torch[0] + + # Now we are ready! + for i in range(5): + # reset root state + root_pose = cube_object.data.default_root_pose.torch.clone() + root_vel = cube_object.data.default_root_vel.torch.clone() + + # need to shift the position of the cubes otherwise they will be on top of each other + root_pose[:, :3] = origins + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) + + # reset object + cube_object.reset() + + is_global = False + if i % 2 == 0: + is_global = True + positions = cube_object.data.body_com_pos_w.torch[:, body_ids, :3] + else: + positions = None + + # apply force + cube_object.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=positions, + body_ids=body_ids, + is_global=is_global, + ) + # perform simulation + for _ in range(5): + # apply action to the object + cube_object.write_data_to_sim() + + # perform step + sim.step() + + # update buffers + cube_object.update(sim.cfg.dt) + + # First object should still be at the same Z position (1.0) + torch.testing.assert_close( + cube_object.data.root_pos_w.torch[0::2, 2], torch.ones(num_cubes // 2, device=sim.device) + ) + # Second object should have fallen, so it's Z height should be less than initial height of 1.0 + assert torch.all(cube_object.data.root_pos_w.torch[1::2, 2] < 1.0) + + +@pytest.mark.parametrize("num_cubes", [2, 4]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_on_single_body_at_position(num_cubes, device): + """Test application of external force on the base of the object at a specific position. + + In this test, we apply a force equal to the weight of an object on the base of + one of the objects at 1m in the Y direction, we check that the object rotates around it's X axis. + For the other object, we do not apply any force and check that it falls down. + + We validate that this works when we apply the force in the global frame and in the local frame. + """ + # Generate cubes scene + with _newton_sim_context(device, add_ground_plane=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, origins = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play the simulator + sim.reset() + + # Find bodies to apply the force + body_ids, body_names = cube_object.find_bodies(".*") + + # Sample a force equal to the weight of the object + external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device) + external_wrench_positions_b = torch.zeros(cube_object.num_instances, len(body_ids), 3, device=sim.device) + # Every 2nd cube should have a force applied to it + external_wrench_b[0::2, :, 2] = 50.0 + external_wrench_positions_b[0::2, :, 1] = 1.0 + + # Now we are ready! + for i in range(5): + # reset root state + root_pose = cube_object.data.default_root_pose.torch.clone() + root_vel = cube_object.data.default_root_vel.torch.clone() + + # need to shift the position of the cubes otherwise they will be on top of each other + root_pose[:, :3] = origins + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) + + # reset object + cube_object.reset() + + is_global = False + if i % 2 == 0: + is_global = True + body_com_pos_w = cube_object.data.body_com_pos_w.torch[:, body_ids, :3] + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + external_wrench_positions_b += body_com_pos_w + else: + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + + # apply force + cube_object.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + cube_object.permanent_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=body_ids, + is_global=is_global, + ) + # perform simulation + for _ in range(5): + # apply action to the object + cube_object.write_data_to_sim() + + # perform step + sim.step() + + # update buffers + cube_object.update(sim.cfg.dt) + + # The first object should be rotating around it's X axis + assert torch.all(torch.abs(cube_object.data.root_ang_vel_b.torch[0::2, 0]) > 0.1) + # Second object should have fallen, so it's Z height should be less than initial height of 1.0 + assert torch.all(cube_object.data.root_pos_w.torch[1::2, 2] < 1.0) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_rigid_object_state(num_cubes, device): + """Test setting the state of the rigid object. + + In this test, we set the state of the rigid object to a random state and check + that the object is in that state after simulation. We set gravity to zero as + we don't want any external forces acting on the object to ensure state remains static. + """ + # Turn off gravity for this test as we don't want any external forces acting on the object + # to ensure state remains static + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play the simulator + sim.reset() + + state_types = ["root_pos_w", "root_quat_w", "root_lin_vel_w", "root_ang_vel_w"] + + # Set each state type individually as they are dependent on each other + for state_type_to_randomize in state_types: + state_dict = { + "root_pos_w": torch.zeros_like(cube_object.data.root_pos_w.torch, device=sim.device), + "root_quat_w": default_orientation(num=num_cubes, device=sim.device), + "root_lin_vel_w": torch.zeros_like(cube_object.data.root_lin_vel_w.torch, device=sim.device), + "root_ang_vel_w": torch.zeros_like(cube_object.data.root_ang_vel_w.torch, device=sim.device), + } + + # Now we are ready! + for _ in range(5): + # reset object + cube_object.reset() + + # Set random state + if state_type_to_randomize == "root_quat_w": + state_dict[state_type_to_randomize] = random_orientation(num=num_cubes, device=sim.device) + else: + state_dict[state_type_to_randomize] = torch.randn(num_cubes, 3, device=sim.device) + + # perform simulation + for _ in range(5): + root_pose = torch.cat( + [state_dict["root_pos_w"], state_dict["root_quat_w"]], + dim=-1, + ) + root_vel = torch.cat( + [state_dict["root_lin_vel_w"], state_dict["root_ang_vel_w"]], + dim=-1, + ) + # reset root state + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) + + sim.step() + + # assert that set root quantities are equal to the ones set in the state_dict + for key, expected_value in state_dict.items(): + value = getattr(cube_object.data, key).torch + # Newton reads state directly from sim (not cached), so post-step drift + # from velocity integration causes larger differences than PhysX + torch.testing.assert_close(value, expected_value, rtol=1e-1, atol=1e-1) + + cube_object.update(sim.cfg.dt) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_reset_rigid_object(num_cubes, device): + """Test resetting the state of the rigid object.""" + with _newton_sim_context(device, gravity_enabled=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play the simulator + sim.reset() + + for i in range(5): + # perform rendering + sim.step() + + # update object + cube_object.update(sim.cfg.dt) + + # Move the object to a random position + root_pose = cube_object.data.default_root_pose.torch.clone() + root_pose[:, :3] = torch.randn(num_cubes, 3, device=sim.device) + + # Random orientation + root_pose[:, 3:7] = random_orientation(num=num_cubes, device=sim.device) + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + root_vel = cube_object.data.default_root_vel.torch.clone() + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) + + if i % 2 == 0: + # reset object + cube_object.reset() + + # Reset should zero external forces and torques + assert not cube_object._instantaneous_wrench_composer.active + assert not cube_object._permanent_wrench_composer.active + assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_torque.torch) == 0 + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_rigid_body_set_material_properties(num_cubes, device): + """Test getting and setting material properties of rigid object via view-level APIs.""" + with _newton_sim_context(device, gravity_enabled=True, add_ground_plane=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play sim + sim.reset() + + # Get friction/restitution bindings via view-level API + model = SimulationManager.get_model() + friction_binding = cube_object._root_view.get_attribute("shape_material_mu", model)[:, 0] + restitution_binding = cube_object._root_view.get_attribute("shape_material_restitution", model)[:, 0] + num_shapes = friction_binding.shape[1] + + # Set material properties via in-place writes to the warp binding + friction = torch.empty(num_cubes, num_shapes, device=device).uniform_(0.4, 0.8) + restitution = torch.empty(num_cubes, num_shapes, device=device).uniform_(0.0, 0.2) + + wp.to_torch(friction_binding)[:] = friction + wp.to_torch(restitution_binding)[:] = restitution + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + # Simulate physics + sim.step() + cube_object.update(sim.cfg.dt) + + # Verify by reading back from the binding + mu = wp.to_torch(friction_binding) + restitution_check = wp.to_torch(restitution_binding) + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) + + +def _set_newton_material_properties(cube_object, friction_val, restitution_val, device): + """Helper to set material properties via Newton view-level APIs.""" + model = SimulationManager.get_model() + friction_binding = cube_object._root_view.get_attribute("shape_material_mu", model)[:, 0] + restitution_binding = cube_object._root_view.get_attribute("shape_material_restitution", model)[:, 0] + num_envs = friction_binding.shape[0] + num_shapes = friction_binding.shape[1] + + friction_tensor = torch.full((num_envs, num_shapes), friction_val, device=device) + restitution_tensor = torch.full((num_envs, num_shapes), restitution_val, device=device) + + wp.to_torch(friction_binding)[:] = friction_tensor + wp.to_torch(restitution_binding)[:] = restitution_tensor + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + +@pytest.mark.isaacsim_ci +@pytest.mark.skip(reason="MuJoCo contact at height=0 does not settle the same as PhysX — cube falls on z-axis") +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_rigid_body_no_friction(num_cubes, device): + """Test that a rigid object with no friction will maintain it's velocity when sliding across a plane.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) + + # Create ground plane with near-zero friction + # Note: MuJoCo requires friction >= MJ_MINMU (1e-5), so we use 1e-4 + cfg = sim_utils.GroundPlaneCfg( + physics_material=materials.RigidBodyMaterialCfg( + static_friction=1e-4, + dynamic_friction=1e-4, + restitution=0.0, + ) + ) + cfg.func("/World/GroundPlane", cfg) + + # Play sim + sim.reset() + + # Set material friction to near-zero via view-level API + _set_newton_material_properties(cube_object, friction_val=1e-4, restitution_val=0.0, device=device) + + # Set initial velocity + # Initial velocity in X to get the block moving + initial_velocity = torch.zeros((num_cubes, 6), device=sim.cfg.device) + initial_velocity[:, 0] = 0.1 + + cube_object.write_root_velocity_to_sim_index(root_velocity=initial_velocity) + + # Simulate physics + for _ in range(5): + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + + # Non-deterministic when on GPU, so we use different tolerances + if device == "cuda:0": + tolerance = 1e-2 + else: + tolerance = 1e-5 + + torch.testing.assert_close( + cube_object.data.root_lin_vel_w.torch, initial_velocity[:, :3], rtol=1e-5, atol=tolerance + ) + + +@pytest.mark.isaacsim_ci +@pytest.mark.skip(reason="MuJoCo uses Coulomb friction (single mu), no static/dynamic distinction") +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_rigid_body_with_static_friction(num_cubes, device): + """Test that static friction applied to rigid object works as expected. + + This test works by applying a force to the object and checking if the object moves or not based on the + mu (coefficient of static friction) value set for the object. We set the static friction to be non-zero and + apply a force to the object. When the force applied is below mu, the object should not move. When the force + applied is above mu, the object should move. + """ + with _newton_sim_context(device, dt=0.01, add_ground_plane=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=0.03125, device=device) + + # Create ground plane + static_friction_coefficient = 0.5 + cfg = sim_utils.GroundPlaneCfg( + physics_material=materials.RigidBodyMaterialCfg( + static_friction=static_friction_coefficient, + dynamic_friction=static_friction_coefficient, + ) + ) + cfg.func("/World/GroundPlane", cfg) + + # Play sim + sim.reset() + + # Set friction via view-level API + _set_newton_material_properties( + cube_object, + friction_val=static_friction_coefficient, + restitution_val=0.0, + device=device, + ) + + # let everything settle + for _ in range(100): + sim.step() + cube_object.update(sim.cfg.dt) + cube_object.write_root_velocity_to_sim_index(root_velocity=torch.zeros((num_cubes, 6), device=sim.device)) + cube_mass = cube_object.data.body_mass.torch + gravity_magnitude = abs(sim.cfg.gravity[2]) + # 2 cases: force applied is below and above mu + # below mu: block should not move as the force applied is <= mu + # above mu: block should move as the force applied is > mu + for force in "below_mu", "above_mu": + # set initial velocity to zero + cube_object.write_root_velocity_to_sim_index(root_velocity=torch.zeros((num_cubes, 6), device=sim.device)) + + external_wrench_b = torch.zeros((num_cubes, 1, 6), device=sim.device) + if force == "below_mu": + external_wrench_b[..., 0] = static_friction_coefficient * cube_mass * gravity_magnitude * 0.99 + else: + external_wrench_b[..., 0] = static_friction_coefficient * cube_mass * gravity_magnitude * 1.01 + + cube_object.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + ) + + # Get root state + initial_root_pos = cube_object.data.root_pos_w.torch.clone() + # Simulate physics + for _ in range(200): + # apply the wrench + cube_object.write_data_to_sim() + sim.step() + # update object + cube_object.update(sim.cfg.dt) + if force == "below_mu": + # Assert that the block has not moved + torch.testing.assert_close( + cube_object.data.root_pos_w.torch, initial_root_pos, rtol=2e-3, atol=2e-3 + ) + if force == "above_mu": + assert (cube_object.data.root_pos_w.torch[..., 0] - initial_root_pos[..., 0] > 0.02).all() + + +@pytest.mark.isaacsim_ci +@pytest.mark.skip(reason="MuJoCo restitution model differs from PhysX — inelastic collisions still bounce") +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_rigid_body_with_restitution(num_cubes, device): + """Test that restitution when applied to rigid object works as expected. + + This test works by dropping a block from a height and checking if the block bounces or not based on the + restitution value set for the object. We set the restitution to be non-zero and drop the block from a height. + When the restitution is 0, the block should not bounce. When the restitution is between 0 and 1, the block + should bounce with less energy. + """ + for expected_collision_type in "partially_elastic", "inelastic": + with _newton_sim_context(device, add_ground_plane=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0, device=device) + + # Set static friction to be non-zero + if expected_collision_type == "inelastic": + restitution_coefficient = 0.0 + elif expected_collision_type == "partially_elastic": + restitution_coefficient = 0.5 + + # Create ground plane + cfg = sim_utils.GroundPlaneCfg( + physics_material=materials.RigidBodyMaterialCfg( + restitution=restitution_coefficient, + ) + ) + cfg.func("/World/GroundPlane", cfg) + + # Play sim + sim.reset() + + root_pose = torch.zeros(num_cubes, 7, device=sim.device) + root_pose[:, 3] = 1.0 # To make orientation a quaternion + for i in range(num_cubes): + root_pose[i, 1] = 1.0 * i + root_pose[:, 2] = 1.0 # Set an initial drop height + root_vel = torch.zeros(num_cubes, 6, device=sim.device) + root_vel[:, 2] = -1.0 # Set an initial downward velocity + + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) + + # Set restitution via view-level API + _set_newton_material_properties( + cube_object, + friction_val=0.0, + restitution_val=restitution_coefficient, + device=device, + ) + + curr_z_velocity = cube_object.data.root_lin_vel_w.torch[:, 2].clone() + + for _ in range(100): + sim.step() + + # update object + cube_object.update(sim.cfg.dt) + curr_z_velocity = cube_object.data.root_lin_vel_w.torch[:, 2].clone() + + if expected_collision_type == "inelastic": + # assert that the block has not bounced by checking that the z velocity is less than or equal to 0 + assert (curr_z_velocity <= 0.0).all() + + if torch.all(curr_z_velocity <= 0.0): + # Still in the air + prev_z_velocity = curr_z_velocity + else: + # collision has happened, exit the for loop + break + + if expected_collision_type == "partially_elastic": + # Assert that the block has lost some energy by checking that the z velocity is less + assert torch.all(torch.le(abs(curr_z_velocity), abs(prev_z_velocity))) + assert (curr_z_velocity > 0.0).all() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_rigid_body_set_mass(num_cubes, device): + """Test getting and setting mass of rigid object.""" + with _newton_sim_context(device, gravity_enabled=False, add_ground_plane=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, height=1.0, device=device) + + # Play sim + sim.reset() + + # Get masses before increasing + original_masses = cube_object.data.body_mass.torch + + assert original_masses.shape == (num_cubes, 1) + + # Randomize mass of the object + masses = original_masses + torch.zeros(num_cubes, 1, device=device).uniform_(4, 8) + + # Set masses using Newton API + cube_object.set_masses_index(masses=wp.from_torch(masses, dtype=wp.float32)) + + torch.testing.assert_close(cube_object.data.body_mass.torch, masses) + + # Simulate physics + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + + masses_to_check = cube_object.data.body_mass.torch + + # Check if mass is set correctly + torch.testing.assert_close(masses, masses_to_check) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("gravity_enabled", [True, False]) +def test_gravity_vec_w(num_cubes, device, gravity_enabled): + """Test that gravity vector direction is set correctly for the rigid object.""" + with _newton_sim_context(device, gravity_enabled=gravity_enabled) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Obtain gravity direction + if gravity_enabled: + gravity_dir = (0.0, 0.0, -1.0) + else: + gravity_dir = (0.0, 0.0, 0.0) + + # Play sim + sim.reset() + + # Check that gravity is set correctly + assert cube_object.data.GRAVITY_VEC_W.torch[0, 0] == gravity_dir[0] + assert cube_object.data.GRAVITY_VEC_W.torch[0, 1] == gravity_dir[1] + assert cube_object.data.GRAVITY_VEC_W.torch[0, 2] == gravity_dir[2] + + # Simulate physics + for _ in range(2): + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + + # Expected gravity value is the acceleration of the body + gravity = torch.zeros(num_cubes, 1, 6, device=device) + if gravity_enabled: + gravity[:, :, 2] = -9.81 + # Check the body accelerations are correct + torch.testing.assert_close(cube_object.data.body_acc_w.torch, gravity) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +@flaky(max_runs=3, min_passes=1) +def test_body_root_state_properties(num_cubes, device, with_offset): + """Test the root_com_state_w, root_link_state_w, body_com_state_w, and body_link_state_w properties.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) + + # Play sim + sim.reset() + + # Check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + if with_offset: + offset = torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_cubes, 1) + else: + offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) + + # Set center of mass offset via Newton API (position only, no quaternion) + com_pos = offset.unsqueeze(1) # (N, 1, 3) + cube_object.set_coms_index(coms=wp.from_torch(com_pos, dtype=wp.vec3f)) + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch.squeeze(1), offset) + + # random z spin velocity (bounded to keep numerical drift within the position tolerance below) + spin_twist = torch.zeros(6, device=device) + spin_twist[5] = 0.5 * torch.randn(1, device=device).clamp(-1.0, 1.0) + + # Simulate physics + for _ in range(100): + # spin the object around Z axis (com) + cube_object.write_root_velocity_to_sim_index(root_velocity=spin_twist.repeat(num_cubes, 1)) + # perform rendering + sim.step() + # update object + cube_object.update(sim.cfg.dt) + + # get state properties + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_link_vel_w = cube_object.data.body_link_vel_w.torch + body_com_pose_w = cube_object.data.body_com_pose_w.torch + body_com_vel_w = cube_object.data.body_com_vel_w.torch + + # if offset is [0,0,0] all root_state_%_w will match and all body_%_w will match + if not with_offset: + torch.testing.assert_close(root_link_pose_w, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(root_link_pose_w, root_link_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_com_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_link_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) + else: + # cubes are spinning around center of mass + # position will not match + # center of mass position will be constant (i.e. spinning around com) + _tol = dict(atol=2e-3, rtol=2e-3) + torch.testing.assert_close(env_pos + offset, root_com_pose_w[..., :3], **_tol) + torch.testing.assert_close(env_pos + offset, body_com_pose_w[..., :3].squeeze(-2), **_tol) + # link position will be moving but should stay constant away from center of mass + root_link_state_pos_rel_com = quat_apply_inverse( + root_link_pose_w[..., 3:], + root_link_pose_w[..., :3] - root_com_pose_w[..., :3], + ) + torch.testing.assert_close(-offset, root_link_state_pos_rel_com, **_tol) + body_link_state_pos_rel_com = quat_apply_inverse( + body_link_pose_w[..., 3:], + body_link_pose_w[..., :3] - body_com_pose_w[..., :3], + ) + torch.testing.assert_close(-offset, body_link_state_pos_rel_com.squeeze(-2), **_tol) + + # orientation of com will be a constant rotation from link orientation + com_quat_b = cube_object.data.body_com_quat_b.torch + com_quat_w = quat_mul(body_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, body_com_pose_w[..., 3:], **_tol) + torch.testing.assert_close(com_quat_w.squeeze(-2), root_com_pose_w[..., 3:], **_tol) + + # orientation of link will match root state will always match + torch.testing.assert_close(root_link_pose_w[..., 3:], root_link_pose_w[..., 3:], **_tol) + torch.testing.assert_close(body_link_pose_w[..., 3:], body_link_pose_w[..., 3:], **_tol) + + # lin_vel will not match + # center of mass vel will be constant (i.e. spinning around com) + torch.testing.assert_close(torch.zeros_like(root_com_vel_w[..., :3]), root_com_vel_w[..., :3], **_tol) + torch.testing.assert_close(torch.zeros_like(body_com_vel_w[..., :3]), body_com_vel_w[..., :3], **_tol) + # link frame will be moving, and should be equal to input angular velocity cross offset + lin_vel_rel_root_gt = quat_apply_inverse(root_link_pose_w[..., 3:], root_link_vel_w[..., :3]) + lin_vel_rel_body_gt = quat_apply_inverse(body_link_pose_w[..., 3:], body_link_vel_w[..., :3]) + lin_vel_rel_gt = torch.linalg.cross(spin_twist.repeat(num_cubes, 1)[..., 3:], -offset) + torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_root_gt, **_tol) + torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_body_gt.squeeze(-2), **_tol) + + # ang_vel will always match + torch.testing.assert_close(root_com_vel_w[..., 3:], root_com_vel_w[..., 3:]) + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_com_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +@pytest.mark.parametrize("state_location", ["com", "link"]) +def test_write_root_state(num_cubes, device, with_offset, state_location): + """Test the setters for root_state using both the link frame and center of mass as reference frame.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) + env_idx = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32, device=device) + + # Play sim + sim.reset() + + # Check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + if with_offset: + offset = torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_cubes, 1) + else: + offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) + + # Set center of mass offset via Newton API (position only) + com_pos = offset.unsqueeze(1) # (N, 1, 3) + cube_object.set_coms_index(coms=wp.from_torch(com_pos, dtype=wp.vec3f)) + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch.squeeze(1), offset) + + rand_state = torch.zeros(num_cubes, 13, device=device) + rand_state[..., :7] = cube_object.data.default_root_pose.torch + rand_state[..., :3] += env_pos + # make quaternion a unit vector + rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) + + for i in range(10): + # perform step + sim.step() + # update buffers + cube_object.update(sim.cfg.dt) + + if state_location == "com": + if i % 2 == 0: + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + else: + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) + elif state_location == "link": + if i % 2 == 0: + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + else: + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + cube_object.write_root_link_velocity_to_sim_index( + root_velocity=rand_state[..., 7:], env_ids=env_idx + ) + + if state_location == "com": + torch.testing.assert_close(rand_state[..., :7], cube_object.data.root_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.root_com_vel_w.torch) + elif state_location == "link": + torch.testing.assert_close(rand_state[..., :7], cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.root_link_vel_w.torch) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True]) +@pytest.mark.parametrize("state_location", ["com", "link", "root"]) +def test_write_state_functions_data_consistency(num_cubes, device, with_offset, state_location): + """Test the setters for root_state using both the link frame and center of mass as reference frame.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) + + # Play sim + sim.reset() + + # Check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + if with_offset: + offset = torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_cubes, 1) + else: + offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) + + # Set center of mass offset via Newton API (position only) + com_pos = offset.unsqueeze(1) # (N, 1, 3) + cube_object.set_coms_index(coms=wp.from_torch(com_pos, dtype=wp.vec3f)) + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch.squeeze(1), offset) + + rand_state = torch.rand(num_cubes, 13, device=device) + rand_state[..., :3] += env_pos + # make quaternion a unit vector + rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) + + # perform step + sim.step() + # update buffers + cube_object.update(sim.cfg.dt) + + if state_location == "com": + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + elif state_location == "link": + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + elif state_location == "root": + cube_object.write_root_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) + + if state_location == "com": + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch + expected_root_link_pos, expected_root_link_quat = combine_frame_transforms( + root_com_pose_w[:, :3], + root_com_pose_w[:, 3:], + quat_rotate(quat_inv(body_com_pose_b[:, 0, 3:7]), -body_com_pose_b[:, 0, :3]), + quat_inv(body_com_pose_b[:, 0, 3:7]), + ) + expected_root_link_pose = torch.cat((expected_root_link_pos, expected_root_link_quat), dim=1) + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + # test both root_pose and root_link successfully updated when root_com updates + torch.testing.assert_close(expected_root_link_pose, root_link_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking + # if velocity update is triggered, which can be determined by comparing angular velocity + torch.testing.assert_close(root_com_vel_w[:, 3:], root_link_vel_w[:, 3:]) + torch.testing.assert_close(expected_root_link_pose, root_link_pose_w) + torch.testing.assert_close(root_com_vel_w[:, 3:], cube_object.data.root_com_vel_w.torch[:, 3:]) + elif state_location == "link": + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch + expected_com_pos, expected_com_quat = combine_frame_transforms( + root_link_pose_w[:, :3], + root_link_pose_w[:, 3:], + body_com_pose_b[:, 0, :3], + body_com_pose_b[:, 0, 3:7], + ) + expected_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1) + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + # test both root_pose and root_com successfully updated when root_link updates + torch.testing.assert_close(expected_com_pose, root_com_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking + # if velocity update is triggered, which can be determined by comparing angular velocity + torch.testing.assert_close(root_link_vel_w[:, 3:], root_com_vel_w[:, 3:]) + torch.testing.assert_close(root_link_pose_w, cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(root_link_vel_w[:, 3:], cube_object.data.root_com_vel_w.torch[:, 3:]) + elif state_location == "root": + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch + expected_com_pos, expected_com_quat = combine_frame_transforms( + root_link_pose_w[:, :3], + root_link_pose_w[:, 3:], + body_com_pose_b[:, 0, :3], + body_com_pose_b[:, 0, 3:7], + ) + expected_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1) + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + # test both root_com and root_link successfully updated when root_pose updates + torch.testing.assert_close(expected_com_pose, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, cube_object.data.root_com_vel_w.torch) + torch.testing.assert_close(root_link_pose_w, cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(root_com_vel_w[:, 3:], root_link_vel_w[:, 3:]) + + +@pytest.mark.isaacsim_ci +@pytest.mark.skip(reason="PhysX-specific warmup test") +def test_warmup_attach_stage_not_called_for_cpu(): + """Regression test: attach_stage() must not be called for CPU in _warmup_and_create_views(). + + Bug (commit 0ba9c5cb3b): ``PhysxManager._warmup_and_create_views()`` called + ``_physx_sim.attach_stage()`` unconditionally before ``force_load_physics_from_usd()``. + These are two alternative initialization patterns; combining them causes + double-initialization that corrupts the CPU MBP broadphase, producing + non-deterministic collision failures (objects passing through surfaces). + + Fix: guard ``attach_stage()`` with ``if is_gpu:`` — it is only required by the + GPU pipeline, which needs explicit stage attachment before the physics load step. + The CPU pipeline attaches implicitly via ``force_load_physics_from_usd()``. + + This test verifies the guard is in place by monkeypatching ``attach_stage`` on + the PhysX simulation interface and asserting it is *not* called during CPU warmup. + The simulation test itself (1 cube falling onto a ground plane) is intentionally + omitted here because the MBP corruption is non-deterministic and depends on scene + complexity (multiple dynamic actors on a mesh collider), making it unreliable as a + unit test assertion. + """ + from unittest.mock import MagicMock + + from isaaclab_physx.physics import PhysxManager + + with _newton_sim_context("cpu", add_ground_plane=True, dt=0.01, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + generate_cubes_scene(num_cubes=1, height=1.0, device="cpu") + + # IPhysxSimulation is a C++ binding whose attributes are read-only, so we cannot + # assign to _physx_sim.attach_stage directly. Instead, replace the class-level + # reference with a MagicMock that wraps the real object so all other calls still + # work, then restore it in the finally block. + original_physx_sim = PhysxManager._physx_sim + spy = MagicMock(wraps=original_physx_sim) + PhysxManager._physx_sim = spy + try: + sim.reset() + finally: + PhysxManager._physx_sim = original_physx_sim + + assert spy.attach_stage.call_count == 0, ( + f"attach_stage() was called {spy.attach_stage.call_count} time(s) during CPU warmup. " + f"This indicates the CPU MBP broadphase double-initialization regression is present: " + f"attach_stage() + force_load_physics_from_usd() must not be combined for CPU." + ) diff --git a/source/isaaclab_newton/test/assets/test_rigid_object_collection.py b/source/isaaclab_newton/test/assets/test_rigid_object_collection.py new file mode 100644 index 000000000000..0873da8ecf22 --- /dev/null +++ b/source/isaaclab_newton/test/assets/test_rigid_object_collection.py @@ -0,0 +1,894 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import sys + +import pytest +import torch +import warp as wp +from isaaclab_newton.assets import RigidObjectCollection +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics import NewtonManager as SimulationManager +from newton.solvers import SolverNotifyFlags + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg, RigidObjectCollectionCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab.utils.math import ( + combine_frame_transforms, + default_orientation, + quat_apply_inverse, + quat_inv, + quat_mul, + quat_rotate, + random_orientation, + subtract_frame_transforms, +) + +NEWTON_SIM_CFG = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + ), +) + + +def _newton_sim_context(device, gravity_enabled=True, dt=None, **kwargs): + """Helper to create a Newton simulation context with the correct device. + + When sim_cfg is provided to build_simulation_context, the device, gravity_enabled, and dt + kwargs are ignored. This helper applies them to the shared NEWTON_SIM_CFG before calling. + """ + NEWTON_SIM_CFG.device = device + NEWTON_SIM_CFG.gravity = (0.0, 0.0, -9.81) if gravity_enabled else (0.0, 0.0, 0.0) + if dt is not None: + NEWTON_SIM_CFG.dt = dt + return build_simulation_context(device=device, sim_cfg=NEWTON_SIM_CFG, **kwargs) + + +def generate_cubes_scene( + num_envs: int = 1, + num_cubes: int = 1, + height=1.0, + has_api: bool = True, + kinematic_enabled: bool = False, + device: str = "cuda:0", +) -> tuple[RigidObjectCollection, torch.Tensor]: + """Generate a scene with the provided number of cubes. + + Args: + num_envs: Number of envs to generate. + num_cubes: Number of cubes to generate. + height: Height of the cubes. + has_api: Whether the cubes have a rigid body API on them. + kinematic_enabled: Whether the cubes are kinematic. + device: Device to use for the simulation. + + Returns: + A tuple containing the rigid object collection representing the cubes and the origins of the cubes. + + """ + origins = torch.tensor([(i * 3.0, 0, height) for i in range(num_envs)]).to(device) + # Create Top-level Xforms, one for each cube + for i, origin in enumerate(origins): + sim_utils.create_prim(f"/World/Env_{i}", "Xform", translation=origin) + + # Resolve spawn configuration + if has_api: + spawn_cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=kinematic_enabled), + ) + else: + # since no rigid body properties defined, this is just a static collider + spawn_cfg = sim_utils.CuboidCfg( + size=(0.1, 0.1, 0.1), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + + # create the rigid object configs + cube_config_dict = {} + for i in range(num_cubes): + cube_object_cfg = RigidObjectCfg( + prim_path=f"/World/Env_.*/Object_{i}", + spawn=spawn_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 3 * i, height)), + ) + cube_config_dict[f"cube_{i}"] = cube_object_cfg + # create the rigid object collection + cube_object_collection_cfg = RigidObjectCollectionCfg(rigid_objects=cube_config_dict) + cube_object_collection = RigidObjectCollection(cfg=cube_object_collection_cfg) + + return cube_object_collection, origins + + +@pytest.mark.parametrize("num_envs", [1, 2]) +@pytest.mark.parametrize("num_cubes", [1, 3]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization(num_envs, num_cubes, device): + """Test initialization for prim with rigid body API at the provided prim path.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 + + # Play sim + sim.reset() + + # Check if object is initialized + assert object_collection.is_initialized + assert len(object_collection.body_names) == num_cubes + + # Check buffers that exist and have correct shapes + assert object_collection.data.body_link_pos_w.torch.shape == (num_envs, num_cubes, 3) + assert object_collection.data.body_link_quat_w.torch.shape == (num_envs, num_cubes, 4) + assert object_collection.data.body_mass.torch.shape == (num_envs, num_cubes) + assert object_collection.data.body_inertia.torch.shape == (num_envs, num_cubes, 9) + + # Simulate physics + for _ in range(2): + sim.step() + object_collection.update(sim.cfg.dt) + + +@pytest.mark.skip(reason="Newton doesn't support kinematic rigid bodies yet") +@pytest.mark.parametrize("num_envs", [1, 2]) +@pytest.mark.parametrize("num_cubes", [1, 3]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization_with_kinematic_enabled(num_envs, num_cubes, device): + """Test that initialization for prim with kinematic flag enabled.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, origins = generate_cubes_scene( + num_envs=num_envs, num_cubes=num_cubes, kinematic_enabled=True, device=device + ) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 + + # Play sim + sim.reset() + + # Check if object is initialized + assert object_collection.is_initialized + assert len(object_collection.body_names) == num_cubes + + # Check buffers that exist and have correct shapes + assert object_collection.data.body_link_pos_w.torch.shape == (num_envs, num_cubes, 3) + assert object_collection.data.body_link_quat_w.torch.shape == (num_envs, num_cubes, 4) + + # Simulate physics + for _ in range(2): + sim.step() + object_collection.update(sim.cfg.dt) + # check that the object is kinematic + default_body_pose = object_collection.data.default_body_pose.torch.clone() + default_body_vel = object_collection.data.default_body_vel.torch.clone() + default_body_pose[..., :3] += origins.unsqueeze(1) + torch.testing.assert_close(object_collection.data.body_link_pose_w.torch, default_body_pose) + torch.testing.assert_close(object_collection.data.body_link_vel_w.torch, default_body_vel) + + +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_initialization_with_no_rigid_body(num_cubes, device): + """Test that initialization fails when no rigid body is found at the provided prim path.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, _ = generate_cubes_scene(num_cubes=num_cubes, has_api=False, device=device) + + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 + + # Play sim + with pytest.raises(RuntimeError): + sim.reset() + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_buffer(device): + """Test if external force buffer correctly updates in the force value is zero case.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + num_envs = 2 + num_cubes = 1 + object_collection, origins = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + # find objects to apply the force + object_ids, object_names = object_collection.find_bodies(".*") + # reset object + object_collection.reset() + + # perform simulation + for step in range(5): + # initiate force tensor + external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) + + # decide if zero or non-zero force + if step == 0 or step == 3: + force = 1.0 + else: + force = 0.0 + + # apply force to the object + external_wrench_b[:, :, 0] = force + external_wrench_b[:, :, 3] = force + + object_collection.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + body_ids=object_ids, + env_ids=None, + ) + + # check if the object collection's force and torque buffers are correctly updated + for i in range(num_envs): + assert object_collection._permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert object_collection._permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force + + object_collection.instantaneous_wrench_composer.add_forces_and_torques_index( + body_ids=object_ids, + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + ) + + # apply action to the object collection + object_collection.write_data_to_sim() + sim.step() + object_collection.update(sim.cfg.dt) + + +@pytest.mark.parametrize("num_envs", [1, 2]) +@pytest.mark.parametrize("num_cubes", [1, 4]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_on_single_body(num_envs, num_cubes, device): + """Test application of external force on the base of the object.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, origins = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + # find objects to apply the force + object_ids, object_names = object_collection.find_bodies(".*") + + # Sample a force equal to the weight of the object + external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) + # Every 2nd cube should have a force applied to it + external_wrench_b[:, 0::2, 2] = 9.81 * object_collection.data.body_mass.torch[:, 0::2] + + for i in range(5): + # reset object state + body_pose = object_collection.data.default_body_pose.torch.clone() + body_vel = object_collection.data.default_body_vel.torch.clone() + # need to shift the position of the cubes otherwise they will be on top of each other + body_pose[..., :2] += origins.unsqueeze(1)[..., :2] + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) + # reset object + object_collection.reset() + + is_global = False + if i % 2 == 0: + positions = object_collection.data.body_link_pos_w.torch[:, object_ids, :3] + is_global = True + else: + positions = None + + # apply force + object_collection.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=positions, + body_ids=object_ids, + env_ids=None, + is_global=is_global, + ) + for _ in range(10): + # write data to sim + object_collection.write_data_to_sim() + # step sim + sim.step() + # update object collection + object_collection.update(sim.cfg.dt) + + # First object should still be at the same Z position (1.0) + torch.testing.assert_close( + object_collection.data.body_link_pos_w.torch[:, 0::2, 2], + torch.ones_like(object_collection.data.body_link_pos_w.torch[:, 0::2, 2]), + ) + # Second object should have fallen, so it's Z height should be less than initial height of 1.0 + assert torch.all(object_collection.data.body_link_pos_w.torch[:, 1::2, 2] < 1.0) + + +@pytest.mark.parametrize("num_envs", [1, 2]) +@pytest.mark.parametrize("num_cubes", [1, 4]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_external_force_on_single_body_at_position(num_envs, num_cubes, device): + """Test application of external force on the base of the object at a specific position. + + In this test, we apply a force equal to the weight of an object on the base of + one of the objects at 1m in the Y direction, we check that the object rotates around it's X axis. + For the other object, we do not apply any force and check that it falls down. + """ + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, origins = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + # find objects to apply the force + object_ids, object_names = object_collection.find_bodies(".*") + + # Sample a force equal to the weight of the object + external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) + external_wrench_positions_b = torch.zeros( + object_collection.num_instances, len(object_ids), 3, device=sim.device + ) + # Every 2nd cube should have a force applied to it + external_wrench_b[:, 0::2, 2] = 50.0 + external_wrench_positions_b[:, 0::2, 1] = 1.0 + + # Desired force and torque + for i in range(5): + # reset object state + body_pose = object_collection.data.default_body_pose.torch.clone() + body_vel = object_collection.data.default_body_vel.torch.clone() + # need to shift the position of the cubes otherwise they will be on top of each other + body_pose[..., :2] += origins.unsqueeze(1)[..., :2] + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) + # reset object + object_collection.reset() + + is_global = False + if i % 2 == 0: + body_com_pos_w = object_collection.data.body_link_pos_w.torch[:, object_ids, :3] + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + external_wrench_positions_b += body_com_pos_w + is_global = True + else: + external_wrench_positions_b[..., 0] = 0.0 + external_wrench_positions_b[..., 1] = 1.0 + external_wrench_positions_b[..., 2] = 0.0 + + # apply force + object_collection.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=object_ids, + env_ids=None, + is_global=is_global, + ) + object_collection.permanent_wrench_composer.add_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], + positions=external_wrench_positions_b, + body_ids=object_ids, + is_global=is_global, + ) + + for _ in range(10): + # write data to sim + object_collection.write_data_to_sim() + # step sim + sim.step() + # update object collection + object_collection.update(sim.cfg.dt) + + # First object should be rotating around it's X axis + assert torch.all(object_collection.data.body_com_ang_vel_b.torch[:, 0::2, 0] > 0.1) + # Second object should have fallen, so it's Z height should be less than initial height of 1.0 + assert torch.all(object_collection.data.body_link_pos_w.torch[:, 1::2, 2] < 1.0) + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_object_state(num_envs, num_cubes, device): + """Test setting the state of the object. + + .. note:: + Turn off gravity for this test as we don't want any external forces acting on the object + to ensure state remains static + """ + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, origins = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + state_types = ["body_link_pos_w", "body_link_quat_w", "body_com_lin_vel_w", "body_com_ang_vel_w"] + + # Set each state type individually as they are dependent on each other + for state_type_to_randomize in state_types: + state_dict = { + "body_link_pos_w": torch.zeros_like(object_collection.data.body_link_pos_w.torch, device=sim.device), + "body_link_quat_w": default_orientation(num=num_cubes * num_envs, device=sim.device).view( + num_envs, num_cubes, 4 + ), + "body_com_lin_vel_w": torch.zeros_like( + object_collection.data.body_com_lin_vel_w.torch, device=sim.device + ), + "body_com_ang_vel_w": torch.zeros_like( + object_collection.data.body_com_ang_vel_w.torch, device=sim.device + ), + } + + for _ in range(5): + # reset object + object_collection.reset() + + # Set random state + if state_type_to_randomize == "body_link_quat_w": + state_dict[state_type_to_randomize] = random_orientation( + num=num_cubes * num_envs, device=sim.device + ).view(num_envs, num_cubes, 4) + else: + state_dict[state_type_to_randomize] = torch.randn(num_envs, num_cubes, 3, device=sim.device) + # make sure objects do not overlap + if state_type_to_randomize == "body_link_pos_w": + state_dict[state_type_to_randomize][..., :2] += origins.unsqueeze(1)[..., :2] + + # perform simulation + for _ in range(5): + body_pose = torch.cat( + [state_dict["body_link_pos_w"], state_dict["body_link_quat_w"]], + dim=-1, + ) + body_vel = torch.cat( + [state_dict["body_com_lin_vel_w"], state_dict["body_com_ang_vel_w"]], + dim=-1, + ) + # reset object state + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) + sim.step() + + # assert that set object quantities are equal to the ones set in the state_dict + for key, expected_value in state_dict.items(): + value = getattr(object_collection.data, key).torch + # Newton reads state directly from sim (not cached), so post-step drift + # from velocity integration causes larger differences than PhysX + torch.testing.assert_close(value, expected_value, rtol=1e-1, atol=1e-1) + + object_collection.update(sim.cfg.dt) + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_reset_object_collection(num_envs, num_cubes, device): + """Test resetting the state of the rigid object.""" + with _newton_sim_context(device, gravity_enabled=True, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + for i in range(5): + sim.step() + object_collection.update(sim.cfg.dt) + + # Move the object to a random position + body_pose = object_collection.data.default_body_pose.torch.clone() + body_pose[..., :3] = torch.randn(num_envs, num_cubes, 3, device=sim.device) + # Random orientation + body_pose[..., 3:7] = random_orientation(num=num_cubes, device=sim.device) + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + body_vel = object_collection.data.default_body_vel.torch.clone() + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) + + if i % 2 == 0: + object_collection.reset() + + # Reset should zero external forces and torques + assert not object_collection._instantaneous_wrench_composer.active + assert not object_collection._permanent_wrench_composer.active + assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_torque.torch) == 0 + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +def test_set_material_properties(num_envs, num_cubes, device): + """Test getting and setting material properties of rigid object collection via view-level APIs.""" + with _newton_sim_context(device, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + sim.reset() + + # Get friction/restitution bindings via view-level API + # The collection's _root_view stores data in flat view order: (num_envs * num_cubes, ...) + model = SimulationManager.get_model() + friction_raw = object_collection._root_view.get_attribute("shape_material_mu", model) + restitution_raw = object_collection._root_view.get_attribute("shape_material_restitution", model) + + # Shape is (num_envs * num_cubes, num_shapes_per_body, 1) — slice off trailing dim + friction_binding = friction_raw[:, :, 0] + restitution_binding = restitution_raw[:, :, 0] + + # Generate random values matching the flat view shape + friction = torch.empty_like(wp.to_torch(friction_binding)).uniform_(0.4, 0.8) + restitution = torch.empty_like(wp.to_torch(restitution_binding)).uniform_(0.0, 0.2) + + wp.to_torch(friction_binding)[:] = friction + wp.to_torch(restitution_binding)[:] = restitution + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + # Perform simulation + sim.step() + object_collection.update(sim.cfg.dt) + + # Verify by reading back from the binding + mu = wp.to_torch(friction_binding) + restitution_check = wp.to_torch(restitution_binding) + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("gravity_enabled", [True, False]) +def test_gravity_vec_w(num_envs, num_cubes, device, gravity_enabled): + """Test that gravity vector direction is set correctly for the rigid object.""" + with _newton_sim_context(device, gravity_enabled=gravity_enabled, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) + + # Obtain gravity direction + gravity_dir = (0.0, 0.0, -1.0) if gravity_enabled else (0.0, 0.0, 0.0) + + sim.reset() + + # Check if gravity vector is set correctly + gravity_vec = object_collection.data.GRAVITY_VEC_W.torch + assert gravity_vec[0, 0, 0] == gravity_dir[0] + assert gravity_vec[0, 0, 1] == gravity_dir[1] + assert gravity_vec[0, 0, 2] == gravity_dir[2] + + # Perform simulation + for _ in range(2): + sim.step() + object_collection.update(sim.cfg.dt) + + # Expected gravity value is the acceleration of the body + gravity = torch.zeros(num_envs, num_cubes, 6, device=device) + if gravity_enabled: + gravity[..., 2] = -9.81 + + # Check the body accelerations are correct + torch.testing.assert_close(object_collection.data.body_com_acc_w.torch, gravity) + + +@pytest.mark.parametrize("num_envs", [1, 4]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +def test_object_state_properties(num_envs, num_cubes, device, with_offset): + """Test the object_com_state_w and object_link_state_w properties.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) + + sim.reset() + + # check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + offset = ( + torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + if with_offset + else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + ) + + # Set center of mass offset via Newton API (position only, shape (E, B, 3)) + cube_object.set_coms_index(coms=wp.from_torch(offset, dtype=wp.vec3f)) + # Flush the model change immediately so it takes effect before the next step + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch, offset) + + # random z spin velocity + spin_twist = torch.zeros(6, device=device) + spin_twist[5] = torch.randn(1, device=device) + + # initial spawn point + init_com = cube_object.data.body_com_pose_w.torch[..., :3] + + for i in range(10): + # spin the object around Z axis (com) + cube_object.write_body_com_velocity_to_sim_index(body_velocities=spin_twist.repeat(num_envs, num_cubes, 1)) + sim.step() + cube_object.update(sim.cfg.dt) + + # get state properties + object_link_pose_w = cube_object.data.body_link_pose_w.torch + object_link_vel_w = cube_object.data.body_link_vel_w.torch + object_com_pose_w = cube_object.data.body_com_pose_w.torch + object_com_vel_w = cube_object.data.body_com_vel_w.torch + + # if offset is [0,0,0] all object_state_%_w will match and all body_%_w will match + if not with_offset: + torch.testing.assert_close(object_link_pose_w, object_com_pose_w) + torch.testing.assert_close(object_com_vel_w, object_link_vel_w) + else: + _tol = dict(atol=2e-3, rtol=2e-3) + # cubes are spinning around center of mass + # position will not match + # center of mass position will be constant (i.e. spinning around com) + torch.testing.assert_close(init_com, object_com_pose_w[..., :3], **_tol) + + # link position will be moving but should stay constant away from center of mass + object_link_state_pos_rel_com = quat_apply_inverse( + object_link_pose_w[..., 3:], + object_link_pose_w[..., :3] - object_com_pose_w[..., :3], + ) + + torch.testing.assert_close(-offset, object_link_state_pos_rel_com, **_tol) + + # orientation of com will be a constant rotation from link orientation + com_quat_b = cube_object.data.body_com_quat_b.torch + com_quat_w = quat_mul(object_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, object_com_pose_w[..., 3:], **_tol) + + # orientation of link will match object state will always match + torch.testing.assert_close(object_link_pose_w[..., 3:], object_link_pose_w[..., 3:]) + + # lin_vel will not match + # center of mass vel will be constant (i.e. spinning around com) + torch.testing.assert_close( + torch.zeros_like(object_com_vel_w[..., :3]), + object_com_vel_w[..., :3], + **_tol, + ) + + # link frame will be moving, and should be equal to input angular velocity cross offset + lin_vel_rel_object_gt = quat_apply_inverse(object_link_pose_w[..., 3:], object_link_vel_w[..., :3]) + lin_vel_rel_gt = torch.linalg.cross(spin_twist.repeat(num_envs, num_cubes, 1)[..., 3:], -offset) + torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_object_gt, **_tol) + + # ang_vel will always match + torch.testing.assert_close(object_com_vel_w[..., 3:], object_com_vel_w[..., 3:]) + torch.testing.assert_close(object_com_vel_w[..., 3:], object_link_vel_w[..., 3:]) + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True, False]) +@pytest.mark.parametrize("state_location", ["com", "link"]) +def test_write_object_state(num_envs, num_cubes, device, with_offset, state_location): + """Test the setters for object_state using both the link frame and center of mass as reference frame.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) + env_ids = torch.tensor([x for x in range(num_envs)], dtype=torch.int32) + object_ids = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) + + sim.reset() + + # Check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + offset = ( + torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + if with_offset + else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + ) + + # Set center of mass offset via Newton API (position only, shape (E, B, 3)) + cube_object.set_coms_index(coms=wp.from_torch(offset, dtype=wp.vec3f)) + # Flush the model change immediately so it takes effect before the next step + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch, offset) + + rand_state = torch.zeros(num_envs, num_cubes, 13, device=device) + rand_state[..., :7] = cube_object.data.default_body_pose.torch + rand_state[..., :3] += cube_object.data.body_link_pos_w.torch + # make quaternion a unit vector + rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) + + env_ids = env_ids.to(device) + object_ids = object_ids.to(device) + for i in range(10): + sim.step() + cube_object.update(sim.cfg.dt) + + if state_location == "com": + if i % 2 == 0: + cube_object.write_body_com_pose_to_sim_index(body_poses=rand_state[..., :7]) + cube_object.write_body_com_velocity_to_sim_index(body_velocities=rand_state[..., 7:]) + else: + cube_object.write_body_com_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) + elif state_location == "link": + if i % 2 == 0: + cube_object.write_body_link_pose_to_sim_index(body_poses=rand_state[..., :7]) + cube_object.write_body_link_velocity_to_sim_index(body_velocities=rand_state[..., 7:]) + else: + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_link_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) + + if state_location == "com": + torch.testing.assert_close(rand_state[..., :7], cube_object.data.body_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.body_com_vel_w.torch) + elif state_location == "link": + torch.testing.assert_close(rand_state[..., :7], cube_object.data.body_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.body_link_vel_w.torch) + + +@pytest.mark.parametrize("num_envs", [1, 3]) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("with_offset", [True]) +@pytest.mark.parametrize("state_location", ["com", "link", "root"]) +def test_write_object_state_functions_data_consistency(num_envs, num_cubes, device, with_offset, state_location): + """Test the setters for object_state using both the link frame and center of mass as reference frame.""" + with _newton_sim_context(device, gravity_enabled=False, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + # Create a scene with random cubes + cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) + env_ids = torch.tensor([x for x in range(num_envs)], dtype=torch.int32) + object_ids = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) + + sim.reset() + + # Check if cube_object is initialized + assert cube_object.is_initialized + + # change center of mass offset from link frame + offset = ( + torch.tensor([0.1, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + if with_offset + else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) + ) + + # Set center of mass offset via Newton API (position only, shape (E, B, 3)) + cube_object.set_coms_index(coms=wp.from_torch(offset, dtype=wp.vec3f)) + # Flush the model change immediately so it takes effect before the next step + with wp.ScopedDevice(device): + SimulationManager._solver.notify_model_changed(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + + # check center of mass has been set + torch.testing.assert_close(cube_object.data.body_com_pos_b.torch, offset) + + rand_state = torch.rand(num_envs, num_cubes, 13, device=device) + rand_state[..., :3] += cube_object.data.body_link_pos_w.torch + # make quaternion a unit vector + rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) + + env_ids = env_ids.to(device) + object_ids = object_ids.to(device) + sim.step() + cube_object.update(sim.cfg.dt) + + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_com_pose_w = cube_object.data.body_com_pose_w.torch + object_link_to_com_pos, object_link_to_com_quat = subtract_frame_transforms( + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:7].view(-1, 4), + body_com_pose_w[..., :3].view(-1, 3), + body_com_pose_w[..., 3:7].view(-1, 4), + ) + + if state_location == "com": + cube_object.write_body_com_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) + elif state_location == "link": + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_link_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) + elif state_location == "root": + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) + + if state_location == "com": + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch + expected_root_link_pos, expected_root_link_quat = combine_frame_transforms( + com_pose_w[..., :3].view(-1, 3), + com_pose_w[..., 3:].view(-1, 4), + quat_rotate(quat_inv(object_link_to_com_quat), -object_link_to_com_pos), + quat_inv(object_link_to_com_quat), + ) + expected_object_link_pose = torch.cat((expected_root_link_pos, expected_root_link_quat), dim=1).view( + num_envs, -1, 7 + ) + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch + # test both root_pose and root_link successfully updated when root_com updates + torch.testing.assert_close(expected_object_link_pose, link_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking + # if velocity update is triggered, which can be determined by comparing angular velocity + torch.testing.assert_close(com_vel_w[..., 3:], link_vel_w[..., 3:]) + torch.testing.assert_close(expected_object_link_pose, link_pose_w) + torch.testing.assert_close(com_vel_w[..., 3:], cube_object.data.body_com_vel_w.torch[..., 3:]) + elif state_location == "link": + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch + expected_com_pos, expected_com_quat = combine_frame_transforms( + link_pose_w[..., :3].view(-1, 3), + link_pose_w[..., 3:].view(-1, 4), + object_link_to_com_pos, + object_link_to_com_quat, + ) + expected_object_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1).view(num_envs, -1, 7) + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch + # test both root_pose and root_com successfully updated when root_link updates + torch.testing.assert_close(expected_object_com_pose, com_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking + # if velocity update is triggered, which can be determined by comparing angular velocity + torch.testing.assert_close(link_vel_w[..., 3:], com_vel_w[..., 3:]) + torch.testing.assert_close(link_pose_w, cube_object.data.body_link_pose_w.torch) + torch.testing.assert_close(link_vel_w[..., 3:], cube_object.data.body_com_vel_w.torch[..., 3:]) + elif state_location == "root": + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_com_vel_w = cube_object.data.body_com_vel_w.torch + expected_object_com_pos, expected_object_com_quat = combine_frame_transforms( + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:].view(-1, 4), + object_link_to_com_pos, + object_link_to_com_quat, + ) + expected_object_com_pose = torch.cat((expected_object_com_pos, expected_object_com_quat), dim=1).view( + num_envs, -1, 7 + ) + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch + # test both root_com and root_link successfully updated when root_pose updates + torch.testing.assert_close(expected_object_com_pose, com_pose_w) + torch.testing.assert_close(body_com_vel_w, com_vel_w) + torch.testing.assert_close(body_link_pose_w, link_pose_w) + torch.testing.assert_close(body_com_vel_w[..., 3:], link_vel_w[..., 3:]) diff --git a/source/isaaclab_newton/test/cloner/__init__.py b/source/isaaclab_newton/test/cloner/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_newton/test/cloner/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_newton/test/physics/__init__.py b/source/isaaclab_newton/test/physics/__init__.py new file mode 100644 index 000000000000..ad6daf8a5502 --- /dev/null +++ b/source/isaaclab_newton/test/physics/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared utilities for physics and sensor tests.""" diff --git a/source/isaaclab_newton/test/physics/physics_test_utils.py b/source/isaaclab_newton/test/physics/physics_test_utils.py new file mode 100644 index 000000000000..65177a9c8a93 --- /dev/null +++ b/source/isaaclab_newton/test/physics/physics_test_utils.py @@ -0,0 +1,295 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared utilities for physics and sensor testing. + +This module provides common functionality used across multiple test files: +- Shape type definitions and helpers +- Simulation configuration +- Shape spawning utilities +""" + +import time +from contextlib import contextmanager +from enum import Enum, auto + +import pytest +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.sim import SimulationCfg + +## +# Newton Configuration +## + + +def make_sim_cfg( + use_mujoco_contacts: bool = False, device: str = "cuda:0", gravity: tuple = (0.0, 0.0, -9.81) +) -> SimulationCfg: + """Create simulation configuration with specified collision pipeline. + + Args: + use_mujoco_contacts: If True, use MuJoCo contact pipeline. If False, use Newton contact pipeline. + device: Device to run simulation on ("cuda:0" or "cpu"). + gravity: Gravity vector (x, y, z). + + Returns: + SimulationCfg configured for the specified collision pipeline. + """ + solver_cfg = MJWarpSolverCfg( + njmax=200, + ls_iterations=20, + cone="elliptic", + ls_parallel=False, + integrator="implicitfast", + use_mujoco_contacts=use_mujoco_contacts, + ) + + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, + num_substeps=1, + debug_mode=False, + use_cuda_graph=False, + ) + + return SimulationCfg( + dt=1.0 / 120.0, + device=device, + gravity=gravity, + create_stage_in_memory=False, + physics=newton_cfg, + ) + + +COLLISION_PIPELINES = [ + pytest.param(False, id="newton_contacts"), + pytest.param(True, id="mujoco_contacts"), +] + + +## +# Shape Types +## + + +class ShapeType(Enum): + SPHERE = auto() + BOX = auto() + CAPSULE = auto() + CYLINDER = auto() + CONE = auto() + MESH_SPHERE = auto() + MESH_BOX = auto() + MESH_CAPSULE = auto() + MESH_CYLINDER = auto() + MESH_CONE = auto() + + +PRIMITIVE_SHAPES = [ShapeType.SPHERE, ShapeType.BOX, ShapeType.CAPSULE, ShapeType.CYLINDER, ShapeType.CONE] +MESH_SHAPES = [ + ShapeType.MESH_SPHERE, + ShapeType.MESH_BOX, + ShapeType.MESH_CAPSULE, + ShapeType.MESH_CYLINDER, + ShapeType.MESH_CONE, +] +ALL_SHAPES = PRIMITIVE_SHAPES + MESH_SHAPES + +STABLE_SHAPES = [ + ShapeType.SPHERE, + ShapeType.BOX, + ShapeType.CAPSULE, + ShapeType.CYLINDER, + ShapeType.MESH_SPHERE, + ShapeType.MESH_BOX, + ShapeType.MESH_CAPSULE, + ShapeType.MESH_CYLINDER, +] + +BOX_SHAPES = [ShapeType.BOX, ShapeType.MESH_BOX] +SPHERE_SHAPES = [ShapeType.SPHERE, ShapeType.MESH_SPHERE] + + +def shape_type_to_str(shape_type: ShapeType) -> str: + return shape_type.name.lower() + + +def is_mesh_shape(shape_type: ShapeType) -> bool: + return shape_type in MESH_SHAPES + + +## +# Shape Configuration Factory +## + + +def create_shape_cfg( + shape_type: ShapeType, + prim_path: str, + pos: tuple = (0, 0, 1.0), + disable_gravity: bool = True, + activate_contact_sensors: bool = True, +) -> RigidObjectCfg: + """Create RigidObjectCfg for a shape type.""" + rigid_props = sim_utils.RigidBodyPropertiesCfg( + disable_gravity=disable_gravity, + linear_damping=0.0, + angular_damping=0.0, + ) + collision_props = sim_utils.CollisionPropertiesCfg(collision_enabled=True) + mass_props = sim_utils.MassPropertiesCfg(mass=1.0) + + spawner_map = { + ShapeType.SPHERE: lambda: sim_utils.SphereCfg( + radius=0.25, + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.4, 0.8)), + ), + ShapeType.BOX: lambda: sim_utils.CuboidCfg( + size=(0.5, 0.5, 0.5), + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.4, 0.8, 0.4)), + ), + ShapeType.CAPSULE: lambda: sim_utils.CapsuleCfg( + radius=0.15, + height=0.3, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.8, 0.4, 0.4)), + ), + ShapeType.CYLINDER: lambda: sim_utils.CylinderCfg( + radius=0.2, + height=0.4, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.8, 0.8, 0.4)), + ), + ShapeType.CONE: lambda: sim_utils.ConeCfg( + radius=0.25, + height=0.5, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.8, 0.4, 0.8)), + ), + ShapeType.MESH_SPHERE: lambda: sim_utils.MeshSphereCfg( + radius=0.25, + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.2, 0.2, 0.6)), + ), + ShapeType.MESH_BOX: lambda: sim_utils.MeshCuboidCfg( + size=(0.5, 0.5, 0.5), + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.2, 0.6, 0.2)), + ), + ShapeType.MESH_CAPSULE: lambda: sim_utils.MeshCapsuleCfg( + radius=0.15, + height=0.3, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.6, 0.2, 0.2)), + ), + ShapeType.MESH_CYLINDER: lambda: sim_utils.MeshCylinderCfg( + radius=0.2, + height=0.4, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.6, 0.6, 0.2)), + ), + ShapeType.MESH_CONE: lambda: sim_utils.MeshConeCfg( + radius=0.25, + height=0.5, + axis="Z", + rigid_props=rigid_props, + collision_props=collision_props, + mass_props=mass_props, + activate_contact_sensors=activate_contact_sensors, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.6, 0.2, 0.6)), + ), + } + + return RigidObjectCfg( + prim_path=prim_path, + spawn=spawner_map[shape_type](), + init_state=RigidObjectCfg.InitialStateCfg(pos=pos), + ) + + +def get_shape_extent(shape_type: ShapeType) -> float: + """Get the XY extent (radius/half-size) of a shape for positioning.""" + extents = { + ShapeType.SPHERE: 0.25, + ShapeType.BOX: 0.25, + ShapeType.CAPSULE: 0.15, + ShapeType.CYLINDER: 0.20, + ShapeType.CONE: 0.25, + ShapeType.MESH_SPHERE: 0.25, + ShapeType.MESH_BOX: 0.25, + ShapeType.MESH_CAPSULE: 0.15, + ShapeType.MESH_CYLINDER: 0.20, + ShapeType.MESH_CONE: 0.25, + } + return extents[shape_type] + + +def get_shape_height(shape_type: ShapeType) -> float: + """Get the height of a shape for stacking calculations.""" + heights = { + ShapeType.SPHERE: 0.5, + ShapeType.BOX: 0.5, + ShapeType.CAPSULE: 0.6, + ShapeType.CYLINDER: 0.4, + ShapeType.CONE: 0.5, + ShapeType.MESH_SPHERE: 0.5, + ShapeType.MESH_BOX: 0.5, + ShapeType.MESH_CAPSULE: 0.6, + ShapeType.MESH_CYLINDER: 0.4, + ShapeType.MESH_CONE: 0.5, + } + return heights[shape_type] + + +@contextmanager +def phase_timer(label: str): + """Context manager that prints elapsed wall-clock time for a test phase.""" + t0 = time.perf_counter() + yield + elapsed = time.perf_counter() - t0 + print(f" [{label}] {elapsed:.2f}s") + + +def perform_sim_step(sim, scene, dt: float): + """Perform a single simulation step and update scene.""" + scene.write_data_to_sim() + sim.step(render=True) + scene.update(dt=dt) diff --git a/source/isaaclab_newton/test/physics/test_newton_manager_abstraction.py b/source/isaaclab_newton/test/physics/test_newton_manager_abstraction.py new file mode 100644 index 000000000000..f8ac77586cc0 --- /dev/null +++ b/source/isaaclab_newton/test/physics/test_newton_manager_abstraction.py @@ -0,0 +1,274 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the per-solver :class:`NewtonManager` abstraction. + +Covers: + +* :attr:`NewtonSolverCfg.class_type` resolves to the matching manager subclass. +* :meth:`NewtonCfg.__post_init__` propagates ``solver_cfg.class_type`` onto + :attr:`NewtonCfg.class_type` so that ``SimulationContext`` picks the right + manager. +* Each leaf manager subclasses :class:`NewtonManager` and implements + :meth:`_build_solver` (with the abstract base raising ``NotImplementedError``). +* The cross-config validation in :meth:`NewtonMJWarpManager._build_solver` + rejects the ``MJWarp + use_mujoco_contacts=True + collision_cfg`` combination. +* Manager name dispatch (used by :class:`InteractiveScene` and the various + factory dispatchers) still starts with ``"newton"``. +* End-to-end: spinning up a simulation with each solver builds the correct + solver, sets the right ``_use_single_state`` / ``_needs_collision_pipeline`` + flags, and lands canonical state on :class:`NewtonManager` so that external + ``NewtonManager._foo`` reads keep working. +""" + +from __future__ import annotations + +import pytest +from isaaclab_newton.physics import ( + FeatherstoneSolverCfg, + KaminoSolverCfg, + MJWarpSolverCfg, + NewtonCfg, + NewtonCollisionPipelineCfg, + NewtonFeatherstoneManager, + NewtonKaminoManager, + NewtonManager, + NewtonMJWarpManager, + NewtonSolverCfg, + NewtonXPBDManager, + XPBDSolverCfg, +) +from newton.solvers import SolverFeatherstone, SolverKamino, SolverMuJoCo, SolverXPBD + +from isaaclab.sim import SimulationCfg, build_simulation_context + +# --------------------------------------------------------------------------- +# Lightweight (no sim) parametrisation +# --------------------------------------------------------------------------- + +# (solver_cfg_factory, expected_manager, expected_solver_cls, +# expected_use_single_state, expected_needs_collision_pipeline) +SOLVER_MATRIX = [ + pytest.param( + lambda: MJWarpSolverCfg(use_mujoco_contacts=True), + NewtonMJWarpManager, + SolverMuJoCo, + True, + False, + id="mjwarp_internal_contacts", + ), + pytest.param( + lambda: MJWarpSolverCfg(use_mujoco_contacts=False), + NewtonMJWarpManager, + SolverMuJoCo, + True, + True, + id="mjwarp_newton_pipeline", + ), + pytest.param( + lambda: XPBDSolverCfg(), + NewtonXPBDManager, + SolverXPBD, + False, + True, + id="xpbd", + ), + pytest.param( + lambda: FeatherstoneSolverCfg(), + NewtonFeatherstoneManager, + SolverFeatherstone, + False, + True, + id="featherstone", + ), + pytest.param( + lambda: KaminoSolverCfg(use_collision_detector=True), + NewtonKaminoManager, + SolverKamino, + False, + False, + id="kamino_internal_contacts", + ), + pytest.param( + lambda: KaminoSolverCfg(use_collision_detector=False), + NewtonKaminoManager, + SolverKamino, + False, + True, + id="kamino_newton_pipeline", + ), +] + + +# --------------------------------------------------------------------------- +# class_type wiring (no SimulationContext required) +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "solver_cfg_factory, expected_manager, _solver_cls, _single_state, _pipeline", + SOLVER_MATRIX, +) +def test_solver_cfg_class_type_resolves_to_subclass( + solver_cfg_factory, expected_manager, _solver_cls, _single_state, _pipeline +): + """Each ``*SolverCfg.class_type`` resolves to its matching manager subclass.""" + solver_cfg = solver_cfg_factory() + # ``class_type`` is a lazy ``"module:Class"`` reference; calling its + # ``_resolve()`` returns the actual class. ``__name__`` works without + # forcing import (LazyType caches metadata) and is sufficient identity. + assert solver_cfg.class_type.__name__ == expected_manager.__name__ + + +@pytest.mark.parametrize( + "solver_cfg_factory, expected_manager, _solver_cls, _single_state, _pipeline", + SOLVER_MATRIX, +) +def test_newton_cfg_post_init_propagates_class_type( + solver_cfg_factory, expected_manager, _solver_cls, _single_state, _pipeline +): + """``NewtonCfg.__post_init__`` lifts ``solver_cfg.class_type`` onto ``NewtonCfg.class_type``.""" + cfg = NewtonCfg(solver_cfg=solver_cfg_factory()) + assert cfg.class_type.__name__ == expected_manager.__name__ + + +# --------------------------------------------------------------------------- +# Manager class hierarchy and factory contracts +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "manager", [NewtonMJWarpManager, NewtonXPBDManager, NewtonFeatherstoneManager, NewtonKaminoManager] +) +def test_subclass_of_newton_manager(manager): + """All concrete managers inherit from :class:`NewtonManager`.""" + assert issubclass(manager, NewtonManager) + # Subclasses must override the abstract factory. + assert manager._build_solver is not NewtonManager._build_solver + + +def test_abstract_build_solver_raises(): + """Calling :meth:`_build_solver` on the abstract base raises.""" + with pytest.raises(NotImplementedError): + NewtonManager._build_solver(model=None, solver_cfg=NewtonSolverCfg()) + + +@pytest.mark.parametrize( + "manager", [NewtonMJWarpManager, NewtonXPBDManager, NewtonFeatherstoneManager, NewtonKaminoManager] +) +def test_manager_name_starts_with_newton(manager): + """The ``"newton"`` prefix is required by :class:`InteractiveScene` and the + various backend factories that dispatch on ``physics_manager.__name__.lower()``. + """ + assert manager.__name__.lower().startswith("newton") + + +# --------------------------------------------------------------------------- +# End-to-end: build each solver via SimulationContext +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "solver_cfg_factory, expected_manager, expected_solver_cls," + " expected_use_single_state, expected_needs_collision_pipeline", + SOLVER_MATRIX, +) +def test_initialize_solver_populates_canonical_state( + solver_cfg_factory, + expected_manager, + expected_solver_cls, + expected_use_single_state, + expected_needs_collision_pipeline, +): + """End-to-end: ``SimulationContext`` resolves the right manager subclass and + ``initialize_solver`` lands the right solver + flags on :class:`NewtonManager`. + + External code reads :class:`NewtonManager` attributes directly (``_solver``, + ``_use_single_state``, ``_needs_collision_pipeline``). Even though dispatch + runs through a leaf subclass (e.g. :class:`NewtonMJWarpManager`), shared + state is assigned through the explicit base class so that those reads keep + working regardless of which leaf is active. This test is the regression + guard for that contract. + + The builder is pre-populated with a minimal one-body / one-joint scene + (instead of relying on a USD stage) for two reasons: + + 1. :class:`SolverMuJoCo` requires at least one joint to convert the model + to MJCF; a ground-plane-only scene fails MJCF conversion. + 2. Pre-populating ``NewtonManager._builder`` causes + :meth:`NewtonManager.start_simulation` to skip + :meth:`instantiate_builder_from_stage`, so the test does not depend on + USD asset packages. + """ + sim_cfg = SimulationCfg( + dt=1.0 / 120.0, + device="cuda:0", + gravity=(0.0, 0.0, -9.81), + physics=NewtonCfg(solver_cfg=solver_cfg_factory(), use_cuda_graph=False), + ) + + with build_simulation_context(sim_cfg=sim_cfg) as sim: + # Resolved manager class matches the expected leaf. + resolved_manager = sim.physics_manager + # ``physics_manager`` is a LazyType proxy — compare by ``__name__`` to + # avoid forcing identity-by-id checks against the unresolved proxy. + assert resolved_manager.__name__ == expected_manager.__name__ + assert resolved_manager.__name__.lower().startswith("newton") + + # Pre-populate the builder with a minimal scene so MJCF conversion has + # something to work with. + builder = NewtonManager.create_builder() + body = builder.add_body(mass=1.0) + builder.add_joint_revolute(parent=-1, child=body, axis=(0, 0, 1)) + NewtonManager.set_builder(builder) + + # Force resolution and bring up the solver. + sim.reset() + + # Canonical state lives on the base class. + assert NewtonManager._solver is not None + assert isinstance(NewtonManager._solver, expected_solver_cls) + assert NewtonManager._use_single_state is expected_use_single_state + assert NewtonManager._needs_collision_pipeline is expected_needs_collision_pipeline + + # ``_contacts`` is allocated whichever way contacts are handled + # (MuJoCo internal buffer or Newton pipeline output). + # Kamino with internal contacts does not currently set NewtonManager._contacts. + if expected_solver_cls is not SolverKamino: + assert NewtonManager._contacts is not None + + # One step should not raise — proves the dispatch wiring lines up + # end-to-end. (We do not assert physics; that's covered by the + # asset/sensor test suites.) + sim.step(render=False) + + +def test_mjwarp_internal_contacts_with_collision_cfg_raises(): + """Combining ``use_mujoco_contacts=True`` with a ``collision_cfg`` is rejected. + + The check lives in :meth:`NewtonMJWarpManager._build_solver` because it + needs both the solver cfg subtype and the parent :class:`NewtonCfg`, so it + fires during :meth:`NewtonManager.initialize_solver` (i.e. on + ``sim.reset()``) rather than at cfg construction time. + """ + sim_cfg = SimulationCfg( + dt=1.0 / 120.0, + device="cuda:0", + gravity=(0.0, 0.0, -9.81), + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(use_mujoco_contacts=True), + collision_cfg=NewtonCollisionPipelineCfg(), + use_cuda_graph=False, + ), + ) + + with build_simulation_context(sim_cfg=sim_cfg) as sim: + builder = NewtonManager.create_builder() + body = builder.add_body(mass=1.0) + builder.add_joint_revolute(parent=-1, child=body, axis=(0, 0, 1)) + NewtonManager.set_builder(builder) + + with pytest.raises(ValueError, match="collision_cfg cannot be set"): + sim.reset() diff --git a/source/isaaclab_newton/test/sensors/__init__.py b/source/isaaclab_newton/test/sensors/__init__.py new file mode 100644 index 000000000000..acd8a74be49f --- /dev/null +++ b/source/isaaclab_newton/test/sensors/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +"""Tests for Newton sensors.""" diff --git a/source/isaaclab_newton/test/sensors/test_contact_sensor.py b/source/isaaclab_newton/test/sensors/test_contact_sensor.py new file mode 100644 index 000000000000..066803ee884b --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_contact_sensor.py @@ -0,0 +1,816 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests to verify contact sensor functionality using Newton physics. + +This test suite verifies that: +1. Contact detection is accurate (no false positives or true negatives) +2. Contact forces are reported correctly +3. Contact filtering works properly +4. Contact time tracking is accurate + +Uses proper collision scenarios (falling, stacking, horizontal collision) instead of +teleporting objects into interpenetrating states. +""" + +# pyright: reportPrivateUsage=none + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import math + +import pytest +import torch +from physics.physics_test_utils import ( + COLLISION_PIPELINES, + STABLE_SHAPES, + ShapeType, + create_shape_cfg, + get_shape_extent, + get_shape_height, + make_sim_cfg, + perform_sim_step, + shape_type_to_str, +) + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation, RigidObject, RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors import ContactSensor, ContactSensorCfg +from isaaclab.sim import build_simulation_context +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG + +## +# Scene Configuration +## + + +@configclass +class ContactSensorTestSceneCfg(InteractiveSceneCfg): + """Configuration for contact sensor test scenes.""" + + terrain: TerrainImporterCfg | None = None + object_a: RigidObjectCfg | None = None + object_b: RigidObjectCfg | None = None + object_c: RigidObjectCfg | None = None + contact_sensor_a: ContactSensorCfg | None = None + contact_sensor_b: ContactSensorCfg | None = None + + +SIM_DT = 1.0 / 120.0 + + +# =================================================================== +# Priority 1: Contact Detection Accuracy +# =================================================================== + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("use_mujoco_contacts", COLLISION_PIPELINES) +@pytest.mark.parametrize("shape_type", STABLE_SHAPES, ids=[shape_type_to_str(s) for s in STABLE_SHAPES]) +def test_contact_lifecycle(device: str, use_mujoco_contacts: bool, shape_type: ShapeType): + """Test full contact detection lifecycle with varied heights across environments. + + 8 environments (2 groups x 4 envs) with objects at different heights. + + Verifies: + - No contact initially while objects are falling + - Contact detected after landing (timing validated against physics) + - Lower drops land before higher drops + - Contact stops when objects are lifted + """ + num_envs = 8 + num_groups = 2 + envs_per_group = num_envs // num_groups + + base_heights = [0.5, 1.5] + object_offset = get_shape_height(shape_type) / 2 + + gravity_mag = 9.81 + total_fall_steps = 100 + lift_steps = 30 + + sim_cfg = make_sim_cfg(use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, -gravity_mag)) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True, add_ground_plane=True) as sim: + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.object_a = create_shape_cfg( + shape_type, + "{ENV_REGEX_NS}/Object", + pos=(0.0, 0.0, 3.0), + disable_gravity=False, + activate_contact_sensors=True, + ) + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Object", + update_period=0.0, + history_length=1, + track_air_time=True, + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + contact_sensor: ContactSensor = scene["contact_sensor_a"] + obj: RigidObject = scene["object_a"] + + root_pose = obj.data.root_link_pose_w.torch.clone() + for group_idx, base_height in enumerate(base_heights): + for i in range(envs_per_group): + env_idx = group_idx * envs_per_group + i + root_pose[env_idx, 2] = base_height + object_offset + obj.write_root_pose_to_sim_index(root_pose=root_pose) + + expected_land_ticks = [int(math.sqrt(2 * h / gravity_mag) / SIM_DT) for h in base_heights] + + contact_detected = [False] * num_envs + contact_tick = [-1] * num_envs + + for _ in range(5): + perform_sim_step(sim, scene, SIM_DT) + + forces = torch.norm(contact_sensor.data.net_forces_w.torch, dim=-1) + for env_idx in range(num_envs): + assert forces[env_idx].max().item() < 0.01, f"Env {env_idx}: No contact should be detected while in air." + + for tick in range(5, total_fall_steps): + perform_sim_step(sim, scene, SIM_DT) + forces = torch.norm(contact_sensor.data.net_forces_w.torch, dim=-1) + for env_idx in range(num_envs): + if forces[env_idx].max().item() > 0.1 and not contact_detected[env_idx]: + contact_detected[env_idx] = True + contact_tick[env_idx] = tick + + for env_idx in range(num_envs): + group_idx = env_idx // envs_per_group + assert contact_detected[env_idx], ( + f"Env {env_idx} (group {group_idx}, h={base_heights[group_idx]}m): Contact should be detected" + ) + + for env_idx in range(num_envs): + group_idx = env_idx // envs_per_group + expected_tick = expected_land_ticks[group_idx] + tolerance_ticks = int(0.3 * expected_tick) + 10 + assert abs(contact_tick[env_idx] - expected_tick) < tolerance_ticks, ( + f"Env {env_idx}: Contact at tick {contact_tick[env_idx]}, expected ~{expected_tick} ± {tolerance_ticks}" + ) + + group_land_times = [] + for group_idx in range(num_groups): + group_ticks = [contact_tick[group_idx * envs_per_group + i] for i in range(envs_per_group)] + group_land_times.append(sum(group_ticks) / len(group_ticks)) + + for i in range(num_groups - 1): + assert group_land_times[i] < group_land_times[i + 1], ( + f"Group {i} should land before Group {i + 1}. " + f"Avg ticks: {group_land_times[i]:.1f} vs {group_land_times[i + 1]:.1f}" + ) + + velocity = torch.zeros(num_envs, 6, device=device) + velocity[:, 2] = 5.0 + obj.write_root_velocity_to_sim_index(root_velocity=velocity) + + no_contact_detected = [False] * num_envs + for step in range(lift_steps): + perform_sim_step(sim, scene, SIM_DT) + if step > 10: + forces = torch.norm(contact_sensor.data.net_forces_w.torch, dim=-1) + for env_idx in range(num_envs): + if forces[env_idx].max().item() < 0.01: + no_contact_detected[env_idx] = True + + for env_idx in range(num_envs): + assert no_contact_detected[env_idx], f"Env {env_idx}: Contact should stop after lift." + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("use_mujoco_contacts", COLLISION_PIPELINES) +@pytest.mark.parametrize("shape_type", STABLE_SHAPES, ids=[shape_type_to_str(s) for s in STABLE_SHAPES]) +def test_horizontal_collision_detects_contact(device: str, use_mujoco_contacts: bool, shape_type: ShapeType): + """Test horizontal collision detection with varied velocities and separations. + + 8 environments (2 groups x 4 envs) with different collision speeds. + + Verifies: + - Contact detected for all collision configurations + - Both objects in each pair detect contact + """ + collision_steps = 90 + num_envs = 8 + num_groups = 2 + envs_per_group = num_envs // num_groups + extent = get_shape_extent(shape_type) + + group_configs = [ + (2.0, 0.6 + 2 * extent), + (4.0, 0.8 + 2 * extent), + ] + + sim_cfg = make_sim_cfg(use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, 0.0)) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + + max_separation = max(cfg[1] for cfg in group_configs) + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.object_a = create_shape_cfg( + shape_type, + "{ENV_REGEX_NS}/ObjectA", + pos=(-max_separation / 2, 0.0, 0.5), + disable_gravity=True, + activate_contact_sensors=True, + ) + scene_cfg.object_b = create_shape_cfg( + shape_type, + "{ENV_REGEX_NS}/ObjectB", + pos=(max_separation / 2, 0.0, 0.5), + disable_gravity=True, + activate_contact_sensors=True, + ) + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/ObjectA", + update_period=0.0, + history_length=3, + ) + scene_cfg.contact_sensor_b = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/ObjectB", + update_period=0.0, + history_length=3, + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + object_a: RigidObject = scene["object_a"] + object_b: RigidObject = scene["object_b"] + sensor_a: ContactSensor = scene["contact_sensor_a"] + sensor_b: ContactSensor = scene["contact_sensor_b"] + + pose_a = object_a.data.root_link_pose_w.torch.clone() + pose_b = object_b.data.root_link_pose_w.torch.clone() + for group_idx, (_, separation) in enumerate(group_configs): + for i in range(envs_per_group): + env_idx = group_idx * envs_per_group + i + pose_a[env_idx, 0] = -separation / 2 + pose_b[env_idx, 0] = separation / 2 + object_a.write_root_pose_to_sim_index(root_pose=pose_a) + object_b.write_root_pose_to_sim_index(root_pose=pose_b) + + velocity = torch.zeros(num_envs, 6, device=device) + for group_idx, (vel, _) in enumerate(group_configs): + for i in range(envs_per_group): + velocity[group_idx * envs_per_group + i, 0] = vel + object_a.write_root_velocity_to_sim_index(root_velocity=velocity) + + contact_detected_a = [False] * num_envs + contact_detected_b = [False] * num_envs + + for tick in range(collision_steps): + perform_sim_step(sim, scene, SIM_DT) + forces_a = torch.norm(sensor_a.data.net_forces_w.torch, dim=-1) + forces_b = torch.norm(sensor_b.data.net_forces_w.torch, dim=-1) + for env_idx in range(num_envs): + if forces_a[env_idx].max().item() > 0.1: + contact_detected_a[env_idx] = True + if forces_b[env_idx].max().item() > 0.1: + contact_detected_b[env_idx] = True + + for env_idx in range(num_envs): + group_idx = env_idx // envs_per_group + vel, sep = group_configs[group_idx] + assert contact_detected_a[env_idx], f"Env {env_idx} (v={vel}m/s): Object A should detect contact" + assert contact_detected_b[env_idx], f"Env {env_idx} (v={vel}m/s): Object B should detect contact" + + +# =================================================================== +# Priority 2: Net Forces +# =================================================================== + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("use_mujoco_contacts", COLLISION_PIPELINES) +def test_resting_object_contact_force(device: str, use_mujoco_contacts: bool): + """Test that resting object contact force equals weight and points upward. + + Two objects (light=2kg and heavy=4kg) rest on ground. + + Verifies: + - Force magnitude ~ mass x gravity + - Force direction is upward (positive Z) + - Heavier object has proportionally larger force + """ + settle_steps = 90 + num_envs = 4 + mass_a, mass_b = 2.0, 4.0 + gravity_magnitude = 9.81 + expected_force_a = mass_a * gravity_magnitude + expected_force_b = mass_b * gravity_magnitude + + sim_cfg = make_sim_cfg( + use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, -gravity_magnitude) + ) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=5.0, lazy_sensor_update=False) + rigid_props = sim_utils.RigidBodyPropertiesCfg(disable_gravity=False, linear_damping=0.5, angular_damping=0.5) + + scene_cfg.object_a = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/BoxA", + spawn=sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + rigid_props=rigid_props, + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_a), + activate_contact_sensors=True, + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.5, 0.0, 0.2)), + ) + scene_cfg.object_b = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/BoxB", + spawn=sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + rigid_props=rigid_props, + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_b), + activate_contact_sensors=True, + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.0, 0.2)), + ) + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/BoxA", update_period=0.0, history_length=1 + ) + scene_cfg.contact_sensor_b = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/BoxB", update_period=0.0, history_length=1 + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + sensor_a: ContactSensor = scene["contact_sensor_a"] + sensor_b: ContactSensor = scene["contact_sensor_b"] + + # Average contact force over the last `avg_window` ticks of the settle period to reject + # per-step solver oscillation around the resting force. + avg_window = 20 + force_a_samples: list[torch.Tensor] = [] + force_b_samples: list[torch.Tensor] = [] + for step in range(settle_steps): + perform_sim_step(sim, scene, SIM_DT) + if step >= settle_steps - avg_window: + force_a_samples.append(sensor_a.data.net_forces_w.torch.clone()) + force_b_samples.append(sensor_b.data.net_forces_w.torch.clone()) + + forces_a = torch.stack(force_a_samples).mean(dim=0) + forces_b = torch.stack(force_b_samples).mean(dim=0) + force_mags_a = torch.norm(forces_a, dim=-1) + force_mags_b = torch.norm(forces_b, dim=-1) + + errs: list[str] = [] + for env_idx in range(num_envs): + fa = force_mags_a[env_idx].max().item() + fb = force_mags_b[env_idx].max().item() + + if abs(fa - expected_force_a) >= 0.02 * expected_force_a: + errs.append( + f"Env {env_idx}: BoxA ({mass_a}kg) force should be ~{expected_force_a:.2f} N. Got {fa:.2f} N" + ) + if abs(fb - expected_force_b) >= 0.02 * expected_force_b: + errs.append( + f"Env {env_idx}: BoxB ({mass_b}kg) force should be ~{expected_force_b:.2f} N. Got {fb:.2f} N" + ) + if fb <= fa: + errs.append(f"Env {env_idx}: Heavier BoxB should have larger force. A: {fa:.2f}, B: {fb:.2f}") + if forces_a[env_idx, 0, 2].item() <= 0.1: + errs.append(f"Env {env_idx}: BoxA Z force should be positive") + if forces_b[env_idx, 0, 2].item() <= 0.1: + errs.append(f"Env {env_idx}: BoxB Z force should be positive") + assert not errs, "\n".join(errs) + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("use_mujoco_contacts", COLLISION_PIPELINES) +def test_higher_drop_produces_larger_impact_force(device: str, use_mujoco_contacts: bool): + """Test that dropping from higher produces larger peak impact force. + + 8 environments with heights from 0.3m to 3.0m. + + Verifies: + - Peak impact force generally increases with height + - Overall trend: highest/lowest force ratio > 1.5 + """ + num_envs = 8 + min_height, max_height = 0.3, 3.0 + drop_heights = [min_height + (max_height - min_height) * i / (num_envs - 1) for i in range(num_envs)] + gravity_mag = 9.81 + object_radius = 0.25 + + sim_cfg = make_sim_cfg(use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, -gravity_mag)) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.object_a = create_shape_cfg( + ShapeType.SPHERE, + "{ENV_REGEX_NS}/Sphere", + pos=(0.0, 0.0, max_height + object_radius), + disable_gravity=False, + activate_contact_sensors=True, + ) + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Sphere", + update_period=0.0, + history_length=1, + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + obj: RigidObject = scene["object_a"] + contact_sensor: ContactSensor = scene["contact_sensor_a"] + + root_pose = obj.data.root_link_pose_w.torch.clone() + for env_idx in range(num_envs): + root_pose[env_idx, 2] = drop_heights[env_idx] + object_radius + obj.write_root_pose_to_sim_index(root_pose=root_pose) + + total_steps = int(((2 * max_height / gravity_mag) ** 0.5 + 0.5) / SIM_DT) + peak_forces = [0.0] * num_envs + contact_detected = [False] * num_envs + + for _ in range(total_steps): + perform_sim_step(sim, scene, SIM_DT) + force_magnitudes = torch.norm(contact_sensor.data.net_forces_w.torch, dim=-1) + for env_idx in range(num_envs): + f = force_magnitudes[env_idx].max().item() + if f > 0.1: + contact_detected[env_idx] = True + peak_forces[env_idx] = max(peak_forces[env_idx], f) + + for env_idx in range(num_envs): + assert contact_detected[env_idx], f"Env {env_idx} (h={drop_heights[env_idx]:.2f}m): No contact" + + violations = [] + for i in range(num_envs - 1): + if peak_forces[i + 1] < peak_forces[i] * 0.95: + violations.append( + f"Env {i} (h={drop_heights[i]:.2f}m, F={peak_forces[i]:.2f}N) -> " + f"Env {i + 1} (h={drop_heights[i + 1]:.2f}m, F={peak_forces[i + 1]:.2f}N)" + ) + assert len(violations) <= 2, "Peak force should increase with height. Violations:\n" + "\n".join(violations) + + force_ratio = peak_forces[-1] / peak_forces[0] if peak_forces[0] > 0 else 0 + assert force_ratio > 1.5, f"Force ratio (highest/lowest) should be > 1.5. Got {force_ratio:.2f}" + + +# =================================================================== +# Priority 3: Filtering +# =================================================================== + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize( + "use_mujoco_contacts", + [ + pytest.param( + False, + id="newton_contacts", + marks=pytest.mark.xfail( + reason="Newton force_matrix_w is non-deterministic across hardware (reports 0 or inflated values)", + strict=False, + ), + ), + pytest.param(True, id="mujoco_contacts"), + ], +) +def test_filter_enables_force_matrix(device: str, use_mujoco_contacts: bool): + """Test that filter_prim_paths_expr filters contacts and enables force_matrix_w. + + Object A rests on ground, Object B stacked on A. + Sensor on A is filtered for B only (not ground). + + Verifies: + - force_matrix_w reports only filtered contact (A-B) + - net_forces_w reports total contact (ground + B) + - force_matrix < net_forces (ground contact excluded from matrix) + """ + settle_steps = 240 + num_envs = 4 + mass_b = 2.0 + gravity = 9.81 + expected_force_from_b = mass_b * gravity + + sim_cfg = make_sim_cfg(use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, -gravity)) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=5.0, lazy_sensor_update=False) + + rigid_props_a = sim_utils.RigidBodyPropertiesCfg(disable_gravity=False, linear_damping=0.5, angular_damping=0.5) + scene_cfg.object_a = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/ObjectA", + spawn=sim_utils.CuboidCfg( + size=(0.5, 0.5, 0.3), + rigid_props=rigid_props_a, + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + mass_props=sim_utils.MassPropertiesCfg(mass=5.0), + activate_contact_sensors=True, + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 0.2)), + ) + + rigid_props_b = sim_utils.RigidBodyPropertiesCfg(disable_gravity=False, linear_damping=2.0, angular_damping=2.0) + scene_cfg.object_b = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/ObjectB", + spawn=sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + rigid_props=rigid_props_b, + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_b), + activate_contact_sensors=True, + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 0.55)), + ) + + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/ObjectA", + update_period=0.0, + history_length=1, + filter_prim_paths_expr=["{ENV_REGEX_NS}/ObjectB"], + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + contact_sensor: ContactSensor = scene["contact_sensor_a"] + + # Average over the last `avg_window` ticks to reject per-step solver oscillation. + avg_window = 20 + matrix_samples: list[torch.Tensor] = [] + net_samples: list[torch.Tensor] = [] + for step in range(settle_steps): + perform_sim_step(sim, scene, SIM_DT) + if step >= settle_steps - avg_window: + matrix_raw = contact_sensor.data.force_matrix_w + net_raw = contact_sensor.data.net_forces_w + if not matrix_samples: + assert matrix_raw is not None, "force_matrix_w should not be None when filter is set" + matrix_samples.append(matrix_raw.torch.clone()) + net_samples.append(net_raw.torch.clone()) + + force_matrix = torch.stack(matrix_samples).mean(dim=0) + net_forces = torch.stack(net_samples).mean(dim=0) + + expected_b_on_a = torch.tensor([0.0, 0.0, -expected_force_from_b], device=device) + tolerance = 0.05 * expected_force_from_b + errs: list[str] = [] + for env_idx in range(num_envs): + b_on_a = force_matrix[env_idx, 0, 0] + net_contact = net_forces[env_idx, 0] + + error = torch.norm(b_on_a - expected_b_on_a).item() + if error >= tolerance: + errs.append( + f"Env {env_idx}: B-on-A should be ~{expected_b_on_a.tolist()} N. " + f"Got {b_on_a.tolist()}, error {error:.2f} N" + ) + if torch.norm(b_on_a).item() >= torch.norm(net_contact).item(): + errs.append( + f"Env {env_idx}: |B-on-A| should be < |net contact|. " + f"B-on-A: {b_on_a.tolist()}, Net: {net_contact.tolist()}" + ) + assert not errs, "\n".join(errs) + + +# =================================================================== +# Priority 4: Articulated System +# =================================================================== + +ALLEGRO_FINGERTIP_OFFSETS = { + "index": (-0.052, -0.252, 0.052), + "middle": (-0.001, -0.252, 0.052), + "ring": (0.054, -0.252, 0.052), + "thumb": (-0.168, -0.039, 0.080), +} + +ALLEGRO_FINGER_LINKS = { + "index": "index_link_3", + "middle": "middle_link_3", + "ring": "ring_link_3", + "thumb": "thumb_link_3", +} + + +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize( + "use_mujoco_contacts", + [ + pytest.param( + False, + id="newton_contacts", + marks=pytest.mark.xfail( + reason="Newton contact pipeline reports inaccurate per-finger forces in articulated systems" + ), + ), + pytest.param(True, id="mujoco_contacts"), + ], +) +@pytest.mark.parametrize( + "drop_shape", + [ + pytest.param(ShapeType.SPHERE, id="sphere"), + pytest.param(ShapeType.MESH_BOX, id="mesh_box"), + ], +) +def test_finger_contact_sensor_isolation(device: str, use_mujoco_contacts: bool, drop_shape: ShapeType): + """Test contact sensor on Allegro hand fingers detects localized contacts. + + Uses 4 environments, each dropping a ball on a different finger + (env 0 -> index, env 1 -> middle, env 2 -> ring, env 3 -> thumb). + Verifies that in each env the target finger has the highest peak force. + """ + drop_steps = 120 + num_envs = 4 + hand_pos = (0.0, 0.0, 0.5) + finger_names = ["index", "middle", "ring", "thumb"] + + sim_cfg = make_sim_cfg(use_mujoco_contacts=use_mujoco_contacts, device=device, gravity=(0.0, 0.0, 0.0)) + + with build_simulation_context(sim_cfg=sim_cfg, add_ground_plane=True, add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=1.0, lazy_sensor_update=False) + + scene_cfg.hand = ALLEGRO_HAND_CFG.copy() + scene_cfg.hand.prim_path = "{ENV_REGEX_NS}/Hand" + scene_cfg.hand.init_state.pos = hand_pos + + for finger in finger_names: + setattr( + scene_cfg, + f"contact_sensor_{finger}", + ContactSensorCfg( + prim_path=f"{{ENV_REGEX_NS}}/Hand/{ALLEGRO_FINGER_LINKS[finger]}", + update_period=0.0, + history_length=1, + ), + ) + + drop_rigid_props = sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, linear_damping=0.0, angular_damping=0.0 + ) + drop_collision_props = sim_utils.CollisionPropertiesCfg(collision_enabled=True) + drop_mass_props = sim_utils.MassPropertiesCfg(mass=1.0) + drop_visual = sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)) + + spawn_map = { + ShapeType.SPHERE: lambda: sim_utils.SphereCfg( + radius=0.035, + rigid_props=drop_rigid_props, + collision_props=drop_collision_props, + mass_props=drop_mass_props, + visual_material=drop_visual, + activate_contact_sensors=True, + ), + ShapeType.MESH_BOX: lambda: sim_utils.MeshCuboidCfg( + size=(0.05, 0.05, 0.05), + rigid_props=drop_rigid_props, + collision_props=drop_collision_props, + mass_props=drop_mass_props, + visual_material=drop_visual, + activate_contact_sensors=True, + ), + } + + default_offset = ALLEGRO_FINGERTIP_OFFSETS["index"] + default_drop_pos = ( + hand_pos[0] + default_offset[0], + hand_pos[1] + default_offset[1], + hand_pos[2] + default_offset[2] + 0.10, + ) + + scene_cfg.drop_object = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/DropObject", + spawn=spawn_map[drop_shape](), + init_state=RigidObjectCfg.InitialStateCfg(pos=default_drop_pos), + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + hand: Articulation = scene["hand"] + drop_object: RigidObject = scene["drop_object"] + finger_sensors = {f: scene[f"contact_sensor_{f}"] for f in finger_names} + + # Newton's articulation.reset() doesn't write default_joint_pos to sim (unlike + # ManagerBasedEnv's reset_scene_to_default event). Without this, joints start at 0.0 + # which is below thumb_joint_0's lower limit (0.279 rad), causing violent oscillation. + default_jpos = hand.data.default_joint_pos.torch.clone() + default_jvel = hand.data.default_joint_vel.torch.clone() + hand.write_joint_position_to_sim_index(position=default_jpos) + hand.write_joint_velocity_to_sim_index(velocity=default_jvel) + hand.set_joint_position_target_index(target=default_jpos) + + hand_world_pos = hand.data.root_link_pose_w.torch[:, :3] + drop_pose = drop_object.data.root_link_pose_w.torch.clone() + for env_idx, finger in enumerate(finger_names): + offset = ALLEGRO_FINGERTIP_OFFSETS[finger] + drop_pose[env_idx, 0] = hand_world_pos[env_idx, 0] + offset[0] + drop_pose[env_idx, 1] = hand_world_pos[env_idx, 1] + offset[1] + drop_pose[env_idx, 2] = hand_world_pos[env_idx, 2] + offset[2] + 0.10 + drop_object.write_root_pose_to_sim_index(root_pose=drop_pose) + + for _ in range(30): + perform_sim_step(sim, scene, SIM_DT) + + hand_world_pos = hand.data.root_link_pose_w.torch[:, :3] + drop_object.reset() + drop_pose = drop_object.data.root_link_pose_w.torch.clone() + for env_idx, finger in enumerate(finger_names): + offset = ALLEGRO_FINGERTIP_OFFSETS[finger] + drop_pose[env_idx, 0] = hand_world_pos[env_idx, 0] + offset[0] + drop_pose[env_idx, 1] = hand_world_pos[env_idx, 1] + offset[1] + drop_pose[env_idx, 2] = hand_world_pos[env_idx, 2] + offset[2] + 0.10 + drop_object.write_root_pose_to_sim_index(root_pose=drop_pose) + + initial_velocity = torch.zeros(num_envs, 6, device=device) + initial_velocity[:, 2] = -1.5 + drop_object.write_root_velocity_to_sim_index(root_velocity=initial_velocity) + + peak_forces = {f: [0.0] * num_envs for f in finger_names} + + for _ in range(drop_steps): + perform_sim_step(sim, scene, SIM_DT) + for finger_name, sensor in finger_sensors.items(): + if sensor.data.net_forces_w is not None: + forces = sensor.data.net_forces_w.torch + for env_idx in range(num_envs): + f = torch.norm(forces[env_idx]).item() + peak_forces[finger_name][env_idx] = max(peak_forces[finger_name][env_idx], f) + + for env_idx, target_finger in enumerate(finger_names): + target_peak = peak_forces[target_finger][env_idx] + assert target_peak > 0.5, ( + f"Env {env_idx}: Target finger '{target_finger}' peak force: {target_peak:.4f} N (expected > 0.5)" + ) + + for other_finger in finger_names: + if other_finger != target_finger: + assert target_peak >= peak_forces[other_finger][env_idx], ( + f"Env {env_idx}: '{target_finger}' (peak={target_peak:.4f}N) should have " + f"highest force, but '{other_finger}' had " + f"{peak_forces[other_finger][env_idx]:.4f}N" + ) + + +# =================================================================== +# Utility +# =================================================================== + + +def test_sensor_print(): + """Test that contact sensor print/repr works correctly.""" + sim_cfg = make_sim_cfg(use_mujoco_contacts=False, device="cuda:0", gravity=(0.0, 0.0, -9.81)) + + with build_simulation_context(sim_cfg=sim_cfg, auto_add_lighting=True, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=4, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.object_a = create_shape_cfg( + ShapeType.BOX, + "{ENV_REGEX_NS}/Object", + pos=(0.0, 0.0, 2.0), + disable_gravity=False, + activate_contact_sensors=True, + ) + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Object", + update_period=0.0, + history_length=3, + track_air_time=True, + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + sensor_str = str(scene["contact_sensor_a"]) + assert len(sensor_str) > 0 + print(sensor_str) diff --git a/source/isaaclab_newton/test/sensors/test_frame_transformer.py b/source/isaaclab_newton/test/sensors/test_frame_transformer.py new file mode 100644 index 000000000000..513ba9dda918 --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_frame_transformer.py @@ -0,0 +1,812 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests to verify frame transformer sensor functionality using Newton physics.""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import math + +import pytest +import scipy.spatial.transform as tf +import torch +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors import FrameTransformerCfg, OffsetCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort:skip + + +def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): + """Converts Euler XYZ to Quaternion (x, y, z, w).""" + quat = tf.Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() + return tuple(quat.tolist()) # scipy already returns xyzw + + +def euler_rpy_apply(rpy, xyz, degrees=False): + """Applies rotation from Euler XYZ on position vector.""" + rot = tf.Rotation.from_euler("xyz", rpy, degrees=degrees) + return tuple(rot.apply(xyz).tolist()) + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Example scene configuration.""" + + # terrain - flat terrain plane + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + + # articulation - robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # sensors - frame transformer (filled inside unit test) + frame_transformer: FrameTransformerCfg = None + + # block + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(2.0, 0.0, 5)), + ) + + +@pytest.fixture +def sim(): + """Create a simulation context with Newton physics.""" + sim_cfg = SimulationCfg( + dt=1 / 120, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=70, + nconmax=70, + ls_iterations=40, + cone="elliptic", + impratio=100, + ls_parallel=False, + integrator="implicitfast", + ), + num_substeps=2, + debug_mode=True, + ), + ) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + # Set main camera + sim.set_camera_view(eye=(5.0, 5.0, 5.0), target=(0.0, 0.0, 0.0)) + yield sim + # Cleanup is handled by build_simulation_context + + +def test_frame_transformer_feet_wrt_base(sim): + """Test feet transformations w.r.t. base source frame. + + In this test, the source frame is the robot base. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="LF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="LH_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RH_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + # Acquire the index of ground truth bodies + feet_indices, feet_names = scene.articulations["robot"].find_bodies(["LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT"]) + + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + + # Reorder the feet indices to match the order of the target frames with _USER suffix removed + target_frame_names = [name.split("_USER")[0] for name in target_frame_names] + + # Find the indices of the feet in the order of the target frames + reordering_indices = [feet_names.index(name) for name in target_frame_names] + feet_indices = [feet_indices[i] for i in reordering_indices] + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() + # Define simulation stepping + sim_dt = sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch + # -- set root state + # -- robot + scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch[:, feet_indices] + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch + + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) + + # check if relative transforms are same + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch + for index in range(len(feet_indices)): + # ground-truth + foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] + ) + # check if they are same + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b) + + +def test_frame_transformer_feet_wrt_thigh(sim): + """Test feet transformation w.r.t. thigh source frame.""" + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_THIGH", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="LF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + # Acquire the index of ground truth bodies + source_frame_index = scene.articulations["robot"].find_bodies("LF_THIGH")[0][0] + feet_indices, feet_names = scene.articulations["robot"].find_bodies(["LF_FOOT", "RF_FOOT"]) + # Check names are parsed the same order + user_feet_names = [f"{name}_USER" for name in feet_names] + assert scene.sensors["frame_transformer"].data.target_frame_names == user_feet_names + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() + # Define simulation stepping + sim_dt = sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch + # -- set root state + # -- robot + scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + source_pose_w_gt = scene.articulations["robot"].data.body_state_w.torch[:, source_frame_index, :7] + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch[:, feet_indices] + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch + # check if they are same + torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf) + torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) + + # check if relative transforms are same + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch + for index in range(len(feet_indices)): + # ground-truth + foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( + source_pose_w_gt[:, :3], source_pose_w_gt[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] + ) + # check if they are same + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b) + + +def test_frame_transformer_robot_body_to_external_cube(sim): + """Test transformation from robot body to a cube in the scene.""" + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="CUBE_USER", + prim_path="{ENV_REGEX_NS}/cube", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() + # Define simulation stepping + sim_dt = sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch + # -- set root state + # -- robot + scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + cube_pos_w_gt = scene.rigid_objects["cube"].data.root_pos_w.torch + cube_quat_w_gt = scene.rigid_objects["cube"].data.root_quat_w.torch + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + cube_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch.squeeze() + cube_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch.squeeze() + + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(cube_pos_w_gt, cube_pos_w_tf) + torch.testing.assert_close(cube_quat_w_gt, cube_quat_w_tf) + + # check if relative transforms are same + cube_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + cube_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch + # ground-truth + cube_pos_b, cube_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], cube_pos_w_tf, cube_quat_w_tf + ) + # check if they are same + torch.testing.assert_close(cube_pos_source_tf[:, 0], cube_pos_b) + torch.testing.assert_close(cube_quat_source_tf[:, 0], cube_quat_b) + + +@pytest.mark.isaacsim_ci +def test_frame_transformer_offset_frames(sim): + """Test body transformation w.r.t. base source frame. + + In this test, the source frame is the cube frame. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/cube", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="CUBE_CENTER", + prim_path="{ENV_REGEX_NS}/cube", + ), + FrameTransformerCfg.FrameCfg( + name="CUBE_TOP", + prim_path="{ENV_REGEX_NS}/cube", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.1), + rot=(0.0, 0.0, 0.0, 1.0), + ), + ), + FrameTransformerCfg.FrameCfg( + name="CUBE_BOTTOM", + prim_path="{ENV_REGEX_NS}/cube", + offset=OffsetCfg( + pos=(0.0, 0.0, -0.1), + rot=(0.0, 0.0, 0.0, 1.0), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + # Define simulation stepping + sim_dt = sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene["cube"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + # -- set root state + # -- cube + scene["cube"].write_root_pose_to_sim(root_state[:, :7]) + scene["cube"].write_root_velocity_to_sim(root_state[:, 7:]) + # reset buffers + scene.reset() + + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + cube_pos_w_gt = scene["cube"].data.root_pos_w.torch + cube_quat_w_gt = scene["cube"].data.root_quat_w.torch + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + target_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch.squeeze() + target_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch.squeeze() + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + + cube_center_idx = target_frame_names.index("CUBE_CENTER") + cube_bottom_idx = target_frame_names.index("CUBE_BOTTOM") + cube_top_idx = target_frame_names.index("CUBE_TOP") + + # check if they are same + torch.testing.assert_close(cube_pos_w_gt, source_pos_w_tf) + torch.testing.assert_close(cube_quat_w_gt, source_quat_w_tf) + torch.testing.assert_close(cube_pos_w_gt, target_pos_w_tf[:, cube_center_idx]) + torch.testing.assert_close(cube_quat_w_gt, target_quat_w_tf[:, cube_center_idx]) + + # test offsets are applied correctly + # -- cube top + cube_pos_top = target_pos_w_tf[:, cube_top_idx] + cube_quat_top = target_quat_w_tf[:, cube_top_idx] + torch.testing.assert_close( + cube_pos_top, cube_pos_w_gt + torch.tensor([0.0, 0.0, 0.1], device=cube_pos_w_gt.device) + ) + torch.testing.assert_close(cube_quat_top, cube_quat_w_gt) + + # -- cube bottom + cube_pos_bottom = target_pos_w_tf[:, cube_bottom_idx] + cube_quat_bottom = target_quat_w_tf[:, cube_bottom_idx] + torch.testing.assert_close( + cube_pos_bottom, cube_pos_w_gt + torch.tensor([0.0, 0.0, -0.1], device=cube_pos_w_gt.device) + ) + torch.testing.assert_close(cube_quat_bottom, cube_quat_w_gt) + + +@pytest.mark.isaacsim_ci +def test_frame_transformer_all_bodies(sim): + """Test transformation of all bodies w.r.t. base source frame. + + In this test, the source frame is the robot base. + + The target_frames are all bodies in the robot, implemented using .* pattern. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + articulation_body_names = scene.articulations["robot"].data.body_names + + reordering_indices = [target_frame_names.index(name) for name in articulation_body_names] + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() + # Define simulation stepping + sim_dt = sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch + # -- set root state + # -- robot + scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + bodies_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch + bodies_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch + + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + bodies_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + bodies_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch + + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(bodies_pos_w_gt, bodies_pos_w_tf[:, reordering_indices]) + torch.testing.assert_close(bodies_quat_w_gt, bodies_quat_w_tf[:, reordering_indices]) + + bodies_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + bodies_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch + + # Go through each body and check if relative transforms are same + for index in range(len(articulation_body_names)): + body_pos_b, body_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], bodies_pos_w_tf[:, index], bodies_quat_w_tf[:, index] + ) + + torch.testing.assert_close(bodies_pos_source_tf[:, index], body_pos_b) + torch.testing.assert_close(bodies_quat_source_tf[:, index], body_quat_b) + + +@pytest.mark.isaacsim_ci +def test_sensor_print(sim): + """Test sensor print is working correctly.""" + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + # print info + print(scene.sensors["frame_transformer"]) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("source_robot", ["Robot", "Robot_1"]) +@pytest.mark.parametrize("path_prefix", ["{ENV_REGEX_NS}", "/World"]) +def test_frame_transformer_duplicate_body_names(sim, source_robot, path_prefix): + """Test tracking bodies with same leaf name at different hierarchy levels. + + This test verifies that bodies with the same leaf name but different paths + (e.g., Robot/LF_SHANK vs Robot_1/LF_SHANK, or arm/link vs leg/link) are tracked + separately using their full relative paths internally. + + The test uses 4 target frames to cover both scenarios: + + Explicit frame names (recommended when bodies share the same leaf name): + User provides unique names like "Robot_LF_SHANK" and "Robot_1_LF_SHANK" to + distinguish between bodies at different hierarchy levels. This makes it + easy to identify which transform belongs to which body. + + Implicit frame names (backward compatibility): + When no name is provided, it defaults to the leaf body name (e.g., "RF_SHANK"). + This preserves backward compatibility for users who may have existing code like + `idx = target_frame_names.index("RF_SHANK")`. However, when multiple bodies share + the same leaf name, this results in duplicate frame names. The transforms are + still distinct because internal body tracking uses full relative paths. + + Args: + source_robot: The robot to use as the source frame ("Robot" or "Robot_1"). + This tests that both source frames work correctly when there are + duplicate body names. + path_prefix: The path prefix to use ("{ENV_REGEX_NS}" for env patterns or "/World" for direct paths). + """ + + # Create a custom scene config with two robots + @configclass + class MultiRobotSceneCfg(InteractiveSceneCfg): + """Scene with two robots having bodies with same names.""" + + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + + # Frame transformer will be set after config creation (needs source_robot parameter) + frame_transformer: FrameTransformerCfg = None # type: ignore + + # Use multiple envs for env patterns, single env for direct paths + num_envs = 2 if path_prefix == "{ENV_REGEX_NS}" else 1 + env_spacing = 10.0 if path_prefix == "{ENV_REGEX_NS}" else 0.0 + + # Create scene config with appropriate prim paths + scene_cfg = MultiRobotSceneCfg(num_envs=num_envs, env_spacing=env_spacing, lazy_sensor_update=False) + scene_cfg.robot = ANYMAL_C_CFG.replace(prim_path=f"{path_prefix}/Robot") + scene_cfg.robot_1 = ANYMAL_C_CFG.replace( + prim_path=f"{path_prefix}/Robot_1", + init_state=ANYMAL_C_CFG.init_state.replace(pos=(2.0, 0.0, 0.6)), + ) + + # Frame transformer tracking same-named bodies from both robots + # Source frame is parametrized to test both Robot/base and Robot_1/base + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path=f"{path_prefix}/{source_robot}/base", + target_frames=[ + # Explicit frame names (recommended when bodies share the same leaf name) + FrameTransformerCfg.FrameCfg( + name="Robot_LF_SHANK", + prim_path=f"{path_prefix}/Robot/LF_SHANK", + ), + FrameTransformerCfg.FrameCfg( + name="Robot_1_LF_SHANK", + prim_path=f"{path_prefix}/Robot_1/LF_SHANK", + ), + # Implicit frame names (backward compatibility) + FrameTransformerCfg.FrameCfg( + prim_path=f"{path_prefix}/Robot/RF_SHANK", + ), + FrameTransformerCfg.FrameCfg( + prim_path=f"{path_prefix}/Robot_1/RF_SHANK", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + sim.reset() + + # Get target frame names + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + + # Verify explicit frame names are present + assert "Robot_LF_SHANK" in target_frame_names, f"Expected 'Robot_LF_SHANK', got {target_frame_names}" + assert "Robot_1_LF_SHANK" in target_frame_names, f"Expected 'Robot_1_LF_SHANK', got {target_frame_names}" + + # Without explicit names, both RF_SHANK frames default to same name "RF_SHANK" + # This results in duplicate frame names (expected behavior for backwards compatibility) + rf_shank_count = target_frame_names.count("RF_SHANK") + assert rf_shank_count == 2, f"Expected 2 'RF_SHANK' entries (name collision), got {rf_shank_count}" + + # Get indices for explicit named frames + robot_lf_idx = target_frame_names.index("Robot_LF_SHANK") + robot_1_lf_idx = target_frame_names.index("Robot_1_LF_SHANK") + + # Get indices for implicit named frames (both named "RF_SHANK") + rf_shank_indices = [i for i, name in enumerate(target_frame_names) if name == "RF_SHANK"] + assert len(rf_shank_indices) == 2, f"Expected 2 RF_SHANK indices, got {rf_shank_indices}" + + # Acquire ground truth body indices + robot_base_body_idx = scene.articulations["robot"].find_bodies("base")[0][0] + robot_1_base_body_idx = scene.articulations["robot_1"].find_bodies("base")[0][0] + robot_lf_shank_body_idx = scene.articulations["robot"].find_bodies("LF_SHANK")[0][0] + robot_1_lf_shank_body_idx = scene.articulations["robot_1"].find_bodies("LF_SHANK")[0][0] + robot_rf_shank_body_idx = scene.articulations["robot"].find_bodies("RF_SHANK")[0][0] + robot_1_rf_shank_body_idx = scene.articulations["robot_1"].find_bodies("RF_SHANK")[0][0] + + # Determine expected source frame based on parameter + expected_source_robot = "robot" if source_robot == "Robot" else "robot_1" + expected_source_base_body_idx = robot_base_body_idx if source_robot == "Robot" else robot_1_base_body_idx + + # Define simulation stepping + sim_dt = sim.get_physics_dt() + + # Simulate physics + for count in range(20): + # Reset periodically + if count % 10 == 0: + # Reset robot + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() + root_state[:, :3] += scene.env_origins + scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + scene.articulations["robot"].write_joint_state_to_sim( + scene.articulations["robot"].data.default_joint_pos.torch, + scene.articulations["robot"].data.default_joint_vel.torch, + ) + # Reset robot_1 + root_state_1 = scene.articulations["robot_1"].data.default_root_state.torch.clone() + root_state_1[:, :3] += scene.env_origins + scene.articulations["robot_1"].write_root_pose_to_sim(root_state_1[:, :7]) + scene.articulations["robot_1"].write_root_velocity_to_sim(root_state_1[:, 7:]) + scene.articulations["robot_1"].write_joint_state_to_sim( + scene.articulations["robot_1"].data.default_joint_pos.torch, + scene.articulations["robot_1"].data.default_joint_vel.torch, + ) + scene.reset() + + # Write data to sim + scene.write_data_to_sim() + # Perform step + sim.step() + # Read data from sim + scene.update(sim_dt) + + # Get frame transformer data + frame_transformer_data = scene.sensors["frame_transformer"].data + source_pos_w = frame_transformer_data.source_pos_w.torch + source_quat_w = frame_transformer_data.source_quat_w.torch + target_pos_w = frame_transformer_data.target_pos_w.torch + + # Get ground truth positions and orientations (after scene.update() so they're current) + robot_lf_pos_w = scene.articulations["robot"].data.body_pos_w.torch[:, robot_lf_shank_body_idx] + robot_1_lf_pos_w = scene.articulations["robot_1"].data.body_pos_w.torch[:, robot_1_lf_shank_body_idx] + robot_rf_pos_w = scene.articulations["robot"].data.body_pos_w.torch[:, robot_rf_shank_body_idx] + robot_1_rf_pos_w = scene.articulations["robot_1"].data.body_pos_w.torch[:, robot_1_rf_shank_body_idx] + + # Get expected source frame positions and orientations (after scene.update() so they're current) + expected_source_base_pos_w = scene.articulations[expected_source_robot].data.body_pos_w.torch[ + :, expected_source_base_body_idx + ] + expected_source_base_quat_w = scene.articulations[expected_source_robot].data.body_quat_w.torch[ + :, expected_source_base_body_idx + ] + + # TEST 1: Verify source frame is correctly resolved + # The source_pos_w should match the expected source robot's base world position + torch.testing.assert_close(source_pos_w, expected_source_base_pos_w, rtol=1e-5, atol=1e-5) + torch.testing.assert_close(source_quat_w, expected_source_base_quat_w, rtol=1e-5, atol=1e-5) + + # TEST 2: Explicit named frames (LF_SHANK) should have DIFFERENT world positions + lf_pos_difference = torch.linalg.norm(target_pos_w[:, robot_lf_idx] - target_pos_w[:, robot_1_lf_idx], dim=-1) + assert torch.all(lf_pos_difference > 1.0), ( + f"Robot_LF_SHANK and Robot_1_LF_SHANK should have different positions (got diff={lf_pos_difference}). " + "This indicates body name collision bug." + ) + + # Verify explicit named frames match correct robot bodies + torch.testing.assert_close(target_pos_w[:, robot_lf_idx], robot_lf_pos_w) + torch.testing.assert_close(target_pos_w[:, robot_1_lf_idx], robot_1_lf_pos_w) + + # TEST 3: Implicit named frames (RF_SHANK) should also have DIFFERENT world positions + # Even though they have the same frame name, internal body tracking uses full paths + rf_pos_difference = torch.linalg.norm( + target_pos_w[:, rf_shank_indices[0]] - target_pos_w[:, rf_shank_indices[1]], dim=-1 + ) + assert torch.all(rf_pos_difference > 1.0), ( + f"The two RF_SHANK frames should have different positions (got diff={rf_pos_difference}). " + "This indicates body name collision bug in internal body tracking." + ) + + # Verify implicit named frames match correct robot bodies + # Note: Order depends on internal processing, so we check both match one of the robots + rf_positions = [target_pos_w[:, rf_shank_indices[0]], target_pos_w[:, rf_shank_indices[1]]] + + # Each tracked position should match one of the ground truth positions + for rf_pos in rf_positions: + matches_robot = torch.allclose(rf_pos, robot_rf_pos_w, atol=1e-5) + matches_robot_1 = torch.allclose(rf_pos, robot_1_rf_pos_w, atol=1e-5) + assert matches_robot or matches_robot_1, ( + f"RF_SHANK position {rf_pos} doesn't match either robot's RF_SHANK position" + ) diff --git a/source/isaaclab_newton/test/sensors/test_imu.py b/source/isaaclab_newton/test/sensors/test_imu.py new file mode 100644 index 000000000000..3edb4c908398 --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_imu.py @@ -0,0 +1,233 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests to verify IMU sensor functionality using Newton physics.""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import torch +import warp as wp +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors.imu import Imu, ImuCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + + +@configclass +class ImuTestSceneCfg(InteractiveSceneCfg): + """Scene with a rigid cube and an IMU sensor.""" + + env_spacing = 2.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + imu = ImuCfg( + prim_path="{ENV_REGEX_NS}/Cube", + ) + + +@pytest.fixture +def sim(): + """Create a simulation context with Newton physics.""" + sim_cfg = SimulationCfg( + dt=1.0 / 200.0, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + num_substeps=1, + ), + ) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + sim.set_camera_view(eye=(5.0, 5.0, 5.0), target=(0.0, 0.0, 0.0)) + yield sim + + +def test_sensor_initialization(sim): + """Test that the Newton IMU sensor initializes correctly.""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + imu: Imu = scene["imu"] + assert imu.num_instances == 2 + assert imu.data.ang_vel_b is not None + assert imu.data.lin_acc_b is not None + + +def test_data_shapes(sim): + """Test that IMU output tensors have correct shapes.""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + sim.step() + scene.update(sim.get_physics_dt()) + + imu: Imu = scene["imu"] + ang_vel = imu.data.ang_vel_b.torch + lin_acc = imu.data.lin_acc_b.torch + + assert ang_vel.shape == (2, 3) + assert lin_acc.shape == (2, 3) + + +def test_gravity_at_rest(sim): + """Test that a resting IMU measures gravity (~9.81 m/s^2 upward).""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + # Step enough for the cube to settle on the ground + for _ in range(500): + sim.step() + scene.update(sim.get_physics_dt()) + + imu: Imu = scene["imu"] + lin_acc = imu.data.lin_acc_b.torch + + # At rest, accelerometer should read ~9.81 in the up direction (Z body frame) + torch.testing.assert_close( + lin_acc[:, 2], + torch.full((lin_acc.shape[0],), 9.81, dtype=lin_acc.dtype, device=lin_acc.device), + atol=0.5, + rtol=0.0, + ) + # X and Y components should be near zero + torch.testing.assert_close( + lin_acc[:, :2], + torch.zeros(lin_acc.shape[0], 2, dtype=lin_acc.dtype, device=lin_acc.device), + atol=0.5, + rtol=0.0, + ) + + +def test_angular_velocity_at_rest(sim): + """Test that a resting IMU reports near-zero angular velocity.""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + for _ in range(500): + sim.step() + scene.update(sim.get_physics_dt()) + + imu: Imu = scene["imu"] + ang_vel = imu.data.ang_vel_b.torch + + torch.testing.assert_close( + ang_vel, + torch.zeros_like(ang_vel), + atol=0.1, + rtol=0.0, + ) + + +def test_reset(sim): + """Test that reset zeroes out IMU data.""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + # Step enough for the cube to settle on the ground so the accelerometer reads gravity. + # The cube falls from z=1.0 (bottom at z=0.9) and reaches the ground in ~86 steps + # at 200 Hz; 200 steps gives time to settle after impact. + for _ in range(200): + sim.step() + scene.update(sim.get_physics_dt()) + + imu: Imu = scene["imu"] + + lin_acc = imu.data.lin_acc_b.torch + assert torch.any(lin_acc != 0), "Expected non-zero data before reset" + + imu.reset() + + # Access internal buffers directly: accessing imu.data triggers lazy re-evaluation + # which re-fills from the Newton sensor, so we check the raw buffers instead. + ang_vel_after = wp.to_torch(imu._data._ang_vel_b) + lin_acc_after = wp.to_torch(imu._data._lin_acc_b) + + torch.testing.assert_close(ang_vel_after, torch.zeros_like(ang_vel_after)) + torch.testing.assert_close(lin_acc_after, torch.zeros_like(lin_acc_after)) + + +@configclass +class FreefallSceneCfg(InteractiveSceneCfg): + """Scene with a rigid cube and IMU but no ground plane (freefall).""" + + env_spacing = 2.0 + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5.0)), + ) + + imu = ImuCfg( + prim_path="{ENV_REGEX_NS}/Cube", + ) + + +def test_freefall_acceleration(sim): + """Test that a freefalling IMU measures near-zero acceleration.""" + scene_cfg = FreefallSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + # Step a few times while the cube is in freefall (no ground contact) + for _ in range(10): + sim.step() + scene.update(sim.get_physics_dt()) + + imu: Imu = scene["imu"] + lin_acc = imu.data.lin_acc_b.torch + + # In freefall, accelerometer should read near zero (gravity and inertial acceleration cancel) + acc_magnitude = torch.norm(lin_acc, dim=-1) + torch.testing.assert_close( + acc_magnitude, + torch.zeros_like(acc_magnitude), + atol=0.5, + rtol=0.0, + ) + + +def test_sensor_print(sim): + """Test that the sensor string representation works.""" + scene_cfg = ImuTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + imu: Imu = scene["imu"] + sensor_str = str(imu) + assert "newton" in sensor_str + assert "IMU sensor" in sensor_str diff --git a/source/isaaclab_newton/test/sensors/test_joint_wrench_sensor.py b/source/isaaclab_newton/test/sensors/test_joint_wrench_sensor.py new file mode 100644 index 000000000000..fedf7c359081 --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_joint_wrench_sensor.py @@ -0,0 +1,488 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the Newton JointWrenchSensor.""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import torch +import warp as wp +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import Articulation, ArticulationCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors.joint_wrench import JointWrenchSensor, JointWrenchSensorCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass +from isaaclab.utils import math as math_utils +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from isaaclab_assets.robots.ant import ANT_CFG + + +def _make_single_joint_articulation_cfg() -> ArticulationCfg: + """Single-joint revolute test articulation (root ``CenterPivot`` + arm ``Arm``).""" + return ArticulationCfg( + prim_path="{ENV_REGEX_NS}/Robot", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd", + joint_drive_props=sim_utils.JointDrivePropertiesCfg(max_effort=80.0, max_velocity=5.0), + ), + actuators={ + "joint": ImplicitActuatorCfg( + joint_names_expr=[".*"], + stiffness=2000.0, + damping=100.0, + ), + }, + init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +def _make_cartpole_articulation_cfg(pole_damping: float = 0.0) -> ArticulationCfg: + """Two-joint cartpole articulation (cart + pole). + + Args: + pole_damping: Damping for the cart-to-pole revolute joint. + """ + return ArticulationCfg( + prim_path="{ENV_REGEX_NS}/Robot", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd", + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 2.0), + joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}, + ), + actuators={ + "cart_actuator": ImplicitActuatorCfg( + joint_names_expr=["slider_to_cart"], effort_limit_sim=400.0, stiffness=0.0, damping=10.0 + ), + "pole_actuator": ImplicitActuatorCfg( + joint_names_expr=["cart_to_pole"], effort_limit_sim=400.0, stiffness=0.0, damping=pole_damping + ), + }, + ) + + +@configclass +class _SingleJointSceneCfg(InteractiveSceneCfg): + """Scene with a single-joint articulation and the joint-wrench sensor.""" + + env_spacing = 2.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_single_joint_articulation_cfg() + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _CartpoleSceneCfg(InteractiveSceneCfg): + """Scene with a cartpole (2-joint) articulation and the joint-wrench sensor.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_cartpole_articulation_cfg() + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _CartpoleDampedSceneCfg(InteractiveSceneCfg): + """Cartpole with pole damping for steady-state physics validation tests.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_cartpole_articulation_cfg(pole_damping=10.0) + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _NestedRootAntSceneCfg(InteractiveSceneCfg): + """Ant USD asset whose articulation root is nested under the configured asset prim.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = ANT_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@pytest.fixture +def sim(): + """Simulation context using the Newton backend.""" + sim_cfg = SimulationCfg( + dt=1.0 / 200.0, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + num_substeps=1, + ), + ) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim_ctx: + sim_ctx._app_control_on_stop_handle = None + yield sim_ctx + + +# --------------------------------------------------------------------------- +# Sensor data — pre-init contract +# --------------------------------------------------------------------------- + + +def test_data_before_init_is_none(): + """``force``/``torque`` return ``None`` before :meth:`create_buffers` runs.""" + from isaaclab_newton.sensors.joint_wrench import JointWrenchSensorData + + data = JointWrenchSensorData() + assert data.force is None + assert data.torque is None + + +# --------------------------------------------------------------------------- +# Initialization and shapes +# --------------------------------------------------------------------------- + + +def test_initialization_and_shapes(sim): + """Sensor initializes on sim reset and exposes correctly-shaped buffers.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + # revolute_articulation has one joint whose child is "Arm". + num_envs = 2 + num_joints = 1 + assert sensor.data.force.torch.shape == (num_envs, num_joints, 3) + assert sensor.data.torque.torch.shape == (num_envs, num_joints, 3) + assert sensor.body_names == ["Arm"] + + +def test_multi_body_articulation(sim): + """Cartpole (2 joints) exposes a wrench for each joint labelled by its child body.""" + scene = InteractiveScene(_CartpoleSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + num_envs = 2 + num_joints = 2 + assert sensor.data.force.torch.shape == (num_envs, num_joints, 3) + assert sensor.data.torque.torch.shape == (num_envs, num_joints, 3) + assert len(sensor.body_names) == 2 + assert "rail" not in [n.lower() for n in sensor.body_names] + + +def test_nested_articulation_root_resolution(sim): + """Sensor accepts an asset prim path whose articulation root is nested in the USD asset.""" + scene = InteractiveScene(_NestedRootAntSceneCfg(num_envs=1)) + sim.reset() + + robot: Articulation = scene["robot"] + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + assert len(sensor.body_names) == robot.num_joints + assert set(sensor.body_names).issubset(set(robot.body_names)) + assert sensor.data.force.torch.shape == (1, robot.num_joints, 3) + assert sensor.data.torque.torch.shape == (1, robot.num_joints, 3) + + +# --------------------------------------------------------------------------- +# Physical correctness +# --------------------------------------------------------------------------- + + +def _compute_expected_wrench_in_joint_frame( + sensor, + robot, + env: int, + joint: int, + gravity: torch.Tensor, + ext_force_b: torch.Tensor | None = None, + ext_torque_b: torch.Tensor | None = None, + descendant_body_names: list[str] | None = None, +): + """Compute the analytical joint-frame wrench for a single joint. + + Uses the same geometric data (body_com, joint_X_c, body_q) and frame + transformations as the kernel, but computes the wrench analytically from + known loads rather than reading body_parent_f. Computes the moment of + forces about the joint anchor and rotates the result into the child-side + joint frame. + + For terminal links, the wrench is due to the child body alone. For + interior joints, pass all bodies in the subtree below the joint via + ``descendant_body_names`` so the helper sums their gravitational + contributions. + + Args: + sensor: The JointWrenchSensor instance (used to read Newton model bindings). + robot: The Articulation asset (used for body mass lookup). + env: Environment index. + joint: Joint index within the sensor. + gravity: Gravity vector in world frame, shape (3,). + ext_force_b: External force on the child body in body frame [N], shape (3,). + ext_torque_b: External torque on the child body in body frame [N·m], shape (3,). + descendant_body_names: Bodies whose gravitational load acts through this + joint. Defaults to the child body only (correct for terminal links). + For an interior joint, pass all bodies in the subtree below the joint. + + Returns: + A tuple of (force, torque) tensors, each shape (3,), in the child-side + joint frame. + """ + body_idx = wp.to_torch(sensor._joint_child)[joint].item() + + # Link transform in world (of the child body — defines the joint frame). + link_xform = wp.to_torch(sensor._sim_bind_body_q)[env, body_idx] # (7,) = pos(3) + quat(4) + link_pos = link_xform[:3] + link_quat = link_xform[3:] # wp.quatf = (x, y, z, w) + + # Joint anchor and orientation in world = link_xform * joint_X_c. + joint_X_c = wp.to_torch(sensor._sim_bind_joint_X_c)[env, joint] # (7,) + jxc_pos = joint_X_c[:3] + jxc_quat = joint_X_c[3:] + anchor_world = link_pos + math_utils.quat_apply(link_quat.unsqueeze(0), jxc_pos.unsqueeze(0)).squeeze(0) + joint_quat_world = math_utils.quat_mul(link_quat.unsqueeze(0), jxc_quat.unsqueeze(0)).squeeze(0) + + # Bodies whose weight contributes to the wrench at this joint. + if descendant_body_names is None: + descendant_body_names = [sensor.body_names[joint]] + + link_names = list(sensor._root_view.link_names) + + total_force_w = torch.zeros(3, device=gravity.device) + total_torque_w = torch.zeros(3, device=gravity.device) + + for body_name in descendant_body_names: + b_idx = link_names.index(body_name) + b_xform = wp.to_torch(sensor._sim_bind_body_q)[env, b_idx] + b_pos = b_xform[:3] + b_quat = b_xform[3:] + b_com_local = wp.to_torch(sensor._sim_bind_body_com)[env, b_idx] + b_com_world = b_pos + math_utils.quat_apply(b_quat.unsqueeze(0), b_com_local.unsqueeze(0)).squeeze(0) + + art_b_idx = robot.body_names.index(body_name) + mass = robot.data.body_mass.torch[env, art_b_idx].item() + weight_w = mass * gravity + + total_force_w = total_force_w + weight_w + r = b_com_world - anchor_world + total_torque_w = total_torque_w + torch.cross(r, weight_w, dim=-1) + + # External force/torque on the child body only (if provided). Actuator + # torque is intentionally omitted; see tolerance comment in calling tests. + if ext_force_b is not None: + ext_force_w = math_utils.quat_apply(link_quat.unsqueeze(0), ext_force_b.unsqueeze(0)).squeeze(0) + total_force_w = total_force_w + ext_force_w + # Moment of the external force about the joint anchor (applied at child COM). + child_com_local = wp.to_torch(sensor._sim_bind_body_com)[env, body_idx] + child_com_world = link_pos + math_utils.quat_apply( + link_quat.unsqueeze(0), child_com_local.unsqueeze(0) + ).squeeze(0) + total_torque_w = total_torque_w + torch.cross(child_com_world - anchor_world, ext_force_w, dim=-1) + if ext_torque_b is not None: + total_torque_w = total_torque_w + math_utils.quat_apply( + link_quat.unsqueeze(0), ext_torque_b.unsqueeze(0) + ).squeeze(0) + + # Reaction wrench = negation of total wrench (joint supports against all loads). + reaction_force_w = -total_force_w + reaction_torque_w = -total_torque_w + + # Rotate into joint frame. + expected_force = math_utils.quat_apply_inverse( + joint_quat_world.unsqueeze(0), reaction_force_w.unsqueeze(0) + ).squeeze(0) + expected_torque = math_utils.quat_apply_inverse( + joint_quat_world.unsqueeze(0), reaction_torque_w.unsqueeze(0) + ).squeeze(0) + + return expected_force, expected_torque + + +def test_force_and_torque_components_at_rest(sim): + """Component-level validation of force and torque against analytical expectations (gravity only).""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + for _ in range(400): + sim.step() + scene.update(sim.get_physics_dt()) + + gravity = torch.tensor(sim.cfg.gravity, device=sim.device) + expected_force, expected_torque = _compute_expected_wrench_in_joint_frame( + sensor, + robot, + env=0, + joint=0, + gravity=gravity, + ) + + force = sensor.data.force.torch[0, 0] + torque = sensor.data.torque.torch[0, 0] + + torch.testing.assert_close(force, expected_force, atol=1e-2, rtol=1e-3) + torch.testing.assert_close(torque, expected_torque, atol=1e-2, rtol=1e-3) + + +def test_wrench_with_external_force_and_torque(sim): + """Full analytical wrench validation with external force and torque applied. + + Mirrors the PhysX ``test_body_incoming_joint_wrench_b_single_joint`` pattern: + apply a known wrench, settle, compute the expected reaction wrench analytically, + and compare component-by-component. + """ + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + arm_idx = robot.body_names.index("Arm") + + # Apply 10 N in body-Y and 10 N·m in body-Z on the arm (matches PhysX test). + ext_force_b = torch.zeros((1, robot.num_bodies, 3), device=sim.device) + ext_force_b[:, arm_idx, 1] = 10.0 + ext_torque_b = torch.zeros((1, robot.num_bodies, 3), device=sim.device) + ext_torque_b[:, arm_idx, 2] = 10.0 + + for _ in range(800): + robot.permanent_wrench_composer.set_forces_and_torques_index(forces=ext_force_b, torques=ext_torque_b) + robot.write_data_to_sim() + sim.step() + scene.update(sim.get_physics_dt()) + + gravity = torch.tensor(sim.cfg.gravity, device=sim.device) + expected_force, expected_torque = _compute_expected_wrench_in_joint_frame( + sensor, + robot, + env=0, + joint=0, + gravity=gravity, + ext_force_b=ext_force_b[0, arm_idx], + ext_torque_b=ext_torque_b[0, arm_idx], + ) + + force = sensor.data.force.torch[0, 0] + torque = sensor.data.torque.torch[0, 0] + + # The PD actuator contributes a small torque (~0.1 N·m) to body_parent_f that is + # not modelled in the analytical helper. Force is unaffected (actuator is pure torque). + torch.testing.assert_close(force, expected_force, atol=1e-2, rtol=1e-3) + torch.testing.assert_close(torque, expected_torque, atol=0.15, rtol=1e-2) + + +def test_interior_joint_wrench_at_rest(sim): + """Interior joint wrench accounts for the weight of all descendant bodies. + + The cartpole has two joints: ``slider_to_cart`` (interior, supports cart + and pole) and ``cart_to_pole`` (terminal, supports pole only). At steady + state with gravity as the only load, the reaction wrench at the interior + joint must equal the combined weight of cart and pole, with torque + computed from each body's moment about the joint anchor. + """ + scene = InteractiveScene(_CartpoleDampedSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + + for _ in range(800): + sim.step() + scene.update(sim.get_physics_dt()) + + gravity = torch.tensor(sim.cfg.gravity, device=sim.device) + + # Interior joint (index 0, slider_to_cart): reaction wrench supports + # all bodies in the subtree — both cart and pole. + expected_force, expected_torque = _compute_expected_wrench_in_joint_frame( + sensor, + robot, + env=0, + joint=0, + gravity=gravity, + descendant_body_names=list(sensor.body_names), + ) + + force = sensor.data.force.torch[0, 0] + torque = sensor.data.torque.torch[0, 0] + + torch.testing.assert_close(force, expected_force, atol=1e-2, rtol=1e-3) + torch.testing.assert_close(torque, expected_torque, atol=1e-2, rtol=1e-3) + + +# --------------------------------------------------------------------------- +# String representation +# --------------------------------------------------------------------------- + + +def test_sensor_print(sim): + """Test that the sensor string representation works.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + sensor_str = str(sensor) + assert "newton" in sensor_str + assert "Joint wrench sensor" in sensor_str + + +# --------------------------------------------------------------------------- +# Reset behavior +# --------------------------------------------------------------------------- + + +def test_reset_zeros_buffers(sim): + """Resetting the sensor clears the force / torque buffers.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + for _ in range(100): + sim.step() + scene.update(sim.get_physics_dt()) + + assert torch.any(sensor.data.force.torch != 0), "Expected non-zero data before reset" + + sensor.reset() + + # Access raw buffers to skip lazy re-population from the Newton view on the next data read. + force_after = wp.to_torch(sensor._data._force) + torque_after = wp.to_torch(sensor._data._torque) + torch.testing.assert_close(force_after, torch.zeros_like(force_after)) + torch.testing.assert_close(torque_after, torch.zeros_like(torque_after)) + + +def test_reset_with_env_ids_only_zeros_selected_envs(sim): + """Partial reset via env_ids should zero the selected envs and preserve the others.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=4)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + for _ in range(100): + sim.step() + scene.update(sim.get_physics_dt()) + + force_before = sensor.data.force.torch.clone() + assert torch.any(force_before != 0), "Expected non-zero data before reset" + + sensor.reset(env_ids=[0, 2]) + + force_after = wp.to_torch(sensor._data._force) + torch.testing.assert_close(force_after[0], torch.zeros_like(force_after[0])) + torch.testing.assert_close(force_after[2], torch.zeros_like(force_after[2])) + torch.testing.assert_close(force_after[1], force_before[1]) + torch.testing.assert_close(force_after[3], force_before[3]) diff --git a/source/isaaclab_newton/test/sensors/test_pva.py b/source/isaaclab_newton/test/sensors/test_pva.py new file mode 100644 index 000000000000..8d14b9cb77ef --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_pva.py @@ -0,0 +1,332 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests to verify PVA sensor functionality using Newton physics.""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import torch +import warp as wp +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors.pva import Pva, PvaCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + + +@configclass +class PvaTestSceneCfg(InteractiveSceneCfg): + """Scene with a rigid cube and a PVA sensor.""" + + env_spacing = 2.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + pva = PvaCfg( + prim_path="{ENV_REGEX_NS}/Cube", + ) + + +@pytest.fixture +def sim(): + """Create a simulation context with Newton physics.""" + sim_cfg = SimulationCfg( + dt=1.0 / 200.0, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + num_substeps=1, + ), + ) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + sim.set_camera_view(eye=(5.0, 5.0, 5.0), target=(0.0, 0.0, 0.0)) + yield sim + + +def test_sensor_initialization(sim): + """Test that the Newton PVA sensor initializes correctly.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + pva: Pva = scene["pva"] + assert pva.num_instances == 2 + assert pva.data.pos_w is not None + assert pva.data.quat_w is not None + assert pva.data.lin_vel_b is not None + assert pva.data.ang_vel_b is not None + assert pva.data.lin_acc_b is not None + assert pva.data.ang_acc_b is not None + assert pva.data.projected_gravity_b is not None + assert pva.data.pose_w is not None + + +def test_data_shapes(sim): + """Test that PVA output tensors have correct shapes.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + assert pva.data.pos_w.torch.shape == (2, 3) + assert pva.data.quat_w.torch.shape == (2, 4) + assert pva.data.pose_w.torch.shape == (2, 7) + assert pva.data.lin_vel_b.torch.shape == (2, 3) + assert pva.data.ang_vel_b.torch.shape == (2, 3) + assert pva.data.lin_acc_b.torch.shape == (2, 3) + assert pva.data.ang_acc_b.torch.shape == (2, 3) + assert pva.data.projected_gravity_b.torch.shape == (2, 3) + + +def test_gravity_at_rest(sim): + """Test that a resting PVA sensor reports correct projected gravity.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + # Cube falls from z=1.0 (bottom at z=0.9), reaches ground in ~86 steps at 200 Hz. + for _ in range(200): + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + proj_grav = pva.data.projected_gravity_b.torch + + expected = torch.tensor([[0.0, 0.0, -1.0]], dtype=proj_grav.dtype, device=proj_grav.device).repeat(2, 1) + torch.testing.assert_close(proj_grav, expected, atol=0.05, rtol=0.0) + + +def test_velocity_at_rest(sim): + """Test that a resting PVA sensor reports near-zero velocity.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + for _ in range(200): + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + lin_vel = pva.data.lin_vel_b.torch + ang_vel = pva.data.ang_vel_b.torch + + torch.testing.assert_close(lin_vel, torch.zeros_like(lin_vel), atol=0.05, rtol=0.0) + torch.testing.assert_close(ang_vel, torch.zeros_like(ang_vel), atol=0.05, rtol=0.0) + + +def test_position_nonzero(sim): + """Test that the PVA sensor reports a non-zero world-frame position.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + pos = pva.data.pos_w.torch + + assert torch.all(pos[:, 2] > 0.0), f"Expected positive z position, got {pos[:, 2]}" + + +def test_reset(sim): + """Test that reset zeroes out PVA data.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + for _ in range(10): + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + + pos = pva.data.pos_w.torch + assert torch.any(pos != 0), "Expected non-zero data before reset" + + pva.reset() + + # Access internal buffers directly to avoid lazy re-evaluation via pva.data + # (the data property triggers _update_buffers_impl which would overwrite reset values). + pos = wp.to_torch(pva._data._pos_w) + lin_vel = wp.to_torch(pva._data._lin_vel_b) + ang_vel = wp.to_torch(pva._data._ang_vel_b) + lin_acc = wp.to_torch(pva._data._lin_acc_b) + ang_acc = wp.to_torch(pva._data._ang_acc_b) + quat = wp.to_torch(pva._data._quat_w) + + torch.testing.assert_close(pos, torch.zeros_like(pos)) + torch.testing.assert_close(lin_vel, torch.zeros_like(lin_vel)) + torch.testing.assert_close(ang_vel, torch.zeros_like(ang_vel)) + torch.testing.assert_close(lin_acc, torch.zeros_like(lin_acc)) + torch.testing.assert_close(ang_acc, torch.zeros_like(ang_acc)) + expected_quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]], dtype=quat.dtype, device=quat.device).repeat(2, 1) + torch.testing.assert_close(quat, expected_quat) + + +@configclass +class FreefallSceneCfg(InteractiveSceneCfg): + """Scene with a rigid cube and PVA but no ground plane (freefall).""" + + env_spacing = 2.0 + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5.0)), + ) + + pva = PvaCfg( + prim_path="{ENV_REGEX_NS}/Cube", + ) + + +def test_freefall_velocity_increases(sim): + """Test that a freefalling body's downward velocity increases over time.""" + scene_cfg = FreefallSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + for _ in range(50): + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + lin_vel = pva.data.lin_vel_b.torch + + speed = torch.norm(lin_vel, dim=-1) + assert torch.all(speed > 0.1), f"Expected non-zero velocity in freefall, got {speed}" + + +def test_freefall_acceleration(sim): + """Test that a freefalling body reports coordinate acceleration equal to gravity. + + PVA reports coordinate acceleration (from body_qdd), not proper acceleration. + In freefall, coordinate acceleration equals gravitational acceleration (~9.81 m/s^2 + downward). For an upright body, this is (0, 0, -9.81) in the body frame. + """ + scene_cfg = FreefallSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + for _ in range(10): + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + lin_acc = pva.data.lin_acc_b.torch + ang_acc = pva.data.ang_acc_b.torch + + # Coordinate acceleration in freefall should be ~(0, 0, -9.81) in body frame. + expected_acc = torch.tensor([[0.0, 0.0, -9.81]], dtype=lin_acc.dtype, device=lin_acc.device).repeat(2, 1) + torch.testing.assert_close(lin_acc, expected_acc, atol=0.5, rtol=0.0) + + # Angular acceleration should be near zero (no torques in freefall). + torch.testing.assert_close(ang_acc, torch.zeros_like(ang_acc), atol=0.05, rtol=0.0) + + +def test_sensor_print(sim): + """Test that the sensor string representation works.""" + scene_cfg = PvaTestSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + pva: Pva = scene["pva"] + sensor_str = str(pva) + assert "newton" in sensor_str + assert "Pva sensor" in sensor_str + + +@configclass +class OffsetRotatedSceneCfg(InteractiveSceneCfg): + """Scene with a tilted cube and offset PVA sensor in freefall. + + The cube is rotated 90 degrees about the X axis and the sensor has a + +Z offset of 0.5 m in the body frame. This exercises the lever-arm + velocity/acceleration corrections and body-frame gravity projection. + """ + + env_spacing = 2.0 + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg( + pos=(0.0, 0.0, 5.0), + rot=(0.7071068, 0.0, 0.0, 0.7071068), # 90 deg about X (x, y, z, w) + ), + ) + + pva = PvaCfg( + prim_path="{ENV_REGEX_NS}/Cube", + offset=PvaCfg.OffsetCfg(pos=(0.0, 0.0, 0.5)), + ) + + +def test_offset_and_rotated_body(sim): + """Test position offset and body-frame gravity for a 90-degree-tilted body. + + With a 90-deg rotation about X, the body-frame +Z offset of 0.5 m maps + to world-frame (0, -0.5, 0). Projected gravity in the tilted sensor + frame should be approximately (0, -1, 0) instead of (0, 0, -1). + """ + scene_cfg = OffsetRotatedSceneCfg(num_envs=2) + scene = InteractiveScene(scene_cfg) + sim.reset() + + sim.step() + scene.update(sim.get_physics_dt()) + + pva: Pva = scene["pva"] + pos = pva.data.pos_w.torch + proj_grav = pva.data.projected_gravity_b.torch + + # pos_w is in absolute world frame; subtract env origins to get env-relative position. + # Expected env-relative: body at (0,0,5) + R_x(90) * (0,0,0.5) = (0, -0.5, 5) + env_pos = pos - scene.env_origins.to(pos.device) + torch.testing.assert_close(env_pos[:, 0], torch.zeros(2, dtype=pos.dtype, device=pos.device), atol=0.01, rtol=0.0) + torch.testing.assert_close( + env_pos[:, 1], torch.full((2,), -0.5, dtype=pos.dtype, device=pos.device), atol=0.01, rtol=0.0 + ) + assert torch.all(env_pos[:, 2] > 4.5), f"Expected z near 5.0, got {env_pos[:, 2]}" + + # Projected gravity: R_x(-90) * (0, 0, -1) = (0, -1, 0) + expected_grav = torch.tensor([[0.0, -1.0, 0.0]], dtype=proj_grav.dtype, device=proj_grav.device).repeat(2, 1) + torch.testing.assert_close(proj_grav, expected_grav, atol=0.05, rtol=0.0) diff --git a/source/isaaclab_newton/test/sensors/test_site_injection.py b/source/isaaclab_newton/test/sensors/test_site_injection.py new file mode 100644 index 000000000000..6bf40635a6c0 --- /dev/null +++ b/source/isaaclab_newton/test/sensors/test_site_injection.py @@ -0,0 +1,320 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for site injection, validation, and sensor index building.""" + +import pytest +import warp as wp +from isaaclab_newton.physics.newton_manager import NewtonManager +from isaaclab_newton.sensors.frame_transformer.frame_transformer import FrameTransformer + +from isaaclab.utils.warp.math_ops import transform_to_vec_quat + +# --------------------------------------------------------------------------- +# transform_to_vec_quat +# --------------------------------------------------------------------------- + + +class TestTransformToVecQuat: + """Tests for the zero-copy view split utility.""" + + def test_1d_pos_quat_split(self): + """1D array: position is first 3 floats, quaternion is last 4.""" + t = wp.zeros(3, dtype=wp.transformf, device="cpu") + pos, quat = transform_to_vec_quat(t) + assert pos.shape == (3,) + assert quat.shape == (3,) + assert pos.dtype == wp.vec3f + assert quat.dtype == wp.quatf + + def test_2d_pos_quat_split(self): + """2D array: shapes are (N, M) with vec3f and quatf dtypes.""" + t = wp.zeros((2, 4), dtype=wp.transformf, device="cpu") + pos, quat = transform_to_vec_quat(t) + assert pos.shape == (2, 4) + assert quat.shape == (2, 4) + assert pos.dtype == wp.vec3f + assert quat.dtype == wp.quatf + + def test_zero_copy_1d(self): + """Writes through pos/quat views are reflected in the original transform array.""" + t = wp.zeros(1, dtype=wp.transformf, device="cpu") + pos, quat = transform_to_vec_quat(t) + # Write known values through the views + pos.numpy()[0] = (1.0, 2.0, 3.0) + quat.numpy()[0] = (0.0, 0.0, 0.0, 1.0) + floats = t.view(wp.float32).numpy() + assert list(floats[0]) == pytest.approx([1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0]) + + def test_invalid_ndim_raises(self): + """Passing a 0D or 4D array raises an error.""" + with pytest.raises((ValueError, IndexError)): + transform_to_vec_quat(wp.zeros((), dtype=wp.transformf, device="cpu")) + + def test_wrong_dtype_raises(self): + """Passing wrong dtype raises TypeError.""" + with pytest.raises(TypeError): + transform_to_vec_quat(wp.zeros(3, dtype=wp.vec3f, device="cpu")) + + +# --------------------------------------------------------------------------- +# NewtonManager._cl_inject_sites_fallback +# --------------------------------------------------------------------------- + + +class MockBuilder: + """Minimal stand-in for ModelBuilder.""" + + def __init__(self, body_labels: list[str]): + self.body_label = body_labels + self._next_idx = 0 + + def add_site(self, body: int, xform: wp.transform, label: str) -> int: + idx = self._next_idx + self._next_idx += 1 + return idx + + +class TestFallbackGlobalSite: + """Global site (body_pattern=None) must produce a (int, None) entry.""" + + def setup_method(self): + NewtonManager.clear() + NewtonManager._builder = MockBuilder(["body0", "body1"]) + + def test_global_site_entry_is_int_none_tuple(self): + xform = wp.transform() + NewtonManager._cl_pending_sites = {(None, tuple(xform)): ("ft_0", xform)} + NewtonManager._cl_inject_sites_fallback() + + entry = NewtonManager._cl_site_index_map["ft_0"] + global_idx, per_world = entry + assert isinstance(global_idx, int) + assert per_world is None + + def test_global_site_pending_cleared(self): + xform = wp.transform() + NewtonManager._cl_pending_sites = {(None, tuple(xform)): ("ft_0", xform)} + NewtonManager._cl_inject_sites_fallback() + + assert len(NewtonManager._cl_pending_sites) == 0 + + +class TestFallbackLocalSingleBody: + """Single-body local site must produce a (None, [[idx]]) entry — one world.""" + + def setup_method(self): + NewtonManager.clear() + NewtonManager._builder = MockBuilder(["Robot/base", "Robot/hand"]) + + def test_single_body_entry_shape(self): + xform = wp.transform() + NewtonManager._cl_pending_sites = {("Robot/base", tuple(xform)): ("ft_0", xform)} + NewtonManager._cl_inject_sites_fallback() + + entry = NewtonManager._cl_site_index_map["ft_0"] + global_idx, per_world = entry + assert global_idx is None + assert isinstance(per_world, list) + assert len(per_world) == 1 # one world + assert len(per_world[0]) == 1 # one match + assert isinstance(per_world[0][0], int) + + +class TestFallbackLocalWildcard: + """Wildcard local site matching N bodies must produce (None, [[idx0..idxN-1]]) — one world.""" + + def setup_method(self): + NewtonManager.clear() + NewtonManager._builder = MockBuilder(["Robot/FL_foot", "Robot/FR_foot", "Robot/RL_foot", "Robot/RR_foot"]) + + def test_wildcard_entry_shape(self): + xform = wp.transform() + NewtonManager._cl_pending_sites = {("Robot/.*_foot", tuple(xform)): ("ft_0", xform)} + NewtonManager._cl_inject_sites_fallback() + + entry = NewtonManager._cl_site_index_map["ft_0"] + global_idx, per_world = entry + assert global_idx is None + assert len(per_world) == 1 # one world + assert len(per_world[0]) == 4 # four bodies matched + + def test_no_match_raises(self): + xform = wp.transform() + NewtonManager._cl_pending_sites = {("Robot/nonexistent", tuple(xform)): ("ft_0", xform)} + with pytest.raises(ValueError): + NewtonManager._cl_inject_sites_fallback() + + +# --------------------------------------------------------------------------- +# FrameTransformer._validate_site_map +# --------------------------------------------------------------------------- + + +def _make_site_map( + source_per_world: list[list[int]], + target_per_worlds: list[list[list[int]]], + world_origin_idx: int = 0, +) -> dict: + m = { + "world_origin": (world_origin_idx, None), + "source": (None, source_per_world), + } + for i, pw in enumerate(target_per_worlds): + m[f"target_{i}"] = (None, pw) + return m + + +class TestSourceValidation: + def test_valid_source_one_per_env(self): + site_map = _make_site_map([[10], [20]], []) + indices, _ = FrameTransformer._validate_site_map("source", "/Robot/base", [], [], site_map, num_envs=2) + assert indices == [10, 20] + + def test_source_wrong_env_count_raises(self): + # site map has 1 world entry but num_envs=2 + site_map = _make_site_map([[10]], []) + with pytest.raises(ValueError, match="1 world entries.*expected 2"): + FrameTransformer._validate_site_map("source", "/Robot/base", [], [], site_map, num_envs=2) + + def test_source_zero_in_env_raises(self): + site_map = _make_site_map([[], [20]], []) + with pytest.raises(ValueError, match="matched 0 bodies in env 0"): + FrameTransformer._validate_site_map("source", "/Robot/base", [], [], site_map, num_envs=2) + + def test_source_two_in_env_raises(self): + site_map = _make_site_map([[10, 11], [20]], []) + with pytest.raises(ValueError, match="matched 2 bodies in env 0"): + FrameTransformer._validate_site_map("source", "/Robot/base", [], [], site_map, num_envs=2) + + +class TestTargetValidation: + def test_valid_single_target_per_env(self): + site_map = _make_site_map([[10], [20]], [[[30], [40]]]) + _, tgt = FrameTransformer._validate_site_map( + "source", "/Robot/base", ["target_0"], ["/Robot/hand"], site_map, num_envs=2 + ) + assert tgt[0] == [[30], [40]] + + def test_valid_wildcard_two_bodies_per_env(self): + site_map = _make_site_map([[10], [20]], [[[30, 31], [40, 41]]]) + _, tgt = FrameTransformer._validate_site_map( + "source", "/Robot/base", ["target_0"], ["/Robot/foot.*"], site_map, num_envs=2 + ) + assert tgt[0] == [[30, 31], [40, 41]] + + def test_target_zero_bodies_raises(self): + site_map = _make_site_map([[10], [20]], [[[], []]]) + with pytest.raises(ValueError, match="matched no bodies"): + FrameTransformer._validate_site_map( + "source", "/Robot/base", ["target_0"], ["/Robot/foot.*"], site_map, num_envs=2 + ) + + def test_target_non_uniform_raises(self): + site_map = _make_site_map([[10], [20]], [[[30, 31], [40]]]) + with pytest.raises(ValueError, match="different numbers of bodies"): + FrameTransformer._validate_site_map( + "source", "/Robot/base", ["target_0"], ["/Robot/foot.*"], site_map, num_envs=2 + ) + + +# --------------------------------------------------------------------------- +# FrameTransformer._build_sensor_index_lists +# --------------------------------------------------------------------------- + + +def _call(source_indices, target_per_world, target_frame_body_names, shape_labels, world_origin_idx, num_envs): + return FrameTransformer._build_sensor_index_lists( + source_indices, target_per_world, target_frame_body_names, shape_labels, world_origin_idx, num_envs + ) + + +class TestZeroTargets: + def test_zero_targets_shapes_refs(self): + """0 targets: shapes/refs contain only source entries.""" + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10, 11], + target_per_world=[], + target_frame_body_names=[], + shape_labels={}, + world_origin_idx=0, + num_envs=2, + ) + assert shapes == [10, 11] + assert refs == [0, 0] + assert names == [] + assert tgt_per_tgt == [] + + +class TestSingleTarget: + def test_one_env_one_target(self): + """1 env, 1 target: [src, tgt] shapes, [world_orig, src] refs.""" + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10], + target_per_world=[[[20]]], + target_frame_body_names=["hand"], + shape_labels={}, + world_origin_idx=0, + num_envs=1, + ) + assert shapes == [10, 20] + assert refs == [0, 10] + assert names == ["hand"] + + def test_two_envs_two_targets(self): + """2 envs, 2 targets: stride-2 interleaved layout.""" + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10, 11], + target_per_world=[[[20], [21]], [[30], [31]]], + target_frame_body_names=["arm", "hand"], + shape_labels={}, + world_origin_idx=0, + num_envs=2, + ) + assert shapes == [10, 20, 30, 11, 21, 31] + assert refs == [0, 10, 10, 0, 11, 11] + assert names == ["arm", "hand"] + + +class TestWildcardExpansion: + def test_wildcard_two_bodies_per_env_indices(self): + """Wildcard: 2 bodies per env → 2 expanded target entries, correct indices.""" + shape_labels = {20: "FL_foot/label_0", 21: "FL_foot/label_0", 22: "FR_foot/label_0", 23: "FR_foot/label_0"} + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10, 11], + target_per_world=[[[20, 22], [21, 23]]], + target_frame_body_names=["foot"], + shape_labels=shape_labels, + world_origin_idx=0, + num_envs=2, + ) + assert shapes == [10, 20, 22, 11, 21, 23] + assert refs == [0, 10, 10, 0, 11, 11] + assert tgt_per_tgt == [[20, 21], [22, 23]] + + def test_wildcard_uses_body_names_from_shape_labels(self): + """Wildcard: body names derived from shape_labels when n_bodies > 1.""" + shape_labels = {20: "FL_foot/label_0", 21: "FL_foot/label_0", 22: "FR_foot/label_0", 23: "FR_foot/label_0"} + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10, 11], + target_per_world=[[[20, 22], [21, 23]]], + target_frame_body_names=["foot"], + shape_labels=shape_labels, + world_origin_idx=0, + num_envs=2, + ) + assert names == ["FL_foot", "FR_foot"] + + def test_wildcard_single_body_uses_config_name(self): + """Single body match: config name is used regardless of shape_labels.""" + names, tgt_per_tgt, shapes, refs = _call( + source_indices=[10, 11], + target_per_world=[[[20], [21]]], + target_frame_body_names=["foot"], + shape_labels={}, + world_origin_idx=0, + num_envs=2, + ) + assert names == ["foot"] diff --git a/source/isaaclab_newton/test/sim/__init__.py b/source/isaaclab_newton/test/sim/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_newton/test/sim/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py b/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py new file mode 100644 index 000000000000..e8b28aad2d95 --- /dev/null +++ b/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py @@ -0,0 +1,198 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton backend tests for FrameView. + +Imports the shared contract tests and provides the Newton-specific +``view_factory`` fixture. Also includes Newton-only guard tests and +the world-attached prim edge case. +""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) +sys.path.insert(0, str(Path(__file__).resolve().parents[3] / "isaaclab" / "test" / "sim")) + +import pytest +import torch +import warp as wp +from frame_view_contract_utils import * # noqa: F401, F403 — import all contract tests +from frame_view_contract_utils import CHILD_OFFSET, ViewBundle, _wp_vec3f, _wp_vec4f +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics.newton_manager import NewtonManager +from isaaclab_newton.sim.views import NewtonSiteFrameView as FrameView + +from pxr import Gf + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils import configclass + +NEWTON_SIM_CFG = SimulationCfg(physics=NewtonCfg(solver_cfg=MJWarpSolverCfg())) +WORLD_MARKER_POS = (5.0, 3.0, 1.0) + + +@configclass +class _SceneCfg(InteractiveSceneCfg): + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +def _sim_context(device, num_envs=4): + NEWTON_SIM_CFG.device = device + return build_simulation_context(device=device, sim_cfg=NEWTON_SIM_CFG, add_ground_plane=True) + + +def _get_body_positions(num_envs, device="cpu"): + model = NewtonManager.get_model() + body_labels = list(model.body_label) + body_q_t = wp.to_torch(NewtonManager.get_state_0().body_q) + return torch.stack([body_q_t[body_labels.index(f"/World/envs/env_{i}/Cube"), :3] for i in range(num_envs)]) + + +def _set_body_positions(positions, num_envs): + model = NewtonManager.get_model() + body_labels = list(model.body_label) + body_q_t = wp.to_torch(NewtonManager.get_state_0().body_q) + for i in range(num_envs): + body_q_t[body_labels.index(f"/World/envs/env_{i}/Cube"), :3] = positions[i] + + +# ------------------------------------------------------------------ +# Contract fixture +# ------------------------------------------------------------------ + + +@pytest.fixture +def view_factory(): + """Newton factory: CameraMount child Xform at CHILD_OFFSET under each Cube body.""" + + def factory(num_envs: int, device: str) -> ViewBundle: + ctx = _sim_context(device, num_envs=num_envs) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + InteractiveScene(_SceneCfg(num_envs=num_envs, env_spacing=2.0)) + + stage = sim_utils.get_current_stage() + for i in range(num_envs): + prim = stage.DefinePrim(f"/World/envs/env_{i}/Cube/CameraMount", "Xform") + sim_utils.standardize_xform_ops(prim) + prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(*CHILD_OFFSET)) + prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0)) + + sim.reset() + view = FrameView("/World/envs/env_.*/Cube/CameraMount", device=device) + + return ViewBundle( + view=view, + get_parent_pos=_get_body_positions, + set_parent_pos=_set_body_positions, + teardown=lambda: ctx.__exit__(None, None, None), + ) + + return factory + + +# ================================================================== +# Newton-only: guard tests +# ================================================================== + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_reject_body_path(device): + """FrameView rejects prim paths that resolve to a Newton physics body.""" + ctx = _sim_context(device, num_envs=2) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + InteractiveScene(_SceneCfg(num_envs=2, env_spacing=2.0)) + sim.reset() + + with pytest.raises(ValueError, match="physics body"): + FrameView("/World/envs/env_.*/Cube", device=device) + ctx.__exit__(None, None, None) + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_reject_shape_path(device): + """FrameView rejects prim paths that resolve to a Newton collision shape.""" + ctx = _sim_context(device, num_envs=2) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + InteractiveScene(_SceneCfg(num_envs=2, env_spacing=2.0)) + sim.reset() + + shape_labels = list(NewtonManager.get_model().shape_label) + if not shape_labels: + pytest.skip("No shapes in model") + + with pytest.raises(ValueError, match="collision shape"): + FrameView(shape_labels[0], device=device) + ctx.__exit__(None, None, None) + + +# ================================================================== +# Newton edge case: world-attached prim (body=-1) +# ================================================================== + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_world_attached_returns_initial_pose(device): + """A world-rooted Xform returns its USD-authored position.""" + ctx = _sim_context(device, num_envs=2) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + InteractiveScene(_SceneCfg(num_envs=2, env_spacing=2.0)) + + stage = sim_utils.get_current_stage() + prim = stage.DefinePrim("/World/StaticMarker", "Xform") + sim_utils.standardize_xform_ops(prim) + prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(*WORLD_MARKER_POS)) + prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0)) + + sim.reset() + view = FrameView("/World/StaticMarker", device=device) + + pos = view.get_world_poses()[0].torch + expected = torch.tensor([list(WORLD_MARKER_POS)], device=device) + torch.testing.assert_close(pos, expected, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.parametrize("device", ["cpu", "cuda:0"]) +def test_world_attached_set_world_roundtrip(device): + """A world-attached prim can be repositioned via set_world_poses.""" + ctx = _sim_context(device, num_envs=2) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + InteractiveScene(_SceneCfg(num_envs=2, env_spacing=2.0)) + + stage = sim_utils.get_current_stage() + prim = stage.DefinePrim("/World/StaticMarker", "Xform") + sim_utils.standardize_xform_ops(prim) + prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(*WORLD_MARKER_POS)) + prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0)) + + sim.reset() + view = FrameView("/World/StaticMarker", device=device) + + new_pos = _wp_vec3f([[10.0, 20.0, 30.0]], device=device) + new_quat = _wp_vec4f([[0.0, 0.0, 0.0, 1.0]], device=device) + view.set_world_poses(new_pos, new_quat) + + ret_pos, ret_quat = view.get_world_poses() + torch.testing.assert_close(ret_pos.torch, wp.to_torch(new_pos), atol=1e-5, rtol=0) + torch.testing.assert_close(ret_quat.torch, wp.to_torch(new_quat), atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) diff --git a/source/isaaclab_newton/test/test_mock_interfaces/__init__.py b/source/isaaclab_newton/test/test_mock_interfaces/__init__.py new file mode 100644 index 000000000000..c704dd9c2ec3 --- /dev/null +++ b/source/isaaclab_newton/test/test_mock_interfaces/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for mock Newton interfaces.""" diff --git a/source/isaaclab_newton/test/test_mock_interfaces/test_factories.py b/source/isaaclab_newton/test/test_mock_interfaces/test_factories.py new file mode 100644 index 000000000000..5713fd1eff5c --- /dev/null +++ b/source/isaaclab_newton/test/test_mock_interfaces/test_factories.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for Newton mock factory functions.""" + +from isaaclab_newton.test.mock_interfaces.factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, +) +from isaaclab_newton.test.mock_interfaces.views import MockNewtonArticulationView + + +class TestFactories: + """Tests for Newton mock factory functions.""" + + def test_create_mock_articulation_view_basic(self): + """Test basic factory usage.""" + view = create_mock_articulation_view(count=4, num_joints=12, num_bodies=13) + assert view.count == 4 + assert view.joint_dof_count == 12 + assert view.link_count == 13 + assert isinstance(view, MockNewtonArticulationView) + + def test_create_mock_articulation_view_fixed_base(self): + """Test factory with fixed base flag.""" + view = create_mock_articulation_view(count=2, num_joints=6, num_bodies=7, is_fixed_base=True) + assert view.count == 2 + assert view.is_fixed_base is True + + def test_create_mock_articulation_view_custom_names(self): + """Test factory with custom names.""" + joint_names = ["j1", "j2"] + body_names = ["b1", "b2", "b3"] + view = create_mock_articulation_view( + count=1, num_joints=2, num_bodies=3, joint_names=joint_names, body_names=body_names + ) + assert view.joint_dof_names == joint_names + assert view.body_names == body_names + + def test_create_mock_quadruped_view(self): + """Test quadruped factory.""" + view = create_mock_quadruped_view(count=4) + assert view.count == 4 + assert view.joint_dof_count == 12 + assert view.link_count == 13 + assert view.is_fixed_base is False + assert "FL_hip_joint" in view.joint_dof_names + assert "base" in view.body_names + assert isinstance(view, MockNewtonArticulationView) + + def test_create_mock_humanoid_view(self): + """Test humanoid factory.""" + view = create_mock_humanoid_view(count=2) + assert view.count == 2 + assert view.joint_dof_count == 21 + assert view.link_count == 22 + assert view.is_fixed_base is False + assert "left_elbow" in view.joint_dof_names + assert "pelvis" in view.body_names + assert isinstance(view, MockNewtonArticulationView) diff --git a/source/isaaclab_newton/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_newton/test/test_mock_interfaces/test_mock_articulation_view.py new file mode 100644 index 000000000000..2f970323a63d --- /dev/null +++ b/source/isaaclab_newton/test/test_mock_interfaces/test_mock_articulation_view.py @@ -0,0 +1,383 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockNewtonArticulationView.""" + +import numpy as np +import pytest +import warp as wp +from isaaclab_newton.test.mock_interfaces.views import MockNewtonArticulationView + + +class TestMockNewtonArticulationViewInit: + """Tests for MockNewtonArticulationView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockNewtonArticulationView() + assert view.count == 1 + assert view.joint_dof_count == 1 + assert view.link_count == 2 + assert view.is_fixed_base is False + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockNewtonArticulationView( + num_instances=4, + num_joints=12, + num_bodies=13, + is_fixed_base=True, + ) + assert view.count == 4 + assert view.joint_dof_count == 12 + assert view.link_count == 13 + assert view.is_fixed_base is True + + def test_custom_names(self): + """Test initialization with custom joint and body names.""" + joint_names = ["joint_0", "joint_1"] + body_names = ["base", "link_1", "link_2"] + view = MockNewtonArticulationView( + num_joints=2, + num_bodies=3, + joint_names=joint_names, + body_names=body_names, + ) + assert view.joint_dof_names == joint_names + assert view.body_names == body_names + + def test_fixed_base_config(self): + """Test fixed-base configuration.""" + view = MockNewtonArticulationView( + num_instances=2, + num_joints=6, + num_bodies=7, + is_fixed_base=True, + ) + assert view.is_fixed_base is True + # Root velocities should be None for fixed base + assert view.get_root_velocities(None) is None + assert view.get_link_velocities(None) is None + + +class TestMockNewtonArticulationViewRootGetters: + """Tests for MockNewtonArticulationView root getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_get_root_transforms_shape(self, view): + """Test root transforms shape - should be (N, 1) with wp.transformf dtype.""" + transforms = view.get_root_transforms(None) + assert transforms.shape == (4, 1) + assert transforms.dtype == wp.transformf + + def test_get_root_transforms_default_identity(self, view): + """Test that default root transforms are zero-initialized.""" + transforms = view.get_root_transforms(None) + transforms_np = transforms.numpy() + # Default is zero, meaning all zeros for position and quaternion + np.testing.assert_allclose(transforms_np, 0.0) + + def test_get_root_velocities_shape(self, view): + """Test root velocities shape - should be (N, 1) with wp.spatial_vectorf dtype.""" + velocities = view.get_root_velocities(None) + assert velocities.shape == (4, 1) + assert velocities.dtype == wp.spatial_vectorf + + def test_fixed_base_root_velocities_none(self): + """Test that fixed-base root velocities are None.""" + view = MockNewtonArticulationView( + num_instances=4, num_joints=12, num_bodies=13, is_fixed_base=True, device="cpu" + ) + assert view.get_root_velocities(None) is None + + def test_fixed_base_root_transforms_shape(self): + """Test that fixed-base root transforms have shape (N, 1, 1).""" + view = MockNewtonArticulationView( + num_instances=4, num_joints=12, num_bodies=13, is_fixed_base=True, device="cpu" + ) + transforms = view.get_root_transforms(None) + assert transforms.shape == (4, 1, 1) + assert transforms.dtype == wp.transformf + + +class TestMockNewtonArticulationViewLinkGetters: + """Tests for MockNewtonArticulationView link getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_get_link_transforms_shape(self, view): + """Test link transforms shape - should be (N, 1, L) with wp.transformf dtype.""" + transforms = view.get_link_transforms(None) + assert transforms.shape == (4, 1, 13) + assert transforms.dtype == wp.transformf + + def test_get_link_velocities_shape(self, view): + """Test link velocities shape - should be (N, 1, L) with wp.spatial_vectorf dtype.""" + velocities = view.get_link_velocities(None) + assert velocities.shape == (4, 1, 13) + assert velocities.dtype == wp.spatial_vectorf + + def test_fixed_base_link_velocities_none(self): + """Test that fixed-base link velocities are None.""" + view = MockNewtonArticulationView( + num_instances=4, num_joints=12, num_bodies=13, is_fixed_base=True, device="cpu" + ) + assert view.get_link_velocities(None) is None + + +class TestMockNewtonArticulationViewDOFGetters: + """Tests for MockNewtonArticulationView DOF getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_get_dof_positions_shape(self, view): + """Test DOF positions shape - should be (N, 1, J).""" + positions = view.get_dof_positions(None) + assert positions.shape == (4, 1, 12) + assert positions.dtype == wp.float32 + + def test_get_dof_velocities_shape(self, view): + """Test DOF velocities shape - should be (N, 1, J).""" + velocities = view.get_dof_velocities(None) + assert velocities.shape == (4, 1, 12) + assert velocities.dtype == wp.float32 + + @pytest.mark.parametrize( + "attr_name", + [ + "joint_limit_lower", + "joint_limit_upper", + "joint_target_ke", + "joint_target_kd", + "joint_armature", + "joint_friction", + "joint_velocity_limit", + "joint_effort_limit", + ], + ) + def test_get_joint_attribute_shape(self, view, attr_name): + """Test joint attribute shapes via get_attribute().""" + attr = view.get_attribute(attr_name, None) + assert attr.shape == (4, 1, 12) + assert attr.dtype == wp.float32 + + +class TestMockNewtonArticulationViewMassGetters: + """Tests for MockNewtonArticulationView mass property getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_get_body_mass_shape(self, view): + """Test body mass shape via get_attribute().""" + mass = view.get_attribute("body_mass", None) + assert mass.shape == (4, 1, 13) + assert mass.dtype == wp.float32 + + def test_get_body_com_shape(self, view): + """Test body COM shape via get_attribute().""" + com = view.get_attribute("body_com", None) + assert com.shape == (4, 1, 13) + assert com.dtype == wp.vec3f + + def test_get_body_inertia_shape(self, view): + """Test body inertia shape via get_attribute().""" + inertia = view.get_attribute("body_inertia", None) + assert inertia.shape == (4, 1, 13) + assert inertia.dtype == wp.mat33f + + +class TestMockNewtonArticulationViewSetters: + """Tests for MockNewtonArticulationView setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_set_root_transforms(self, view): + """Test setting root transforms round-trip.""" + new_transforms = wp.zeros((4, 1), dtype=wp.transformf, device="cpu") + view.set_root_transforms(None, new_transforms) + result = view.get_root_transforms(None) + np.testing.assert_allclose(result.numpy(), new_transforms.numpy()) + + def test_set_root_velocities(self, view): + """Test setting root velocities round-trip.""" + new_velocities = wp.zeros((4, 1), dtype=wp.spatial_vectorf, device="cpu") + view.set_root_velocities(None, new_velocities) + result = view.get_root_velocities(None) + np.testing.assert_allclose(result.numpy(), new_velocities.numpy()) + + def test_noop_setters_flag(self, view): + """Test that _noop_setters disables writes.""" + # Set initial data + initial_transforms = wp.zeros((4, 1), dtype=wp.transformf, device="cpu") + view.set_root_transforms(None, initial_transforms) + + # Enable noop + view._noop_setters = True + + # Try to write different data + new_transforms = wp.ones((4, 1), dtype=wp.transformf, device="cpu") + view.set_root_transforms(None, new_transforms) + + # Should still have initial data + result = view.get_root_transforms(None) + np.testing.assert_allclose(result.numpy(), initial_transforms.numpy()) + + +class TestMockNewtonArticulationViewMockSetters: + """Tests for MockNewtonArticulationView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + + def test_set_mock_root_transforms(self, view): + """Test mock root transform setter round-trip.""" + mock_data = wp.zeros((4, 1), dtype=wp.transformf, device="cpu") + view.set_mock_root_transforms(mock_data) + result = view.get_root_transforms(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + def test_set_mock_root_velocities(self, view): + """Test mock root velocity setter round-trip.""" + mock_data = wp.zeros((4, 1), dtype=wp.spatial_vectorf, device="cpu") + view.set_mock_root_velocities(mock_data) + result = view.get_root_velocities(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + def test_set_mock_link_transforms(self, view): + """Test mock link transform setter round-trip.""" + mock_data = wp.zeros((4, 1, 13), dtype=wp.transformf, device="cpu") + view.set_mock_link_transforms(mock_data) + result = view.get_link_transforms(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + def test_set_mock_link_velocities(self, view): + """Test mock link velocity setter round-trip.""" + mock_data = wp.zeros((4, 1, 13), dtype=wp.spatial_vectorf, device="cpu") + view.set_mock_link_velocities(mock_data) + result = view.get_link_velocities(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + def test_set_mock_dof_positions(self, view): + """Test mock DOF position setter round-trip.""" + mock_data = wp.array(np.random.randn(4, 1, 12).astype(np.float32), dtype=wp.float32, device="cpu") + view.set_mock_dof_positions(mock_data) + result = view.get_dof_positions(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy(), rtol=1e-5) + + def test_set_mock_dof_velocities(self, view): + """Test mock DOF velocity setter round-trip.""" + mock_data = wp.array(np.random.randn(4, 1, 12).astype(np.float32), dtype=wp.float32, device="cpu") + view.set_mock_dof_velocities(mock_data) + result = view.get_dof_velocities(None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy(), rtol=1e-5) + + def test_set_mock_masses(self, view): + """Test mock body mass setter round-trip.""" + mock_data = wp.array((np.random.rand(4, 1, 13) * 10).astype(np.float32), dtype=wp.float32, device="cpu") + view.set_mock_masses(mock_data) + result = view.get_attribute("body_mass", None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy(), rtol=1e-5) + + def test_set_mock_coms(self, view): + """Test mock body COM setter round-trip.""" + mock_data = wp.zeros((4, 1, 13), dtype=wp.vec3f, device="cpu") + view.set_mock_coms(mock_data) + result = view.get_attribute("body_com", None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + def test_set_mock_inertias(self, view): + """Test mock body inertia setter round-trip.""" + mock_data = wp.zeros((4, 1, 13), dtype=wp.mat33f, device="cpu") + view.set_mock_inertias(mock_data) + result = view.get_attribute("body_inertia", None) + np.testing.assert_allclose(result.numpy(), mock_data.numpy()) + + +class TestMockNewtonArticulationViewRandomData: + """Tests for MockNewtonArticulationView random data generation.""" + + def test_set_random_mock_data_populates_arrays(self): + """Test that set_random_mock_data populates non-None arrays.""" + view = MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + view.set_random_mock_data() + + # Root state should be populated + transforms = view.get_root_transforms(None) + assert transforms is not None + assert transforms.shape == (4, 1) + + velocities = view.get_root_velocities(None) + assert velocities is not None + assert velocities.shape == (4, 1) + + # Link state should be populated + link_transforms = view.get_link_transforms(None) + assert link_transforms is not None + assert link_transforms.shape == (4, 1, 13) + + link_velocities = view.get_link_velocities(None) + assert link_velocities is not None + assert link_velocities.shape == (4, 1, 13) + + # DOF state should be populated + positions = view.get_dof_positions(None) + assert positions is not None + assert positions.shape == (4, 1, 12) + + velocities = view.get_dof_velocities(None) + assert velocities is not None + assert velocities.shape == (4, 1, 12) + + # Attributes should be populated + mass = view.get_attribute("body_mass", None) + assert mass is not None + assert mass.shape == (4, 1, 13) + + def test_set_random_mock_data_fixed_base(self): + """Test random data with fixed base (no velocities).""" + view = MockNewtonArticulationView( + num_instances=4, num_joints=12, num_bodies=13, is_fixed_base=True, device="cpu" + ) + view.set_random_mock_data() + + # Root transforms should have fixed-base shape + transforms = view.get_root_transforms(None) + assert transforms.shape == (4, 1, 1) + + # Velocities should still be None for fixed base + assert view.get_root_velocities(None) is None + assert view.get_link_velocities(None) is None + + def test_set_random_mock_data_has_nonzero_values(self): + """Test that random data has non-zero values.""" + view = MockNewtonArticulationView(num_instances=4, num_joints=12, num_bodies=13, device="cpu") + view.set_random_mock_data() + + positions = view.get_dof_positions(None) + assert not np.allclose(positions.numpy(), 0.0) + + def test_get_attribute_unknown_raises(self): + """Test that requesting an unknown attribute raises KeyError.""" + view = MockNewtonArticulationView() + with pytest.raises(KeyError): + view.get_attribute("nonexistent_attribute", None) diff --git a/source/isaaclab_ov/changelog.d/.gitkeep b/source/isaaclab_ov/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_ov/changelog.d/pbarejko-open-usd.rst b/source/isaaclab_ov/changelog.d/pbarejko-open-usd.rst new file mode 100644 index 000000000000..455768ad5a5c --- /dev/null +++ b/source/isaaclab_ov/changelog.d/pbarejko-open-usd.rst @@ -0,0 +1,7 @@ +Fixed +^^^^^ + +* Fixed ``AttributeError: 'Renderer' object has no attribute 'add_usd'`` in + :class:`~isaaclab_ov.renderers.OVRTXRenderer` when using ``ovrtx`` 0.3.0 or + newer. The renderer now calls :meth:`ovrtx.Renderer.open_usd` on 0.3.0+ and + falls back to ``Renderer.add_usd`` on older versions. diff --git a/source/isaaclab_ov/changelog.d/rschmitt_decouple_renderer_camera.rst b/source/isaaclab_ov/changelog.d/rschmitt_decouple_renderer_camera.rst new file mode 100644 index 000000000000..1de2259dc2c3 --- /dev/null +++ b/source/isaaclab_ov/changelog.d/rschmitt_decouple_renderer_camera.rst @@ -0,0 +1,4 @@ +Changed +^^^^^^^^ + +* Modified the OVRTX renderer to use the new patterns from renderer/camera decoupling. diff --git a/source/isaaclab_ov/config/extension.toml b/source/isaaclab_ov/config/extension.toml new file mode 100644 index 000000000000..ba8f5046c4eb --- /dev/null +++ b/source/isaaclab_ov/config/extension.toml @@ -0,0 +1,11 @@ +[package] +version = "0.1.3" +title = "Omniverse renderers for IsaacLab" +description = "Extension providing Omniverse renderers (OVRTX, ovphysx, etc.) for tiled camera rendering." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "rendering", "ovrtx", "omniverse"] + +[dependencies] +"isaaclab" = {} diff --git a/source/isaaclab_ov/docs/CHANGELOG.rst b/source/isaaclab_ov/docs/CHANGELOG.rst new file mode 100644 index 000000000000..e8afeda30bda --- /dev/null +++ b/source/isaaclab_ov/docs/CHANGELOG.rst @@ -0,0 +1,64 @@ +Changelog +--------- + +0.1.3 (2026-04-30) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Simple-shading outputs, with RTX Minimal mode resolved from the requested camera data types and written on + the injected render product in USD. +* Expanded unit tests for OVRTX Warp kernels in ``test_ovrtx_renderer_kernels.py``. + +Changed +^^^^^^^ + +* OVRTX integration now branches ``read_gpu_transforms``, depth tile extraction, and semantic ID coloring kernels on + ovrtx **0.3.0** vs older versions so tiled buffers and transforms stay correct across ovrtx versions. +* RGB tiling reads ``LdrColor`` and supports both 3- and 4-channel buffers. + +Removed +^^^^^^^ + +* Removed ``OVRTXRendererCfg.simple_shading_mode``. Request simple shading via the simple-shading data types on the + camera instead; the renderer derives RTX minimal mode from the data types. + +0.1.2 (2026-03-23) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Semantic segmentation in :class:`~isaaclab_ov.renderers.OVRTXRenderer` maps + semantic instance IDs to RGBA using the same pseudo-random per-ID HSV scheme as the + Isaac Sim RTX render backend, so OVRTX and Isaac RTX produce matching colors for the + same IDs. Numeric IDs ``0`` (BACKGROUND) and ``1`` (UNLABELLED) use fixed RGBA. + +0.1.1 (2026-03-07) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``ovrtx>=0.2.0,<0.3.0`` as a declared dependency, installable from the + public NVIDIA package index (``pypi.nvidia.com``). +* Added ``ov`` to the list of valid sub-packages for selective installation via + ``./isaaclab.sh -i ov``. + +0.1.0 (2026-03-04) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_ov.renderers` module with OVRTX renderer for tiled camera + rendering: + + * :class:`~isaaclab_ov.renderers.OVRTXRenderer` and + :class:`~isaaclab_ov.renderers.OVRTXRendererCfg`: RTX-based rendering via the + ovrtx library, with stage export, USD cloning, and camera/object bindings. + + * :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.prepare_stage`: Base + interface hook for stage preprocessing before create_render_data (OVRTX + exports USD stage; Isaac RTX and Newton Warp use no-op implementations). diff --git a/source/isaaclab_ov/docs/README.md b/source/isaaclab_ov/docs/README.md new file mode 100644 index 000000000000..70356450bd35 --- /dev/null +++ b/source/isaaclab_ov/docs/README.md @@ -0,0 +1,6 @@ +# Isaac Lab: Omniverse Renderers + +This extension provides Omniverse renderers for tiled camera rendering in Isaac Lab, +including OVRTX for RTX-based path tracing without requiring Isaac Sim. + +OvRTX is an experimental rendering backend. diff --git a/source/isaaclab_ov/isaaclab_ov/__init__.py b/source/isaaclab_ov/isaaclab_ov/__init__.py new file mode 100644 index 000000000000..131d4c9809f3 --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing Omniverse renderers for IsaacLab (OVRTX, ovphysx, etc.).""" diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/__init__.py b/source/isaaclab_ov/isaaclab_ov/renderers/__init__.py new file mode 100644 index 000000000000..f3d2478c322a --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for OVRTX renderer backend.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/__init__.pyi b/source/isaaclab_ov/isaaclab_ov/renderers/__init__.pyi new file mode 100644 index 000000000000..70526feeb84a --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "OVRTXRenderer", + "OVRTXRendererCfg", + "Renderer", +] + +from .ovrtx_renderer import OVRTXRenderer +from .ovrtx_renderer import OVRTXRenderer as Renderer +from .ovrtx_renderer_cfg import OVRTXRendererCfg diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py new file mode 100644 index 000000000000..5d1782373d87 --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py @@ -0,0 +1,656 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""OVRTX Renderer implementation. + +How it fits together +-------------------- +- **ovrtx_renderer.py** (this file): Orchestrates the pipeline. Owns the OVRTX Renderer, + USD loading/cloning, camera and object bindings, and output buffers. Each frame it: + updates camera/object transforms (using kernels), steps the renderer, then extracts + tiles from the tiled framebuffer (kernels). + +- **ovrtx_renderer_kernels.py**: Warp GPU kernels and DEVICE constant. + +- **ovrtx_usd.py**: USD helpers for OVRTX: render var config, camera injection, etc. +""" + +from __future__ import annotations + +import logging +import math +import os +from typing import TYPE_CHECKING, Any + +logger = logging.getLogger(__name__) + +import numpy as np +import torch +import warp as wp + +# The ovrtx C library links to its own version of the USD libraries. Having +# the pxr Python package available can cause the C library to load an +# incompatible version of libusd, potentially leading to undefined behavior. +# By setting OVRTX_SKIP_USD_CHECK, we prevent the C library from loading the pxr Python package. +os.environ["OVRTX_SKIP_USD_CHECK"] = "1" + +import ovrtx +from ovrtx import Device, PrimMode, Renderer, RendererConfig, Semantic +from packaging.version import Version + +from isaaclab.renderers import BaseRenderer, RenderBufferKind, RenderBufferSpec +from isaaclab.utils.math import convert_camera_frame_orientation_convention + +from .ovrtx_renderer_cfg import OVRTXRendererCfg +from .ovrtx_renderer_kernels import ( + DEVICE, + create_camera_transforms_kernel, + extract_all_depth_tiles_kernel, + extract_all_depth_tiles_kernel_legacy, + extract_all_rgba_tiles_kernel, + generate_random_colors_from_ids_kernel, + generate_random_colors_from_ids_kernel_legacy, + sync_newton_transforms_kernel, +) +from .ovrtx_usd import ( + create_cloning_attributes, + export_stage_for_ovrtx, + inject_cameras_into_usd, +) + +if TYPE_CHECKING: + from isaaclab.sensors.camera.camera_data import CameraData + +from isaaclab.renderers.camera_render_spec import CameraRenderSpec + +# Shared integration floor for this module; reuse for ovrtx features that share one support floor. +_OVRTX_VERSION = Version(ovrtx.__version__) +_IS_OVRTX_0_3_0_OR_NEWER = Version("0.3.0") <= _OVRTX_VERSION + +# The resolved integer value is assigned to the ``omni:rtx:minimal:mode`` attribute of the render product. +_RTX_MINIMAL_MODES = { + RenderBufferKind.SIMPLE_SHADING_CONSTANT_DIFFUSE.value: 1, + RenderBufferKind.SIMPLE_SHADING_DIFFUSE_MDL.value: 2, + RenderBufferKind.SIMPLE_SHADING_FULL_MDL.value: 3, +} + + +def _resolve_rtx_minimal_mode(data_types: list[str]) -> int | None: + """Resolve the RTX minimal mode from data types. + + RTX minimal mode is used to control the rendering quality. The higher the mode, the higher the quality. + + If multiple simple shading data types are requested, the first one in the list is used and a warning is logged. + + If no simple shading data types are requested, None is returned. + + Args: + data_types: List of data types. + + Returns: + The resolved RTX minimal mode if simple shading data types are requested, otherwise None. + """ + filtered_data_types = [data_type for data_type in data_types if data_type in _RTX_MINIMAL_MODES] + if not filtered_data_types: + return None + + if len(filtered_data_types) > 1: + logger.warning( + "Multiple simple shading data types requested (%s). Using the first in the list (%s).", + filtered_data_types, + filtered_data_types[0], + ) + + return _RTX_MINIMAL_MODES[filtered_data_types[0]] + + +class OVRTXRenderData: + """OVRTX-specific RenderData. Holds warp output buffers sized from :class:`CameraRenderSpec`.""" + + def __init__(self, spec: CameraRenderSpec, device): + """Create render data from a camera render specification.""" + self.width = spec.cfg.width + self.height = spec.cfg.height + self.num_envs = spec.num_instances + self.data_types = spec.cfg.data_types if spec.cfg.data_types else ["rgb"] + self.num_cols = math.ceil(math.sqrt(self.num_envs)) + self.num_rows = math.ceil(self.num_envs / self.num_cols) + self.warp_buffers: dict[str, wp.array] = {} + + +class OVRTXRenderer(BaseRenderer): + """OVRTX Renderer implementation using the ovrtx library. + + This renderer uses the ovrtx library for high-fidelity RTX-based rendering, + providing ray-traced rendering capabilities for Isaac Lab environments. + """ + + cfg: OVRTXRendererCfg + + def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: + """Publish the per-output layout this OVRTX backend writes. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.supported_output_types`.""" + return { + RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.ALBEDO: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.SIMPLE_SHADING_CONSTANT_DIFFUSE: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.SIMPLE_SHADING_DIFFUSE_MDL: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.SIMPLE_SHADING_FULL_MDL: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.SEMANTIC_SEGMENTATION: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), + RenderBufferKind.DISTANCE_TO_IMAGE_PLANE: RenderBufferSpec(1, torch.float32), + RenderBufferKind.DISTANCE_TO_CAMERA: RenderBufferSpec(1, torch.float32), + } + + def __init__(self, cfg: OVRTXRendererCfg): + self.cfg = cfg + self._usd_handles = [] + self._render_product_paths = [] + self._camera_binding = None + self._object_binding = None + self._object_newton_indices: wp.array | None = None + self._initialized_scene = False + self._exported_usd_path: str | None = None + self._camera_rel_path: str | None = None + self._output_semantic_color_buffer: wp.array | None = None + + def prepare_stage(self, stage: Any, num_envs: int) -> None: + """Export the USD stage for OVRTX before create_render_data. + + Adds cloning attributes and exports the stage to a temporary file. + The exported path is used by create_render_data when loading into OVRTX. + """ + if stage is None: + return + + use_cloning = self.cfg.use_cloning + + logger.info("Preparing stage for export (%d envs, cloning=%s)...", num_envs, use_cloning) + create_cloning_attributes(stage, num_envs, use_cloning) + + export_path = "/tmp/stage_before_ovrtx.usda" + export_stage_for_ovrtx(stage, export_path, num_envs, use_cloning) + self._exported_usd_path = export_path + logger.info("Exported to %s", export_path) + + def initialize(self, spec: CameraRenderSpec): + """Initialize the OVRTX renderer with internal environment cloning. + + Args: + spec: Tiled camera description (resolution, paths, data types). + """ + width = spec.cfg.width + height = spec.cfg.height + num_envs = spec.num_instances + data_types = spec.cfg.data_types if spec.cfg.data_types else ["rgb"] + + env_0_prefix = "/World/envs/env_0/" + first_cam_path = spec.camera_prim_paths[0] + if not first_cam_path.startswith(env_0_prefix): + raise RuntimeError(f"Expected camera prim under '{env_0_prefix}', got '{first_cam_path}'") + self._camera_rel_path = spec.camera_path_relative_to_env_0 + + usd_scene_path = self._exported_usd_path + use_cloning = self.cfg.use_cloning + + logger.info("Creating OVRTX renderer...") + OVRTX_CONFIG = RendererConfig( + log_file_path=self.cfg.log_file_path, + log_level=self.cfg.log_level, + read_gpu_transforms=_IS_OVRTX_0_3_0_OR_NEWER, + ) + self._renderer = Renderer(OVRTX_CONFIG) + assert self._renderer, "Renderer should be valid after creation" + logger.info("OVRTX renderer created successfully") + + if usd_scene_path is not None: + logger.info("Injecting camera definitions...") + + combined_usd_path, render_product_path = inject_cameras_into_usd( + usd_scene_path, + self.cfg, + width=width, + height=height, + num_envs=num_envs, + data_types=data_types, + minimal_mode=_resolve_rtx_minimal_mode(data_types), + camera_rel_path=self._camera_rel_path, + ) + self._render_product_paths.append(render_product_path) + + logger.info("Loading USD into OvRTX...") + try: + if _IS_OVRTX_0_3_0_OR_NEWER: + self._renderer.open_usd(combined_usd_path) + logger.info("USD loaded as root layer (path: %s)", combined_usd_path) + else: + handle = self._renderer.add_usd(combined_usd_path, path_prefix=None) + self._usd_handles.append(handle) + logger.info("USD loaded (path: %s, handle: %s)", combined_usd_path, handle) + except Exception as e: + logger.exception("Error loading USD: %s", e) + raise + + if use_cloning and num_envs > 1: + logger.info("Using OVRTX internal cloning") + self._clone_environments_in_ovrtx(num_envs) + self._update_scene_partitions_after_clone(combined_usd_path, num_envs) + + self._initialized_scene = True + + camera_paths = [f"/World/envs/env_{i}/{self._camera_rel_path}" for i in range(num_envs)] + self._camera_binding = self._renderer.bind_attribute( + prim_paths=camera_paths, + attribute_name="omni:xform", + semantic=Semantic.XFORM_MAT4x4, + prim_mode=PrimMode.EXISTING_ONLY, + ) + + # OVRTX requires omni:resetXformStack on cameras for correct world transform binding + try: + self._renderer.write_attribute( + prim_paths=camera_paths, + attribute_name="omni:resetXformStack", + tensor=np.full(num_envs, True, dtype=np.bool_), + ) + except Exception as e: + logger.warning("Failed to write omni:resetXformStack: %s", e) + + if self._camera_binding is not None: + logger.info("Camera binding created successfully") + else: + logger.warning("Camera binding is None") + + self._setup_object_bindings() + + def _clone_environments_in_ovrtx(self, num_envs: int): + """Clone base environment (env_0) to all other environments using OvRTX.""" + logger.info("Cloning base environment to %d targets...", num_envs - 1) + source_path = "/World/envs/env_0" + target_paths = [f"/World/envs/env_{i}" for i in range(1, num_envs)] + try: + self._renderer.clone_usd(source_path, target_paths) + logger.info("Cloned %d environments successfully", len(target_paths)) + except Exception as e: + logger.error("Failed to clone environments: %s", e) + raise RuntimeError(f"OvRTX environment cloning failed: {e}") + + def _update_scene_partitions_after_clone(self, usd_file_path: str, num_envs: int): + """Update scene partition attributes on cloned environments and cameras in OvRTX.""" + logger.info("Writing scene partitions for %d environments...", num_envs) + partition_tokens = [f"env_{i}" for i in range(num_envs)] + env_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + camera_prim_paths = [f"/World/envs/env_{i}/{self._camera_rel_path}" for i in range(num_envs)] + + try: + self._renderer.write_attribute( + env_prim_paths, + "primvars:omni:scenePartition", + partition_tokens, + semantic=Semantic.TOKEN_STRING, + ) + logger.info("Written primvars:omni:scenePartition to %d environments", num_envs) + + self._renderer.write_attribute( + camera_prim_paths, + "omni:scenePartition", + partition_tokens, + semantic=Semantic.TOKEN_STRING, + ) + logger.info("Written omni:scenePartition to %d cameras", num_envs) + except Exception as e: + logger.warning("Failed to write scene partitions: %s", e, exc_info=True) + + def _setup_object_bindings(self): + """Setup OVRTX bindings for scene objects to sync with Newton physics.""" + try: + from isaaclab.sim import SimulationContext + + provider = SimulationContext.instance().initialize_scene_data_provider() + newton_model = provider.get_newton_model() + if newton_model is None: + logger.info("Newton model not available, skipping object bindings") + return + + all_body_paths = getattr(newton_model, "body_label", None) + if all_body_paths is None: + logger.info("Newton model has no body_label, skipping object bindings") + return + + object_paths = [] + newton_indices = [] + for idx, path in enumerate(all_body_paths): + if "/World/envs/" in path and self._camera_rel_path not in path and "GroundPlane" not in path: + object_paths.append(path) + newton_indices.append(idx) + + if len(object_paths) == 0: + logger.info("No dynamic objects found for binding") + return + + self._object_binding = self._renderer.bind_attribute( + prim_paths=object_paths, + attribute_name="omni:xform", + semantic=Semantic.XFORM_MAT4x4, + prim_mode=PrimMode.EXISTING_ONLY, + ) + + try: + self._renderer.write_attribute( + prim_paths=object_paths, + attribute_name="omni:resetXformStack", + tensor=np.full(len(object_paths), True, dtype=np.bool_), + ) + except Exception as e: + logger.warning("Failed to write omni:resetXformStack on objects: %s", e) + + if self._object_binding is not None: + logger.info("Object binding created successfully") + self._object_newton_indices = wp.array(newton_indices, dtype=wp.int32, device=DEVICE) + else: + logger.warning("Object binding is None") + except ImportError: + logger.info("Newton not available, skipping object bindings") + except Exception as e: + logger.warning("Error setting up object bindings: %s", e) + + def create_render_data(self, spec: CameraRenderSpec) -> OVRTXRenderData: + """Create OVRTX-specific RenderData with GPU buffers. + + Performs OVRTX initialization (stage export, USD load, bindings) on first call, + matching the interface of Isaac RTX and Newton Warp which need no separate initialize(). + """ + if not self._initialized_scene: + self.initialize(spec) + return OVRTXRenderData(spec, DEVICE) + + # Map torch dtypes to their warp counterparts for zero-copy wrapping. + _TORCH_TO_WP_DTYPE: dict[torch.dtype, Any] = { + torch.uint8: wp.uint8, + torch.float32: wp.float32, + torch.int32: wp.int32, + } + + def set_outputs(self, render_data: OVRTXRenderData, output_data: dict[str, torch.Tensor]) -> None: + """Wrap caller-owned torch output tensors as zero-copy warp arrays. + + Aliased views over a contiguous sibling (e.g. ``rgb`` over ``rgba``) are + skipped; any other non-contiguous tensor raises ``ValueError``. + + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.set_outputs`. + """ + render_data.warp_buffers = {} + for name, tensor in output_data.items(): + if not tensor.is_contiguous(): + if tensor.data_ptr() in {t.data_ptr() for t in output_data.values() if t.is_contiguous()}: + continue + raise ValueError( + f"OVRTXRenderer.set_outputs: output '{name}' is non-contiguous and is not an" + " alias of a contiguous output tensor; cannot wrap as a zero-copy warp array." + ) + wp_dtype = self._TORCH_TO_WP_DTYPE.get(tensor.dtype) + if wp_dtype is None: + raise ValueError( + f"OVRTXRenderer.set_outputs: unsupported torch dtype {tensor.dtype} for output" + f" '{name}'. Add it to OVRTXRenderer._TORCH_TO_WP_DTYPE." + ) + torch_array = wp.from_torch(tensor) + render_data.warp_buffers[name] = wp.array( + ptr=torch_array.ptr, + dtype=wp_dtype, + shape=tuple(tensor.shape), + device=torch_array.device, + copy=False, + ) + + def update_transforms(self) -> None: + """Sync physics objects to OVRTX.""" + if self._object_binding is None or self._object_newton_indices is None: + return + + try: + from isaaclab.sim import SimulationContext + + provider = SimulationContext.instance().initialize_scene_data_provider() + newton_state = provider.get_newton_state() + if newton_state is None: + return + body_q = getattr(newton_state, "body_q", None) + if body_q is None: + return + + with self._object_binding.map(device=Device.CUDA, device_id=0) as attr_mapping: + ovrtx_transforms = wp.from_dlpack(attr_mapping.tensor, dtype=wp.mat44d) + wp.launch( + kernel=sync_newton_transforms_kernel, + dim=len(self._object_newton_indices), + inputs=[ovrtx_transforms, self._object_newton_indices, body_q], + device=DEVICE, + ) + except Exception as e: + logger.warning("Failed to update object transforms: %s", e) + + def update_camera( + self, + render_data: OVRTXRenderData, + positions: torch.Tensor, + orientations: torch.Tensor, + intrinsics: torch.Tensor, + ) -> None: + """Update camera transforms in OVRTX binding.""" + num_envs = positions.shape[0] + camera_quats_opengl = convert_camera_frame_orientation_convention(orientations, origin="world", target="opengl") + camera_positions_wp = wp.from_torch(positions.contiguous(), dtype=wp.vec3) + camera_orientations_wp = wp.from_torch(camera_quats_opengl.contiguous(), dtype=wp.quatf) + camera_transforms = wp.zeros(num_envs, dtype=wp.mat44d, device=DEVICE) + wp.launch( + kernel=create_camera_transforms_kernel, + dim=num_envs, + inputs=[camera_positions_wp, camera_orientations_wp, camera_transforms], + device=DEVICE, + ) + if self._camera_binding is not None: + with self._camera_binding.map(device=Device.CUDA, device_id=0) as attr_mapping: + wp_transforms_view = wp.from_dlpack(attr_mapping.tensor, dtype=wp.mat44d) + wp.copy(wp_transforms_view, camera_transforms) + + def read_output( + self, + render_data: OVRTXRenderData, + camera_data: CameraData, + ) -> None: + """No-op: outputs already live in the caller's torch storage. + + :meth:`set_outputs` wraps each ``camera_data.output`` tensor as a + zero-copy warp array stored in ``render_data.warp_buffers``, and + :meth:`render` writes the rendered tiles directly into those warp + arrays. There is therefore nothing to copy here. + + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.read_output`. + """ + + def _generate_random_colors_from_ids(self, input_ids: wp.array) -> wp.array: + """Generate pseudo-random colors from semantic IDs.""" + if self._output_semantic_color_buffer is None or self._output_semantic_color_buffer.shape != input_ids.shape: + self._output_semantic_color_buffer = wp.zeros(shape=input_ids.shape, dtype=wp.uint32, device=DEVICE) + + output_colors = self._output_semantic_color_buffer + + wp.launch( + kernel=( + generate_random_colors_from_ids_kernel + if _IS_OVRTX_0_3_0_OR_NEWER + else generate_random_colors_from_ids_kernel_legacy + ), + dim=input_ids.shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + + return output_colors + + def _extract_rgba_tiles( + self, + render_data: OVRTXRenderData, + tiled_data: wp.array, + output_buffers: dict, + buffer_key: str, + suffix: str = "", + ) -> None: + """Extract per-env RGBA tiles from tiled buffer into output_buffers (single kernel launch).""" + output_buffer = output_buffers[buffer_key] + num_channels = output_buffer.shape[-1] + if num_channels not in (3, 4): + raise ValueError(f"Expected RGB (3 channels) or RGBA (4 channels), got {num_channels}") + + wp.launch( + kernel=extract_all_rgba_tiles_kernel, + dim=(render_data.num_envs, render_data.height, render_data.width), + inputs=[ + tiled_data, + output_buffer, + render_data.num_cols, + render_data.width, + render_data.height, + num_channels, + ], + device=DEVICE, + ) + + def _extract_depth_tiles( + self, render_data: OVRTXRenderData, tiled_depth_data: wp.array, output_buffers: dict + ) -> None: + """Extract per-env depth tiles into output_buffers (single kernel launch).""" + kernel = extract_all_depth_tiles_kernel if _IS_OVRTX_0_3_0_OR_NEWER else extract_all_depth_tiles_kernel_legacy + + for depth_type in ["depth", "distance_to_image_plane", "distance_to_camera"]: + if depth_type in output_buffers: + wp.launch( + kernel=kernel, + dim=(render_data.num_envs, render_data.height, render_data.width), + inputs=[ + tiled_depth_data, + output_buffers[depth_type], + render_data.num_cols, + render_data.width, + render_data.height, + ], + device=DEVICE, + ) + + def _process_render_frame(self, render_data: OVRTXRenderData, frame, output_buffers: dict) -> None: + """Extract RGB, depth, albedo, and semantic from a single render frame into output_buffers.""" + if "LdrColor" in frame.render_vars: + buffer_key = None + + if "rgba" in output_buffers: + buffer_key = "rgba" + else: + # The output buffers must contain only one simple shading data type at most after resolution of the data + # types during creation of the output buffers (OVRTXRenderData._create_warp_buffers). + for dt in _RTX_MINIMAL_MODES: + if dt in output_buffers: + buffer_key = dt + break + + if buffer_key is not None: + with frame.render_vars["LdrColor"].map(device=Device.CUDA) as mapping: + tiled_data = wp.from_dlpack(mapping.tensor) + self._extract_rgba_tiles(render_data, tiled_data, output_buffers, buffer_key) + + for depth_var in ["DistanceToImagePlaneSD", "DepthSD"]: + if depth_var not in frame.render_vars: + continue + with frame.render_vars[depth_var].map(device=Device.CUDA) as mapping: + tiled_depth_data = wp.from_dlpack(mapping.tensor) + if tiled_depth_data.dtype == wp.uint32: + tiled_depth_data = wp.from_torch( + wp.to_torch(tiled_depth_data).view(torch.float32), dtype=wp.float32 + ) + self._extract_depth_tiles(render_data, tiled_depth_data, output_buffers) + break + + if "DiffuseAlbedoSD" in frame.render_vars and "albedo" in output_buffers: + with frame.render_vars["DiffuseAlbedoSD"].map(device=Device.CUDA) as mapping: + tiled_albedo_data = wp.from_dlpack(mapping.tensor) + self._extract_rgba_tiles(render_data, tiled_albedo_data, output_buffers, "albedo", suffix="albedo") + + if "SemanticSegmentation" in frame.render_vars and "semantic_segmentation" in output_buffers: + with frame.render_vars["SemanticSegmentation"].map(device=Device.CUDA) as mapping: + tiled_semantic_data = wp.from_dlpack(mapping.tensor) + + if tiled_semantic_data.dtype == wp.uint32: + semantic_colors = self._generate_random_colors_from_ids(tiled_semantic_data) + + semantic_torch = wp.to_torch(semantic_colors) + semantic_uint8 = semantic_torch.view(torch.uint8) + + if semantic_torch.dim() == 2: + h, w = semantic_torch.shape + semantic_uint8 = semantic_uint8.reshape(h, w, 4) + + tiled_semantic_data = wp.from_torch(semantic_uint8, dtype=wp.uint8) + + self._extract_rgba_tiles( + render_data, + tiled_semantic_data, + output_buffers, + "semantic_segmentation", + suffix="semantic", + ) + + def render(self, render_data: OVRTXRenderData) -> None: + """Render the scene into the provided RenderData.""" + if not self._initialized_scene: + raise RuntimeError("Scene not initialized. Call initialize() first.") + if self._renderer is None or len(self._render_product_paths) == 0: + return + try: + products = self._renderer.step( + render_products=set(self._render_product_paths), + delta_time=1.0 / 60.0, + ) + product_path = self._render_product_paths[0] + if product_path in products and len(products[product_path].frames) > 0: + self._process_render_frame( + render_data, + products[product_path].frames[0], + render_data.warp_buffers, + ) + except Exception as e: + logger.warning("OVRTX rendering failed: %s", e, exc_info=True) + + def cleanup(self, render_data: OVRTXRenderData | None) -> None: + """Release renderer resources. See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.cleanup`.""" + + # Unbind before tearing down renderer + def _safe_unbind(binding, name: str) -> None: + if binding is None: + return + try: + binding.unbind() + except Exception as e: + if "destroyed" not in str(e).lower(): + logger.warning("Error unbinding %s: %s", name, e) + + _safe_unbind(self._camera_binding, "camera transforms") + self._camera_binding = None + _safe_unbind(self._object_binding, "object transforms") + self._object_binding = None + + if self._renderer: + if self._usd_handles: + for handle in self._usd_handles: + try: + self._renderer.remove_usd(handle) + except Exception as e: + logger.warning("Error removing USD: %s", e) + self._usd_handles.clear() + self._renderer = None + + self._render_product_paths.clear() + self._output_semantic_color_buffer = None + self._initialized_scene = False diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_cfg.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_cfg.py new file mode 100644 index 000000000000..9c26d3c79bf1 --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_cfg.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for OVRTX Renderer.""" + +import tempfile +from pathlib import Path + +from isaaclab.renderers.renderer_cfg import RendererCfg +from isaaclab.utils import configclass + + +@configclass +class OVRTXRendererCfg(RendererCfg): + """Configuration for OVRTX Renderer. + + The OVRTX renderer uses the ovrtx library for high-fidelity RTX-based rendering. + width, height, num_envs, and data_types are obtained from the + :class:`~isaaclab.renderers.camera_render_spec.CameraRenderSpec` when + :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.create_render_data` is called + (same pattern as Isaac RTX). + """ + + renderer_type: str = "ovrtx" + """Type identifier for OVRTX renderer.""" + + temp_usd_dir: str = str(Path(tempfile.gettempdir()) / "ovrtx") + """Directory for temporary combined USD files (scene + injected cameras). + Used by the OVRTX renderer when building the render scope; must be writable. + """ + + temp_usd_suffix: str = ".usda" + """File suffix for temporary combined USD files (e.g. '.usda' or '.usdc').""" + + use_cloning: bool = False + """When True, export only env_0 and use OVRTX clone_usd. When False, export full stage.""" + + log_level: str = "verbose" + """OVRTX carb log level: "verbose", "info", "warn", "error".""" + + log_file_path: str = "/tmp/ovrtx_renderer.log" + """Path for OVRTX log file.""" diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py new file mode 100644 index 000000000000..c287f1257632 --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py @@ -0,0 +1,330 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels and device constant for OVRTX renderer.""" + +import warp as wp + +DEVICE = "cuda:0" + + +@wp.kernel +def create_camera_transforms_kernel( + positions: wp.array(dtype=wp.vec3), # type: ignore + orientations: wp.array(dtype=wp.quatf), # type: ignore + transforms: wp.array(dtype=wp.mat44d), # type: ignore +): + """Build camera 4x4 transforms from positions and quaternions (column-major for OVRTX).""" + i = wp.tid() + pos = positions[i] + quat = orientations[i] + qx, qy, qz, qw = quat[0], quat[1], quat[2], quat[3] + + r00 = 1.0 - 2.0 * (qy * qy + qz * qz) + r01 = 2.0 * (qx * qy - qw * qz) + r02 = 2.0 * (qx * qz + qw * qy) + r10 = 2.0 * (qx * qy + qw * qz) + r11 = 1.0 - 2.0 * (qx * qx + qz * qz) + r12 = 2.0 * (qy * qz - qw * qx) + r20 = 2.0 * (qx * qz - qw * qy) + r21 = 2.0 * (qy * qz + qw * qx) + r22 = 1.0 - 2.0 * (qx * qx + qy * qy) + + _0 = wp.float64(0.0) + _1 = wp.float64(1.0) + transforms[i] = wp.mat44d( # type: ignore + wp.float64(r00), + wp.float64(r10), + wp.float64(r20), + _0, + wp.float64(r01), + wp.float64(r11), + wp.float64(r21), + _0, + wp.float64(r02), + wp.float64(r12), + wp.float64(r22), + _0, + wp.float64(float(pos[0])), + wp.float64(float(pos[1])), + wp.float64(float(pos[2])), + _1, + ) + + +@wp.kernel +def extract_tile_from_tiled_buffer_kernel( + tiled_buffer: wp.array(dtype=wp.uint8, ndim=3), # type: ignore + tile_buffer: wp.array(dtype=wp.uint8, ndim=3), # type: ignore + tile_x: int, + tile_y: int, + tile_width: int, + tile_height: int, +): + """Extract one RGBA tile from a tiled buffer.""" + y, x = wp.tid() + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + tile_buffer[y, x, 0] = tiled_buffer[src_y, src_x, 0] + tile_buffer[y, x, 1] = tiled_buffer[src_y, src_x, 1] + tile_buffer[y, x, 2] = tiled_buffer[src_y, src_x, 2] + tile_buffer[y, x, 3] = tiled_buffer[src_y, src_x, 3] + + +@wp.kernel +def extract_all_rgba_tiles_kernel( + tiled_buffer: wp.array(dtype=wp.uint8, ndim=3), # type: ignore + output_buffer: wp.array(dtype=wp.uint8, ndim=4), # type: ignore + num_cols: int, + tile_width: int, + tile_height: int, + num_channels: int, +): + """Extract ALL RGBA or RGB tiles from a tiled buffer in a single kernel launch. + + Args: + tiled_buffer: 3D uint8 array of shape (H, W, 4) for RGBA or (H, W, 3) for RGB. + output_buffer: 4D uint8 array of shape (num_envs, H, W, 4) for RGBA or (num_envs, H, W, 3) for RGB. + num_cols: number of columns in the tiled buffer. + tile_width: width of each tile. + tile_height: height of each tile. + num_channels: number of channels in the output buffer. Use 3 for RGB or 4 for RGBA. + If a value other than 3 or 4 is given, it will be treated as 3 (RGB). + """ + env_idx, y, x = wp.tid() + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + + # RGB + output_buffer[env_idx, y, x, 0] = tiled_buffer[src_y, src_x, 0] + output_buffer[env_idx, y, x, 1] = tiled_buffer[src_y, src_x, 1] + output_buffer[env_idx, y, x, 2] = tiled_buffer[src_y, src_x, 2] + + # Alpha (if it is RGBA) + if num_channels == 4: + output_buffer[env_idx, y, x, 3] = tiled_buffer[src_y, src_x, 3] + + +@wp.kernel +def extract_all_depth_tiles_kernel_legacy( + tiled_buffer: wp.array(dtype=wp.float32, ndim=2), # type: ignore + output_buffer: wp.array(dtype=wp.float32, ndim=4), # type: ignore + num_cols: int, + tile_width: int, + tile_height: int, +): + """Extract all depth tiles from a tiled buffer in a single kernel launch. + + Used with ovrtx older than 0.3.0. + + Args: + tiled_buffer: 2D float32 array of shape (H, W) for depth. + output_buffer: 4D float32 array of shape (num_envs, H, W, 1) for depth. + num_cols: number of columns in the tiled buffer. + tile_width: width of each tile. + tile_height: height of each tile. + """ + env_idx, y, x = wp.tid() + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + output_buffer[env_idx, y, x, 0] = tiled_buffer[src_y, src_x] + + +@wp.kernel +def extract_all_depth_tiles_kernel( + tiled_buffer: wp.array(dtype=wp.float32, ndim=3), # type: ignore + output_buffer: wp.array(dtype=wp.float32, ndim=4), # type: ignore + num_cols: int, + tile_width: int, + tile_height: int, +): + """Extract all depth tiles from a tiled buffer in a single kernel launch. + + Args: + tiled_buffer: 3D float32 array of shape (H, W, 1) for depth. + output_buffer: 4D float32 array of shape (num_envs, H, W, 1) for depth. + num_cols: number of columns in the tiled buffer. + tile_width: width of each tile. + tile_height: height of each tile. + """ + env_idx, y, x = wp.tid() + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + output_buffer[env_idx, y, x, 0] = tiled_buffer[src_y, src_x, 0] + + +@wp.kernel +def extract_depth_tile_from_tiled_buffer_kernel( + tiled_buffer: wp.array(dtype=wp.float32, ndim=2), # type: ignore + tile_buffer: wp.array(dtype=wp.float32, ndim=3), # type: ignore + tile_x: int, + tile_y: int, + tile_width: int, + tile_height: int, +): + """Extract one depth tile from a tiled depth buffer.""" + y, x = wp.tid() + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + tile_buffer[y, x, 0] = tiled_buffer[src_y, src_x] + + +@wp.func +def color_hash(seed: wp.uint32) -> wp.uint32: + """Simple hash function for better distribution. Used for colorization.""" + # uint32 integers are promoted to uint64 to avoid overflow during multiplication. + h = wp.uint64(seed) + h = h ^ (h >> wp.uint64(16)) + h = h * wp.uint64(wp.uint32(0x85EBCA6B)) + h = h ^ (h >> wp.uint64(13)) + h = h * wp.uint64(wp.uint32(0xC2B2AE35)) + h = h ^ (h >> wp.uint64(16)) + return wp.uint32(h) + + +@wp.func +def random_color_from_id(input_id: wp.uint32) -> wp.uint32: + """Generate random color from a single ID. + + Generate visually distinct colours by linearly spacing the hue channel in HSV space and then convert to RGB space. + + Args: + input_id: uint32 semantic ID + + Returns: + uint32 color: ``r | (g<<8) | (b<<16) | (a<<24)`` + """ + if input_id == wp.uint32(0): + # BACKGROUND special case + return wp.uint32(0) + if input_id == wp.uint32(1): + # UNLABELLED special case + return wp.uint32(0xFF000000) + + hash_val = color_hash(input_id) + + # Golden ratio inverse = 1.0 / 1.618033988749895 (Replicator constant) + GOLDEN_RATIO_INV = wp.float64(1.0) / wp.float64(1.618033988749895) + + # Use golden ratio spacing for maximum hue spread + hue_tmp = wp.float64(input_id) * GOLDEN_RATIO_INV + hue = hue_tmp - wp.floor(hue_tmp) + + # Add hash-based perturbation for better distribution + hue_perturbation = wp.float64(hash_val & wp.uint32(0xFFFF)) / wp.float64(65536.0) + hue_tmp = hue + hue_perturbation * wp.float64(0.1) + hue = hue_tmp - wp.floor(hue_tmp) + + # Use hash to determine saturation and value for maximum spread + sat_part = wp.uint32((hash_val >> wp.uint32(16)) & wp.uint32(0xFF)) + val_part = wp.uint32((hash_val >> wp.uint32(8)) & wp.uint32(0xFF)) + + # Saturation: 0.7 to 1.0 for vibrant colors + saturation = wp.float64(0.7) + wp.float64(0.3) * (wp.float64(sat_part) / wp.float64(255.0)) + + # Value: 0.8 to 1.0 for bright colors + value = wp.float64(0.8) + wp.float64(0.2) * (wp.float64(val_part) / wp.float64(255.0)) + + # HSV to RGB conversion (match Replicator: ``f`` uses pre-modulo ``int(hue*6)``, then ``i %= 6``). + hue_i = wp.int32(hue * wp.float64(6.0)) + hue_f = hue * wp.float64(6.0) - wp.float64(hue_i) + p = value * (wp.float64(1.0) - saturation) + q = value * (wp.float64(1.0) - saturation * hue_f) + t = value * (wp.float64(1.0) - saturation * (wp.float64(1.0) - hue_f)) + + r = wp.float64(0.0) + g = wp.float64(0.0) + b = wp.float64(0.0) + + hue_i = hue_i % 6 + + if hue_i == 0: + r = value + g = t + b = p + elif hue_i == 1: + r = q + g = value + b = p + elif hue_i == 2: + r = p + g = value + b = t + elif hue_i == 3: + r = p + g = q + b = value + elif hue_i == 4: + r = t + g = p + b = value + else: + r = value + g = p + b = q + + ri = wp.min(255, wp.max(0, wp.int32(r * wp.float64(255.0)))) + gi = wp.min(255, wp.max(0, wp.int32(g * wp.float64(255.0)))) + bi = wp.min(255, wp.max(0, wp.int32(b * wp.float64(255.0)))) + ai = wp.int32(255) + + color = ( + wp.uint32(ri) + | (wp.uint32(gi) << wp.uint32(8)) + | (wp.uint32(bi) << wp.uint32(16)) + | (wp.uint32(ai) << wp.uint32(24)) + ) + return color + + +@wp.kernel +def generate_random_colors_from_ids_kernel_legacy( + input_ids: wp.array(dtype=wp.uint32, ndim=2), # type: ignore + output_colors: wp.array(dtype=wp.uint32, ndim=2), # type: ignore +): + """Generate random colors given IDs (e.g. semantic IDs). + + Used with ovrtx older than 0.3.0. + + Args: + input_ids: 2D uint32 array of shape (H, W) for semantic IDs per pixel. + output_data: 2D uint32 array; each word is `r | (g<<8) | (b<<16) | (a<<24)` + """ + i, j = wp.tid() + output_colors[i, j] = random_color_from_id(input_ids[i, j]) + + +@wp.kernel +def generate_random_colors_from_ids_kernel( + input_ids: wp.array(dtype=wp.uint32, ndim=3), # type: ignore + output_colors: wp.array(dtype=wp.uint32, ndim=3), # type: ignore +): + """Generate random colors given IDs (e.g. semantic IDs). + + Args: + input_ids: 3D uint32 array for semantic IDs per pixel. + output_colors: 3D uint32 array for colors per pixel; each word is ``r | (g<<8) | (b<<16) | (a<<24)``. + """ + i, j, k = wp.tid() + output_colors[i, j, k] = random_color_from_id(input_ids[i, j, k]) + + +@wp.kernel +def sync_newton_transforms_kernel( + ovrtx_transforms: wp.array(dtype=wp.mat44d), # type: ignore + newton_body_indices: wp.array(dtype=wp.int32), # type: ignore + newton_body_q: wp.array(dtype=wp.transformf), # type: ignore +): + """Sync Newton physics body transforms to OVRTX 4x4 column-major matrices.""" + i = wp.tid() + body_idx = newton_body_indices[i] + transform = newton_body_q[body_idx] + ovrtx_transforms[i] = wp.transpose(wp.mat44d(wp.math.transform_to_matrix(transform))) diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py new file mode 100644 index 000000000000..a222981ea2ed --- /dev/null +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py @@ -0,0 +1,240 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""USD manipulation for OVRTX: Render scope building, camera injection, and stage prim activation.""" + +import logging +import math +import tempfile +from pathlib import Path +from typing import TYPE_CHECKING + +from pxr import Sdf, Usd, UsdGeom + +if TYPE_CHECKING: + from .ovrtx_renderer_cfg import OVRTXRendererCfg + +logger = logging.getLogger(__name__) + + +def get_render_var_config(data_types: list[str]) -> tuple[str, str, str]: + """Return (render_var_path, render_var_name, source_name) from data_types.""" + use_depth = any(dt in ["depth", "distance_to_image_plane", "distance_to_camera"] for dt in data_types) + use_albedo = "albedo" in data_types + use_semantic = "semantic_segmentation" in data_types + use_rgb = any(dt in ["rgb", "rgba"] for dt in data_types) + + if use_depth and not (use_rgb or use_albedo or use_semantic): + return "/Render/Vars/depth", "depth", "DistanceToImagePlaneSD" + if use_albedo and not (use_rgb or use_semantic): + return "/Render/Vars/albedo", "albedo", "DiffuseAlbedoSD" + if use_semantic and not (use_rgb or use_albedo): + return "/Render/Vars/semantic", "semantic", "SemanticSegmentation" + return "/Render/Vars/LdrColor", "LdrColor", "LdrColor" + + +def build_render_scope_usd( + camera_paths: list[str], + render_product_name: str, + render_var_path: str, + render_var_name: str, + source_name: str, + tiled_width: int, + tiled_height: int, + minimal_mode: int | None = None, +) -> str: + """Build the Render scope USD string (def Scope Render, RenderProduct, Vars). + + Args: + camera_paths: List of camera prim paths. + render_product_name: Name of the render product. + render_var_path: Path of the render variable. + render_var_name: Name of the render variable. + source_name: Name of the source. + tiled_width: Width of the tiled image. + tiled_height: Height of the tiled image. + minimal_mode: RTX minimal mode. None if not requested. Valid values are 1, 2, 3. + + Returns: + The USD string for the render scope. + """ + camera_rel_list = ", ".join([f"<{p}>" for p in camera_paths]) + + if minimal_mode is None: + render_mode_lines = ['token omni:rtx:rendermode = "RealTimePathTracing"'] + else: + render_mode_lines = [ + 'token omni:rtx:rendermode = "Minimal"', + f"int omni:rtx:minimal:mode = {minimal_mode}", + ] + + render_mode_block = "\n ".join(render_mode_lines) + + return f''' +def Scope "Render" +{{ + def RenderProduct "{render_product_name}" ( + prepend apiSchemas = ["OmniRtxSettingsCommonAdvancedAPI_1"] + ) {{ + rel camera = [{camera_rel_list}] + token omni:rtx:background:source:type = "domeLight" + float omni:rtx:rt:ambientLight:intensity = 1.0 + {render_mode_block} + token[] omni:rtx:waitForEvents = ["AllLoadingFinished", "OnlyOnFirstRequest"] + rel orderedVars = <{render_var_path}> + uniform int2 resolution = ({tiled_width}, {tiled_height}) + }} + + def "Vars" + {{ + def RenderVar "{render_var_name}" + {{ + uniform string sourceName = "{source_name}" + }} + }} +}} +''' + + +def _tiled_resolution(num_envs: int, width: int, height: int) -> tuple[int, int]: + """Compute tiled width and height from env count and per-env resolution (same as Camera).""" + num_cols = math.ceil(math.sqrt(num_envs)) + num_rows = math.ceil(num_envs / num_cols) + return num_cols * width, num_rows * height + + +def inject_cameras_into_usd( + usd_scene_path: str, + cfg: "OVRTXRendererCfg", + width: int, + height: int, + num_envs: int, + data_types: list[str], + minimal_mode: int | None = None, + camera_rel_path: str = "Camera", +) -> tuple[str, str]: + """Inject camera and render product definitions into an existing USD file. + + Reads the USD file, appends a Render scope (cameras + RenderProduct + Vars), + writes to a temp file in cfg.temp_usd_dir, and returns (path_to_combined_usd, render_product_path). + + Args: + usd_scene_path: Path to the base USD scene. + cfg: OVRTX renderer config (simple_shading_mode, temp_usd_dir, temp_usd_suffix). + width: Tile width from sensor config. + height: Tile height from sensor config. + num_envs: Number of environments from scene. + data_types: Data types from sensor config. + minimal_mode: RTX minimal mode. None if not requested. Valid values are 1, 2, 3. + camera_rel_path: Camera prim path relative to the env root (e.g. ``"Camera"`` or ``"Robot/head_cam"``). + """ + with open(usd_scene_path) as f: + original_usd = f.read() + + data_types = data_types if data_types else ["rgb"] + tiled_width, tiled_height = _tiled_resolution(num_envs, width, height) + + camera_paths = [f"/World/envs/env_{i}/{camera_rel_path}" for i in range(num_envs)] + render_product_name = "RenderProduct" + render_product_path = f"/Render/{render_product_name}" + + render_var_path, render_var_name, source_name = get_render_var_config(data_types) + + camera_content = build_render_scope_usd( + camera_paths, + render_product_name, + render_var_path, + render_var_name, + source_name, + tiled_width, + tiled_height, + minimal_mode, + ) + combined_usd = original_usd.rstrip() + "\n\n" + camera_content + + Path(cfg.temp_usd_dir).mkdir(parents=True, exist_ok=True) + with tempfile.NamedTemporaryFile(mode="w", suffix=cfg.temp_usd_suffix, delete=False, dir=cfg.temp_usd_dir) as f: + f.write(combined_usd) + temp_path = f.name + logger.info("Created combined USD: %s", temp_path) + return temp_path, render_product_path + + +def create_cloning_attributes(stage, num_envs: int = 1, use_cloning: bool = True) -> int: + """Create OVRTX cloning attributes (scene partition, xform) on env_0 only. + + Only env_0 is exported for OVRTX; env_1..env_{n-1} are deactivated before export. + OVRTX clones env_0 internally and _update_scene_partitions_after_clone sets + partition attributes on the clones. So we only need to set attributes on env_0 here. + + Camera prims are discovered by USD type (``UsdGeom.Camera``) rather than by + name, so this works regardless of where the camera is placed in the hierarchy. + + Args: + stage: USD stage to modify. + num_envs: Number of environments. + use_cloning: Whether OVRTX cloning is enabled. + + Returns: + Total number of objects (non-camera prims) that received partition attributes. + """ + total_objects = 0 + env_indices = [0] if use_cloning else range(num_envs) + for env_idx in env_indices: + env_path = f"/World/envs/env_{env_idx}" + env_prim = stage.GetPrimAtPath(env_path) + if not env_prim.IsValid(): + continue + partition_name = f"env_{env_idx}" + attr = env_prim.CreateAttribute("primvars:omni:scenePartition", Sdf.ValueTypeNames.Token) + attr.Set(partition_name) + for prim in Usd.PrimRange(env_prim): + if prim.GetPath() == env_prim.GetPath(): + continue + if prim.IsA(UsdGeom.Camera): + prim.CreateAttribute("omni:scenePartition", Sdf.ValueTypeNames.Token).Set(partition_name) + else: + prim.CreateAttribute("primvars:omni:scenePartition", Sdf.ValueTypeNames.Token).Set(partition_name) + total_objects += 1 + return total_objects + + +def export_stage_for_ovrtx(stage, export_path: str, num_envs: int, use_cloning: bool = True) -> str: + """Export the stage to a USD file; when num_envs > 1, only env_0 is exported for OVRTX cloning. + + When num_envs > 1, deactivates env_1..env_{num_envs-1} before export and reactivates + them after, so the file contains only env_0. The stage is modified in place. + + Args: + stage: USD stage to export. + export_path: Path for the exported file. + num_envs: Number of environments. + + Returns: + export_path (same as input). + """ + deactivated = [] + if use_cloning and num_envs > 1: + logger.info("Deactivating %d cloned environments...", num_envs - 1) + for env_idx in range(1, num_envs): + env_path = f"/World/envs/env_{env_idx}" + prim = stage.GetPrimAtPath(env_path) + if prim.IsValid() and prim.IsActive(): + prim.SetActive(False) + deactivated.append(prim) + if env_idx <= 3 or env_idx == num_envs - 1: + logger.info("Deactivated: %s", env_path) + if num_envs > 5: + logger.info("... (deactivated %d environments total)", len(deactivated)) + + try: + stage.Export(export_path) + return export_path + finally: + if deactivated: + logger.info("Reactivating %d environments...", len(deactivated)) + for prim in deactivated: + if prim.IsValid(): + prim.SetActive(True) diff --git a/source/isaaclab_ov/setup.py b/source/isaaclab_ov/setup.py new file mode 100644 index 000000000000..c955c54bfa15 --- /dev/null +++ b/source/isaaclab_ov/setup.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_ov' python package.""" + +from setuptools import setup + +EXTRAS_REQUIRE = { + "ovrtx": [ + "ovrtx>=0.2.0,<0.3.0", + ], +} + +# add "[all]" for convenience +EXTRAS_REQUIRE["all"] = sorted(set(dep for deps in EXTRAS_REQUIRE.values() for dep in deps)) + +setup( + name="isaaclab_ov", + version="0.1.1", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url="https://github.com/isaac-sim/IsaacLab", + description="Extension providing Omniverse renderers (OVRTX, ovphysx, etc.) for IsaacLab.", + keywords=["robotics", "simulation", "rendering", "ovrtx", "omniverse"], + license="BSD-3-Clause", + include_package_data=True, + python_requires=">=3.12", + install_requires=[], + extras_require=EXTRAS_REQUIRE, + packages=["isaaclab_ov"], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + ], + zip_safe=False, +) diff --git a/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py b/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py new file mode 100644 index 000000000000..b6c590a54a54 --- /dev/null +++ b/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the OVRTX renderer output contract.""" + +import pytest +import torch + +pytest.importorskip("isaaclab_ov") +pytest.importorskip("ovrtx") + +from isaaclab_ov.renderers import OVRTXRendererCfg +from isaaclab_ov.renderers.ovrtx_renderer import OVRTXRenderData, OVRTXRenderer + +from isaaclab.sensors.camera import CameraCfg +from isaaclab.sensors.camera.camera_data import CameraData, RenderBufferKind, RenderBufferSpec +from isaaclab.sim import PinholeCameraCfg + +pytestmark = pytest.mark.isaacsim_ci + +_SPAWN = PinholeCameraCfg( + focal_length=24.0, + focus_distance=400.0, + horizontal_aperture=20.955, + clipping_range=(0.1, 1.0e5), +) + + +def _make_camera_cfg(data_types: list[str]) -> CameraCfg: + return CameraCfg( + height=8, + width=16, + prim_path="/World/Camera", + spawn=_SPAWN, + data_types=data_types, + ) + + +def _make_ovrtx_render_data() -> OVRTXRenderData: + rd = OVRTXRenderData.__new__(OVRTXRenderData) + rd.warp_buffers = {} + return rd + + +def test_ovrtx_supported_output_types_key_set(): + """OVRTX publishes the documented key set and per-output spec.""" + renderer = OVRTXRenderer(OVRTXRendererCfg()) + specs = renderer.supported_output_types() + + assert set(specs.keys()) == { + RenderBufferKind.RGB, + RenderBufferKind.RGBA, + RenderBufferKind.ALBEDO, + RenderBufferKind.SEMANTIC_SEGMENTATION, + RenderBufferKind.DEPTH, + RenderBufferKind.DISTANCE_TO_IMAGE_PLANE, + RenderBufferKind.DISTANCE_TO_CAMERA, + } + assert specs[RenderBufferKind.RGBA] == RenderBufferSpec(4, torch.uint8) + assert specs[RenderBufferKind.DEPTH] == RenderBufferSpec(1, torch.float32) + + +def test_ovrtx_set_outputs_wraps_caller_torch_zero_copy(): + """OVRTXRenderer.set_outputs publishes warp views over the caller's torch storage.""" + import warp as wp + + renderer = OVRTXRenderer(OVRTXRendererCfg()) + + if not torch.cuda.is_available(): + pytest.skip("OVRTX zero-copy wrapping requires a CUDA device") + device = "cuda" + + cfg = _make_camera_cfg(["rgb", "rgba", "depth"]) + data = CameraData.allocate( + data_types=cfg.data_types, + height=8, + width=16, + num_views=2, + device=device, + supported_specs=renderer.supported_output_types(), + ) + render_data = _make_ovrtx_render_data() + renderer.set_outputs(render_data, data.output) + + assert set(render_data.warp_buffers.keys()) >= {"rgba", "depth"} + assert render_data.warp_buffers["rgba"].ptr == wp.from_torch(data.output["rgba"]).ptr + assert render_data.warp_buffers["depth"].ptr == wp.from_torch(data.output["depth"]).ptr + assert "rgb" not in render_data.warp_buffers + + +def test_ovrtx_read_output_is_a_no_op_after_consolidation(): + """OVRTXRenderer.read_output is a no-op once set_outputs wires up zero-copy.""" + renderer = OVRTXRenderer(OVRTXRendererCfg()) + render_data = _make_ovrtx_render_data() + camera_data = CameraData() + camera_data.info = {} + camera_data.output = {} + + result = renderer.read_output(render_data, camera_data) + assert result is None + assert render_data.warp_buffers == {} + assert camera_data.info == {} + assert camera_data.output == {} diff --git a/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py b/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py new file mode 100644 index 000000000000..ed416d05a7e6 --- /dev/null +++ b/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py @@ -0,0 +1,578 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for OVRTX renderer kernels.""" + +import math + +import numpy as np +import pytest +import warp as wp +from isaaclab_ov.renderers.ovrtx_renderer_kernels import ( + DEVICE, + extract_all_depth_tiles_kernel, + extract_all_depth_tiles_kernel_legacy, + extract_all_rgba_tiles_kernel, + generate_random_colors_from_ids_kernel, + generate_random_colors_from_ids_kernel_legacy, +) + + +def _color_hash(seed: int) -> int: + h = seed + h ^= h >> 16 + h *= 0x85EBCA6B + h ^= h >> 13 + h *= 0xC2B2AE35 + h ^= h >> 16 + return h + + +def _random_colours_id(input_id: int) -> tuple[int, int, int, int]: + GOLDEN_RATIO_INV = 1.0 / 1.618033988749895 + + hash_val = _color_hash(input_id) + hue = math.fmod(input_id * GOLDEN_RATIO_INV, 1.0) + hue_perturbation = (hash_val & 0xFFFF) / 65536.0 + hue = math.fmod(hue + hue_perturbation * 0.1, 1.0) + sat_hash = hash_val >> 16 + val_hash = hash_val >> 8 + saturation = 0.7 + 0.3 * ((sat_hash & 0xFF) / 255.0) + value = 0.8 + 0.2 * ((val_hash & 0xFF) / 255.0) + i = int(hue * 6.0) + f = (hue * 6.0) - i + p = value * (1.0 - saturation) + q = value * (1.0 - saturation * f) + t = value * (1.0 - saturation * (1.0 - f)) + i = i % 6 + if i == 0: + r, g, b = value, t, p + elif i == 1: + r, g, b = q, value, p + elif i == 2: + r, g, b = p, value, t + elif i == 3: + r, g, b = p, q, value + elif i == 4: + r, g, b = t, p, value + else: + r, g, b = value, p, q + return (int(r * 255), int(g * 255), int(b * 255), 255) + + +def _reference_color(input_id: int) -> int: + if input_id == 0: + return 0 + if input_id == 1: + return 0xFF000000 + + r, g, b, a = _random_colours_id(input_id) + return r | (g << 8) | (b << 16) | (a << 24) + + +def _reference_extract_all_depth_tiles_legacy( + tiled_2d: np.ndarray, + num_envs: int, + num_cols: int, + tile_width: int, + tile_height: int, +) -> np.ndarray: + """NumPy reference for ``extract_all_depth_tiles_kernel_legacy`` (2D tiled buffer).""" + out = np.zeros((num_envs, tile_height, tile_width, 1), dtype=np.float32) + for env_idx in range(num_envs): + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + for y in range(tile_height): + for x in range(tile_width): + src_y = tile_y * tile_height + y + src_x = tile_x * tile_width + x + out[env_idx, y, x, 0] = tiled_2d[src_y, src_x] + return out + + +def _reference_extract_all_depth_tiles( + tiled_np: np.ndarray, + num_envs: int, + num_cols: int, + tile_width: int, + tile_height: int, +) -> np.ndarray: + """NumPy reference for ``extract_all_depth_tiles_kernel``.""" + return _reference_extract_all_depth_tiles_legacy(tiled_np[..., 0], num_envs, num_cols, tile_width, tile_height) + + +def _reference_extract_all_rgba_tiles( + tiled_np: np.ndarray, + num_envs: int, + num_cols: int, + tile_width: int, + tile_height: int, + num_channels: int, +) -> np.ndarray: + """NumPy reference for ``extract_all_rgba_tiles_kernel``.""" + out_c = 4 if num_channels == 4 else 3 + out = np.zeros((num_envs, tile_height, tile_width, out_c), dtype=np.uint8) + for env_idx in range(num_envs): + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + for y in range(tile_height): + for x in range(tile_width): + src_y = tile_y * tile_height + y + src_x = tile_x * tile_width + x + out[env_idx, y, x, 0] = tiled_np[src_y, src_x, 0] + out[env_idx, y, x, 1] = tiled_np[src_y, src_x, 1] + out[env_idx, y, x, 2] = tiled_np[src_y, src_x, 2] + if num_channels == 4: + out[env_idx, y, x, 3] = tiled_np[src_y, src_x, 3] + return out + + +class TestExtractAllDepthTilesKernel: + """Tests for ``extract_all_depth_tiles_kernel``.""" + + def test_two_by_two_tile_grid(self): + num_cols = 2 + num_envs = 4 + tile_width = 2 + tile_height = 3 + tiled_h = (num_envs // num_cols) * tile_height + tiled_w = num_cols * tile_width + tiled_np = np.zeros((tiled_h, tiled_w, 1), dtype=np.float32) + for h in range(tiled_h): + for w in range(tiled_w): + tiled_np[h, w, 0] = float(h * 1000 + w) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_allclose(output_wp.numpy(), expected, rtol=0, atol=0) + + def test_single_tile(self): + num_cols = 1 + num_envs = 1 + tile_width = 4 + tile_height = 4 + tiled_np = np.arange(tile_height * tile_width, dtype=np.float32).reshape(tile_height, tile_width, 1) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + @pytest.mark.parametrize( + ("num_cols", "num_envs", "tile_width", "tile_height"), + [ + (3, 6, 2, 2), + (1, 3, 5, 1), + (4, 8, 1, 1), + ], + ) + def test_various_layouts(self, num_cols, num_envs, tile_width, tile_height): + num_rows = (num_envs + num_cols - 1) // num_cols + tiled_h = num_rows * tile_height + tiled_w = num_cols * tile_width + rng = np.random.default_rng(12345) + tiled_np = rng.random((tiled_h, tiled_w, 1), dtype=np.float32).astype(np.float32) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_allclose(output_wp.numpy(), expected, rtol=1e-6, atol=1e-6) + + +class TestExtractAllDepthTilesKernelLegacy: + """Tests for ``extract_all_depth_tiles_kernel_legacy`` (ovrtx < 0.3.0, 2D tiled buffer).""" + + def test_two_by_two_tile_grid(self): + num_cols = 2 + num_envs = 4 + tile_width = 2 + tile_height = 3 + tiled_h = (num_envs // num_cols) * tile_height + tiled_w = num_cols * tile_width + tiled_np = np.zeros((tiled_h, tiled_w), dtype=np.float32) + for h in range(tiled_h): + for w in range(tiled_w): + tiled_np[h, w] = float(h * 1000 + w) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=2, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel_legacy, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles_legacy(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_allclose(output_wp.numpy(), expected, rtol=0, atol=0) + + def test_single_tile(self): + num_cols = 1 + num_envs = 1 + tile_width = 4 + tile_height = 4 + tiled_np = np.arange(tile_height * tile_width, dtype=np.float32).reshape(tile_height, tile_width) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=2, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel_legacy, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles_legacy(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + @pytest.mark.parametrize( + ("num_cols", "num_envs", "tile_width", "tile_height"), + [ + (3, 6, 2, 2), + (1, 3, 5, 1), + (4, 8, 1, 1), + ], + ) + def test_various_layouts(self, num_cols, num_envs, tile_width, tile_height): + num_rows = (num_envs + num_cols - 1) // num_cols + tiled_h = num_rows * tile_height + tiled_w = num_cols * tile_width + rng = np.random.default_rng(12345) + tiled_np = rng.random((tiled_h, tiled_w), dtype=np.float32).astype(np.float32) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=2, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 1), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_depth_tiles_kernel_legacy, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_depth_tiles_legacy(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_allclose(output_wp.numpy(), expected, rtol=1e-6, atol=1e-6) + + +class TestExtractAllRgbaTilesKernel: + """Tests for ``extract_all_rgba_tiles_kernel``.""" + + def test_two_by_two_tile_grid_rgba(self): + num_cols = 2 + num_envs = 4 + tile_width = 2 + tile_height = 3 + num_channels = 4 + tiled_h = (num_envs // num_cols) * tile_height + tiled_w = num_cols * tile_width + tiled_np = np.zeros((tiled_h, tiled_w, 4), dtype=np.uint8) + for h in range(tiled_h): + for w in range(tiled_w): + tiled_np[h, w, 0] = (h * 17 + w) % 256 + tiled_np[h, w, 1] = (h * 31 + w * 3) % 256 + tiled_np[h, w, 2] = (h + w * 11) % 256 + tiled_np[h, w, 3] = (h * 7 + w * 13) % 256 + + tiled_wp = wp.array(tiled_np, dtype=wp.uint8, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, num_channels), dtype=wp.uint8, device=DEVICE) + + wp.launch( + kernel=extract_all_rgba_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height, num_channels], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_rgba_tiles( + tiled_np, num_envs, num_cols, tile_width, tile_height, num_channels + ) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + def test_single_tile_rgb(self): + num_cols = 1 + num_envs = 1 + tile_width = 4 + tile_height = 4 + num_channels = 3 + tiled_np = np.arange(tile_height * tile_width * 3, dtype=np.uint8).reshape(tile_height, tile_width, 3) + + tiled_wp = wp.array(tiled_np, dtype=wp.uint8, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, num_channels), dtype=wp.uint8, device=DEVICE) + + wp.launch( + kernel=extract_all_rgba_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height, num_channels], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_rgba_tiles( + tiled_np, num_envs, num_cols, tile_width, tile_height, num_channels + ) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + def test_num_channels_not_four_skips_alpha(self): + """Values other than 4 use the RGB-only path (same as RGB tiled input).""" + num_cols = 1 + num_envs = 1 + tile_width = 2 + tile_height = 2 + tiled_np = np.array( + [ + [[1, 2, 3, 99], [4, 5, 6, 88]], + [[7, 8, 9, 77], [10, 11, 12, 66]], + ], + dtype=np.uint8, + ) + + tiled_wp = wp.array(tiled_np, dtype=wp.uint8, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(1, 2, 2, 3), dtype=wp.uint8, device=DEVICE) + + wp.launch( + kernel=extract_all_rgba_tiles_kernel, + dim=(1, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height, 2], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_rgba_tiles(tiled_np, num_envs, num_cols, tile_width, tile_height, 2) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + @pytest.mark.parametrize( + ("num_cols", "num_envs", "tile_width", "tile_height", "num_channels"), + [ + (3, 6, 2, 2, 3), + (3, 6, 2, 2, 4), + (1, 3, 5, 1, 3), + (4, 8, 1, 1, 4), + ], + ) + def test_various_layouts(self, num_cols, num_envs, tile_width, tile_height, num_channels): + num_rows = (num_envs + num_cols - 1) // num_cols + tiled_h = num_rows * tile_height + tiled_w = num_cols * tile_width + c_in = 4 if num_channels == 4 else 3 + rng = np.random.default_rng(24680) + tiled_np = rng.integers(0, 256, size=(tiled_h, tiled_w, c_in), dtype=np.uint8) + + tiled_wp = wp.array(tiled_np, dtype=wp.uint8, ndim=3, device=DEVICE) + output_wp = wp.zeros( + shape=(num_envs, tile_height, tile_width, num_channels), + dtype=wp.uint8, + device=DEVICE, + ) + + wp.launch( + kernel=extract_all_rgba_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height, num_channels], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_rgba_tiles( + tiled_np, num_envs, num_cols, tile_width, tile_height, num_channels + ) + np.testing.assert_array_equal(output_wp.numpy(), expected) + + +class TestRandomColorsFromIdsKernel: + """Tests for generate_random_colors_from_ids_kernel.""" + + def test_random_colors(self): + inputs_np = np.array([[[0], [1]], [[2], [3]]], dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=3, device=DEVICE) + output_colors = wp.zeros(shape=inputs_np.shape, dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel, + dim=inputs_np.shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + + out_np = output_colors.numpy() + for (i, j, k), input_id in np.ndenumerate(inputs_np): + input_id = int(np.uint32(input_id)) + ref_color = _reference_color(input_id) + out_color = int(out_np[i, j, k]) + assert out_color == ref_color, ( + f"At ({i},{j},{k}) id={input_id}: expected 0x{ref_color:08x}, got 0x{out_color:08x}" + ) + + def test_deterministic_across_launches(self): + shape = (4, 4, 1) + rng = np.random.default_rng(42) + inputs_np = rng.integers(0, 2**31, size=shape, dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=3, device=DEVICE) + output_colors = wp.zeros(shape=shape, dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel, + dim=shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + first_run = output_colors.numpy().copy() + + wp.launch( + kernel=generate_random_colors_from_ids_kernel, + dim=shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + second_run = output_colors.numpy() + + np.testing.assert_array_equal(first_run, second_run) + + @pytest.mark.parametrize( + "input_value", + [ + 0, + 1, + 2, + 3, + 100, + ], + ) + def test_single_value(self, input_value): + inputs_np = np.array([[[input_value]]], dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=3, device=DEVICE) + output_colors = wp.zeros(shape=(1, 1, 1), dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel, + dim=(1, 1, 1), + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + + ref_color = _reference_color(int(np.uint32(input_value))) + out_color = int(output_colors.numpy()[0, 0, 0]) + assert out_color == ref_color, ( + f"id=0x{int(np.uint32(input_value)):08x}: expected 0x{ref_color:08x}, got 0x{out_color:08x}" + ) + + +class TestRandomColorsFromIdsKernelLegacy: + """Tests for ``generate_random_colors_from_ids_kernel_legacy`` (ovrtx < 0.3.0, 2D buffers).""" + + def test_random_colors(self): + inputs_np = np.array([[0, 1], [2, 3]], dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=2, device=DEVICE) + output_colors = wp.zeros(shape=inputs_np.shape, dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel_legacy, + dim=inputs_np.shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + + out_np = output_colors.numpy() + for (i, j), input_id in np.ndenumerate(inputs_np): + input_id = int(np.uint32(input_id)) + ref_color = _reference_color(input_id) + out_color = int(out_np[i, j]) + assert out_color == ref_color, ( + f"At ({i},{j}) id={input_id}: expected 0x{ref_color:08x}, got 0x{out_color:08x}" + ) + + def test_deterministic_across_launches(self): + shape = (4, 4) + rng = np.random.default_rng(42) + inputs_np = rng.integers(0, 2**31, size=shape, dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=2, device=DEVICE) + output_colors = wp.zeros(shape=shape, dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel_legacy, + dim=shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + first_run = output_colors.numpy().copy() + + wp.launch( + kernel=generate_random_colors_from_ids_kernel_legacy, + dim=shape, + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + second_run = output_colors.numpy() + + np.testing.assert_array_equal(first_run, second_run) + + @pytest.mark.parametrize( + "input_value", + [ + 0, + 1, + 2, + 3, + 100, + ], + ) + def test_single_value(self, input_value): + inputs_np = np.array([[input_value]], dtype=np.uint32) + input_ids = wp.array(inputs_np, dtype=wp.uint32, ndim=2, device=DEVICE) + output_colors = wp.zeros(shape=(1, 1), dtype=wp.uint32, device=DEVICE) + + wp.launch( + kernel=generate_random_colors_from_ids_kernel_legacy, + dim=(1, 1), + inputs=[input_ids, output_colors], + device=DEVICE, + ) + wp.synchronize() + + ref_color = _reference_color(int(np.uint32(input_value))) + out_color = int(output_colors.numpy()[0, 0]) + assert out_color == ref_color, ( + f"id=0x{int(np.uint32(input_value)):08x}: expected 0x{ref_color:08x}, got 0x{out_color:08x}" + ) diff --git a/source/isaaclab_ovphysx/changelog.d/pr-5458-merge-develop.rst b/source/isaaclab_ovphysx/changelog.d/pr-5458-merge-develop.rst new file mode 100644 index 000000000000..546cb8acc1c7 --- /dev/null +++ b/source/isaaclab_ovphysx/changelog.d/pr-5458-merge-develop.rst @@ -0,0 +1,7 @@ +Removed +^^^^^^^ + +* Removed ``ArticulationData.body_incoming_joint_wrench_b`` to match the + shared articulation data API. Code that needs incoming joint reaction + wrenches should use a backend joint-wrench sensor instead of the articulation + data object. diff --git a/source/isaaclab_ovphysx/config/extension.toml b/source/isaaclab_ovphysx/config/extension.toml new file mode 100644 index 000000000000..8648b7fa9587 --- /dev/null +++ b/source/isaaclab_ovphysx/config/extension.toml @@ -0,0 +1,21 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.1.2" + +# Description +title = "OvPhysX simulation interfaces for IsaacLab core package" +description = "Extension providing IsaacLab with ovphysx/TensorBindingsAPI specific abstractions." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "ovphysx", "tensorbindingsapi"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_ovphysx" diff --git a/source/isaaclab_ovphysx/docs/CHANGELOG.rst b/source/isaaclab_ovphysx/docs/CHANGELOG.rst new file mode 100644 index 000000000000..b2eb969d7845 --- /dev/null +++ b/source/isaaclab_ovphysx/docs/CHANGELOG.rst @@ -0,0 +1,43 @@ +Changelog +--------- + +0.1.2 (2026-04-23) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Wrapped :attr:`~isaaclab_ovphysx.assets.ArticulationData.GRAVITY_VEC_W` and + :attr:`~isaaclab_ovphysx.assets.ArticulationData.FORWARD_VEC_B` in + :class:`~isaaclab.utils.warp.ProxyArray` to match the PhysX and Newton + backends. Public observations such as + :func:`~isaaclab.envs.mdp.observations.projected_gravity` access + ``asset.data.GRAVITY_VEC_W.torch``; the previous raw ``wp.array`` lacked + ``.torch`` and raised ``AttributeError`` on the ovphysx backend. + + +0.1.1 (2026-04-21) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Replaced private ``_find_names`` (fnmatch + regex) with the standard + :func:`~isaaclab.utils.string.resolve_matching_names` for all finder + methods, unifying name-resolution behavior across backends. Fnmatch-style + glob patterns (e.g. ``joint_*``) are no longer supported; use regex + equivalents (e.g. ``joint_.*``). ``find_fixed_tendons`` and + ``find_spatial_tendons`` now raise ``ValueError`` on empty tendon lists, + matching the PhysX backend. +* Changed ``find_joints`` ``joint_subset`` parameter from ``list[int]`` + (indices) to ``list[str]`` (names) to match the ``BaseArticulation`` + interface. Callers passing indices should convert to names first. + + +0.1.0 (2026-04-20) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Initial release of the ``isaaclab_ovphysx`` extension. diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/__init__.py new file mode 100644 index 000000000000..69b5f5512333 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing the ovphysx/TensorBindingsAPI simulation interfaces for IsaacLab.""" + +import os + +import toml + +ISAACLAB_OVPHYSX_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +# Find config/extension.toml: bundled inside the package (wheel install) or in the +# parent directory (editable install). +_pkg_dir = os.path.dirname(os.path.abspath(__file__)) +_toml_path = os.path.join(_pkg_dir, "config", "extension.toml") +if not os.path.isfile(_toml_path): + _toml_path = os.path.join(ISAACLAB_OVPHYSX_EXT_DIR, "config", "extension.toml") + +ISAACLAB_OVPHYSX_METADATA = toml.load(_toml_path) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +__version__ = ISAACLAB_OVPHYSX_METADATA["package"]["version"] diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.py new file mode 100644 index 000000000000..c644c05de13c --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for ovphysx-backed assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.pyi b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.pyi new file mode 100644 index 000000000000..516e15c5ef6a --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/__init__.pyi @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", +] + +from .articulation import Articulation, ArticulationData diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.py new file mode 100644 index 000000000000..d3798bf9652b --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for ovphysx-backed articulated assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.pyi b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.pyi new file mode 100644 index 000000000000..9e482491fe54 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", +] + +from .articulation import Articulation +from .articulation_data import ArticulationData diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation.py new file mode 100644 index 000000000000..f3919d56da73 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation.py @@ -0,0 +1,2675 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Articulation implementation backed by ovphysx TensorBindingsAPI.""" + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import numpy as np +import torch +import warp as wp + +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.physics import PhysicsManager +from isaaclab.utils.string import resolve_matching_names +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_ovphysx import tensor_types as TT +from isaaclab_ovphysx.physics import OvPhysxManager + +from .articulation_data import ArticulationData +from .kernels import _body_wrench_to_world, _scatter_rows_partial, update_soft_joint_pos_limits + +if TYPE_CHECKING: + from isaaclab.actuators import ActuatorBase + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + +logger = logging.getLogger(__name__) + + +class Articulation(BaseArticulation): + """Articulation backed by the ovphysx TensorBindingsAPI. + + Reads and writes simulation state through ovphysx.TensorBinding objects created + from the OvPhysxManager's PhysX instance. + """ + + __backend_name__ = "ovphysx" + + cfg: ArticulationCfg + + def __init__(self, cfg: ArticulationCfg): + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> ArticulationData: + """Data container with simulation state for this articulation.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of articulation instances (environments).""" + return self._num_instances + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation is a fixed-base or floating-base system.""" + return self._is_fixed_base + + @property + def num_joints(self) -> int: + """Number of joints in the articulation.""" + return self._num_joints + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons in the articulation.""" + return getattr(self, "_num_fixed_tendons", 0) + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons in the articulation.""" + return getattr(self, "_num_spatial_tendons", 0) + + @property + def num_bodies(self) -> int: + """Number of bodies (links) in the articulation.""" + return self._num_bodies + + @property + def joint_names(self) -> list[str]: + """Ordered names of joints in the articulation.""" + return self._joint_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered names of fixed tendons in the articulation.""" + return getattr(self, "_fixed_tendon_names", []) + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered names of spatial tendons in the articulation.""" + return getattr(self, "_spatial_tendon_names", []) + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies (links) in the articulation.""" + return self._body_names + + @property + def root_view(self) -> Any: + """Root articulation view (not available for ovphysx backend).""" + return None + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer | None: + """Wrench composer for forces applied only during the current step.""" + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer | None: + """Wrench composer for forces applied persistently every step.""" + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the articulation. + + .. caution:: + If both `env_ids` and `env_mask` are provided, then `env_mask` takes precedence over `env_ids`. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # use ellipses object to skip initial indices. + if (env_ids is None) or (env_ids == slice(None)): + env_ids = slice(None) + # reset actuators + for actuator in self.actuators.values(): + actuator.reset(env_ids) + # reset external wrenches. + self._instantaneous_wrench_composer.reset(env_ids, env_mask) + self._permanent_wrench_composer.reset(env_ids, env_mask) + + def write_data_to_sim(self) -> None: + """Apply external wrenches, actuator model, and write commands into the simulation.""" + # Apply external wrenches (before actuators, same as PhysX backend). + self._apply_external_wrenches() + + self._apply_actuator_model() + # Write effort tensor to simulation. + if self._effort_binding is not None: + self._effort_binding.write(self._effort_write_view) + # Write position and velocity targets in one shot (not per-actuator). + if self._has_implicit_actuators: + if self._pos_target_binding is not None: + self._pos_target_binding.write(self._pos_target_write_view) + if self._vel_target_binding is not None: + self._vel_target_binding.write(self._vel_target_write_view) + + def update(self, dt: float) -> None: + """Update internal data buffers after a simulation step. + + Args: + dt: The simulation time step [s] used for finite-difference quantities. + """ + self._data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :func:`isaaclab.utils.string.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return resolve_matching_names(name_keys, self._body_names, preserve_order) + + def find_joints( + self, + name_keys: str | Sequence[str], + joint_subset: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please check the :func:`isaaclab.utils.string.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. + joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + if joint_subset is None: + joint_subset = self._joint_names + return resolve_matching_names(name_keys, joint_subset, preserve_order) + + def find_fixed_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions + to match the joint names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search + for. Defaults to None, which means all joints in the + articulation are searched. + preserve_order: Whether to preserve the order of the name keys in + the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.fixed_tendon_names + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + def find_spatial_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[str] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions + to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to + None, which means all tendons in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in + the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.spatial_tendon_names + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + """ + Operations - State Writers. + """ + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, env_ids) + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root pose over masked environments into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, mask=env_mask) + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, env_ids) + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link pose over masked environments into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root link poses in simulation frame. Shape is (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, mask=env_mask) + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids),) + with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_pose, (n,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, env_ids) + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass pose over masked environments into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (num_instances,) + with dtype wp.transformf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_pose, (self._num_instances,), wp.transformf, "root_pose") + self._write_root_state(TT.ROOT_POSE, root_pose, mask=env_mask) + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + Args: + root_velocity: Root velocities in simulation world frame. Shape is (len(env_ids),) + with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, env_ids) + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root velocity over masked environments into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + Args: + root_velocity: Root velocities in simulation world frame. Shape is (num_instances,) + with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, mask=env_mask) + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, env_ids) + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over masked environments into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, mask=env_mask) + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + self.assert_shape_and_dtype(root_velocity, (n,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, env_ids) + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link velocity over masked environments into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(root_velocity, (self._num_instances,), wp.spatial_vectorf, "root_velocity") + self._write_root_state(TT.ROOT_VELOCITY, root_velocity, mask=env_mask) + + """ + Operations - Joint State Writers. + """ + + def write_joint_state_to_sim_mask( + self, + joint_pos: torch.Tensor | wp.array, + joint_vel: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + joint_mask: wp.array | None = None, + ) -> None: + """Write joint positions and velocities over masked environments into the simulation. + + Args: + joint_pos: Joint positions. Shape is (num_instances, num_joints). + joint_vel: Joint velocities. Shape is (num_instances, num_joints). + env_mask: Environment mask. If None, then all instances are updated. + joint_mask: Joint mask. If None, then all joints are updated. + """ + self.write_joint_position_to_sim_mask(position=joint_pos, env_mask=env_mask, joint_mask=joint_mask) + self.write_joint_velocity_to_sim_mask(velocity=joint_vel, env_mask=env_mask, joint_mask=joint_mask) + + def write_joint_position_to_sim_index( + self, + *, + position: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint positions over selected environment and joint indices into the simulation. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(position, (n, d), wp.float32, "position") + self._write_flat_tensor(TT.DOF_POSITION, position, env_ids, joint_ids) + self.data._joint_pos_buf.timestamp = -1.0 + + def write_joint_position_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint positions over masked environments into the simulation. + + Args: + position: Joint positions. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(position, (self._num_instances, self._num_joints), wp.float32, "position") + self._write_flat_tensor_mask(TT.DOF_POSITION, position, env_mask, joint_mask) + self.data._joint_pos_buf.timestamp = -1.0 + + def write_joint_velocity_to_sim_index( + self, + *, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint velocities over selected environment and joint indices into the simulation. + + Args: + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(velocity, (n, d), wp.float32, "velocity") + self._write_flat_tensor(TT.DOF_VELOCITY, velocity, env_ids, joint_ids) + self.data._joint_vel_buf.timestamp = -1.0 + + def write_joint_velocity_to_sim_mask( + self, + *, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint velocities over masked environments into the simulation. + + Args: + velocity: Joint velocities. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(velocity, (self._num_instances, self._num_joints), wp.float32, "velocity") + self._write_flat_tensor_mask(TT.DOF_VELOCITY, velocity, env_mask, joint_mask) + self.data._joint_vel_buf.timestamp = -1.0 + + """ + Operations - Simulation Parameters Writers. + """ + + def write_joint_stiffness_to_sim_index( + self, + *, + stiffness: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint stiffness over selected indices into the simulation. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(stiffness, (n, d), wp.float32, "stiffness") + self._write_flat_tensor(TT.DOF_STIFFNESS, stiffness, env_ids, joint_ids) + + def write_joint_stiffness_to_sim_mask( + self, + *, + stiffness: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint stiffness over masked environments into the simulation. + + Args: + stiffness: Joint stiffness. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(stiffness, (self._num_instances, self._num_joints), wp.float32, "stiffness") + self._write_flat_tensor_mask(TT.DOF_STIFFNESS, stiffness, env_mask, joint_mask) + + def write_joint_damping_to_sim_index( + self, + *, + damping: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint damping over selected indices into the simulation. + + Args: + damping: Joint damping. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(damping, (n, d), wp.float32, "damping") + self._write_flat_tensor(TT.DOF_DAMPING, damping, env_ids, joint_ids) + + def write_joint_damping_to_sim_mask( + self, + *, + damping: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint damping over masked environments into the simulation. + + Args: + damping: Joint damping. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(damping, (self._num_instances, self._num_joints), wp.float32, "damping") + self._write_flat_tensor_mask(TT.DOF_DAMPING, damping, env_mask, joint_mask) + + def write_joint_position_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint position limits over selected environment indices into the simulation. + + Args: + limits: Joint position limits [rad or m]. Shape is (len(env_ids), len(joint_ids)) + with dtype wp.vec2f. + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + warn_limit_violation: Whether to use warning or info level logging when default joint + positions exceed the new limits. Defaults to True. + """ + if isinstance(limits, (int, float)): + raise ValueError("Float scalars are not supported for position limits (vec2f dtype)") + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(limits, (n, d), wp.vec2f, "limits") + self._write_flat_tensor(TT.DOF_LIMIT, limits, env_ids, joint_ids) + + def write_joint_position_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint position limits over masked environments into the simulation. + + Args: + limits: Joint position limits [rad or m]. Shape is (num_instances, num_joints) + with dtype wp.vec2f. + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + warn_limit_violation: Whether to use warning or info level logging when default joint + positions exceed the new limits. Defaults to True. + """ + if isinstance(limits, (int, float)): + raise ValueError("Float scalars are not supported for position limits (vec2f dtype)") + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.vec2f, "limits") + self._write_flat_tensor_mask(TT.DOF_LIMIT, limits, env_mask, joint_mask) + + def write_joint_velocity_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint max velocity over selected environment indices into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. + + Args: + limits: Joint max velocity [rad/s or m/s]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(limits, (n, d), wp.float32, "limits") + self._write_flat_tensor(TT.DOF_MAX_VELOCITY, limits, env_ids, joint_ids) + + def write_joint_velocity_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint max velocity over masked environments into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. + + Args: + limits: Joint max velocity [rad/s or m/s]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.float32, "limits") + self._write_flat_tensor_mask(TT.DOF_MAX_VELOCITY, limits, env_mask, joint_mask) + + def write_joint_effort_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint effort limits over selected environment indices into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. + + Args: + limits: Joint effort limits [N or N*m]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(limits, (n, d), wp.float32, "limits") + self._write_flat_tensor(TT.DOF_MAX_FORCE, limits, env_ids, joint_ids) + + def write_joint_effort_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint effort limits over masked environments into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. + + Args: + limits: Joint effort limits [N or N*m]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(limits, (self._num_instances, self._num_joints), wp.float32, "limits") + self._write_flat_tensor_mask(TT.DOF_MAX_FORCE, limits, env_mask, joint_mask) + + def write_joint_armature_to_sim_index( + self, + *, + armature: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint armature over selected environment indices into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + Args: + armature: Joint armature [kg*m^2 or kg]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(armature, (n, d), wp.float32, "armature") + self._write_flat_tensor(TT.DOF_ARMATURE, armature, env_ids, joint_ids) + + def write_joint_armature_to_sim_mask( + self, + *, + armature: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint armature over masked environments into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + Args: + armature: Joint armature [kg*m^2 or kg]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype(armature, (self._num_instances, self._num_joints), wp.float32, "armature") + self._write_flat_tensor_mask(TT.DOF_ARMATURE, armature, env_mask, joint_mask) + + def write_joint_friction_coefficient_to_sim_index( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Write joint friction coefficients over selected indices into the simulation. + + Args: + joint_friction_coeff: Joint friction coefficients. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + d = len(joint_ids) if joint_ids is not None else self._num_joints + self.assert_shape_and_dtype(joint_friction_coeff, (n, d), wp.float32, "joint_friction_coeff") + self._write_friction_column(joint_friction_coeff, env_ids, joint_ids) + + def write_joint_friction_coefficient_to_sim_mask( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint friction coefficients over masked environments into the simulation. + + Args: + joint_friction_coeff: Joint friction coefficients. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self.assert_shape_and_dtype( + joint_friction_coeff, (self._num_instances, self._num_joints), wp.float32, "joint_friction_coeff" + ) + self._write_friction_column_mask(joint_friction_coeff, env_mask, joint_mask) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set masses of all bodies using indices. + + Args: + masses: Masses of all bodies [kg]. Shape is (len(env_ids), len(body_ids)). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + n = self._n_envs_index(env_ids) + b = len(body_ids) if body_ids is not None else self._num_bodies + self.assert_shape_and_dtype(masses, (n, b), wp.float32, "masses") + self._write_flat_tensor(TT.BODY_MASS, masses, env_ids, body_ids) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + Args: + masses: Masses of all bodies [kg]. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.assert_shape_and_dtype(masses, (self._num_instances, self._num_bodies), wp.float32, "masses") + self._write_flat_tensor_mask(TT.BODY_MASS, masses, env_mask, body_mask) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set center of mass pose of all bodies using indices. + + Args: + coms: Center of mass pose of all bodies. Shape is (len(env_ids), len(body_ids)) + with dtype wp.transformf. + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. + Defaults to None (all environments). + """ + n = self._n_envs_index(env_ids) + b = len(body_ids) if body_ids is not None else self._num_bodies + self.assert_shape_and_dtype(coms, (n, b), wp.transformf, "coms") + self._write_flat_tensor(TT.BODY_COM_POSE, coms, env_ids, body_ids) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass pose of all bodies using masks. + + Args: + coms: Center of mass pose of all bodies. Shape is (num_instances, num_bodies) + with dtype wp.transformf. + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.assert_shape_and_dtype(coms, (self._num_instances, self._num_bodies), wp.transformf, "coms") + self._write_flat_tensor_mask(TT.BODY_COM_POSE, coms, env_mask, body_mask) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set inertias of all bodies using indices. + + Args: + inertias: Inertias of all bodies [kg*m^2]. Shape is (len(env_ids), len(body_ids), 9). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + n = self._n_envs_index(env_ids) + b = len(body_ids) if body_ids is not None else self._num_bodies + self.assert_shape_and_dtype(inertias, (n, b, 9), wp.float32, "inertias") + self._write_flat_tensor(TT.BODY_INERTIA, inertias, env_ids, body_ids) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + Args: + inertias: Inertias of all bodies [kg*m^2]. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.assert_shape_and_dtype(inertias, (self._num_instances, self._num_bodies, 9), wp.float32, "inertias") + self._write_flat_tensor_mask(TT.BODY_INERTIA, inertias, env_mask, body_mask) + + """ + Operations - Target Setters. + """ + + def set_joint_position_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set joint position targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint position targets [rad or m]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self._set_target_into_buffer(self._data._joint_pos_target, target, env_ids, joint_ids) + + def set_joint_position_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint position targets into internal buffers using masks. + + Args: + target: Joint position targets [rad or m]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self._set_target_into_buffer_mask(self._data._joint_pos_target, target, env_mask, joint_mask) + + def set_joint_velocity_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set joint velocity targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint velocity targets [rad/s or m/s]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self._set_target_into_buffer(self._data._joint_vel_target, target, env_ids, joint_ids) + + def set_joint_velocity_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint velocity targets into internal buffers using masks. + + Args: + target: Joint velocity targets [rad/s or m/s]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self._set_target_into_buffer_mask(self._data._joint_vel_target, target, env_mask, joint_mask) + + def set_joint_effort_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Set joint efforts into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint effort targets [N or N*m]. Shape is (len(env_ids), len(joint_ids)). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self._set_target_into_buffer(self._data._joint_effort_target, target, env_ids, joint_ids) + + def set_joint_effort_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint efforts into internal buffers using masks. + + Args: + target: Joint effort targets [N or N*m]. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are updated. + env_mask: Environment mask. If None, then all instances are updated. + """ + self._set_target_into_buffer_mask(self._data._joint_effort_target, target, env_mask, joint_mask) + + """ + Operations - Tendons. + """ + + def set_fixed_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers using indices. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(stiffness, (n, t), wp.float32, "stiffness") + if self._data._fixed_tendon_stiffness is not None: + self._set_target_into_buffer(self._data._fixed_tendon_stiffness, stiffness, env_ids, fixed_tendon_ids) + + def set_fixed_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers using masks. + + Args: + stiffness: Fixed tendon stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(stiffness, (self._num_instances, self._nft()), wp.float32, "stiffness") + if self._data._fixed_tendon_stiffness is not None: + self._set_target_into_buffer_mask( + self._data._fixed_tendon_stiffness, stiffness, env_mask, fixed_tendon_mask + ) + + def set_fixed_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers using indices. + + Args: + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(damping, (n, t), wp.float32, "damping") + if self._data._fixed_tendon_damping is not None: + self._set_target_into_buffer(self._data._fixed_tendon_damping, damping, env_ids, fixed_tendon_ids) + + def set_fixed_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers using masks. + + Args: + damping: Fixed tendon damping. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(damping, (self._num_instances, self._nft()), wp.float32, "damping") + if self._data._fixed_tendon_damping is not None: + self._set_target_into_buffer_mask(self._data._fixed_tendon_damping, damping, env_mask, fixed_tendon_mask) + + def set_fixed_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using indices. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(limit_stiffness, (n, t), wp.float32, "limit_stiffness") + if self._data._fixed_tendon_limit_stiffness is not None: + self._set_target_into_buffer( + self._data._fixed_tendon_limit_stiffness, limit_stiffness, env_ids, fixed_tendon_ids + ) + + def set_fixed_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using masks. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(limit_stiffness, (self._num_instances, self._nft()), wp.float32, "limit_stiffness") + if self._data._fixed_tendon_limit_stiffness is not None: + self._set_target_into_buffer_mask( + self._data._fixed_tendon_limit_stiffness, limit_stiffness, env_mask, fixed_tendon_mask + ) + + def set_fixed_tendon_position_limit_index( + self, + *, + limit: torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon position limits into internal buffers using indices. + + Args: + limit: Fixed tendon position limits. Shape is (len(env_ids), len(fixed_tendon_ids)) + with dtype wp.vec2f. + fixed_tendon_ids: The tendon indices. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(limit, (n, t), wp.vec2f, "limit") + if self._data._fixed_tendon_pos_limits is not None: + self._set_target_into_buffer(self._data._fixed_tendon_pos_limits, limit, env_ids, fixed_tendon_ids) + + def set_fixed_tendon_position_limit_mask( + self, + *, + limit: torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon position limits into internal buffers using masks. + + Args: + limit: Fixed tendon position limits. Shape is (num_instances, num_fixed_tendons) + with dtype wp.vec2f. + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(limit, (self._num_instances, self._nft()), wp.vec2f, "limit") + if self._data._fixed_tendon_pos_limits is not None: + self._set_target_into_buffer_mask(self._data._fixed_tendon_pos_limits, limit, env_mask, fixed_tendon_mask) + + def set_fixed_tendon_rest_length_index( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon rest length into internal buffers using indices. + + Args: + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(rest_length, (n, t), wp.float32, "rest_length") + if self._data._fixed_tendon_rest_length is not None: + self._set_target_into_buffer(self._data._fixed_tendon_rest_length, rest_length, env_ids, fixed_tendon_ids) + + def set_fixed_tendon_rest_length_mask( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon rest length into internal buffers using masks. + + Args: + rest_length: Fixed tendon rest length. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(rest_length, (self._num_instances, self._nft()), wp.float32, "rest_length") + if self._data._fixed_tendon_rest_length is not None: + self._set_target_into_buffer_mask( + self._data._fixed_tendon_rest_length, rest_length, env_mask, fixed_tendon_mask + ) + + def set_fixed_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set fixed tendon offset into internal buffers using indices. + + Args: + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(fixed_tendon_ids) if fixed_tendon_ids else self._nft() + self.assert_shape_and_dtype(offset, (n, t), wp.float32, "offset") + if self._data._fixed_tendon_offset is not None: + self._set_target_into_buffer(self._data._fixed_tendon_offset, offset, env_ids, fixed_tendon_ids) + + def set_fixed_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon offset into internal buffers using masks. + + Args: + offset: Fixed tendon offset. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(offset, (self._num_instances, self._nft()), wp.float32, "offset") + if self._data._fixed_tendon_offset is not None: + self._set_target_into_buffer_mask(self._data._fixed_tendon_offset, offset, env_mask, fixed_tendon_mask) + + def write_fixed_tendon_properties_to_sim_index( + self, + *, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using indices. + + Args: + fixed_tendon_ids: The fixed tendon indices to write the properties for. Defaults to None + (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + if self._nft() == 0: + return + for tt, buf in [ + (TT.FIXED_TENDON_STIFFNESS, self._data._fixed_tendon_stiffness), + (TT.FIXED_TENDON_DAMPING, self._data._fixed_tendon_damping), + (TT.FIXED_TENDON_LIMIT_STIFFNESS, self._data._fixed_tendon_limit_stiffness), + (TT.FIXED_TENDON_LIMIT, self._data._fixed_tendon_pos_limits), + (TT.FIXED_TENDON_REST_LENGTH, self._data._fixed_tendon_rest_length), + (TT.FIXED_TENDON_OFFSET, self._data._fixed_tendon_offset), + ]: + if buf is not None: + self._write_flat_tensor(tt, buf, env_ids, fixed_tendon_ids) + + def write_fixed_tendon_properties_to_sim_mask( + self, + *, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using masks. + + Args: + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + if self._nft() == 0: + return + for tt, buf in [ + (TT.FIXED_TENDON_STIFFNESS, self._data._fixed_tendon_stiffness), + (TT.FIXED_TENDON_DAMPING, self._data._fixed_tendon_damping), + (TT.FIXED_TENDON_LIMIT_STIFFNESS, self._data._fixed_tendon_limit_stiffness), + (TT.FIXED_TENDON_LIMIT, self._data._fixed_tendon_pos_limits), + (TT.FIXED_TENDON_REST_LENGTH, self._data._fixed_tendon_rest_length), + (TT.FIXED_TENDON_OFFSET, self._data._fixed_tendon_offset), + ]: + if buf is not None: + self._write_flat_tensor_mask(tt, buf, env_mask, fixed_tendon_mask) + + def set_spatial_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers using indices. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(spatial_tendon_ids) if spatial_tendon_ids else self._nst() + self.assert_shape_and_dtype(stiffness, (n, t), wp.float32, "stiffness") + if self._data._spatial_tendon_stiffness is not None: + self._set_target_into_buffer(self._data._spatial_tendon_stiffness, stiffness, env_ids, spatial_tendon_ids) + + def set_spatial_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers using masks. + + Args: + stiffness: Spatial tendon stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(stiffness, (self._num_instances, self._nst()), wp.float32, "stiffness") + if self._data._spatial_tendon_stiffness is not None: + self._set_target_into_buffer_mask( + self._data._spatial_tendon_stiffness, stiffness, env_mask, spatial_tendon_mask + ) + + def set_spatial_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers using indices. + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(spatial_tendon_ids) if spatial_tendon_ids else self._nst() + self.assert_shape_and_dtype(damping, (n, t), wp.float32, "damping") + if self._data._spatial_tendon_damping is not None: + self._set_target_into_buffer(self._data._spatial_tendon_damping, damping, env_ids, spatial_tendon_ids) + + def set_spatial_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers using masks. + + Args: + damping: Spatial tendon damping. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(damping, (self._num_instances, self._nst()), wp.float32, "damping") + if self._data._spatial_tendon_damping is not None: + self._set_target_into_buffer_mask( + self._data._spatial_tendon_damping, damping, env_mask, spatial_tendon_mask + ) + + def set_spatial_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using indices. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(spatial_tendon_ids) if spatial_tendon_ids else self._nst() + self.assert_shape_and_dtype(limit_stiffness, (n, t), wp.float32, "limit_stiffness") + if self._data._spatial_tendon_limit_stiffness is not None: + self._set_target_into_buffer( + self._data._spatial_tendon_limit_stiffness, limit_stiffness, env_ids, spatial_tendon_ids + ) + + def set_spatial_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using masks. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(limit_stiffness, (self._num_instances, self._nst()), wp.float32, "limit_stiffness") + if self._data._spatial_tendon_limit_stiffness is not None: + self._set_target_into_buffer_mask( + self._data._spatial_tendon_limit_stiffness, limit_stiffness, env_mask, spatial_tendon_mask + ) + + def set_spatial_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set spatial tendon offset into internal buffers using indices. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + n = self._n_envs_index(env_ids) + t = len(spatial_tendon_ids) if spatial_tendon_ids else self._nst() + self.assert_shape_and_dtype(offset, (n, t), wp.float32, "offset") + if self._data._spatial_tendon_offset is not None: + self._set_target_into_buffer(self._data._spatial_tendon_offset, offset, env_ids, spatial_tendon_ids) + + def set_spatial_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon offset into internal buffers using masks. + + Args: + offset: Spatial tendon offset. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + self.assert_shape_and_dtype(offset, (self._num_instances, self._nst()), wp.float32, "offset") + if self._data._spatial_tendon_offset is not None: + self._set_target_into_buffer_mask(self._data._spatial_tendon_offset, offset, env_mask, spatial_tendon_mask) + + def write_spatial_tendon_properties_to_sim_index( + self, + *, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using indices. + + Args: + spatial_tendon_ids: The spatial tendon indices to write the properties for. Defaults to None + (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + if self._nst() == 0: + return + for tt, buf in [ + (TT.SPATIAL_TENDON_STIFFNESS, self._data._spatial_tendon_stiffness), + (TT.SPATIAL_TENDON_DAMPING, self._data._spatial_tendon_damping), + (TT.SPATIAL_TENDON_LIMIT_STIFFNESS, self._data._spatial_tendon_limit_stiffness), + (TT.SPATIAL_TENDON_OFFSET, self._data._spatial_tendon_offset), + ]: + if buf is not None: + self._write_flat_tensor(tt, buf, env_ids, spatial_tendon_ids) + + def write_spatial_tendon_properties_to_sim_mask( + self, + *, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using masks. + + Args: + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. + """ + if self._nst() == 0: + return + for tt, buf in [ + (TT.SPATIAL_TENDON_STIFFNESS, self._data._spatial_tendon_stiffness), + (TT.SPATIAL_TENDON_DAMPING, self._data._spatial_tendon_damping), + (TT.SPATIAL_TENDON_LIMIT_STIFFNESS, self._data._spatial_tendon_limit_stiffness), + (TT.SPATIAL_TENDON_OFFSET, self._data._spatial_tendon_offset), + ]: + if buf is not None: + self._write_flat_tensor_mask(tt, buf, env_mask, spatial_tendon_mask) + + """ + Deprecated methods. + """ + + def write_root_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Deprecated in base class. Use :meth:`write_root_pose_to_sim_index` and + :meth:`write_root_velocity_to_sim_index` instead.""" + self._write_root_state(TT.ROOT_POSE, root_state[:, :7], env_ids) + self._write_root_state(TT.ROOT_VELOCITY, root_state[:, 7:], env_ids) + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Deprecated in base class. Use :meth:`write_root_com_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index` instead.""" + self._write_root_state(TT.ROOT_POSE, root_state[:, :7], env_ids) + self._write_root_state(TT.ROOT_VELOCITY, root_state[:, 7:], env_ids) + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Deprecated in base class. Use :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_link_velocity_to_sim_index` instead.""" + self._write_root_state(TT.ROOT_POSE, root_state[:, :7], env_ids) + self._write_root_state(TT.ROOT_VELOCITY, root_state[:, 7:], env_ids) + + def write_joint_state_to_sim( + self, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | wp.array | None = None, + ) -> None: + """Deprecated in base class. Use :meth:`write_joint_position_to_sim_index` and + :meth:`write_joint_velocity_to_sim_index` instead.""" + self.write_joint_position_to_sim_index(position=position, joint_ids=joint_ids, env_ids=env_ids) + self.write_joint_velocity_to_sim_index(velocity=velocity, joint_ids=joint_ids, env_ids=env_ids) + + """ + Internal helper. + """ + + def _initialize_impl(self) -> None: + physx_instance = OvPhysxManager.get_physx_instance() + if physx_instance is None: + raise RuntimeError("OvPhysxManager has not been initialized yet.") + + prim_path = self.cfg.prim_path + # Convert IsaacLab prim-path notation to the glob patterns ovphysx expects. + # IsaacLab uses two conventions: + # /World/envs/env_.*/Robot -- regex dot-star for "any env index" + # /World/envs/{ENV_REGEX_NS}/Robot -- explicit placeholder + # ovphysx create_tensor_binding() uses fnmatch-style globs, so both map to '*'. + pattern = re.sub(r"\{ENV_REGEX_NS\}", "*", prim_path) + pattern = re.sub(r"\.\*", "*", pattern) # env_.* -> env_* + + # The pattern above points to the ArticulationCfg prim (e.g. /World/envs/env_*/Robot). + # However, PhysicsArticulationRootAPI may be on a CHILD prim (e.g. /Robot/torso) + # rather than on the prim itself. create_tensor_binding() only matches prims that + # *have* PhysicsArticulationRootAPI, so we need to extend the pattern to the actual + # articulation root. Mirror the PhysX backend's discovery logic: find the first + # matching prim in the USD stage, walk its subtree for the articulation root, and + # append the relative suffix to the glob pattern. + from pxr import UsdPhysics + + from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims + + stage = PhysicsManager._sim.stage + first_prim = find_first_matching_prim(prim_path, stage=stage) + if first_prim is None: + raise RuntimeError(f"OvPhysxManager: no prim found for path '{prim_path}'.") + first_prim_path = first_prim.GetPath().pathString + + root_prims = get_all_matching_child_prims( + first_prim_path, + predicate=lambda p: p.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"No prim with PhysicsArticulationRootAPI found under '{first_prim_path}'." + " Check that the articulation has 'PhysicsArticulationRootAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Multiple articulation roots found under '{first_prim_path}': {root_prims}." + " There must be exactly one articulation root per prim path." + ) + self._articulation_root_path = root_prims[0].GetPath().pathString + root_relative = self._articulation_root_path[len(first_prim_path) :] + if root_relative: + # e.g. first_prim_path=/World/envs/env_0/Robot, root_relative=/torso + # pattern becomes /World/envs/env_*/Robot/torso + pattern = pattern + root_relative + logger.info("OvPhysxManager: articulation root at '%s' (pattern extended to '%s')", root_relative, pattern) + + # Bindings are created lazily (on first access) to avoid allocating + # handles for tensor types the user never queries. Only the root-pose + # binding is created eagerly because we need it to read articulation + # metadata (joint count, body count, names, fixed-base flag). + self._bindings: dict[int, Any] = {} + self._physx_instance = physx_instance + self._binding_pattern = pattern + + eager_types = [ + TT.ROOT_POSE, + TT.DOF_POSITION, + TT.DOF_STIFFNESS, + TT.DOF_DAMPING, + TT.DOF_LIMIT, + TT.DOF_MAX_VELOCITY, + TT.DOF_MAX_FORCE, + TT.DOF_ARMATURE, + TT.DOF_FRICTION_PROPERTIES, + TT.BODY_MASS, + TT.BODY_COM_POSE, + TT.BODY_INERTIA, + ] + for tt in eager_types: + try: + self._bindings[tt] = physx_instance.create_tensor_binding(pattern=pattern, tensor_type=tt) + except Exception: + logger.debug("Could not create tensor binding for type %s on pattern %s", tt, pattern) + + sample = next(iter(self._bindings.values())) + self._num_instances = sample.count + self._num_joints = sample.dof_count + self._num_bodies = sample.body_count + self._is_fixed_base = sample.is_fixed_base + self._joint_names = list(sample.dof_names) + self._body_names = list(sample.body_names) + + # Create data container. + self._data = ArticulationData(self._bindings, self._device, binding_getter=self._get_binding) + + # Discover tendon counts/names before buffer allocation so that + # _create_buffers can size the tendon property arrays. + self._process_tendons() + + self._create_buffers() + + self._process_cfg() + self._process_actuators_cfg() + self._validate_cfg() + self._log_articulation_info() + + # Cache the effort binding and a stable float32 view of the applied_torque + # buffer for write_data_to_sim(). The binding's internal write cache + # (keyed on object identity) handles the fast path automatically. + self._effort_binding = self._get_binding(TT.DOF_ACTUATION_FORCE) + if self._effort_binding is not None: + torque = self._data.applied_torque + shape = self._effort_binding.shape + self._effort_write_view = wp.array( + ptr=torque.ptr, + shape=shape, + dtype=wp.float32, + device=str(torque.device), + copy=False, + ) + else: + self._effort_write_view = None + + # Cache position/velocity target bindings + views for one-shot writes. + def _make_write_view(tt, buf): + b = self._get_binding(tt) + if b is None or buf is None: + return None, None + v = wp.array(ptr=buf.ptr, shape=b.shape, dtype=wp.float32, device=str(buf.device), copy=False) + return b, v + + self._pos_target_binding, self._pos_target_write_view = _make_write_view( + TT.DOF_POSITION_TARGET, self._data.joint_pos_target + ) + self._vel_target_binding, self._vel_target_write_view = _make_write_view( + TT.DOF_VELOCITY_TARGET, self._data.joint_vel_target + ) + + # Let the articulation data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _create_buffers(self) -> None: + self._data._create_buffers() + + self._ALL_INDICES = wp.array(np.arange(self._num_instances, dtype=np.int32), device=self._device) + + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + self._wrench_buf = wp.zeros((self._num_instances, self._num_bodies, 9), dtype=wp.float32, device=self._device) + + # Joint-index arrays for each actuator (filled by _process_actuators_cfg). + self._joint_ids_per_actuator: dict[str, list[int]] = {} + self._write_scratch: dict[int, wp.array] = {} + + def _process_cfg(self) -> None: + """Process the articulation configuration (initial state, soft limits, etc.).""" + cfg = self.cfg + N = self._num_instances + D = self._num_joints + dev = self._device + + # Default root state from config (matching PhysX pattern). + default_root_pose = tuple(cfg.init_state.pos) + tuple(cfg.init_state.rot) + default_root_vel = tuple(cfg.init_state.lin_vel) + tuple(cfg.init_state.ang_vel) + np_pose = np.tile(np.array(default_root_pose, dtype=np.float32), (N, 1)) + np_vel = np.tile(np.array(default_root_vel, dtype=np.float32), (N, 1)) + wp.copy( + self._data._default_root_pose, + wp.from_numpy(np_pose, dtype=wp.transformf, device=dev), + ) + wp.copy( + self._data._default_root_vel, + wp.from_numpy(np_vel, dtype=wp.spatial_vectorf, device=dev), + ) + + # Default joint positions / velocities from config patterns. + self._resolve_joint_values(cfg.init_state.joint_pos, self._data._default_joint_pos) + self._resolve_joint_values(cfg.init_state.joint_vel, self._data._default_joint_vel) + + # Keep soft-limit computation on-device, matching the PhysX/Newton path. + wp.launch( + update_soft_joint_pos_limits, + dim=(N, D), + inputs=[self._data.joint_pos_limits, cfg.soft_joint_pos_limit_factor], + outputs=[self._data._soft_joint_pos_limits], + device=dev, + ) + + def _invalidate_initialize_callback(self, event) -> None: + self._is_initialized = False + + def _process_actuators_cfg(self) -> None: + """Build actuator instances from the config and write drive properties to PhysX. + + Mirrors what the legacy PhysX backend does in its own _process_actuators_cfg: + - For ImplicitActuator: write the configured stiffness / damping to the PhysX + drive so the solver uses exactly the values from the actuator config. + - For all explicit actuators: zero out PhysX stiffness / damping so the + USD-authored drive gains cannot interfere with the explicit torque path. + - For all actuators: write effort_limit_sim and velocity_limit_sim. + + These writes happen via TensorBinding (GPU-resident) after warmup has + allocated the GPU buffers (MODEL_INIT fires post-warmup). + """ + from isaaclab.actuators import ImplicitActuator + + self.actuators: dict[str, ActuatorBase] = {} + self._has_implicit_actuators = False + for name, act_cfg in self.cfg.actuators.items(): + joint_ids, joint_names = self.find_joints(act_cfg.joint_names_expr) + if not joint_ids: + logger.warning("Actuator '%s': no joints matched '%s'", name, act_cfg.joint_names_expr) + continue + act_cfg_copy = act_cfg.copy() + act = act_cfg_copy.class_type( + act_cfg_copy, + joint_names=joint_names, + joint_ids=joint_ids, + num_envs=self._num_instances, + device=self._device, + ) + self.actuators[name] = act + self._joint_ids_per_actuator[name] = joint_ids + + # Write drive gains and limits to PhysX to match the actuator config. + # Without this, PhysX retains whatever stiffness/damping was authored in the + # USD file, which can produce large restoring forces if the USD gains differ + # from the actuator config (e.g. a position-controlled robot exported with + # non-zero drive stiffness but configured with ImplicitActuator(stiffness=0)). + jids = list(joint_ids) + if isinstance(act, ImplicitActuator): + self._has_implicit_actuators = True + stiffness = act.stiffness # torch (N, J) + damping = act.damping # torch (N, J) + else: + stiffness = wp.zeros((self._num_instances, len(jids)), dtype=wp.float32, device=self._device) + damping = wp.zeros((self._num_instances, len(jids)), dtype=wp.float32, device=self._device) + self.write_joint_stiffness_to_sim_index(stiffness=stiffness, joint_ids=jids) + self.write_joint_damping_to_sim_index(damping=damping, joint_ids=jids) + self.write_joint_effort_limit_to_sim_index(limits=act.effort_limit_sim, joint_ids=jids) + self.write_joint_velocity_limit_to_sim_index(limits=act.velocity_limit_sim, joint_ids=jids) + + def _process_tendons(self) -> None: + """Discover tendon counts from binding metadata and names from USD. + + Tendon counts come from the ovphysx binding metadata. Tendon names are + recovered from the exported USD articulation subtree because ovphysx + exposes joint names/counts, but not the per-joint USD paths that the + PhysX backend can query directly. + """ + self._fixed_tendon_names = [] + self._spatial_tendon_names = [] + + sample = next(iter(self._bindings.values())) + self._num_fixed_tendons = getattr(sample, "fixed_tendon_count", 0) + self._num_spatial_tendons = getattr(sample, "spatial_tendon_count", 0) + + if self._num_fixed_tendons > 0 or self._num_spatial_tendons > 0: + stage_path = OvPhysxManager._stage_path + if stage_path is not None: + try: + from pxr import Usd, UsdPhysics + + from isaaclab.sim.utils.queries import get_all_matching_child_prims + + stage = Usd.Stage.Open(stage_path) + articulation_root_path = getattr(self, "_articulation_root_path", None) + if articulation_root_path is None: + joint_prims = stage.Traverse() + else: + joint_prims = get_all_matching_child_prims( + articulation_root_path, + predicate=lambda p: p.IsA(UsdPhysics.Joint), + stage=stage, + traverse_instance_prims=False, + ) + for prim in joint_prims: + if not prim.IsA(UsdPhysics.Joint): + continue + schema_names = list(prim.GetAppliedSchemas()) + metadata = prim.GetMetadata("apiSchemas") + if metadata is not None: + for field in ("prependedItems", "appendedItems", "explicitItems"): + items = getattr(metadata, field, None) + if items: + schema_names.extend(str(item) for item in items) + schemas_str = " ".join(schema_names) + name = prim.GetPath().name + if "PhysxTendonAxisRootAPI" in schemas_str: + self._fixed_tendon_names.append(name) + elif ( + "PhysxTendonAttachmentRootAPI" in schemas_str + or "PhysxTendonAttachmentLeafAPI" in schemas_str + ): + self._spatial_tendon_names.append(name) + except Exception: + logger.debug("Could not parse USD stage for tendon names at %s", stage_path) + + self._data._num_fixed_tendons = self._num_fixed_tendons + self._data._num_spatial_tendons = self._num_spatial_tendons + self._data.fixed_tendon_names = self._fixed_tendon_names + self._data.spatial_tendon_names = self._spatial_tendon_names + + def _apply_external_wrenches(self) -> None: + """Compose and write external wrenches to the LINK_WRENCH binding. + + WrenchComposer accumulates forces/torques in body (link) frame. + The LINK_WRENCH binding expects world-frame [fx,fy,fz,tx,ty,tz,px,py,pz]. + We rotate the body-frame vectors to world frame using the link quaternion + and pack them into the [N, L, 9] tensor with application position = origin. + """ + inst = self._instantaneous_wrench_composer + perm = self._permanent_wrench_composer + if not inst.active and not perm.active: + return + if inst.active: + if perm.active: + inst.add_forces_and_torques_index( + forces=perm.composed_force, + torques=perm.composed_torque, + body_ids=list(range(self._num_bodies)), + env_ids=list(range(self._num_instances)), + ) + force_b = inst.composed_force + torque_b = inst.composed_torque + else: + force_b = perm.composed_force + torque_b = perm.composed_torque + + poses = self._data.body_link_pose_w + wp.launch( + _body_wrench_to_world, + dim=(self._num_instances, self._num_bodies), + inputs=[force_b, torque_b, poses], + outputs=[self._wrench_buf], + device=self._device, + ) + wrench_binding = self._get_binding(TT.LINK_WRENCH) + if wrench_binding is not None: + wrench_binding.write(self._wrench_buf) + inst.reset() + + def _apply_actuator_model(self) -> None: + """Run the actuator model to compute torques from user targets. + + IsaacLab actuators are torch-based. We convert warp -> torch via + DLPack (zero-copy on GPU), run the actuator, then write results back. + """ + from isaaclab.utils.types import ArticulationActions + + for name, act in self.actuators.items(): + jids = act.joint_indices + if jids is None: + continue + jids_t = jids if isinstance(jids, list) else list(jids) + all_joints = len(jids_t) == self._num_joints + + # warp -> torch (zero-copy on same device via DLPack) + jp_target_full = self._data.joint_pos_target.torch + jv_target_full = self._data.joint_vel_target.torch + je_target_full = self._data.joint_effort_target.torch + jp_target = jp_target_full if all_joints else jp_target_full[:, jids_t] + jv_target = jv_target_full if all_joints else jv_target_full[:, jids_t] + je_target = je_target_full if all_joints else je_target_full[:, jids_t] + + control_action = ArticulationActions( + joint_positions=jp_target, + joint_velocities=jv_target, + joint_efforts=je_target, + ) + + jp_cur_full = self._data.joint_pos.torch + jv_cur_full = self._data.joint_vel.torch + jp_cur = jp_cur_full if all_joints else jp_cur_full[:, jids_t] + jv_cur = jv_cur_full if all_joints else jv_cur_full[:, jids_t] + + control_action = act.compute(control_action, jp_cur, jv_cur) + + if act.computed_effort is not None: + ct = wp.to_torch(self._data._computed_torque) + at = wp.to_torch(self._data._applied_torque) + if all_joints: + ct[:] = act.computed_effort + at[:] = act.applied_effort + else: + ct[:, jids_t] = act.computed_effort + at[:, jids_t] = act.applied_effort + + def _validate_cfg(self) -> None: + pass + + def _log_articulation_info(self) -> None: + """Log information about the articulation. + + .. note:: We purposefully read the values from the simulator to ensure that the values are configured as + expected. + """ + from prettytable import PrettyTable + + def format_large_number(_, v: float) -> str: + if abs(v) >= 1e3: + return f"{v:.1e}" + return f"{v:.3f}" + + def format_limits(_, v: tuple[float, float]) -> str: + if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: + return f"[{v[0]:.1e}, {v[1]:.1e}]" + return f"[{v[0]:.3f}, {v[1]:.3f}]" + + stiffnesses = self.data.joint_stiffness.warp.numpy()[0].tolist() + dampings = self.data.joint_damping.warp.numpy()[0].tolist() + armatures = self.data.joint_armature.warp.numpy()[0].tolist() + frictions = self.data.joint_friction_coeff.warp.numpy()[0].tolist() + pos_limits_np = self.data.joint_pos_limits.warp.numpy().reshape(self._num_instances, self._num_joints, 2) + position_limits = [tuple(pos_limits_np[0, j].tolist()) for j in range(self._num_joints)] + velocity_limits = self.data.joint_vel_limits.warp.numpy()[0].tolist() + effort_limits = self.data.joint_effort_limits.warp.numpy()[0].tolist() + + joint_table = PrettyTable() + joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" + joint_table.field_names = [ + "Index", + "Name", + "Stiffness", + "Damping", + "Armature", + "Friction", + "Position Limits", + "Velocity Limits", + "Effort Limits", + ] + joint_table.custom_format["Stiffness"] = format_large_number + joint_table.custom_format["Damping"] = format_large_number + joint_table.custom_format["Armature"] = format_large_number + joint_table.custom_format["Friction"] = format_large_number + joint_table.custom_format["Position Limits"] = format_limits + joint_table.custom_format["Velocity Limits"] = format_large_number + joint_table.custom_format["Effort Limits"] = format_large_number + joint_table.align["Name"] = "l" + + for index, name in enumerate(self.joint_names): + joint_table.add_row( + [ + index, + name, + stiffnesses[index], + dampings[index], + armatures[index], + frictions[index], + position_limits[index], + velocity_limits[index], + effort_limits[index], + ] + ) + logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) + + if self.num_fixed_tendons > 0: + ft_stiffnesses = self.data.fixed_tendon_stiffness.warp.numpy()[0].tolist() + ft_dampings = self.data.fixed_tendon_damping.warp.numpy()[0].tolist() + ft_limit_stiffnesses = self.data.fixed_tendon_limit_stiffness.warp.numpy()[0].tolist() + ft_limits_np = self.data.fixed_tendon_pos_limits.warp.numpy().reshape( + self._num_instances, self.num_fixed_tendons, 2 + ) + ft_limits = [tuple(ft_limits_np[0, t].tolist()) for t in range(self.num_fixed_tendons)] + ft_rest_lengths = self.data.fixed_tendon_rest_length.warp.numpy()[0].tolist() + ft_offsets = self.data.fixed_tendon_offset.warp.numpy()[0].tolist() + + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Limits", + "Rest Length", + "Offset", + ] + tendon_table.custom_format["Stiffness"] = format_large_number + tendon_table.custom_format["Damping"] = format_large_number + tendon_table.custom_format["Limit Stiffness"] = format_large_number + tendon_table.custom_format["Limits"] = format_limits + tendon_table.custom_format["Rest Length"] = format_large_number + tendon_table.custom_format["Offset"] = format_large_number + for index in range(self.num_fixed_tendons): + tendon_table.add_row( + [ + index, + ft_stiffnesses[index], + ft_dampings[index], + ft_limit_stiffnesses[index], + ft_limits[index], + ft_rest_lengths[index], + ft_offsets[index], + ] + ) + logger.info( + f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + if self.num_spatial_tendons > 0: + st_stiffnesses = self.data.spatial_tendon_stiffness.warp.numpy()[0].tolist() + st_dampings = self.data.spatial_tendon_damping.warp.numpy()[0].tolist() + st_limit_stiffnesses = self.data.spatial_tendon_limit_stiffness.warp.numpy()[0].tolist() + st_offsets = self.data.spatial_tendon_offset.warp.numpy()[0].tolist() + + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Offset", + ] + tendon_table.float_format = ".3" + for index in range(self.num_spatial_tendons): + tendon_table.add_row( + [ + index, + st_stiffnesses[index], + st_dampings[index], + st_limit_stiffnesses[index], + st_offsets[index], + ] + ) + logger.info( + f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + """ + Internal helpers -- Bindings. + """ + + def _get_binding(self, tensor_type: int): + """Return a cached TensorBinding, creating it on first access. + + Bindings are lightweight handles (a pointer + shape metadata into + PhysX's shared GPU buffer). Creating one does NOT allocate new GPU + memory -- the underlying simulation buffers are allocated once by PhysX + regardless of how many bindings point into them. Still, we defer + creation so that tensor types the user never queries are never looked up. + """ + binding = self._bindings.get(tensor_type) + if binding is not None: + return binding + try: + binding = self._physx_instance.create_tensor_binding(pattern=self._binding_pattern, tensor_type=tensor_type) + self._bindings[tensor_type] = binding + return binding + except Exception: + logger.debug("Could not create tensor binding for type %s", tensor_type) + return None + + """ + Internal helpers -- Write. + """ + + def _to_flat_f32(self, data, target_shape: tuple[int, ...] | None = None) -> wp.array | np.ndarray: + """Ensure data is a contiguous float32 tensor suitable for binding I/O. + + State tensor bindings (positions, velocities, poses) live on the + simulation device (GPU in GPU mode). We always return data on + self._device so the binding device check passes. + + For structured warp dtypes (transformf, spatial_vectorf, etc.) a + zero-copy flat float32 view is created instead of roundtripping + through CPU numpy. + """ + dev = self._device + if isinstance(data, wp.array): + if str(data.device) != dev: + data = wp.clone(data, device=dev) + if data.dtype == wp.float32: + return data + # Structured dtype: zero-copy flat float32 view. + # transformf -> [N, 7], spatial_vectorf -> [N, 6], etc. + floats_per_elem = data.strides[0] // 4 + return wp.array( + ptr=data.ptr, + shape=(data.shape[0], floats_per_elem), + dtype=wp.float32, + device=dev, + copy=False, + ) + elif isinstance(data, torch.Tensor): + if data.is_cuda and dev.startswith("cuda"): + return wp.from_torch(data.detach().contiguous().float()) + np_data = data.detach().cpu().numpy().astype(np.float32) + return wp.from_numpy(np_data, dtype=wp.float32, device=dev) + elif isinstance(data, np.ndarray): + return wp.from_numpy(data.astype(np.float32), dtype=wp.float32, device=dev) + elif isinstance(data, (int, float)): + return wp.from_numpy(np.array(data, dtype=np.float32), dtype=wp.float32, device=dev) + return wp.from_numpy(np.asarray(data, dtype=np.float32), dtype=wp.float32, device=dev) + + def _as_gpu_f32_2d(self, data, cols: int) -> wp.array: + """View/convert data as 2D [rows, cols] float32 on self._device. + + For warp arrays with structured dtypes (transformf, spatial_vectorf), + creates a zero-copy flat float32 view. For torch/numpy, converts to + warp on the simulation device. + """ + dev = self._device + if isinstance(data, wp.array): + if str(data.device) != dev: + data = wp.clone(data, device=dev) + if data.dtype == wp.float32 and data.ndim == 2: + return data + n = data.shape[0] + return wp.array( + ptr=data.ptr, + shape=(n, cols), + dtype=wp.float32, + device=dev, + copy=False, + ) + if isinstance(data, torch.Tensor) and data.is_cuda and dev.startswith("cuda"): + return wp.from_torch(data.detach().contiguous().float().reshape(-1, cols)) + np_data = self._to_cpu_numpy(data).reshape(-1, cols) + return wp.from_numpy(np_data, dtype=wp.float32, device=dev) + + def _get_write_scratch(self, tensor_type: int, binding) -> wp.array: + """Return a cached GPU scratch buffer for read-modify-write.""" + if not hasattr(self, "_write_scratch"): + self._write_scratch = {} + buf = self._write_scratch.get(tensor_type) + if buf is None: + buf = wp.zeros(binding.shape, dtype=wp.float32, device=self._device) + self._write_scratch[tensor_type] = buf + return buf + + def _write_root_state(self, tensor_type: int, data, env_ids=None, mask=None, _ids_gpu=None) -> None: + """GPU-native write for root pose [N,7] or velocity [N,6]. + + Three paths, fastest first: + - Full write (no env_ids, no mask): zero-copy DLPack. + - Indexed write with full-size data: zero-copy view + indices. + The binding API only copies the indexed rows from the full buffer, + so no read-modify-write is needed when data is already [N,...]. + - Indexed write with partial data [K,...]: scatter kernel into a GPU + scratch buffer, then write with indices. + - Masked write: data is always full [N,...], pass directly with mask. + + Args: + _ids_gpu: Pre-converted GPU warp int32 array of env indices. + When provided, skips the per-call GPU->CPU->GPU conversion + of env_ids. + """ + binding = self._get_binding(tensor_type) + if binding is None: + return + N, C = binding.shape + + if env_ids is None and _ids_gpu is None and mask is None: + binding.write(self._to_flat_f32(data)) + self._invalidate_root_caches(tensor_type) + return + + src = self._as_gpu_f32_2d(data, C) + + if env_ids is not None or _ids_gpu is not None: + if _ids_gpu is None: + _ids_gpu = self._env_ids_to_gpu_warp(env_ids) + K = _ids_gpu.shape[0] + if src.shape[0] == N: + binding.write(src, indices=_ids_gpu) + else: + scratch = self._get_write_scratch(tensor_type, binding) + binding.read(scratch) + wp.launch( + _scatter_rows_partial, + dim=(K, C), + inputs=[scratch, src, _ids_gpu], + device=self._device, + ) + binding.write(scratch, indices=_ids_gpu) + else: + mask_u8 = wp.from_numpy( + self._to_cpu_numpy(mask).astype(np.uint8), + device=self._device, + ) + binding.write(src, mask=mask_u8) + self._invalidate_root_caches(tensor_type) + + def _invalidate_root_caches(self, tensor_type: int) -> None: + """Force re-read from GPU on next property access after a binding write.""" + if tensor_type == TT.ROOT_POSE: + self.data._root_link_pose_w.timestamp = -1.0 + self.data._root_com_pose_w.timestamp = -1.0 + elif tensor_type == TT.ROOT_VELOCITY: + self.data._root_link_vel_w.timestamp = -1.0 + self.data._root_com_vel_w.timestamp = -1.0 + + def _write_flat_tensor(self, tensor_type: int, data, env_ids=None, joint_ids=None, _ids_gpu=None) -> None: + """Write a 2-D tensor to a binding, with optional env/joint index subsetting.""" + if isinstance(data, (int, float)): + return + binding = self._get_binding(tensor_type) + if binding is None: + return + from isaaclab_ovphysx.tensor_types import _CPU_ONLY_TYPES + + is_cpu_only = tensor_type in _CPU_ONLY_TYPES + + # CPU-only types or column scatter must go through numpy. + if is_cpu_only or joint_ids is not None: + target_device = "cpu" if is_cpu_only else self._device + np_data = self._to_cpu_numpy(data) + if joint_ids is not None: + if is_cpu_only: + full = np.zeros(binding.shape, dtype=np.float32) + binding.read(full) + else: + scratch = self._get_write_scratch(tensor_type, binding) + binding.read(scratch) + full = scratch.numpy() + jids = self._to_cpu_indices(joint_ids, np.intp) + if env_ids is not None: + eids = self._to_cpu_indices(env_ids, np.intp) + full[np.ix_(eids, jids)] = np_data.reshape(len(eids), len(jids), *np_data.shape[2:]) + else: + full[:, jids] = np_data.reshape(full.shape[0], len(jids), *np_data.shape[2:]) + binding.write(wp.from_numpy(full, dtype=wp.float32, device=target_device)) + elif env_ids is not None: + if is_cpu_only: + full = np.zeros(binding.shape, dtype=np.float32) + binding.read(full) + else: + scratch = self._get_write_scratch(tensor_type, binding) + binding.read(scratch) + full = scratch.numpy() + eids = self._to_cpu_indices(env_ids, np.intp) + full[eids] = np_data if np_data.shape[0] == len(eids) else np_data[eids] + flat = wp.from_numpy(full.astype(np.float32), dtype=wp.float32, device=target_device) + idx = _ids_gpu if _ids_gpu is not None else self._env_ids_to_gpu_warp(env_ids) + binding.write(flat, indices=idx) + else: + binding.write(wp.from_numpy(np_data.astype(np.float32), dtype=wp.float32, device=target_device)) + return + + # GPU path: data stays on device. + if env_ids is None and _ids_gpu is None: + binding.write(self._to_flat_f32(data)) + return + + N, C = binding.shape[0], binding.shape[1] + src = self._as_gpu_f32_2d(data, C) + if _ids_gpu is None: + _ids_gpu = self._env_ids_to_gpu_warp(env_ids) + K = _ids_gpu.shape[0] + if src.shape[0] == N: + binding.write(src, indices=_ids_gpu) + else: + scratch = self._get_write_scratch(tensor_type, binding) + binding.read(scratch) + wp.launch( + _scatter_rows_partial, + dim=(K, C), + inputs=[scratch, src, _ids_gpu], + device=self._device, + ) + binding.write(scratch, indices=_ids_gpu) + + def _write_flat_tensor_mask(self, tensor_type: int, data, env_mask=None, joint_mask=None) -> None: + """Write a 2-D tensor to a binding, with optional env/joint mask subsetting.""" + if isinstance(data, (int, float)): + return + binding = self._get_binding(tensor_type) + if binding is None: + return + from isaaclab_ovphysx.tensor_types import _CPU_ONLY_TYPES + + is_cpu_only = tensor_type in _CPU_ONLY_TYPES + + # CPU-only types or column-mask scatter must go through numpy. + if is_cpu_only or joint_mask is not None: + target_device = "cpu" if is_cpu_only else self._device + np_data = self._to_cpu_numpy(data) + if joint_mask is not None: + # GPU bindings cannot read into numpy directly; read into GPU + # scratch first, then pull to CPU for column scatter. + if is_cpu_only: + full = np.zeros(binding.shape, dtype=np.float32) + binding.read(full) + else: + scratch = self._get_write_scratch(tensor_type, binding) + binding.read(scratch) + full = scratch.numpy() + jmask = self._to_cpu_numpy(joint_mask).astype(bool) + cols = np.where(jmask)[0] + if env_mask is not None: + emask = self._to_cpu_numpy(env_mask).astype(bool) + rows = np.where(emask)[0] + full[rows[:, None], cols] = np_data[rows[:, None], cols] + else: + full[:, cols] = np_data[:, cols] + binding.write(wp.from_numpy(full.astype(np.float32), dtype=wp.float32, device=target_device)) + elif env_mask is not None: + flat = wp.from_numpy(np_data.astype(np.float32), dtype=wp.float32, device=target_device) + mask_u8 = wp.from_numpy( + self._to_cpu_numpy(env_mask).astype(np.uint8), + device=target_device, + ) + binding.write(flat, mask=mask_u8) + else: + binding.write(wp.from_numpy(np_data.astype(np.float32), dtype=wp.float32, device=target_device)) + return + + # GPU path: data stays on device. + if env_mask is None: + binding.write(self._to_flat_f32(data)) + return + + # Data is full [N, D], the binding API selects rows via the mask. + mask_u8 = wp.from_numpy( + self._to_cpu_numpy(env_mask).astype(np.uint8), + device=self._device, + ) + binding.write(self._to_flat_f32(data), mask=mask_u8) + + def _write_friction_column(self, data, env_ids=None, joint_ids=None) -> None: + """Write static friction coefficient into column 0 of DOF_FRICTION_PROPERTIES [N,D,3].""" + binding = self._get_binding(TT.DOF_FRICTION_PROPERTIES) + if binding is None: + return + full = np.zeros(binding.shape, dtype=np.float32) + binding.read(full) + if isinstance(data, (int, float)): + if env_ids is not None and joint_ids is not None: + eids = self._to_cpu_numpy(env_ids).astype(np.intp) + jids = self._to_cpu_indices(joint_ids, np.intp) + full[np.ix_(eids, jids, [0])] = data + elif env_ids is not None: + eids = self._to_cpu_numpy(env_ids).astype(np.intp) + full[eids, :, 0] = data + elif joint_ids is not None: + jids = self._to_cpu_indices(joint_ids, np.intp) + full[:, jids, 0] = data + else: + full[..., 0] = data + binding.write(wp.from_numpy(full.astype(np.float32), dtype=wp.float32, device="cpu")) + return + np_data = self._to_cpu_numpy(data) + if env_ids is not None and joint_ids is not None: + eids = self._to_cpu_numpy(env_ids).astype(np.intp) + jids = self._to_cpu_indices(joint_ids, np.intp) + full[np.ix_(eids, jids, [0])] = np_data.reshape(len(eids), len(jids), 1) + elif env_ids is not None: + eids = self._to_cpu_numpy(env_ids).astype(np.intp) + full[eids, :, 0] = np_data.reshape(len(eids), -1) + elif joint_ids is not None: + jids = self._to_cpu_indices(joint_ids, np.intp) + full[:, jids, 0] = np_data.reshape(full.shape[0], len(jids)) + else: + full[..., 0] = np_data.reshape(full.shape[0], full.shape[1]) + binding.write(wp.from_numpy(full.astype(np.float32), dtype=wp.float32, device="cpu")) + + def _write_friction_column_mask(self, data, env_mask=None, joint_mask=None) -> None: + """Write static friction coefficient via mask into column 0 of DOF_FRICTION_PROPERTIES.""" + binding = self._get_binding(TT.DOF_FRICTION_PROPERTIES) + if binding is None: + return + full = np.zeros(binding.shape, dtype=np.float32) + binding.read(full) + if isinstance(data, (int, float)): + new_col = np.full((full.shape[0], full.shape[1]), data, dtype=np.float32) + else: + new_col = self._to_cpu_numpy(data).reshape(full.shape[0], full.shape[1]) + if env_mask is not None: + emask = self._to_cpu_numpy(env_mask).astype(bool) + if joint_mask is not None: + jmask = self._to_cpu_numpy(joint_mask).astype(bool) + rows = np.where(emask)[0] + cols = np.where(jmask)[0] + full[rows[:, None], cols, 0] = new_col[rows[:, None], cols] + else: + full[emask, :, 0] = new_col[emask] + elif joint_mask is not None: + jmask = self._to_cpu_numpy(joint_mask).astype(bool) + full[:, jmask, 0] = new_col[:, jmask] + else: + full[..., 0] = new_col + binding.write(wp.from_numpy(full.astype(np.float32), dtype=wp.float32, device="cpu")) + + def _write_joint_subset(self, tensor_type: int, buffer: wp.array, joint_ids: list[int]) -> None: + """Write a full-width joint buffer into the simulation for an actuator's joints.""" + binding = self._get_binding(tensor_type) + if binding is None: + return + if not hasattr(self, "_write_dltensor_cache"): + self._write_dltensor_cache = {} + cache_key = (tensor_type, buffer.ptr) + cached = self._write_dltensor_cache.get(cache_key) + if cached is None: + flat = self._to_flat_f32(buffer) + from ovphysx._dlpack_utils import acquire_dltensor + + dl, keepalive = acquire_dltensor(flat) + self._write_dltensor_cache[cache_key] = (dl, keepalive, flat) + cached = self._write_dltensor_cache[cache_key] + binding.write(cached[0]) + + @staticmethod + def _to_cpu_numpy(data) -> np.ndarray: + """Convert data (warp, torch, numpy, scalar) to a CPU numpy array.""" + if isinstance(data, wp.array): + return data.numpy().astype(np.float32) + if isinstance(data, torch.Tensor): + return data.detach().cpu().numpy().astype(np.float32) + return np.asarray(data, dtype=np.float32) + + @staticmethod + def _to_cpu_indices(data, dtype=np.int32) -> np.ndarray: + """Convert index array (warp, torch, list, numpy) to CPU numpy int array.""" + if isinstance(data, torch.Tensor): + return data.detach().cpu().numpy().astype(dtype) + if isinstance(data, wp.array): + return data.numpy().astype(dtype) + return np.asarray(data, dtype=dtype) + + def _env_ids_to_gpu_warp(self, env_ids) -> wp.array: + """Convert env_ids to a GPU int32 warp array, with single-entry caching. + + The cache avoids repeated GPU -> CPU -> GPU round-trips when the same + ``env_ids`` object is passed to multiple binding writes in a single step + (e.g. reset writes root_pose, root_vel, joint_pos, joint_vel). A new + object identity (``id()``) or shape change invalidates the cache. + """ + if hasattr(env_ids, "data_ptr"): + key = (env_ids.data_ptr(), env_ids.shape[0]) + elif isinstance(env_ids, wp.array): + key = (env_ids.ptr, env_ids.shape[0]) + else: + key = None + + if key is not None and hasattr(self, "_ids_cache_key") and self._ids_cache_key == key: + return self._ids_cache_val + + result = wp.array(self._to_cpu_indices(env_ids, np.int32), device=self._device) + if key is not None: + self._ids_cache_key = key + self._ids_cache_val = result + return result + + def _set_target_into_buffer(self, buffer: wp.array, data, env_ids=None, joint_ids=None) -> None: + """Set user-provided target data into a warp command buffer. + + For the common case (no index subset), this uses wp.copy to stay on + the simulation device. Subset writes (specific env_ids or joint_ids) + fall back to CPU because warp does not support scatter indexing. + """ + # Fast path: all-joints shortcut. When joint_ids covers every joint + # and env_ids is None, the subset is equivalent to a full copy. + if joint_ids is not None and env_ids is None: + n_joints = buffer.shape[1] if len(buffer.shape) > 1 else 1 + if hasattr(joint_ids, "__len__") and len(joint_ids) == n_joints: + joint_ids = None + if env_ids is None and joint_ids is None: + src = self._to_flat_f32(data) + if isinstance(src, np.ndarray): + src = wp.from_numpy(src, dtype=wp.float32, device=buffer.device) + wp.copy(buffer, src) + else: + np_data = self._to_cpu_numpy(data) + buf_np = buffer.numpy() + env_idx = self._to_cpu_numpy(env_ids).astype(np.intp) if env_ids is not None else None + jnt_idx = self._to_cpu_numpy(joint_ids).astype(np.intp) if joint_ids is not None else None + if env_idx is not None and jnt_idx is not None: + buf_np[np.ix_(env_idx, jnt_idx)] = np_data + elif env_idx is not None: + buf_np[env_idx] = np_data + else: + buf_np[:, jnt_idx] = np_data + wp.copy(buffer, wp.from_numpy(buf_np, dtype=wp.float32, device=buffer.device)) + + def _set_target_into_buffer_mask(self, buffer: wp.array, data, env_mask=None, joint_mask=None) -> None: + """Set user-provided target data into a warp command buffer using masks.""" + if env_mask is None: + src = self._to_flat_f32(data) + if isinstance(src, np.ndarray): + src = wp.from_numpy(src, dtype=wp.float32, device=buffer.device) + wp.copy(buffer, src) + else: + np_data = self._to_cpu_numpy(data) + buf_np = buffer.numpy() + mask_np = self._to_cpu_numpy(env_mask).astype(bool) + buf_np[mask_np] = np_data[mask_np] + wp.copy(buffer, wp.from_numpy(buf_np, dtype=wp.float32, device=buffer.device)) + + """ + Internal helpers -- Utilities. + """ + + def _n_envs_index(self, env_ids): + """Return the number of environments from an env_ids argument.""" + if env_ids is None: + return self._num_instances + if isinstance(env_ids, (list, tuple)): + return len(env_ids) + return env_ids.shape[0] if hasattr(env_ids, "shape") else len(env_ids) + + def _nft(self): + """Return the number of fixed tendons (0 if none).""" + return getattr(self, "_num_fixed_tendons", 0) + + def _nst(self): + """Return the number of spatial tendons (0 if none).""" + return getattr(self, "_num_spatial_tendons", 0) + + def _resolve_joint_values(self, pattern_dict: dict[str, float], buffer: wp.array) -> None: + """Resolve a {pattern: value} dict into a per-joint buffer. + + Builds values on CPU then copies to buffer's device (GPU arrays' + .numpy() returns a read-only copy, not a writable view). + """ + buf_np = buffer.numpy() + modified = False + for pattern, value in pattern_dict.items(): + for j, name in enumerate(self._joint_names): + if re.fullmatch(pattern, name): + buf_np[:, j] = value + modified = True + if modified: + wp.copy(buffer, wp.from_numpy(buf_np, dtype=buffer.dtype, device=str(buffer.device))) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation_data.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation_data.py new file mode 100644 index 000000000000..10c7e4b7ecd6 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/articulation_data.py @@ -0,0 +1,1760 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Articulation data backed by ovphysx TensorBindingsAPI.""" + +from __future__ import annotations + +import warnings +from typing import Any + +import numpy as np +import warp as wp + +from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.warp import ProxyArray + +from isaaclab_ovphysx import tensor_types as TT + +from .kernels import ( + _compose_body_com_poses, + _compose_root_com_pose, + _compute_heading, + _copy_first_body, + _fd_joint_acc, + _projected_gravity, + _world_vel_to_body_ang, + _world_vel_to_body_lin, +) + + +class ArticulationData(BaseArticulationData): + """Data container for an articulation backed by ovphysx tensor bindings. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + + Uses ovphysx :class:`TensorBinding` objects to lazily read simulation state into warp + arrays. Writes happen via the :class:`Articulation` class. + """ + + __backend_name__: str = "ovphysx" + """The name of the backend for the articulation data.""" + + def __init__(self, bindings: dict[int, Any], device: str, binding_getter=None): + """Initialize the articulation data. + + Args: + bindings: Mapping from ovphysx tensor type constant to a + live TensorBinding for this articulation. + device: The compute device (``"cpu"`` or ``"cuda:N"``). + binding_getter: Optional callable(tensor_type) -> TensorBinding + that lazily creates bindings on first access. When provided, + ``_get_binding()`` delegates to this instead of only checking + the static ``bindings`` dict. + """ + super().__init__(root_view=None, device=device) + self._bindings = bindings + self._binding_getter = binding_getter + self._sim_timestamp: float = 0.0 + self._is_primed = False + + # Metadata from an arbitrary articulation binding. + sample = next(iter(bindings.values())) + self._num_instances = sample.count + self._num_joints = sample.dof_count + self._num_bodies = sample.body_count + self._is_fixed_base = sample.is_fixed_base + + self.body_names = list(sample.body_names) + self.joint_names = list(sample.dof_names) + self.fixed_tendon_names: list[str] = [] + self.spatial_tendon_names: list[str] = [] + + self._num_fixed_tendons = 0 + self._num_spatial_tendons = 0 + + # Initialize parametric gravity and forward vectors (matching PhysX/Newton pattern). + # Guard against None sim context (e.g. mock/test environments). + from isaaclab.physics import PhysicsManager + + gravity = (0.0, 0.0, -9.81) + if PhysicsManager._sim is not None and hasattr(PhysicsManager._sim, "cfg"): + gravity = PhysicsManager._sim.cfg.gravity + gravity_np = np.array(gravity, dtype=np.float32) + gravity_mag = np.linalg.norm(gravity_np) + if gravity_mag == 0.0: + gravity_dir = np.array([0.0, 0.0, -1.0], dtype=np.float32) + else: + gravity_dir = gravity_np / gravity_mag + gravity_dir_tiled = np.tile(gravity_dir, (self._num_instances, 1)) + forward_tiled = np.tile(np.array([1.0, 0.0, 0.0], dtype=np.float32), (self._num_instances, 1)) + + self.GRAVITY_VEC_W = ProxyArray(wp.from_numpy(gravity_dir_tiled, dtype=wp.vec3f, device=device)) + self.FORWARD_VEC_B = ProxyArray(wp.from_numpy(forward_tiled, dtype=wp.vec3f, device=device)) + + def update(self, dt: float) -> None: + """Update the data for the articulation. + + Args: + dt: The time step for the update [s]. This must be a positive value. + """ + self._sim_timestamp += dt + + # Finite-difference joint acceleration from velocity. + if dt > 0.0 and self._previous_joint_vel is not None: + cur_vel = self.joint_vel + wp.launch( + _fd_joint_acc, + dim=(self._num_instances, self._num_joints), + inputs=[cur_vel, self._previous_joint_vel, 1.0 / dt], + outputs=[self._joint_acc.data], + device=self.device, + ) + self._joint_acc.timestamp = self._sim_timestamp + + @property + def is_primed(self) -> bool: + """Whether the articulation data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the articulation data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the articulation data is already primed. + """ + if self._is_primed: + raise ValueError("The articulation data is already primed.") + self._is_primed = True + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + """ + Defaults - Initial state. + """ + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose ``[pos, quat]`` in the local environment frame. + + The position and quaternion are of the articulation root's actor frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + """ + if self._default_root_pose_ta is None: + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + return self._default_root_pose_ta + + @default_root_pose.setter + def default_root_pose(self, value: wp.array) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_pose.assign(value) + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. + + The linear and angular velocities are of the articulation root's center of mass frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + """ + if self._default_root_vel_ta is None: + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + return self._default_root_vel_ta + + @default_root_vel.setter + def default_root_vel(self, value: wp.array) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_vel.assign(value) + + @property + def default_joint_pos(self) -> ProxyArray: + """Default joint positions of all joints [m or rad, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + if self._default_joint_pos_ta is None: + self._default_joint_pos_ta = ProxyArray(self._default_joint_pos) + return self._default_joint_pos_ta + + @default_joint_pos.setter + def default_joint_pos(self, value: wp.array) -> None: + """Set the default joint positions. + + Args: + value: The default joint positions. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_pos.assign(value) + + @property + def default_joint_vel(self) -> ProxyArray: + """Default joint velocities of all joints [m/s or rad/s, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + if self._default_joint_vel_ta is None: + self._default_joint_vel_ta = ProxyArray(self._default_joint_vel) + return self._default_joint_vel_ta + + @default_joint_vel.setter + def default_joint_vel(self, value: wp.array) -> None: + """Set the default joint velocities. + + Args: + value: The default joint velocities. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_vel.assign(value) + + """ + Joint commands -- Set into simulation. + """ + + @property + def joint_pos_target(self) -> ProxyArray: + """Joint position targets commanded by the user [m or rad, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques + (see :attr:`applied_torque`), which are then set into the simulation. + """ + if self._joint_pos_target_ta is None: + self._joint_pos_target_ta = ProxyArray(self._joint_pos_target) + return self._joint_pos_target_ta + + @property + def joint_vel_target(self) -> ProxyArray: + """Joint velocity targets commanded by the user [m/s or rad/s, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques + (see :attr:`applied_torque`), which are then set into the simulation. + """ + if self._joint_vel_target_ta is None: + self._joint_vel_target_ta = ProxyArray(self._joint_vel_target) + return self._joint_vel_target_ta + + @property + def joint_effort_target(self) -> ProxyArray: + """Joint effort targets commanded by the user [N or N*m, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques + (see :attr:`applied_torque`), which are then set into the simulation. + """ + if self._joint_effort_target_ta is None: + self._joint_effort_target_ta = ProxyArray(self._joint_effort_target) + return self._joint_effort_target_ta + + """ + Joint commands -- Explicit actuators. + """ + + @property + def computed_torque(self) -> ProxyArray: + """Joint torques computed from the actuator model (before clipping) [N*m]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + This quantity is the raw torque output from the actuator model, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + if self._computed_torque_ta is None: + self._computed_torque_ta = ProxyArray(self._computed_torque) + return self._computed_torque_ta + + @property + def applied_torque(self) -> ProxyArray: + """Joint torques applied from the actuator model (after clipping) [N*m]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. + """ + if self._applied_torque_ta is None: + self._applied_torque_ta = ProxyArray(self._applied_torque) + return self._applied_torque_ta + + """ + Joint properties + """ + + @property + def joint_stiffness(self) -> ProxyArray: + """Joint stiffness provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + if self._joint_stiffness_ta is None: + self._joint_stiffness_ta = ProxyArray(self._joint_stiffness) + return self._joint_stiffness_ta + + @property + def joint_damping(self) -> ProxyArray: + """Joint damping provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + if self._joint_damping_ta is None: + self._joint_damping_ta = ProxyArray(self._joint_damping) + return self._joint_damping_ta + + @property + def joint_armature(self) -> ProxyArray: + """Joint armature provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._joint_armature_ta is None: + self._joint_armature_ta = ProxyArray(self._joint_armature) + return self._joint_armature_ta + + @property + def joint_friction_coeff(self) -> ProxyArray: + """Joint static friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._joint_friction_coeff_ta is None: + self._joint_friction_coeff_ta = ProxyArray(self._joint_friction_coeff) + return self._joint_friction_coeff_ta + + @property + def joint_pos_limits(self) -> ProxyArray: + """Joint position limits provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + if self._joint_pos_limits_ta is None: + self._joint_pos_limits_ta = ProxyArray(self._joint_pos_limits) + return self._joint_pos_limits_ta + + @property + def joint_vel_limits(self) -> ProxyArray: + """Joint maximum velocity provided to the simulation [m/s or rad/s, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._joint_vel_limits_ta is None: + self._joint_vel_limits_ta = ProxyArray(self._joint_vel_limits) + return self._joint_vel_limits_ta + + @property + def joint_effort_limits(self) -> ProxyArray: + """Joint maximum effort provided to the simulation [N or N*m, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._joint_effort_limits_ta is None: + self._joint_effort_limits_ta = ProxyArray(self._joint_effort_limits) + return self._joint_effort_limits_ta + + """ + Joint properties - Custom. + """ + + @property + def soft_joint_pos_limits(self) -> ProxyArray: + r"""Soft joint position limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft\_lower, soft\_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + if self._soft_joint_pos_limits_ta is None: + self._soft_joint_pos_limits_ta = ProxyArray(self._soft_joint_pos_limits) + return self._soft_joint_pos_limits_ta + + @property + def soft_joint_vel_limits(self) -> ProxyArray: + """Soft joint velocity limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. For instance, in a variable gear ratio actuator model. + """ + if self._soft_joint_vel_limits_ta is None: + self._soft_joint_vel_limits_ta = ProxyArray(self._soft_joint_vel_limits) + return self._soft_joint_vel_limits_ta + + @property + def gear_ratio(self) -> ProxyArray: + """Gear ratio for relating motor torques to applied joint torques. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + if self._gear_ratio_ta is None: + self._gear_ratio_ta = ProxyArray(self._gear_ratio) + return self._gear_ratio_ta + + """ + Fixed tendon properties. + """ + + @property + def fixed_tendon_stiffness(self) -> ProxyArray: + """Fixed tendon stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_stiffness_ta is None: + self._fixed_tendon_stiffness_ta = ProxyArray(self._fixed_tendon_stiffness) + return self._fixed_tendon_stiffness_ta + + @property + def fixed_tendon_damping(self) -> ProxyArray: + """Fixed tendon damping provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_damping_ta is None: + self._fixed_tendon_damping_ta = ProxyArray(self._fixed_tendon_damping) + return self._fixed_tendon_damping_ta + + @property + def fixed_tendon_limit_stiffness(self) -> ProxyArray: + """Fixed tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_limit_stiffness_ta is None: + self._fixed_tendon_limit_stiffness_ta = ProxyArray(self._fixed_tendon_limit_stiffness) + return self._fixed_tendon_limit_stiffness_ta + + @property + def fixed_tendon_rest_length(self) -> ProxyArray: + """Fixed tendon rest length provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_rest_length_ta is None: + self._fixed_tendon_rest_length_ta = ProxyArray(self._fixed_tendon_rest_length) + return self._fixed_tendon_rest_length_ta + + @property + def fixed_tendon_offset(self) -> ProxyArray: + """Fixed tendon offset provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_offset_ta is None: + self._fixed_tendon_offset_ta = ProxyArray(self._fixed_tendon_offset) + return self._fixed_tendon_offset_ta + + @property + def fixed_tendon_pos_limits(self) -> ProxyArray: + """Fixed tendon position limits provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_fixed_tendons, 2). + """ + if self._fixed_tendon_pos_limits_ta is None: + self._fixed_tendon_pos_limits_ta = ProxyArray(self._fixed_tendon_pos_limits) + return self._fixed_tendon_pos_limits_ta + + """ + Spatial tendon properties. + """ + + @property + def spatial_tendon_stiffness(self) -> ProxyArray: + """Spatial tendon stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_stiffness_ta is None: + self._spatial_tendon_stiffness_ta = ProxyArray(self._spatial_tendon_stiffness) + return self._spatial_tendon_stiffness_ta + + @property + def spatial_tendon_damping(self) -> ProxyArray: + """Spatial tendon damping provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_damping_ta is None: + self._spatial_tendon_damping_ta = ProxyArray(self._spatial_tendon_damping) + return self._spatial_tendon_damping_ta + + @property + def spatial_tendon_limit_stiffness(self) -> ProxyArray: + """Spatial tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_limit_stiffness_ta is None: + self._spatial_tendon_limit_stiffness_ta = ProxyArray(self._spatial_tendon_limit_stiffness) + return self._spatial_tendon_limit_stiffness_ta + + @property + def spatial_tendon_offset(self) -> ProxyArray: + """Spatial tendon offset provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_offset_ta is None: + self._spatial_tendon_offset_ta = ProxyArray(self._spatial_tendon_offset) + return self._spatial_tendon_offset_ta + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + self._read_transform_binding(TT.ROOT_POSE, self._root_link_pose_w) + if self._root_link_pose_w_ta is None: + self._root_link_pose_w_ta = ProxyArray(self._root_link_pose_w.data) + return self._root_link_pose_w_ta + + @property + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. + """ + # ovphysx ROOT_VELOCITY is COM velocity; link velocity comes from the first + # element of the per-link velocity tensor. + self._read_spatial_vector_binding(TT.LINK_VELOCITY, self._body_link_vel_w) + if self._root_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + _copy_first_body, + dim=self._num_instances, + inputs=[self._body_link_vel_w.data], + outputs=[self._root_link_vel_w.data], + device=self.device, + ) + self._root_link_vel_w.timestamp = self._sim_timestamp + if self._root_link_vel_w_ta is None: + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w.data) + return self._root_link_vel_w_ta + + @property + def root_com_pose_w(self) -> ProxyArray: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + _compose_root_com_pose, + dim=self._num_instances, + inputs=[self.root_link_pose_w, self.body_com_pose_b], + outputs=[self._root_com_pose_w.data], + device=self.device, + ) + self._root_com_pose_w.timestamp = self._sim_timestamp + if self._root_com_pose_w_ta is None: + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w.data) + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + self._read_spatial_vector_binding(TT.ROOT_VELOCITY, self._root_com_vel_w) + if self._root_com_vel_w_ta is None: + self._root_com_vel_w_ta = ProxyArray(self._root_com_vel_w.data) + return self._root_com_vel_w_ta + + """ + Body state properties. + """ + + @property + def body_mass(self) -> ProxyArray: + """Body mass in the world frame [kg]. + + Shape is (num_instances, num_bodies), dtype = wp.float32. In torch this resolves to + (num_instances, num_bodies). + """ + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Flattened body inertia in the world frame [kg*m^2]. + + Shape is (num_instances, num_bodies, 9), dtype = wp.float32. In torch this resolves to + (num_instances, num_bodies, 9). + + Stored as a flattened 3x3 inertia matrix per body. + """ + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + self._read_transform_binding(TT.LINK_POSE, self._body_link_pose_w) + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w.data) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. + """ + self._read_spatial_vector_binding(TT.LINK_VELOCITY, self._body_link_vel_w) + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w.data) + return self._body_link_vel_w_ta + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + _compose_body_com_poses, + dim=(self._num_instances, self._num_bodies), + inputs=[self.body_link_pose_w, self.body_com_pose_b], + outputs=[self._body_com_pose_w.data], + device=self.device, + ) + self._body_com_pose_w.timestamp = self._sim_timestamp + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w.data) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + + .. note:: + This is currently approximated using the link velocity. A proper COM velocity derivation + accounting for the COM offset is not yet implemented. + """ + return self.body_link_vel_w + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]`` [m/s^2, rad/s^2]. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + self._read_spatial_vector_binding(TT.LINK_ACCELERATION, self._body_com_acc_w) + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + self._read_transform_binding(TT.BODY_COM_POSE, self._body_com_pose_b) + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + return self._body_com_pose_b_ta + + """ + Joint state properties. + """ + + @property + def joint_pos(self) -> ProxyArray: + """Joint positions of all joints [m or rad, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + self._read_binding_into_buf(TT.DOF_POSITION, self._joint_pos_buf) + if self._joint_pos_ta is None: + self._joint_pos_ta = ProxyArray(self._joint_pos_buf.data) + return self._joint_pos_ta + + @property + def joint_vel(self) -> ProxyArray: + """Joint velocities of all joints [m/s or rad/s, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + self._read_binding_into_buf(TT.DOF_VELOCITY, self._joint_vel_buf) + if self._joint_vel_ta is None: + self._joint_vel_ta = ProxyArray(self._joint_vel_buf.data) + return self._joint_vel_ta + + @property + def joint_acc(self) -> ProxyArray: + """Joint acceleration of all joints [m/s^2 or rad/s^2, depending on joint type]. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + + .. note:: + This quantity is computed via finite differencing of joint velocities. + """ + if self._joint_acc_ta is None: + self._joint_acc_ta = ProxyArray(self._joint_acc.data) + return self._joint_acc_ta + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + _projected_gravity, + dim=self._num_instances, + inputs=[self.GRAVITY_VEC_W, self.root_link_pose_w], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + return self._projected_gravity_b_ta + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + Shape is (num_instances,), dtype = wp.float32. + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + _compute_heading, + dim=self._num_instances, + inputs=[self.FORWARD_VEC_B, self.root_link_pose_w], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + if self._heading_w_ta is None: + self._heading_w_ta = ProxyArray(self._heading_w.data) + return self._heading_w_ta + + @property + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame [m/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's actor frame with respect to its actor frame. + """ + if self._root_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + _world_vel_to_body_lin, + dim=self._num_instances, + inputs=[self.root_link_pose_w, self.root_link_vel_w], + outputs=[self._root_link_lin_vel_b.data], + device=self.device, + ) + self._root_link_lin_vel_b.timestamp = self._sim_timestamp + if self._root_link_lin_vel_b_ta is None: + self._root_link_lin_vel_b_ta = ProxyArray(self._root_link_lin_vel_b.data) + return self._root_link_lin_vel_b_ta + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame [rad/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's actor frame with respect to its actor frame. + """ + if self._root_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + _world_vel_to_body_ang, + dim=self._num_instances, + inputs=[self.root_link_pose_w, self.root_link_vel_w], + outputs=[self._root_link_ang_vel_b.data], + device=self.device, + ) + self._root_link_ang_vel_b.timestamp = self._sim_timestamp + if self._root_link_ang_vel_b_ta is None: + self._root_link_ang_vel_b_ta = ProxyArray(self._root_link_ang_vel_b.data) + return self._root_link_ang_vel_b_ta + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame [m/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's center of mass frame + with respect to its actor frame. + """ + if self._root_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + _world_vel_to_body_lin, + dim=self._num_instances, + inputs=[self.root_link_pose_w, self.root_com_vel_w], + outputs=[self._root_com_lin_vel_b.data], + device=self.device, + ) + self._root_com_lin_vel_b.timestamp = self._sim_timestamp + if self._root_com_lin_vel_b_ta is None: + self._root_com_lin_vel_b_ta = ProxyArray(self._root_com_lin_vel_b.data) + return self._root_com_lin_vel_b_ta + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame [rad/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's center of mass frame + with respect to its actor frame. + """ + if self._root_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + _world_vel_to_body_ang, + dim=self._num_instances, + inputs=[self.root_link_pose_w, self.root_com_vel_w], + outputs=[self._root_com_ang_vel_b.data], + device=self.device, + ) + self._root_com_ang_vel_b.timestamp = self._sim_timestamp + if self._root_com_ang_vel_b_ta is None: + self._root_com_ang_vel_b_ta = ProxyArray(self._root_com_ang_vel_b.data) + return self._root_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + parent = self.root_link_pose_w + if self._root_link_pos_w_ta is None: + self._root_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_link_pos_w_ta + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation (x, y, z, w) in simulation world frame. + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + parent = self.root_link_pose_w + if self._root_link_quat_w_ta is None: + self._root_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_link_quat_w_ta + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root linear velocity in simulation world frame [m/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + parent = self.root_link_vel_w + if self._root_link_lin_vel_w_ta is None: + self._root_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_link_lin_vel_w_ta + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in simulation world frame [rad/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + parent = self.root_link_vel_w + if self._root_link_ang_vel_w_ta is None: + self._root_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_link_ang_vel_w_ta + + @property + def root_com_pos_w(self) -> ProxyArray: + """Root center of mass position in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the center of mass frame of the root rigid body relative to the world. + """ + parent = self.root_com_pose_w + if self._root_com_pos_w_ta is None: + self._root_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_com_pos_w_ta + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root center of mass orientation (x, y, z, w) in simulation world frame. + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the principal axes of inertia of the root rigid body relative to the world. + """ + parent = self.root_com_pose_w + if self._root_com_quat_w_ta is None: + self._root_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_com_quat_w_ta + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root center of mass linear velocity in simulation world frame [m/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + parent = self.root_com_vel_w + if self._root_com_lin_vel_w_ta is None: + self._root_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_com_lin_vel_w_ta + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root center of mass angular velocity in simulation world frame [rad/s]. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + parent = self.root_com_vel_w + if self._root_com_ang_vel_w_ta is None: + self._root_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_com_ang_vel_w_ta + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame [m/s]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame [rad/s]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' center of mass frame. + """ + parent = self.body_com_pose_w + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' principal axes of inertia. + """ + parent = self.body_com_pose_w + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame [m/s]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame [rad/s]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame [m/s^2]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear acceleration of the articulation bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame [rad/s^2]. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular acceleration of the articulation bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the center of mass location relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_pos_b_ta is None: + self._body_com_pos_b_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_b_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_b_ta + + """ + Deprecated in base class (required by ABC for backward compatibility). + """ + + @property + def default_root_state(self) -> ProxyArray: + """Deprecated. Use :attr:`default_root_pose` and :attr:`default_root_vel` instead.""" + warnings.warn( + "default_root_state is deprecated. Use default_root_pose and default_root_vel.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_state_w_buf is None: + self._root_state_w_buf = wp.zeros( + self._num_instances, dtype=wp.types.vector(13, wp.float32), device=self.device + ) + if self._default_root_state_ta is None: + self._default_root_state_ta = ProxyArray(self._root_state_w_buf) + return self._default_root_state_ta + + @property + def root_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`root_link_pose_w` and :attr:`root_com_vel_w` instead.""" + warnings.warn( + "root_state_w is deprecated. Use root_link_pose_w and root_com_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_link_pose_w + + @property + def root_link_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`root_link_pose_w` and :attr:`root_link_vel_w` instead.""" + warnings.warn( + "root_link_state_w is deprecated. Use root_link_pose_w and root_link_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_link_pose_w + + @property + def root_com_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`root_com_pose_w` and :attr:`root_com_vel_w` instead.""" + warnings.warn( + "root_com_state_w is deprecated. Use root_com_pose_w and root_com_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_com_pose_w + + @property + def body_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`body_link_pose_w` and :attr:`body_com_vel_w` instead.""" + warnings.warn( + "body_state_w is deprecated. Use body_link_pose_w and body_com_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.body_link_pose_w + + @property + def body_link_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`body_link_pose_w` and :attr:`body_link_vel_w` instead.""" + warnings.warn( + "body_link_state_w is deprecated. Use body_link_pose_w and body_link_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.body_link_pose_w + + @property + def body_com_state_w(self) -> ProxyArray: + """Deprecated. Use :attr:`body_com_pose_w` and :attr:`body_com_vel_w` instead.""" + warnings.warn( + "body_com_state_w is deprecated. Use body_com_pose_w and body_com_vel_w.", + DeprecationWarning, + stacklevel=2, + ) + return self.body_com_pose_w + + """ + Internal helper. + """ + + def _create_buffers(self) -> None: # noqa: C901 + super()._create_buffers() + # Scratch buffers for _read_binding_into_* methods, allocated lazily + # on first use and reused every subsequent step to avoid per-step + # allocation overhead on the hot RL path. + self._read_scratch: dict = {} + + N = self._num_instances + D = self._num_joints + L = self._num_bodies + dev = self.device + + # -- Root state buffers + self._root_link_pose_w = TimestampedBuffer(N, dev, wp.transformf) + self._root_link_vel_w = TimestampedBuffer(N, dev, wp.spatial_vectorf) + self._root_com_pose_w = TimestampedBuffer(N, dev, wp.transformf) + self._root_com_vel_w = TimestampedBuffer(N, dev, wp.spatial_vectorf) + + # -- Body state buffers + self._body_link_pose_w = TimestampedBuffer((N, L), dev, wp.transformf) + self._body_link_vel_w = TimestampedBuffer((N, L), dev, wp.spatial_vectorf) + self._body_com_pose_b = TimestampedBuffer((N, L), dev, wp.transformf) + self._body_com_pose_w = TimestampedBuffer((N, L), dev, wp.transformf) + self._body_com_vel_w = TimestampedBuffer((N, L), dev, wp.spatial_vectorf) + self._body_com_acc_w = TimestampedBuffer((N, L), dev, wp.spatial_vectorf) + # -- Joint state buffers + self._joint_pos_buf = TimestampedBuffer((N, D), dev, wp.float32) + self._joint_vel_buf = TimestampedBuffer((N, D), dev, wp.float32) + self._joint_acc = TimestampedBuffer((N, D), dev, wp.float32) + self._previous_joint_vel = wp.zeros((N, D), dtype=wp.float32, device=dev) + + # -- Joint properties + self._joint_stiffness = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_damping = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_armature = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_friction_coeff = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_pos_limits = wp.zeros((N, D), dtype=wp.vec2f, device=dev) + self._joint_vel_limits = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_effort_limits = wp.zeros((N, D), dtype=wp.float32, device=dev) + + # -- Body properties + self._body_mass = wp.zeros((N, L), dtype=wp.float32, device=dev) + self._body_inertia = wp.zeros((N, L, 9), dtype=wp.float32, device=dev) + + # -- Soft limits / custom properties + self._soft_joint_pos_limits = wp.zeros((N, D), dtype=wp.vec2f, device=dev) + self._soft_joint_vel_limits = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._gear_ratio = wp.ones((N, D), dtype=wp.float32, device=dev) + + # -- Command buffers + self._joint_pos_target = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_vel_target = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._joint_effort_target = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._computed_torque = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._applied_torque = wp.zeros((N, D), dtype=wp.float32, device=dev) + + # -- Default state + self._default_root_pose = wp.zeros(N, dtype=wp.transformf, device=dev) + self._default_root_vel = wp.zeros(N, dtype=wp.spatial_vectorf, device=dev) + self._default_joint_pos = wp.zeros((N, D), dtype=wp.float32, device=dev) + self._default_joint_vel = wp.zeros((N, D), dtype=wp.float32, device=dev) + + # -- Derived property buffers + self._projected_gravity_b = TimestampedBuffer(N, dev, wp.vec3f) + self._heading_w = TimestampedBuffer(N, dev, wp.float32) + self._root_link_lin_vel_b = TimestampedBuffer(N, dev, wp.vec3f) + self._root_link_ang_vel_b = TimestampedBuffer(N, dev, wp.vec3f) + self._root_com_lin_vel_b = TimestampedBuffer(N, dev, wp.vec3f) + self._root_com_ang_vel_b = TimestampedBuffer(N, dev, wp.vec3f) + + # -- Deprecated combined state buffers + self._root_state_w_buf = None + self._root_link_state_w_buf = None + self._root_com_state_w_buf = None + self._body_state_w_buf = None + self._body_link_state_w_buf = None + self._body_com_state_w_buf = None + + # -- Tendon property buffers + T_fix = getattr(self, "_num_fixed_tendons", 0) + T_spa = getattr(self, "_num_spatial_tendons", 0) + if T_fix > 0: + self._fixed_tendon_stiffness = wp.zeros((N, T_fix), dtype=wp.float32, device=dev) + self._fixed_tendon_damping = wp.zeros((N, T_fix), dtype=wp.float32, device=dev) + self._fixed_tendon_limit_stiffness = wp.zeros((N, T_fix), dtype=wp.float32, device=dev) + self._fixed_tendon_rest_length = wp.zeros((N, T_fix), dtype=wp.float32, device=dev) + self._fixed_tendon_offset = wp.zeros((N, T_fix), dtype=wp.float32, device=dev) + self._fixed_tendon_pos_limits = wp.zeros((N, T_fix), dtype=wp.vec2f, device=dev) + else: + self._fixed_tendon_stiffness = None + self._fixed_tendon_damping = None + self._fixed_tendon_limit_stiffness = None + self._fixed_tendon_rest_length = None + self._fixed_tendon_offset = None + self._fixed_tendon_pos_limits = None + if T_spa > 0: + self._spatial_tendon_stiffness = wp.zeros((N, T_spa), dtype=wp.float32, device=dev) + self._spatial_tendon_damping = wp.zeros((N, T_spa), dtype=wp.float32, device=dev) + self._spatial_tendon_limit_stiffness = wp.zeros((N, T_spa), dtype=wp.float32, device=dev) + self._spatial_tendon_offset = wp.zeros((N, T_spa), dtype=wp.float32, device=dev) + else: + self._spatial_tendon_stiffness = None + self._spatial_tendon_damping = None + self._spatial_tendon_limit_stiffness = None + self._spatial_tendon_offset = None + + # Read initial joint properties from bindings + self._read_initial_properties() + + # Initialize ProxyArray wrappers (lazily created on first access) + self._pin_proxy_arrays() + + def _read_initial_properties(self) -> None: + """Read static/initial joint and body properties from ovphysx bindings. + + These are one-time reads at init. Property tensors (stiffness, + damping, limits, mass, etc.) are CPU-resident in PhysX even in GPU + mode, so we read them via CPU numpy buffers and then copy to the + simulation device. + """ + + # Property reads always use CPU numpy (property tensors are host-side). + def _read_cpu(tensor_type): + binding = self._get_binding(tensor_type) + if binding is None: + return None + np_buf = np.zeros(binding.shape, dtype=np.float32) + binding.read(np_buf) + return np_buf + + for tt, dst in [ + (TT.DOF_STIFFNESS, self._joint_stiffness), + (TT.DOF_DAMPING, self._joint_damping), + (TT.DOF_ARMATURE, self._joint_armature), + (TT.DOF_MAX_VELOCITY, self._joint_vel_limits), + (TT.DOF_MAX_FORCE, self._joint_effort_limits), + (TT.BODY_MASS, self._body_mass), + ]: + np_buf = _read_cpu(tt) + if np_buf is not None: + wp.copy(dst, wp.from_numpy(np_buf, dtype=wp.float32, device=self.device)) + + # Joint position limits: [N, D, 2] -> (N, D) wp.vec2f + np_lim = _read_cpu(TT.DOF_LIMIT) + if np_lim is not None: + self._joint_pos_limits = wp.from_numpy( + np_lim.reshape(self._num_instances, self._num_joints, 2), dtype=wp.vec2f, device=self.device + ) + + # Body inertia: [N, L, 9] + np_iner = _read_cpu(TT.BODY_INERTIA) + if np_iner is not None: + self._body_inertia = wp.from_numpy(np_iner, dtype=wp.float32, device=self.device) + + # Friction: [N, D, 3] -> extract static friction (column 0) + np_fric = _read_cpu(TT.DOF_FRICTION_PROPERTIES) + if np_fric is not None: + self._joint_friction_coeff = wp.from_numpy(np_fric[..., 0].copy(), dtype=wp.float32, device=self.device) + + # Fixed tendon properties (CPU-side, read once) + T_fix = getattr(self, "_num_fixed_tendons", 0) + if T_fix > 0: + for tt, dst in [ + (TT.FIXED_TENDON_STIFFNESS, self._fixed_tendon_stiffness), + (TT.FIXED_TENDON_DAMPING, self._fixed_tendon_damping), + (TT.FIXED_TENDON_LIMIT_STIFFNESS, self._fixed_tendon_limit_stiffness), + (TT.FIXED_TENDON_REST_LENGTH, self._fixed_tendon_rest_length), + (TT.FIXED_TENDON_OFFSET, self._fixed_tendon_offset), + ]: + np_buf = _read_cpu(tt) + if np_buf is not None and dst is not None: + wp.copy(dst, wp.from_numpy(np_buf, dtype=wp.float32, device=self.device)) + # Fixed tendon limits: [N, T, 2] -> (N, T) wp.vec2f + np_tlim = _read_cpu(TT.FIXED_TENDON_LIMIT) + if np_tlim is not None and self._fixed_tendon_pos_limits is not None: + self._fixed_tendon_pos_limits = wp.from_numpy( + np_tlim.reshape(self._num_instances, T_fix, 2), dtype=wp.vec2f, device=self.device + ) + + # Spatial tendon properties (CPU-side, read once) + T_spa = getattr(self, "_num_spatial_tendons", 0) + if T_spa > 0: + for tt, dst in [ + (TT.SPATIAL_TENDON_STIFFNESS, self._spatial_tendon_stiffness), + (TT.SPATIAL_TENDON_DAMPING, self._spatial_tendon_damping), + (TT.SPATIAL_TENDON_LIMIT_STIFFNESS, self._spatial_tendon_limit_stiffness), + (TT.SPATIAL_TENDON_OFFSET, self._spatial_tendon_offset), + ]: + np_buf = _read_cpu(tt) + if np_buf is not None and dst is not None: + wp.copy(dst, wp.from_numpy(np_buf, dtype=wp.float32, device=self.device)) + + def _pin_proxy_arrays(self) -> None: + """Create pinned ProxyArray wrappers for all data buffers. + + This is called once from :meth:`_create_buffers` during initialization. + All ``_ta`` fields are lazily populated on first property access. + """ + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + # Defaults + self._default_root_pose_ta: ProxyArray | None = None + self._default_root_vel_ta: ProxyArray | None = None + self._default_joint_pos_ta: ProxyArray | None = None + self._default_joint_vel_ta: ProxyArray | None = None + # Joint commands (set into simulation) + self._joint_pos_target_ta: ProxyArray | None = None + self._joint_vel_target_ta: ProxyArray | None = None + self._joint_effort_target_ta: ProxyArray | None = None + # Joint commands (explicit actuator model) + self._computed_torque_ta: ProxyArray | None = None + self._applied_torque_ta: ProxyArray | None = None + # Joint properties + self._joint_stiffness_ta: ProxyArray | None = None + self._joint_damping_ta: ProxyArray | None = None + self._joint_armature_ta: ProxyArray | None = None + self._joint_friction_coeff_ta: ProxyArray | None = None + self._joint_pos_limits_ta: ProxyArray | None = None + self._joint_vel_limits_ta: ProxyArray | None = None + self._joint_effort_limits_ta: ProxyArray | None = None + # Joint properties (custom) + self._soft_joint_pos_limits_ta: ProxyArray | None = None + self._soft_joint_vel_limits_ta: ProxyArray | None = None + self._gear_ratio_ta: ProxyArray | None = None + # Fixed tendon properties + self._fixed_tendon_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_damping_ta: ProxyArray | None = None + self._fixed_tendon_limit_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_rest_length_ta: ProxyArray | None = None + self._fixed_tendon_offset_ta: ProxyArray | None = None + self._fixed_tendon_pos_limits_ta: ProxyArray | None = None + # Spatial tendon properties + self._spatial_tendon_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_damping_ta: ProxyArray | None = None + self._spatial_tendon_limit_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_offset_ta: ProxyArray | None = None + # Root state (timestamped) + self._root_link_pose_w_ta: ProxyArray | None = None + self._root_link_vel_w_ta: ProxyArray | None = None + self._root_com_pose_w_ta: ProxyArray | None = None + self._root_com_vel_w_ta: ProxyArray | None = None + # Body state (timestamped) + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + # Body properties + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + # Joint state (timestamped) + self._joint_pos_ta: ProxyArray | None = None + self._joint_vel_ta: ProxyArray | None = None + self._joint_acc_ta: ProxyArray | None = None + # Derived properties (timestamped) + self._projected_gravity_b_ta: ProxyArray | None = None + self._heading_w_ta: ProxyArray | None = None + self._root_link_lin_vel_b_ta: ProxyArray | None = None + self._root_link_ang_vel_b_ta: ProxyArray | None = None + self._root_com_lin_vel_b_ta: ProxyArray | None = None + self._root_com_ang_vel_b_ta: ProxyArray | None = None + # Sliced properties (root link) + self._root_link_pos_w_ta: ProxyArray | None = None + self._root_link_quat_w_ta: ProxyArray | None = None + self._root_link_lin_vel_w_ta: ProxyArray | None = None + self._root_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (root com) + self._root_com_pos_w_ta: ProxyArray | None = None + self._root_com_quat_w_ta: ProxyArray | None = None + self._root_com_lin_vel_w_ta: ProxyArray | None = None + self._root_com_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body link) + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body com) + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + # Sliced properties (body com in body frame) + self._body_com_pos_b_ta: ProxyArray | None = None + self._body_com_quat_b_ta: ProxyArray | None = None + # Deprecated state-concat properties + self._default_root_state_ta: ProxyArray | None = None + + """ + Internal helpers -- Bindings. + """ + + def _get_binding(self, tensor_type: int): + """Return a binding, lazily creating it if a binding_getter was provided.""" + b = self._bindings.get(tensor_type) + if b is not None: + return b + if self._binding_getter is not None: + b = self._binding_getter(tensor_type) + if b is not None: + self._bindings[tensor_type] = b + return b + return None + + def _get_read_scratch(self, tensor_type: int) -> wp.array | None: + """Return a pre-allocated flat float32 scratch buffer for a binding. + + Allocated once on first use, then reused every step. CPU-only + bindings (body properties, DOF properties) get CPU scratch; GPU + bindings get GPU scratch. wp.copy handles cross-device transfer + when the destination buffer lives on a different device. + """ + if tensor_type in self._read_scratch: + return self._read_scratch[tensor_type] + binding = self._get_binding(tensor_type) + if binding is None: + return None + from isaaclab_ovphysx.tensor_types import _CPU_ONLY_TYPES + + dev = "cpu" if tensor_type in _CPU_ONLY_TYPES else self.device + buf = wp.zeros(binding.shape, dtype=wp.float32, device=dev) + self._read_scratch[tensor_type] = buf + return buf + + def _get_read_view(self, tensor_type: int, wp_array: wp.array, floats_per_elem: int = 0) -> wp.array | None: + """Return a stable float32 view of a warp buffer for reading from a binding. + + For structured-dtype buffers (transformf, spatial_vectorf), the view + reinterprets the same GPU memory as a flat float32 array matching the + binding's shape. For plain float32 buffers, returns the array as-is. + + The returned view is cached so that ``binding.read(view)`` sees the + same object on every call, enabling the binding's internal read cache. + """ + if not hasattr(self, "_read_view_cache"): + self._read_view_cache = {} + cache_key = (tensor_type, wp_array.ptr) + cached = self._read_view_cache.get(cache_key) + if cached is not None: + return cached + + binding = self._get_binding(tensor_type) + if binding is None: + self._read_view_cache[cache_key] = None + return None + + if floats_per_elem > 0: + view = wp.array( + ptr=wp_array.ptr, + shape=binding.shape, + dtype=wp.float32, + device=str(wp_array.device), + copy=False, + ) + else: + view = wp_array + + self._read_view_cache[cache_key] = view + return view + + def _read_binding_into_flat(self, tensor_type: int, wp_array: wp.array) -> None: + """Read a flat binding (no structured dtype) into an existing warp array. + + Reads directly into the target array -- no scratch buffer, no extra copy. + """ + binding = self._get_binding(tensor_type) + if binding is None: + return + binding.read(wp_array) + + def _read_binding_into_buf(self, tensor_type: int, buf: TimestampedBuffer) -> None: + """Read from an ovphysx binding into a TimestampedBuffer, skipping if fresh.""" + if buf.timestamp >= self._sim_timestamp: + return + view = self._get_read_view(tensor_type, buf.data) + if view is None: + return + self._get_binding(tensor_type).read(view) + buf.timestamp = self._sim_timestamp + + def _read_transform_binding(self, tensor_type: int, buf: TimestampedBuffer) -> None: + """Read a pose binding (float32 view of transformf buffer), skipping if fresh.""" + if buf.timestamp >= self._sim_timestamp: + return + view = self._get_read_view(tensor_type, buf.data, 7) + if view is None: + return + self._get_binding(tensor_type).read(view) + buf.timestamp = self._sim_timestamp + + def _read_spatial_vector_binding(self, tensor_type: int, buf: TimestampedBuffer) -> None: + """Read a velocity binding (float32 view of spatial_vectorf buffer), skipping if fresh.""" + if buf.timestamp >= self._sim_timestamp: + return + view = self._get_read_view(tensor_type, buf.data, 6) + if view is None: + return + self._get_binding(tensor_type).read(view) + buf.timestamp = self._sim_timestamp + + """ + Internal helpers -- Extraction. + """ + + def _get_pos_from_transform(self, transform: wp.array) -> wp.array: + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + + def _get_quat_from_transform(self, transform: wp.array) -> wp.array: + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + + def _get_lin_vel_from_spatial_vector(self, sv: wp.array) -> wp.array: + return wp.array( + ptr=sv.ptr, + shape=sv.shape, + dtype=wp.vec3f, + strides=sv.strides, + device=self.device, + ) + + def _get_ang_vel_from_spatial_vector(self, sv: wp.array) -> wp.array: + return wp.array( + ptr=sv.ptr + 3 * 4, + shape=sv.shape, + dtype=wp.vec3f, + strides=sv.strides, + device=self.device, + ) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/kernels.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/kernels.py new file mode 100644 index 000000000000..b93c4e6d4b41 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/assets/articulation/kernels.py @@ -0,0 +1,217 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for the ovphysx articulation.""" + +import warp as wp + + +@wp.kernel +def _body_wrench_to_world( + force_b: wp.array(dtype=wp.vec3f, ndim=2), + torque_b: wp.array(dtype=wp.vec3f, ndim=2), + poses: wp.array(dtype=wp.transformf, ndim=2), + wrench_out: wp.array(dtype=wp.float32, ndim=3), +): + """Rotate body-frame force/torque to world frame and pack into [N, L, 9].""" + i, j = wp.tid() + q = wp.transform_get_rotation(poses[i, j]) + f_w = wp.quat_rotate(q, force_b[i, j]) + t_w = wp.quat_rotate(q, torque_b[i, j]) + wrench_out[i, j, 0] = f_w[0] + wrench_out[i, j, 1] = f_w[1] + wrench_out[i, j, 2] = f_w[2] + wrench_out[i, j, 3] = t_w[0] + wrench_out[i, j, 4] = t_w[1] + wrench_out[i, j, 5] = t_w[2] + p_w = wp.transform_get_translation(poses[i, j]) + wrench_out[i, j, 6] = p_w[0] + wrench_out[i, j, 7] = p_w[1] + wrench_out[i, j, 8] = p_w[2] + + +@wp.kernel +def _scatter_rows_partial( + dst: wp.array2d(dtype=wp.float32), + src: wp.array2d(dtype=wp.float32), + ids: wp.array(dtype=wp.int32), +): + """dst[ids[i], j] = src[i, j] -- scatter partial [K,C] into full [N,C] on GPU.""" + i, j = wp.tid() + dst[ids[i], j] = src[i, j] + + +@wp.func +def compute_soft_joint_pos_limits_func( + joint_pos_limits: wp.vec2f, + soft_limit_factor: wp.float32, +): + """Compute soft joint position limits from hard limits.""" + joint_pos_mean = (joint_pos_limits[0] + joint_pos_limits[1]) / 2.0 + joint_pos_range = joint_pos_limits[1] - joint_pos_limits[0] + return wp.vec2f( + joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor, + joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor, + ) + + +@wp.kernel +def update_soft_joint_pos_limits( + joint_pos_limits: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), +): + """Update soft joint position limits from hard limits and a scale factor.""" + i, j = wp.tid() + soft_joint_pos_limits[i, j] = compute_soft_joint_pos_limits_func(joint_pos_limits[i, j], soft_limit_factor) + + +""" +Data-layer kernels (used by ArticulationData). +""" + + +@wp.kernel +def _fd_joint_acc( + cur_vel: wp.array2d(dtype=wp.float32), + prev_vel: wp.array2d(dtype=wp.float32), + inv_dt: float, + out: wp.array2d(dtype=wp.float32), +): + """Compute joint acceleration via finite differencing and update previous velocity. + + Args: + cur_vel: Current joint velocities. Shape is (num_envs, num_joints). + prev_vel: Previous joint velocities (updated in-place). Shape is (num_envs, num_joints). + inv_dt: Inverse time step (1/dt) [1/s]. + out: Output joint accelerations. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out[i, j] = (cur_vel[i, j] - prev_vel[i, j]) * inv_dt + prev_vel[i, j] = cur_vel[i, j] + + +@wp.kernel +def _copy_first_body( + body_vel: wp.array(dtype=wp.spatial_vectorf, ndim=2), + root_vel: wp.array(dtype=wp.spatial_vectorf), +): + """Copy the first body's velocity to the root velocity buffer. + + Args: + body_vel: Body velocities. Shape is (num_envs, num_bodies). + root_vel: Output root velocities. Shape is (num_envs,). + """ + i = wp.tid() + root_vel[i] = body_vel[i, 0] + + +@wp.kernel +def _compose_root_com_pose( + link_pose: wp.array(dtype=wp.transformf), + com_pose_b: wp.array(dtype=wp.transformf, ndim=2), + com_pose_w: wp.array(dtype=wp.transformf), +): + """Compose root link pose with body-frame CoM offset to get world-frame root CoM pose. + + Args: + link_pose: Root link poses in world frame. Shape is (num_envs,). + com_pose_b: Body-frame CoM offsets. Shape is (num_envs, num_bodies). + com_pose_w: Output world-frame root CoM poses. Shape is (num_envs,). + """ + i = wp.tid() + com_pose_w[i] = wp.transform_multiply(link_pose[i], com_pose_b[i, 0]) + + +@wp.kernel +def _compose_body_com_poses( + link_pose: wp.array(dtype=wp.transformf, ndim=2), + com_pose_b: wp.array(dtype=wp.transformf, ndim=2), + com_pose_w: wp.array(dtype=wp.transformf, ndim=2), +): + """Compose body link poses with body-frame CoM offsets to get world-frame CoM poses. + + Args: + link_pose: Body link poses in world frame. Shape is (num_envs, num_bodies). + com_pose_b: Body-frame CoM offsets. Shape is (num_envs, num_bodies). + com_pose_w: Output world-frame body CoM poses. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + com_pose_w[i, j] = wp.transform_multiply(link_pose[i, j], com_pose_b[i, j]) + + +@wp.kernel +def _projected_gravity( + gravity_vec_w: wp.array(dtype=wp.vec3f), + root_pose: wp.array(dtype=wp.transformf), + out: wp.array(dtype=wp.vec3f), +): + """Project world-frame gravity direction into the root body frame. + + Args: + gravity_vec_w: Gravity unit vector per instance in world frame. Shape is (num_envs,). + root_pose: Root link poses in world frame. Shape is (num_envs,). + out: Output projected gravity in body frame. Shape is (num_envs,). + """ + i = wp.tid() + q = wp.transform_get_rotation(root_pose[i]) + out[i] = wp.quat_rotate_inv(q, gravity_vec_w[i]) + + +@wp.kernel +def _compute_heading( + forward_vec_b: wp.array(dtype=wp.vec3f), + root_pose: wp.array(dtype=wp.transformf), + out: wp.array(dtype=wp.float32), +): + """Compute yaw heading angle from the forward direction rotated into the world frame. + + Args: + forward_vec_b: Forward direction in body frame per instance. Shape is (num_envs,). + root_pose: Root link poses in world frame. Shape is (num_envs,). + out: Output heading angles [rad]. Shape is (num_envs,). + """ + i = wp.tid() + q = wp.transform_get_rotation(root_pose[i]) + forward = wp.quat_rotate(q, forward_vec_b[i]) + out[i] = wp.atan2(forward[1], forward[0]) + + +@wp.kernel +def _world_vel_to_body_lin( + root_pose: wp.array(dtype=wp.transformf), + vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.vec3f), +): + """Rotate world-frame linear velocity into the root body frame. + + Args: + root_pose: Root link poses in world frame. Shape is (num_envs,). + vel_w: Spatial velocities in world frame. Shape is (num_envs,). + out: Output linear velocity in body frame. Shape is (num_envs,). + """ + i = wp.tid() + q = wp.transform_get_rotation(root_pose[i]) + lin = wp.spatial_top(vel_w[i]) + out[i] = wp.quat_rotate_inv(q, lin) + + +@wp.kernel +def _world_vel_to_body_ang( + root_pose: wp.array(dtype=wp.transformf), + vel_w: wp.array(dtype=wp.spatial_vectorf), + out: wp.array(dtype=wp.vec3f), +): + """Rotate world-frame angular velocity into the root body frame. + + Args: + root_pose: Root link poses in world frame. Shape is (num_envs,). + vel_w: Spatial velocities in world frame. Shape is (num_envs,). + out: Output angular velocity in body frame. Shape is (num_envs,). + """ + i = wp.tid() + q = wp.transform_get_rotation(root_pose[i]) + ang = wp.spatial_bottom(vel_w[i]) + out[i] = wp.quat_rotate_inv(q, ang) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/__init__.py new file mode 100644 index 000000000000..3b9a12007792 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from .ovphysx_replicate import ovphysx_replicate + +__all__ = ["ovphysx_replicate"] diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/ovphysx_replicate.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/ovphysx_replicate.py new file mode 100644 index 000000000000..7c46a6060b88 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/cloner/ovphysx_replicate.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""OvPhysX replication hook for IsaacLab's cloning pipeline. + +Called by :func:`isaaclab.cloner.clone_from_template` in place of the PhysX +or Newton replicators. Unlike those replicators, ovphysx.PhysX does not exist +yet at this point in the scene setup — it is created lazily on the first +:meth:`~isaaclab_ovphysx.physics.OvPhysxManager.reset` call. + +This function records a *pending clone* on :class:`OvPhysxManager`. When +:meth:`~isaaclab_ovphysx.physics.OvPhysxManager._warmup_and_load` eventually +creates the ``PhysX`` instance and loads the USD stage (which contains only +``env_0`` physics — env_1..N are empty Xform containers), it replays every +pending clone via ``physx.clone(source, targets)`` to create the remaining +environments entirely inside the physics runtime without touching USD. +""" + +from __future__ import annotations + +import torch + +from pxr import Usd + + +def ovphysx_replicate( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + device: str = "cpu", +) -> None: + """Record a physics clone for later execution by OvPhysxManager. + + Translates the generic IsaacLab source/destination/mapping representation + into ``(source_path, [target_paths])`` pairs and registers them on + :class:`~isaaclab_ovphysx.physics.OvPhysxManager`. The actual + ``physx.clone()`` calls happen in ``_warmup_and_load()`` after the USD + stage has been loaded. + + The ``positions`` parameter contains the 2-D grid world positions for all + environments. They are forwarded to the C++ clone plugin so that the + parent Xform prim for each clone (e.g. ``/World/envs/env_N``) is placed at + the correct grid location in Fabric. The exported USD stage only contains + ``env_0``; without explicit positions all clone parents would be created at + the origin, causing all articulations to pile up and the GPU solver to + diverge on the first warmup step. + + Args: + stage: USD stage (not modified by this function). + sources: Source prim paths (one per prototype). + destinations: Destination path templates with ``"{}"`` for env index. + env_ids: Environment indices tensor. + mapping: ``(num_sources, num_envs)`` bool tensor; True selects which + environments receive each source. + positions: World (x, y, z) positions for every environment, shape + ``[num_envs, 3]``. Used to place clone parent Xform prims in + Fabric at correct grid locations. + quaternions: Ignored — orientations are set at first reset. + device: Torch device (unused; kept for API compatibility). + """ + # Deferred import to avoid circular dependency at module load time. + from isaaclab_ovphysx.physics.ovphysx_manager import OvPhysxManager + + for i, src in enumerate(sources): + active_env_ids = env_ids[mapping[i]].tolist() + + # Exclude the source environment from its own target list. + # physx.clone() is only needed for *other* envs; the source env_0 is + # already loaded from USD. We detect self by matching the source path + # against the destination template. + pre, _, suf = destinations[i].partition("{}") + self_env_id: int | None = None + candidate = src.removeprefix(pre).removesuffix(suf) + if candidate.isdigit(): + self_env_id = int(candidate) + + # Build parallel (targets, parent_positions) lists for non-self envs. + # parent_positions[j] is the world (x,y,z) for the parent Xform of + # targets[j] (e.g. /World/envs/env_N). These positions are passed to + # the C++ clone plugin so that env_N Xform prims — absent from the + # exported USD stage — are created at the correct 2-D grid location + # rather than the origin. Without this, all clones pile up at env_0's + # position during the warmup physics step and the GPU solver diverges. + targets: list[str] = [] + parent_positions: list[tuple[float, float, float]] = [] + for e in active_env_ids: + if e == self_env_id: + continue + targets.append(destinations[i].format(e)) + if positions is not None and e < len(positions): + pos = positions[e] + parent_positions.append((float(pos[0]), float(pos[1]), float(pos[2]))) + else: + parent_positions.append((0.0, 0.0, 0.0)) + + if targets: + OvPhysxManager.register_clone(src, targets, parent_positions) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.py new file mode 100644 index 000000000000..853ebc18725a --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""OvPhysX physics manager implementation.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.pyi b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.pyi new file mode 100644 index 000000000000..bfae28a137eb --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "OvPhysxManager", + "OvPhysxCfg", +] + +from .ovphysx_manager import OvPhysxManager +from .ovphysx_manager_cfg import OvPhysxCfg diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager.py new file mode 100644 index 000000000000..9063078e45b3 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager.py @@ -0,0 +1,330 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""OvPhysX Manager for Isaac Lab. + +This module manages an ovphysx-based physics simulation lifecycle without Kit dependencies. +It exports the current USD stage to disk, loads it into ovphysx, and steps the simulation +using the ovphysx C/Python API. +""" + +from __future__ import annotations + +import atexit +import logging +import os +import tempfile +from typing import TYPE_CHECKING, Any, ClassVar + +from isaaclab.physics import PhysicsEvent, PhysicsManager + +if TYPE_CHECKING: + from isaaclab.sim.simulation_context import SimulationContext + + from .ovphysx_manager_cfg import OvPhysxCfg + +__all__ = ["OvPhysxManager"] + +logger = logging.getLogger(__name__) + + +class OvPhysxManager(PhysicsManager): + """Manages an ovphysx-backed physics simulation lifecycle. + + Unlike PhysxManager, this manager does not depend on Kit, Carbonite, or the + Omniverse timeline. It drives the simulation entirely through the ovphysx + Python wheel. + + Lifecycle: initialize() -> reset() -> step() (repeated) -> close() + """ + + _cfg: ClassVar[OvPhysxCfg | None] = None + _physx: ClassVar[Any] = None # ovphysx.PhysX (lazy import) + _usd_handle: ClassVar[Any] = None + _stage_path: ClassVar[str | None] = None + _warmup_done: ClassVar[bool] = False + _tmp_dir: ClassVar[tempfile.TemporaryDirectory | None] = None + # Pending (source, targets, parent_positions) triples registered by + # ovphysx_replicate() before the PhysX instance exists. Replayed via + # physx.clone() in _warmup_and_load(). + # parent_positions is a list of (x, y, z) tuples — one per target. + _pending_clones: ClassVar[list[tuple[str, list[str], list[tuple[float, float, float]]]]] = [] + _atexit_registered: ClassVar[bool] = False + + @classmethod + def register_clone( + cls, source: str, targets: list[str], parent_positions: list[tuple[float, float, float]] | None = None + ) -> None: + """Register a (source, targets, parent_positions) triple for replay via physx.clone(). + + Called by :func:`~isaaclab_ovphysx.cloner.ovphysx_replicate` during + scene setup, before the PhysX instance exists. The clone operations + are executed in :meth:`_warmup_and_load` immediately after + ``physx.add_usd()``. + + Args: + source: Source prim path (env_0 articulation root). + targets: Target prim paths for env_1..N. + parent_positions: World positions (x, y, z) for each target's parent + Xform prim (e.g. /World/envs/env_N). When provided the clone + plugin sets those transforms in Fabric so all environments start + at their correct grid locations, preventing solver divergence + during the warmup step. + """ + cls._pending_clones.append((source, targets, parent_positions or [])) + + @classmethod + def initialize(cls, sim_context: SimulationContext) -> None: + """Initialize the physics manager with simulation context. + + This stores the config and device but does not create the ovphysx + instance yet -- the USD stage may not be fully populated at this point. + The actual creation happens lazily in :meth:`reset`. + """ + super().initialize(sim_context) + cls._warmup_done = False + cls._physx = None + cls._usd_handle = None + cls._stage_path = None + cls._pending_clones = [] + + @classmethod + def reset(cls, soft: bool = False) -> None: + """Reset physics simulation. + + On the first (non-soft) reset the method: + - Exports the current USD stage to a temp file + - Creates the ovphysx.PhysX instance + - Loads the exported USD + - Warms up GPU buffers (if on CUDA) + - Dispatches PHYSICS_READY + """ + if not soft: + if not cls._warmup_done: + cls._warmup_and_load() + cls.dispatch_event(PhysicsEvent.PHYSICS_READY, payload={}) + + @classmethod + def forward(cls) -> None: + """No-op -- ovphysx does not have a fabric/rendering pipeline.""" + pass + + @classmethod + def step(cls) -> None: + """Step the simulation by one physics timestep.""" + if cls._physx is None: + return + dt = cls.get_physics_dt() + sim_time = PhysicsManager._sim_time + cls._physx.step_sync(dt=dt, sim_time=sim_time) + PhysicsManager._sim_time += dt + + @classmethod + def close(cls) -> None: + """Release ovphysx resources and clean up.""" + cls._release_physx() + + cls._usd_handle = None + cls._stage_path = None + cls._warmup_done = False + + if cls._tmp_dir is not None: + cls._tmp_dir.cleanup() + cls._tmp_dir = None + + super().close() + + @classmethod + def _release_physx(cls) -> None: + """Release the ovphysx instance if it exists. Safe to call multiple times. + + With ovphysx<=0.3.7 and Kit's pxr in the same process, physx.release() + deadlocks due to dual-Carbonite static destructor races. Skip the + native release and let os._exit() (registered via atexit) terminate the + process; GPU resources are reclaimed by the driver. + """ + if cls._physx is not None: + cls._physx = None + + @classmethod + def get_physx_instance(cls) -> Any: + """Return the underlying ovphysx.PhysX instance (or None if not yet created).""" + return cls._physx + + # ------------------------------------------------------------------ + # Internal helpers + # ------------------------------------------------------------------ + + @classmethod + def _warmup_and_load(cls) -> None: + """Export the USD stage, create the ovphysx instance, and load the scene.""" + sim = PhysicsManager._sim + if sim is None: + raise RuntimeError("OvPhysxManager: SimulationContext is not set.") + + device_str = PhysicsManager._device + if "cuda" in device_str: + parts = device_str.split(":") + gpu_index = int(parts[1]) if len(parts) > 1 else 0 + ovphysx_device = "gpu" + else: + gpu_index = 0 + ovphysx_device = "cpu" + + scene_prim = sim.stage.GetPrimAtPath(sim.cfg.physics_prim_path) + if scene_prim.IsValid() and ovphysx_device == "gpu": + cls._configure_physx_scene_prim(scene_prim, PhysicsManager._cfg) + + # Export the current USD stage to a temporary file so ovphysx can load it. + cls._tmp_dir = tempfile.TemporaryDirectory(prefix="isaaclab_ovphysx_") + stage_file = os.path.join(cls._tmp_dir.name, "scene.usda") + sim.stage.Export(stage_file) + cls._stage_path = stage_file + logger.info("OvPhysxManager: exported USD stage to %s", stage_file) + + # HACK (temporary): hide pxr from sys.modules during ovphysx bootstrap. + # IsaacSim's pxr reports version 0.25.5 (pip convention) while ovphysx + # expects 25.11 (OpenUSD release convention). Hiding pxr causes + # ovphysx.check_usd_compatibility() to skip the Python-side version + # check. This should go away once ovphysx ships a namespaced USD + # copy with isolated symbols (same "import pxr" API, no collision). + import sys as _sys + + _hidden_pxr = {k: _sys.modules.pop(k) for k in list(_sys.modules) if k == "pxr" or k.startswith("pxr.")} + try: + import ovphysx as _ovphysx_bootstrap + + _ovphysx_bootstrap.bootstrap() + finally: + _sys.modules.update(_hidden_pxr) + + import ovphysx + + cls._physx = ovphysx.PhysX(device=ovphysx_device, gpu_index=gpu_index) + + # Without worker threads the stepper runs simulate()+fetchResults() + # synchronously, blocking the calling thread for the full GPU step time. + # + # COMPAT(ovphysx<=0.3.7): The public 0.3.7 wheel exposes typed config + # setters (set_config_int32 etc.) rather than the Carbonite-settings-based + # set_setting() added in newer internal builds. This guard keeps both + # working. REVERT once the public wheel ships set_setting(). + if hasattr(cls._physx, "set_setting"): + cls._physx.set_setting("/persistent/physics/numThreads", "8") + cls._physx.set_setting("/physics/physxDispatcher", "true") + cls._physx.set_setting("/physics/updateToUsd", "false") + cls._physx.set_setting("/physics/updateVelocitiesToUsd", "false") + cls._physx.set_setting("/physics/updateParticlesToUsd", "false") + else: + cls._physx.set_config_int32(ovphysx.ConfigInt32.NUM_THREADS, 8) + + # FIXME(malesiani): re-evaluate this when carbonite ships an isolated copy. + # At process exit, two Carbonite instances are in memory: + # 1. ovphysx's bundled libcarb.so (RPATH $ORIGIN/../plugins/) + # 2. kit's libcarb.so (pulled in via LD_LIBRARY_PATH by Fabric/usdrt plugins) + # + # Why does kit's libcarb end up here even though we skip AppLauncher? + # Note: AppLauncher always starts the full Kit runtime — even headless=True + # still loads Kit. "Kitless" in IsaacLab means AppLauncher is not used at all. + # But we still import `pxr` from IsaacSim's Kit USD build. The moment `import pxr` runs, the Kit USD + # runtime loads Fabric infrastructure (omni.physx.fabric.plugin, usdrt.population.plugin) + # from kit's plugin directories, which are on LD_LIBRARY_PATH via setup_python_env.sh. + # Those plugins link against kit's libcarb.so, so kit's Carbonite lands in memory + # purely from `import pxr`, regardless of whether the Kit App is launched. + # + # Both Carbonite instances register C++ static destructors. At process exit those + # destructors race and segfault. The workaround is to release ovphysx cleanly + # (so GPU resources are freed) and then call os._exit() to skip the static destructor + # phase entirely. os._exit() terminates the process without running C++ atexit + # handlers or static destructors, sidestepping the conflict. + # + # Proper long-term fix: ovphysx ships a fully namespace-isolated Carbonite + # (different soname / hidden visibility) so its symbols never collide with kit's. + if not cls._atexit_registered: + + def _atexit_release_and_exit(): + # Skip physx.release() -- it deadlocks due to dual-Carbonite + # static destructor races (ovphysx's bundled libcarb vs Kit's). + # GPU resources are reclaimed by the driver at process exit. + os._exit(0) + + atexit.register(_atexit_release_and_exit) + cls._atexit_registered = True + + usd_handle, op_idx = cls._physx.add_usd(stage_file) + cls._physx.wait_op(op_idx) + cls._usd_handle = usd_handle + logger.info("OvPhysxManager: loaded USD into ovphysx (device=%s)", ovphysx_device) + + # Replay pending physics clones registered by ovphysx_replicate(). + # The USD stage contains only env_0's physics; env_1..N are empty + # Xform containers. physx.clone() creates the remaining environments + # in the physics runtime without modifying the USD file. + if cls._pending_clones: + # ovphysx_replicate() only registers pending clones when clone_usd=False, + # meaning the USD contains only env_0 physics and physx.clone() is required + # to populate env_1..N in the physics runtime. Execute unconditionally — + # no USD content heuristic is needed. + for source, targets, parent_positions in cls._pending_clones: + logger.info( + "OvPhysxManager: cloning %s -> %d targets (%s ... %s)", + source, + len(targets), + targets[0], + targets[-1], + ) + if parent_positions: + transforms = [(x, y, z, 0.0, 0.0, 0.0, 1.0) for x, y, z in parent_positions] + else: + transforms = None + op_idx = cls._physx.clone(source, targets, transforms) + cls._physx.wait_op(op_idx) + cls._pending_clones = [] + + if ovphysx_device == "gpu": + cls._physx.warmup_gpu() + + cls.dispatch_event(PhysicsEvent.MODEL_INIT, payload={}) + cls._warmup_done = True + + @staticmethod + def _configure_physx_scene_prim(scene_prim, cfg) -> None: + """Apply PhysxSceneAPI schema and GPU dynamics attributes to a scene prim. + + The PhysxSchema USD plugin may not be loaded in standalone ovphysx mode, + so we write the apiSchemas list entry and scene attributes directly via + raw Sdf metadata manipulation instead of using the high-level USD API. + + Without these attributes PhysX defaults to CPU broadphase even when + ovphysx is created with device="gpu". + """ + from pxr import Sdf + + schemas = Sdf.TokenListOp() + current = scene_prim.GetMetadata("apiSchemas") or Sdf.TokenListOp() + items = list(current.prependedItems) if current.prependedItems else [] + if "PhysxSceneAPI" not in items: + items.append("PhysxSceneAPI") + schemas.prependedItems = items + scene_prim.SetMetadata("apiSchemas", schemas) + scene_prim.CreateAttribute("physxScene:enableGPUDynamics", Sdf.ValueTypeNames.Bool).Set(True) + scene_prim.CreateAttribute("physxScene:broadphaseType", Sdf.ValueTypeNames.String).Set("GPU") + + if cfg is not None: + for attr, val in [ + ("gpuMaxRigidContactCount", cfg.gpu_max_rigid_contact_count), + ("gpuMaxRigidPatchCount", cfg.gpu_max_rigid_patch_count), + ("gpuFoundLostPairsCapacity", cfg.gpu_found_lost_pairs_capacity), + ("gpuFoundLostAggregatePairsCapacity", cfg.gpu_found_lost_aggregate_pairs_capacity), + ("gpuTotalAggregatePairsCapacity", cfg.gpu_total_aggregate_pairs_capacity), + ("gpuCollisionStackSize", cfg.gpu_collision_stack_size), + ]: + scene_prim.CreateAttribute(f"physxScene:{attr}", Sdf.ValueTypeNames.UInt).Set(val) + + # Propagate scene query support from SimulationCfg so omni.physx creates + # the scene with the correct query mode. OvPhysxCfg does not carry this field. + sim_cfg = PhysicsManager._sim.cfg if PhysicsManager._sim is not None else None + enable_sq = getattr(sim_cfg, "enable_scene_query_support", False) + scene_prim.CreateAttribute("physxScene:enableSceneQuerySupport", Sdf.ValueTypeNames.Bool).Set(enable_sq) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager_cfg.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager_cfg.py new file mode 100644 index 000000000000..c74dc56209ea --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/physics/ovphysx_manager_cfg.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the OvPhysX physics manager.""" + +from __future__ import annotations + +from isaaclab.physics import PhysicsCfg +from isaaclab.utils import configclass + + +@configclass +class OvPhysxCfg(PhysicsCfg): + """Configuration for the ovphysx physics manager. + + PhysX scene-level parameters (solver iterations, GPU buffer sizes, etc.) are + read from the USD PhysicsScene prim. Only ovphysx-specific settings that are + not captured in USD live here. + """ + + class_type = "{DIR}.ovphysx_manager:OvPhysxManager" + + gpu_max_rigid_contact_count: int = 2**23 + """Size of the GPU rigid-body contact buffer.""" + + gpu_max_rigid_patch_count: int = 5 * 2**15 + """Size of the GPU rigid-body patch buffer.""" + + gpu_found_lost_pairs_capacity: int = 2**21 + """Capacity for GPU found/lost broadphase pairs.""" + + gpu_found_lost_aggregate_pairs_capacity: int = 2**25 + """Capacity for GPU found/lost *aggregate* broadphase pairs.""" + + gpu_total_aggregate_pairs_capacity: int = 2**21 + """Capacity for total GPU aggregate broadphase pairs.""" + + gpu_collision_stack_size: int = 2**26 + """GPU collision stack size in bytes.""" diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/tensor_types.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/tensor_types.py new file mode 100644 index 000000000000..44a5cadeeb0a --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/tensor_types.py @@ -0,0 +1,334 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""IsaacLab re-exports of ovphysx TensorType with short backward-compat aliases. + +Import TensorType directly for new code: + from ovphysx.types import TensorType + +Or use the module-level short aliases (existing code pattern): + import isaaclab_ovphysx.tensor_types as TT + TT.DOF_STIFFNESS # resolves to TensorType.ARTICULATION_DOF_STIFFNESS + +ovphysx.types is pure Python with zero native dependencies, so this module is +always safe to import regardless of USD state or native library loading. +""" + +from ovphysx.types import TensorType # noqa: F401 — re-exported for new code + +_TT = TensorType # shorter reference for alias block + +# Short aliases -- existing code using ``TT.DOF_STIFFNESS`` etc. continues to work. +# All values are IntEnum members (== plain ints) of TensorType. + +# fmt: off -- aligned columns are intentional; do not reformat + +""" +Root state (GPU) +""" + +ROOT_POSE = _TT.ARTICULATION_ROOT_POSE +"""Root pose of each articulation instance. + +Shape is ``[N, 7]``, dtype ``float32`` (px, py, pz, qx, qy, qz, qw). +""" + +ROOT_VELOCITY = _TT.ARTICULATION_ROOT_VELOCITY +"""Root velocity of each articulation instance. + +Shape is ``[N, 6]``, dtype ``float32`` (vx, vy, vz, wx, wy, wz). +""" + +""" +Link (body) state (GPU) +""" + +LINK_POSE = _TT.ARTICULATION_LINK_POSE +"""Pose of every link (body) in each articulation instance. + +Shape is ``[N, L, 7]``, dtype ``float32``. +""" + +LINK_VELOCITY = _TT.ARTICULATION_LINK_VELOCITY +"""Velocity of every link (body) in each articulation instance. + +Shape is ``[N, L, 6]``, dtype ``float32``. +""" + +LINK_ACCELERATION = _TT.ARTICULATION_LINK_ACCELERATION +"""Acceleration of every link (body) in each articulation instance. + +Shape is ``[N, L, 6]``, dtype ``float32``. +""" + +""" +DOF state (GPU) +""" + +DOF_POSITION = _TT.ARTICULATION_DOF_POSITION +"""DOF (joint) positions. + +Shape is ``[N, D]``, dtype ``float32`` [m or rad]. +""" + +DOF_VELOCITY = _TT.ARTICULATION_DOF_VELOCITY +"""DOF (joint) velocities. + +Shape is ``[N, D]``, dtype ``float32`` [m/s or rad/s]. +""" + +""" +DOF command targets (GPU, write-only) +""" + +DOF_POSITION_TARGET = _TT.ARTICULATION_DOF_POSITION_TARGET +"""DOF position targets for the PD controller. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_VELOCITY_TARGET = _TT.ARTICULATION_DOF_VELOCITY_TARGET +"""DOF velocity targets for the PD controller. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_ACTUATION_FORCE = _TT.ARTICULATION_DOF_ACTUATION_FORCE +"""DOF actuation (effort) forces applied directly. + +Shape is ``[N, D]``, dtype ``float32`` [N or N·m]. +""" + +""" +DOF properties (CPU) +""" + +DOF_STIFFNESS = _TT.ARTICULATION_DOF_STIFFNESS +"""DOF stiffness (spring constant for PD controller). + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_DAMPING = _TT.ARTICULATION_DOF_DAMPING +"""DOF damping (damper constant for PD controller). + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_LIMIT = _TT.ARTICULATION_DOF_LIMIT +"""DOF position limits (lower, upper). + +Shape is ``[N, D, 2]``, dtype ``float32``. +""" + +DOF_MAX_VELOCITY = _TT.ARTICULATION_DOF_MAX_VELOCITY +"""DOF maximum velocity. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_MAX_FORCE = _TT.ARTICULATION_DOF_MAX_FORCE +"""DOF maximum force. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_ARMATURE = _TT.ARTICULATION_DOF_ARMATURE +"""DOF armature (added inertia on the diagonal of the joint-space mass matrix). + +Shape is ``[N, D]``, dtype ``float32``. +""" + +DOF_FRICTION_PROPERTIES = _TT.ARTICULATION_DOF_FRICTION_PROPERTIES +"""DOF friction properties (static, dynamic, viscous). + +Shape is ``[N, D, 3]``, dtype ``float32``. +""" + +""" +External wrench (GPU, write-only) +""" + +LINK_WRENCH = _TT.ARTICULATION_LINK_WRENCH +"""External wrench applied to each link. + +Shape is ``[N, L, 9]``, dtype ``float32`` (fx, fy, fz, tx, ty, tz, px, py, pz). +""" + +""" +Body properties (CPU) +""" + +BODY_MASS = _TT.ARTICULATION_BODY_MASS +"""Mass of each body (link). + +Shape is ``[N, L]``, dtype ``float32`` [kg]. +""" + +BODY_COM_POSE = _TT.ARTICULATION_BODY_COM_POSE +"""Center-of-mass pose of each body in local frame. + +Shape is ``[N, L, 7]``, dtype ``float32``. +""" + +BODY_INERTIA = _TT.ARTICULATION_BODY_INERTIA +"""Inertia tensor of each body. + +Shape is ``[N, L, 9]``, dtype ``float32`` [kg·m^2]. +""" + +BODY_INV_MASS = _TT.ARTICULATION_BODY_INV_MASS +"""Inverse mass of each body. + +Shape is ``[N, L]``, dtype ``float32``. +""" + +BODY_INV_INERTIA = _TT.ARTICULATION_BODY_INV_INERTIA +"""Inverse inertia tensor of each body. + +Shape is ``[N, L, 9]``, dtype ``float32``. +""" + +""" +Dynamics tensors (GPU) +""" + +JACOBIAN = _TT.ARTICULATION_JACOBIAN +"""Jacobian matrix of each articulation instance. + +Shape is ``[N, L, 6, D+6]``, dtype ``float32``. +""" + +MASS_MATRIX = _TT.ARTICULATION_MASS_MATRIX +"""Generalized mass (inertia) matrix. + +Shape is ``[N, D+6, D+6]``, dtype ``float32``. +""" + +CORIOLIS = _TT.ARTICULATION_CORIOLIS_AND_CENTRIFUGAL_FORCE +"""Coriolis and centrifugal force vector. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +GRAVITY_FORCE = _TT.ARTICULATION_GRAVITY_FORCE +"""Generalized gravity force vector. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +""" +Joint force feedback (GPU) +""" + +LINK_INCOMING_JOINT_FORCE = _TT.ARTICULATION_LINK_INCOMING_JOINT_FORCE +"""Incoming joint force (constraint force) on each link. + +Shape is ``[N, L, 6]``, dtype ``float32``. +""" + +DOF_PROJECTED_JOINT_FORCE = _TT.ARTICULATION_DOF_PROJECTED_JOINT_FORCE +"""DOF-projected joint force. + +Shape is ``[N, D]``, dtype ``float32``. +""" + +""" +Fixed tendon properties (CPU) +""" + +FIXED_TENDON_STIFFNESS = _TT.ARTICULATION_FIXED_TENDON_STIFFNESS +"""Stiffness of each fixed tendon. + +Shape is ``[N, T_fix]``, dtype ``float32``. +""" + +FIXED_TENDON_DAMPING = _TT.ARTICULATION_FIXED_TENDON_DAMPING +"""Damping of each fixed tendon. + +Shape is ``[N, T_fix]``, dtype ``float32``. +""" + +FIXED_TENDON_LIMIT_STIFFNESS = _TT.ARTICULATION_FIXED_TENDON_LIMIT_STIFFNESS +"""Limit stiffness of each fixed tendon. + +Shape is ``[N, T_fix]``, dtype ``float32``. +""" + +FIXED_TENDON_LIMIT = _TT.ARTICULATION_FIXED_TENDON_LIMIT +"""Position limits of each fixed tendon (lower, upper). + +Shape is ``[N, T_fix, 2]``, dtype ``float32``. +""" + +FIXED_TENDON_REST_LENGTH = _TT.ARTICULATION_FIXED_TENDON_REST_LENGTH +"""Rest length of each fixed tendon. + +Shape is ``[N, T_fix]``, dtype ``float32``. +""" + +FIXED_TENDON_OFFSET = _TT.ARTICULATION_FIXED_TENDON_OFFSET +"""Offset of each fixed tendon. + +Shape is ``[N, T_fix]``, dtype ``float32``. +""" + +""" +Spatial tendon properties (CPU) +""" + +SPATIAL_TENDON_STIFFNESS = _TT.ARTICULATION_SPATIAL_TENDON_STIFFNESS +"""Stiffness of each spatial tendon. + +Shape is ``[N, T_spa]``, dtype ``float32``. +""" + +SPATIAL_TENDON_DAMPING = _TT.ARTICULATION_SPATIAL_TENDON_DAMPING +"""Damping of each spatial tendon. + +Shape is ``[N, T_spa]``, dtype ``float32``. +""" + +SPATIAL_TENDON_LIMIT_STIFFNESS = _TT.ARTICULATION_SPATIAL_TENDON_LIMIT_STIFFNESS +"""Limit stiffness of each spatial tendon. + +Shape is ``[N, T_spa]``, dtype ``float32``. +""" + +SPATIAL_TENDON_OFFSET = _TT.ARTICULATION_SPATIAL_TENDON_OFFSET +"""Offset of each spatial tendon. + +Shape is ``[N, T_spa]``, dtype ``float32``. +""" + +# fmt: on +# DOF/body property tensor types are CPU-resident even in GPU simulations. +# Write helpers check this set to route data through CPU, not self._device. +_CPU_ONLY_TYPES: frozenset[TensorType] = frozenset( + { + DOF_STIFFNESS, + DOF_DAMPING, + DOF_LIMIT, + DOF_MAX_VELOCITY, + DOF_MAX_FORCE, + DOF_ARMATURE, + DOF_FRICTION_PROPERTIES, + BODY_MASS, + BODY_COM_POSE, + BODY_INERTIA, + BODY_INV_MASS, + BODY_INV_INERTIA, + FIXED_TENDON_STIFFNESS, + FIXED_TENDON_DAMPING, + FIXED_TENDON_LIMIT_STIFFNESS, + FIXED_TENDON_LIMIT, + FIXED_TENDON_REST_LENGTH, + FIXED_TENDON_OFFSET, + SPATIAL_TENDON_STIFFNESS, + SPATIAL_TENDON_DAMPING, + SPATIAL_TENDON_LIMIT_STIFFNESS, + SPATIAL_TENDON_OFFSET, + } +) diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/test/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/__init__.py new file mode 100644 index 000000000000..2c6c0e67ffc4 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from .views import MockTensorBinding, MockOvPhysxBindingSet diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/__init__.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/__init__.py new file mode 100644 index 000000000000..6f133a18202c --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from .mock_ovphysx_bindings import MockTensorBinding, MockOvPhysxBindingSet diff --git a/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/mock_ovphysx_bindings.py b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/mock_ovphysx_bindings.py new file mode 100644 index 000000000000..29472ce74fd0 --- /dev/null +++ b/source/isaaclab_ovphysx/isaaclab_ovphysx/test/mock_interfaces/views/mock_ovphysx_bindings.py @@ -0,0 +1,276 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementations of ovphysx TensorBinding objects for unit testing.""" + +from __future__ import annotations + +import numpy as np + +from isaaclab_ovphysx import tensor_types as TT + + +class MockTensorBinding: + """Mock of ovphysx.TensorBinding that stores data in numpy arrays. + + Mimics the real TensorBinding API: ``read(tensor)`` fills the tensor from + the internal buffer, ``write(tensor, indices, mask)`` copies into it. + """ + + def __init__( + self, + tensor_type: int, + shape: tuple[int, ...], + count: int, + dof_count: int = 0, + body_count: int = 0, + joint_count: int = 0, + is_fixed_base: bool = False, + dof_names: list[str] | None = None, + body_names: list[str] | None = None, + joint_names: list[str] | None = None, + fixed_tendon_count: int = 0, + spatial_tendon_count: int = 0, + write_only: bool = False, + ): + self.tensor_type = tensor_type + self._shape = shape + self._count = count + self._dof_count = dof_count + self._body_count = body_count + self._joint_count = joint_count + self._is_fixed_base = is_fixed_base + self._dof_names = dof_names or [] + self._body_names = body_names or [] + self._joint_names = joint_names or [] + self._fixed_tendon_count = fixed_tendon_count + self._spatial_tendon_count = spatial_tendon_count + self._write_only = write_only + self._data = np.zeros(shape, dtype=np.float32) + + @property + def shape(self) -> tuple[int, ...]: + return self._shape + + @property + def ndim(self) -> int: + return len(self._shape) + + @property + def count(self) -> int: + return self._count + + @property + def dof_count(self) -> int: + return self._dof_count + + @property + def body_count(self) -> int: + return self._body_count + + @property + def joint_count(self) -> int: + return self._joint_count + + @property + def is_fixed_base(self) -> bool: + return self._is_fixed_base + + @property + def dof_names(self) -> list[str]: + return self._dof_names + + @property + def body_names(self) -> list[str]: + return self._body_names + + @property + def joint_names(self) -> list[str]: + return self._joint_names + + @property + def fixed_tendon_count(self) -> int: + return self._fixed_tendon_count + + @property + def spatial_tendon_count(self) -> int: + return self._spatial_tendon_count + + def read(self, tensor) -> None: + """Copy internal data into the provided array (numpy or warp).""" + if self._write_only: + raise RuntimeError("write-only tensor binding does not support read()") + try: + import warp as wp + + if isinstance(tensor, wp.array): + tmp = wp.from_numpy(self._data, dtype=wp.float32, device=tensor.device) + wp.copy(tensor, tmp) + return + except ImportError: + pass + np_dst = np.asarray(tensor) + np.copyto(np_dst, self._data.reshape(np_dst.shape)) + + @staticmethod + def _to_numpy(arr) -> np.ndarray: + """Convert warp/torch/numpy array to numpy, handling GPU arrays.""" + try: + import warp as wp + + if isinstance(arr, wp.array): + return arr.numpy() + except ImportError: + pass + try: + import torch + + if isinstance(arr, torch.Tensor): + return arr.detach().cpu().numpy() + except ImportError: + pass + return np.asarray(arr) + + def write(self, tensor, indices=None, mask=None) -> None: + """Copy from the provided array (numpy or warp) into internal data.""" + np_src = self._to_numpy(tensor).astype(np.float32) + if indices is not None: + idx = self._to_numpy(indices) + if np_src.shape[0] == self._data.shape[0]: + self._data[idx] = np_src[idx] + else: + self._data[idx] = np_src.reshape(len(idx), *self._data.shape[1:]) + elif mask is not None: + np_mask = self._to_numpy(mask).astype(bool) + self._data[np_mask] = np_src[np_mask] + else: + np.copyto(self._data, np_src.reshape(self._data.shape)) + + def destroy(self) -> None: + pass + + def set_random_data(self, low: float = -1.0, high: float = 1.0) -> None: + """Fill internal buffer with random data.""" + self._data = np.random.uniform(low, high, self._shape).astype(np.float32) + + +class MockOvPhysxBindingSet: + """Factory that creates a full set of MockTensorBinding objects + for a given articulation configuration. + + Mirrors the tensor types that ``Articulation._initialize_impl`` creates. + """ + + def __init__( + self, + num_instances: int, + num_joints: int, + num_bodies: int, + is_fixed_base: bool = False, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + ): + N = num_instances + D = num_joints + L = num_bodies + T_fix = num_fixed_tendons + T_spa = num_spatial_tendons + + if joint_names is None: + joint_names = [f"joint_{i}" for i in range(D)] + if body_names is None: + body_names = [f"body_{i}" for i in range(L)] + + common = dict( + count=N, + dof_count=D, + body_count=L, + joint_count=D, + is_fixed_base=is_fixed_base, + dof_names=joint_names, + body_names=body_names, + joint_names=joint_names, + fixed_tendon_count=T_fix, + spatial_tendon_count=T_spa, + ) + + self.bindings: dict[int, MockTensorBinding] = { + TT.ROOT_POSE: MockTensorBinding(TT.ROOT_POSE, (N, 7), **common), + TT.ROOT_VELOCITY: MockTensorBinding(TT.ROOT_VELOCITY, (N, 6), **common), + TT.LINK_POSE: MockTensorBinding(TT.LINK_POSE, (N, L, 7), **common), + TT.LINK_VELOCITY: MockTensorBinding(TT.LINK_VELOCITY, (N, L, 6), **common), + TT.LINK_ACCELERATION: MockTensorBinding(TT.LINK_ACCELERATION, (N, L, 6), **common), + TT.DOF_POSITION: MockTensorBinding(TT.DOF_POSITION, (N, D), **common), + TT.DOF_VELOCITY: MockTensorBinding(TT.DOF_VELOCITY, (N, D), **common), + TT.DOF_POSITION_TARGET: MockTensorBinding(TT.DOF_POSITION_TARGET, (N, D), **common), + TT.DOF_VELOCITY_TARGET: MockTensorBinding(TT.DOF_VELOCITY_TARGET, (N, D), **common), + TT.DOF_ACTUATION_FORCE: MockTensorBinding(TT.DOF_ACTUATION_FORCE, (N, D), **common), + TT.DOF_STIFFNESS: MockTensorBinding(TT.DOF_STIFFNESS, (N, D), **common), + TT.DOF_DAMPING: MockTensorBinding(TT.DOF_DAMPING, (N, D), **common), + TT.DOF_LIMIT: MockTensorBinding(TT.DOF_LIMIT, (N, D, 2), **common), + TT.DOF_MAX_VELOCITY: MockTensorBinding(TT.DOF_MAX_VELOCITY, (N, D), **common), + TT.DOF_MAX_FORCE: MockTensorBinding(TT.DOF_MAX_FORCE, (N, D), **common), + TT.DOF_ARMATURE: MockTensorBinding(TT.DOF_ARMATURE, (N, D), **common), + TT.DOF_FRICTION_PROPERTIES: MockTensorBinding(TT.DOF_FRICTION_PROPERTIES, (N, D, 3), **common), + TT.LINK_WRENCH: MockTensorBinding(TT.LINK_WRENCH, (N, L, 9), write_only=True, **common), + TT.BODY_MASS: MockTensorBinding(TT.BODY_MASS, (N, L), **common), + TT.BODY_COM_POSE: MockTensorBinding(TT.BODY_COM_POSE, (N, L, 7), **common), + TT.BODY_INERTIA: MockTensorBinding(TT.BODY_INERTIA, (N, L, 9), **common), + TT.BODY_INV_MASS: MockTensorBinding(TT.BODY_INV_MASS, (N, L), **common), + TT.BODY_INV_INERTIA: MockTensorBinding(TT.BODY_INV_INERTIA, (N, L, 9), **common), + TT.LINK_INCOMING_JOINT_FORCE: MockTensorBinding(TT.LINK_INCOMING_JOINT_FORCE, (N, L, 6), **common), + TT.DOF_PROJECTED_JOINT_FORCE: MockTensorBinding(TT.DOF_PROJECTED_JOINT_FORCE, (N, D), **common), + } + + # Fixed tendon bindings (only when tendons are present) + if T_fix > 0: + self.bindings.update( + { + TT.FIXED_TENDON_STIFFNESS: MockTensorBinding(TT.FIXED_TENDON_STIFFNESS, (N, T_fix), **common), + TT.FIXED_TENDON_DAMPING: MockTensorBinding(TT.FIXED_TENDON_DAMPING, (N, T_fix), **common), + TT.FIXED_TENDON_LIMIT_STIFFNESS: MockTensorBinding( + TT.FIXED_TENDON_LIMIT_STIFFNESS, (N, T_fix), **common + ), + TT.FIXED_TENDON_LIMIT: MockTensorBinding(TT.FIXED_TENDON_LIMIT, (N, T_fix, 2), **common), + TT.FIXED_TENDON_REST_LENGTH: MockTensorBinding(TT.FIXED_TENDON_REST_LENGTH, (N, T_fix), **common), + TT.FIXED_TENDON_OFFSET: MockTensorBinding(TT.FIXED_TENDON_OFFSET, (N, T_fix), **common), + } + ) + + # Spatial tendon bindings + if T_spa > 0: + self.bindings.update( + { + TT.SPATIAL_TENDON_STIFFNESS: MockTensorBinding(TT.SPATIAL_TENDON_STIFFNESS, (N, T_spa), **common), + TT.SPATIAL_TENDON_DAMPING: MockTensorBinding(TT.SPATIAL_TENDON_DAMPING, (N, T_spa), **common), + TT.SPATIAL_TENDON_LIMIT_STIFFNESS: MockTensorBinding( + TT.SPATIAL_TENDON_LIMIT_STIFFNESS, (N, T_spa), **common + ), + TT.SPATIAL_TENDON_OFFSET: MockTensorBinding(TT.SPATIAL_TENDON_OFFSET, (N, T_spa), **common), + } + ) + + def set_random_data(self) -> None: + """Fill all bindings with random data.""" + for b in self.bindings.values(): + if not b._write_only: + b.set_random_data() + lim = self.bindings[TT.DOF_LIMIT] + lim._data[..., 0] = -3.14 + lim._data[..., 1] = 3.14 + for tt in (TT.ROOT_POSE, TT.LINK_POSE, TT.BODY_COM_POSE): + b = self.bindings[tt] + b._data[..., 3:6] = 0.0 + b._data[..., 6] = 1.0 + self.bindings[TT.BODY_MASS]._data = np.abs(self.bindings[TT.BODY_MASS]._data) + 0.1 + self.bindings[TT.DOF_MAX_VELOCITY]._data = np.abs(self.bindings[TT.DOF_MAX_VELOCITY]._data) + 1.0 + self.bindings[TT.DOF_MAX_FORCE]._data = np.abs(self.bindings[TT.DOF_MAX_FORCE]._data) + 1.0 + # Set sensible defaults for fixed tendon limits + if TT.FIXED_TENDON_LIMIT in self.bindings: + tlim = self.bindings[TT.FIXED_TENDON_LIMIT] + tlim._data[..., 0] = -1.0 + tlim._data[..., 1] = 1.0 diff --git a/source/isaaclab_ovphysx/pyproject.toml b/source/isaaclab_ovphysx/pyproject.toml new file mode 100644 index 000000000000..31dce8d230ec --- /dev/null +++ b/source/isaaclab_ovphysx/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools<82.0.0", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_ovphysx/setup.py b/source/isaaclab_ovphysx/setup.py new file mode 100644 index 000000000000..4898806285dd --- /dev/null +++ b/source/isaaclab_ovphysx/setup.py @@ -0,0 +1,53 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_ovphysx' python package.""" + +import os + +import toml +from setuptools import setup + +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +INSTALL_REQUIRES: list[str] = [] + +EXTRAS_REQUIRE = { + "ovphysx": ["ovphysx"], +} + +setup( + name="isaaclab_ovphysx", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + license="BSD-3-Clause", + include_package_data=True, + package_data={"": ["*.pyi"]}, + python_requires=">=3.11", + install_requires=INSTALL_REQUIRES, + extras_require=EXTRAS_REQUIRE, + packages=[ + "isaaclab_ovphysx", + "isaaclab_ovphysx.assets", + "isaaclab_ovphysx.assets.articulation", + "isaaclab_ovphysx.cloner", + "isaaclab_ovphysx.physics", + "isaaclab_ovphysx.test", + "isaaclab_ovphysx.test.mock_interfaces", + "isaaclab_ovphysx.test.mock_interfaces.views", + ], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_ovphysx/test/__init__.py b/source/isaaclab_ovphysx/test/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_ovphysx/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_ovphysx/test/assets/__init__.py b/source/isaaclab_ovphysx/test/assets/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_ovphysx/test/assets/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_ovphysx/test/assets/test_articulation.py b/source/isaaclab_ovphysx/test/assets/test_articulation.py new file mode 100644 index 000000000000..52998a2ec5f6 --- /dev/null +++ b/source/isaaclab_ovphysx/test/assets/test_articulation.py @@ -0,0 +1,116 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for ovphysx articulation helpers.""" + +from __future__ import annotations + +from types import SimpleNamespace + +import pytest +import warp as wp + +from pxr import Sdf, Usd, UsdPhysics + +# The CI isaaclab_ov* pattern unintentionally collects isaaclab_ovphysx tests, +# but the ovphysx wheel is not installed in that environment. Skip gracefully +# so the isaaclab_ov CI pipeline is not blocked by an unrelated dependency. +pytest.importorskip("ovphysx.types", reason="ovphysx wheel not installed") + +from isaaclab_ovphysx.assets.articulation.articulation import Articulation # noqa: E402 +from isaaclab_ovphysx.physics import OvPhysxManager # noqa: E402 +from isaaclab_ovphysx.test.mock_interfaces.views import MockOvPhysxBindingSet # noqa: E402 + +wp.init() + + +def _define_tendon_joint(stage: Usd.Stage, path: str, schema_name: str) -> None: + """Define a revolute joint prim with a tendon schema marker.""" + joint = UsdPhysics.RevoluteJoint.Define(stage, path) + schemas = Sdf.TokenListOp() + schemas.explicitItems = [schema_name] + joint.GetPrim().SetMetadata("apiSchemas", schemas) + + +def _make_articulation_root_stage(tmp_path) -> str: + """Create a stage with one relevant articulation subtree and unrelated joints elsewhere.""" + stage = Usd.Stage.CreateInMemory() + stage.DefinePrim("/World", "Xform") + stage.DefinePrim("/World/envs", "Xform") + stage.DefinePrim("/World/envs/env_0", "Xform") + stage.DefinePrim("/World/envs/env_0/Robot", "Xform") + stage.DefinePrim("/World/envs/env_0/Robot/root", "Xform") + stage.DefinePrim("/World/unrelated", "Xform") + + _define_tendon_joint( + stage, + "/World/envs/env_0/Robot/root/fixed_joint", + "PhysxTendonAxisRootAPI:inst0", + ) + _define_tendon_joint( + stage, + "/World/envs/env_0/Robot/root/spatial_joint", + "PhysxTendonAttachmentRootAPI:inst0", + ) + _define_tendon_joint( + stage, + "/World/unrelated/unrelated_fixed_joint", + "PhysxTendonAxisRootAPI:inst0", + ) + _define_tendon_joint( + stage, + "/World/unrelated/unrelated_spatial_joint", + "PhysxTendonAttachmentLeafAPI:inst0", + ) + + stage_path = tmp_path / "scene.usda" + stage.Export(str(stage_path)) + return str(stage_path) + + +def _make_articulation_shell() -> Articulation: + """Create a minimal ovphysx articulation shell for tendon processing tests.""" + articulation = object.__new__(Articulation) + bindings = MockOvPhysxBindingSet( + num_instances=1, + num_joints=2, + num_bodies=2, + num_fixed_tendons=1, + num_spatial_tendons=1, + ) + object.__setattr__(articulation, "_bindings", bindings.bindings) + object.__setattr__(articulation, "_articulation_root_path", "/World/envs/env_0/Robot/root") + object.__setattr__(articulation, "_initialize_handle", None) + object.__setattr__(articulation, "_invalidate_initialize_handle", None) + object.__setattr__(articulation, "_prim_deletion_handle", None) + object.__setattr__(articulation, "_debug_vis_handle", None) + object.__setattr__( + articulation, + "_data", + SimpleNamespace( + _num_fixed_tendons=0, + _num_spatial_tendons=0, + fixed_tendon_names=[], + spatial_tendon_names=[], + ), + ) + return articulation + + +def test_process_tendons_scopes_to_articulation_root(tmp_path): + """Tendon discovery should ignore joints that live outside the current articulation subtree.""" + articulation = _make_articulation_shell() + stage_path = _make_articulation_root_stage(tmp_path) + old_stage_path = OvPhysxManager._stage_path + OvPhysxManager._stage_path = stage_path + try: + articulation._process_tendons() + finally: + OvPhysxManager._stage_path = old_stage_path + + assert articulation.fixed_tendon_names == ["fixed_joint"] + assert articulation.spatial_tendon_names == ["spatial_joint"] + assert articulation._data.fixed_tendon_names == ["fixed_joint"] + assert articulation._data.spatial_tendon_names == ["spatial_joint"] diff --git a/source/isaaclab_ovphysx/test/assets/test_articulation_data.py b/source/isaaclab_ovphysx/test/assets/test_articulation_data.py new file mode 100644 index 000000000000..16bb99a4d6c4 --- /dev/null +++ b/source/isaaclab_ovphysx/test/assets/test_articulation_data.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for ovphysx articulation data helpers.""" + +import numpy as np +import pytest +import warp as wp + +# The CI isaaclab_ov* pattern unintentionally collects isaaclab_ovphysx tests, +# but the ovphysx wheel is not installed in that environment. Skip gracefully +# so the isaaclab_ov CI pipeline is not blocked by an unrelated dependency. +pytest.importorskip("ovphysx.types", reason="ovphysx wheel not installed") + +from isaaclab_ovphysx import tensor_types as TT # noqa: E402 +from isaaclab_ovphysx.assets.articulation.articulation_data import ArticulationData +from isaaclab_ovphysx.test.mock_interfaces.views import MockOvPhysxBindingSet + +wp.init() + + +class TestArticulationData: + """Unit tests for deterministic ArticulationData behavior.""" + + def test_joint_acc_uses_inverse_dt(self): + """Finite-difference joint acceleration should divide by ``dt``.""" + mock_bindings = MockOvPhysxBindingSet(num_instances=1, num_joints=2, num_bodies=1) + data = ArticulationData(mock_bindings.bindings, device="cpu") + data._create_buffers() + + mock_bindings.bindings[TT.DOF_VELOCITY]._data[...] = np.array([[1.0, -2.0]], dtype=np.float32) + + data.update(dt=0.25) + + np.testing.assert_allclose( + data.joint_acc.warp.numpy(), + np.array([[4.0, -8.0]], dtype=np.float32), + atol=1e-6, + err_msg="Joint acceleration should be computed as delta_velocity / dt.", + ) diff --git a/source/isaaclab_ovphysx/test/physics/__init__.py b/source/isaaclab_ovphysx/test/physics/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_ovphysx/test/physics/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_ovphysx/test/sensors/__init__.py b/source/isaaclab_ovphysx/test/sensors/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_ovphysx/test/sensors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_ovphysx/test/sensors/check_contact_sensor.py b/source/isaaclab_ovphysx/test/sensors/check_contact_sensor.py new file mode 100644 index 000000000000..8bcd9f0941f6 --- /dev/null +++ b/source/isaaclab_ovphysx/test/sensors/check_contact_sensor.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Contact sensor parity tests for the ovphysx backend. + +Mirrors the structure of isaaclab_physx/test/sensors/check_contact_sensor.py. +Contact sensors are not yet supported by the ovphysx backend, so all tests +are skipped with an explanatory message. +""" + +import pytest + + +def test_contact_sensor_creation(): + """Verify contact sensor can be created on the ovphysx backend.""" + pytest.skip("Contact sensor not yet supported by ovphysx backend.") + + +def test_contact_sensor_data_reading(): + """Verify contact sensor data can be read after a simulation step.""" + pytest.skip("Contact sensor not yet supported by ovphysx backend.") + + +def test_contact_sensor_reset(): + """Verify contact sensor state resets correctly.""" + pytest.skip("Contact sensor not yet supported by ovphysx backend.") + + +def test_contact_sensor_air_time_tracking(): + """Verify contact sensor air time tracking.""" + pytest.skip("Contact sensor not yet supported by ovphysx backend.") + + +def test_contact_sensor_friction_forces(): + """Verify contact sensor friction force reporting.""" + pytest.skip("Contact sensor not yet supported by ovphysx backend.") diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_articulation.py b/source/isaaclab_physx/benchmark/assets/benchmark_articulation.py new file mode 100644 index 000000000000..60969e714da0 --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_articulation.py @@ -0,0 +1,1007 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for Articulation class (PhysX backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the Articulation class. Each method is benchmarked under two scenarios: + +1. **Torch List**: Inputs are PyTorch tensors with list indices. +2. **Torch Tensor**: Inputs are PyTorch tensors with tensor indices. + +Usage: + python benchmark_articulation.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] [--num_joints J] + +Example: + python benchmark_articulation.py --num_iterations 1000 --warmup_steps 10 + python benchmark_articulation.py --mode torch_list # Only run list-based benchmarks + python benchmark_articulation.py --mode torch_tensor # Only run tensor-based benchmarks +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark Articulation methods (PhysX backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=12, help="Number of bodies") +parser.add_argument("--num_joints", type=int, default=11, help="Number of joints") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") +parser.add_argument("--no_shape_checks", action="store_true", help="Disable shape/dtype assertions") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) +import warp as wp +from isaaclab_physx.assets.articulation.articulation import Articulation +from isaaclab_physx.assets.articulation.articulation_data import ArticulationData +from isaaclab_physx.test.benchmark import make_tensor_body_ids, make_tensor_env_ids, make_tensor_joint_ids +from isaaclab_physx.test.mock_interfaces.views import MockArticulationViewWarp + +from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig +from isaaclab.test.mock_interfaces.utils import MockWrenchComposer + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Also suppress logging warnings (the body_acc_w deprecation warnings use logging) +logging.getLogger("isaaclab_physx").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +def create_test_articulation( + num_instances: int = 2, + num_joints: int = 6, + num_bodies: int = 7, + device: str = "cuda:0", +) -> tuple[Articulation, MockArticulationViewWarp, MagicMock]: + """Create a test Articulation instance with mocked dependencies.""" + joint_names = [f"joint_{i}" for i in range(num_joints)] + body_names = [f"body_{i}" for i in range(num_bodies)] + + articulation = object.__new__(Articulation) + + articulation.cfg = ArticulationCfg( + prim_path="/World/Robot", + soft_joint_pos_limit_factor=1.0, + actuators={}, + ) + + # Create PhysX mock view + mock_view = MockArticulationViewWarp( + count=num_instances, + num_links=num_bodies, + num_dofs=num_joints, + device=device, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Set up the mock view's metatype for accessing names/counts + mock_metatype = MagicMock() + mock_metatype.fixed_base = False + mock_metatype.dof_count = num_joints + mock_metatype.link_count = num_bodies + mock_metatype.dof_names = joint_names + mock_metatype.link_names = body_names + object.__setattr__(mock_view, "_shared_metatype", mock_metatype) + + object.__setattr__(articulation, "_root_view", mock_view) + object.__setattr__(articulation, "_device", device) + object.__setattr__(articulation, "_check_shapes", not args.no_shape_checks) + + # Create ArticulationData instance (SimulationManager already mocked at module level) + data = ArticulationData(mock_view, device) + object.__setattr__(articulation, "_data", data) + + # Create mock wrench composers (pass articulation which has num_instances, num_bodies, device properties) + mock_inst_wrench = MockWrenchComposer(articulation) + mock_perm_wrench = MockWrenchComposer(articulation) + object.__setattr__(articulation, "_instantaneous_wrench_composer", mock_inst_wrench) + object.__setattr__(articulation, "_permanent_wrench_composer", mock_perm_wrench) + + # Set up other required attributes + object.__setattr__(articulation, "actuators", {}) + object.__setattr__(articulation, "_has_implicit_actuators", False) + + # Use warp arrays for _ALL_* indices (matching real _create_buffers) + import numpy as np + + all_indices_wp = wp.array(np.arange(num_instances, dtype=np.int32), device=device) + all_joint_indices_wp = wp.array(np.arange(num_joints, dtype=np.int32), device=device) + all_body_indices_wp = wp.array(np.arange(num_bodies, dtype=np.int32), device=device) + object.__setattr__(articulation, "_ALL_INDICES", all_indices_wp) + object.__setattr__(articulation, "_ALL_JOINT_INDICES", all_joint_indices_wp) + object.__setattr__(articulation, "_ALL_BODY_INDICES", all_body_indices_wp) + + # Warp arrays for set_external_force_and_torque + object.__setattr__(articulation, "_ALL_INDICES_WP", all_indices_wp) + object.__setattr__(articulation, "_ALL_BODY_INDICES_WP", all_body_indices_wp) + + # Initialize joint targets + object.__setattr__(articulation, "_joint_pos_target_sim", torch.zeros(num_instances, num_joints, device=device)) + object.__setattr__(articulation, "_joint_vel_target_sim", torch.zeros(num_instances, num_joints, device=device)) + object.__setattr__(articulation, "_joint_effort_target_sim", torch.zeros(num_instances, num_joints, device=device)) + + # Cached .view() wrappers + object.__setattr__(articulation, "_root_link_pose_w_f32", None) + object.__setattr__(articulation, "_root_com_vel_w_f32", None) + object.__setattr__(articulation, "_root_link_vel_w_f32", None) + + # Pre-allocated pinned CPU buffers for PhysX TensorAPI writes + N, J, B = num_instances, num_joints, num_bodies + object.__setattr__(articulation, "_cpu_env_ids_all", wp.zeros(N, dtype=wp.int32, device="cpu", pinned=True)) + wp.copy(articulation._cpu_env_ids_all, all_indices_wp) + object.__setattr__( + articulation, "_cpu_joint_stiffness", wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_damping", wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_pos_limits", wp.zeros((N, J, 2), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_vel_limits", wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_effort_limits", wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_armature", wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__( + articulation, "_cpu_joint_friction_props", wp.zeros((N, J, 3), dtype=wp.float32, device="cpu", pinned=True) + ) + object.__setattr__(articulation, "_cpu_body_mass", wp.zeros((N, B), dtype=wp.float32, device="cpu", pinned=True)) + object.__setattr__(articulation, "_cpu_body_coms", wp.zeros((N, B, 7), dtype=wp.float32, device="cpu", pinned=True)) + object.__setattr__( + articulation, "_cpu_body_inertia", wp.zeros((N, B, 9), dtype=wp.float32, device="cpu", pinned=True) + ) + + return articulation, mock_view, None + + +# ============================================================================= +# Input Generators (Torch-only for PhysX backend) +# ============================================================================= + + +# --- Root Link Pose --- +def gen_root_link_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Pose --- +def gen_root_com_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link Velocity --- +def gen_root_link_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Velocity --- +def gen_root_com_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root State (Deprecated) --- +def gen_root_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM State (Deprecated) --- +def gen_root_com_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link State (Deprecated) --- +def gen_root_link_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Joint State --- +def gen_joint_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_state_to_sim.""" + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_state_to_sim.""" + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Position --- +def gen_joint_position_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_position_to_sim.""" + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_position_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_position_to_sim.""" + return { + "position": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Velocity --- +def gen_joint_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_velocity_to_sim.""" + return { + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_velocity_to_sim.""" + return { + "velocity": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Stiffness --- +def gen_joint_stiffness_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_stiffness_to_sim.""" + return { + "stiffness": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_stiffness_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_stiffness_to_sim.""" + return { + "stiffness": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Damping --- +def gen_joint_damping_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_damping_to_sim.""" + return { + "damping": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_damping_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_damping_to_sim.""" + return { + "damping": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Position Limit --- +def gen_joint_position_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_position_limit_to_sim.""" + # limits shape is (N, J, 2) where [:,: ,0] is lower and [:,:,1] is upper + lower = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * -3.14 + upper = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * 3.14 + return { + "limits": torch.cat([lower, upper], dim=-1), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_position_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_position_limit_to_sim.""" + # limits shape is (N, J, 2) where [:,: ,0] is lower and [:,:,1] is upper + lower = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * -3.14 + upper = torch.rand(config.num_instances, config.num_joints, 1, device=config.device, dtype=torch.float32) * 3.14 + return { + "limits": torch.cat([lower, upper], dim=-1), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Velocity Limit --- +def gen_joint_velocity_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_velocity_limit_to_sim.""" + return { + "limits": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 10.0, + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_velocity_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_velocity_limit_to_sim.""" + return { + "limits": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 10.0, + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Effort Limit --- +def gen_joint_effort_limit_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_effort_limit_to_sim.""" + return { + "limits": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 100.0 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_effort_limit_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_effort_limit_to_sim.""" + return { + "limits": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 100.0 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Armature --- +def gen_joint_armature_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_armature_to_sim.""" + return { + "armature": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.1 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_armature_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_armature_to_sim.""" + return { + "armature": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.1 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Joint Friction Coefficient --- +def gen_joint_friction_coefficient_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for write_joint_friction_coefficient_to_sim.""" + return { + "joint_friction_coeff": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.5 + ), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_joint_friction_coefficient_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for write_joint_friction_coefficient_to_sim.""" + return { + "joint_friction_coeff": ( + torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32) * 0.5 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Position Target --- +def gen_set_joint_position_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_joint_position_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_position_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_joint_position_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Velocity Target --- +def gen_set_joint_velocity_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_joint_velocity_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_velocity_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_joint_velocity_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Joint Effort Target --- +def gen_set_joint_effort_target_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_joint_effort_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "joint_ids": list(range(config.num_joints)), + } + + +def gen_set_joint_effort_target_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_joint_effort_target.""" + return { + "target": torch.rand(config.num_instances, config.num_joints, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "joint_ids": make_tensor_joint_ids(config.num_joints, config.device), + } + + +# --- Set Masses --- +def gen_set_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_masses.""" + # Articulation masses shape is (N, B) + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_masses.""" + # Articulation masses shape is (N, B) + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set CoMs --- +def gen_set_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_coms.""" + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_coms.""" + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set Inertias --- +def gen_set_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_inertias.""" + # Articulation inertias shape is (N, B, 9) - flattened 3x3 matrix + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_set_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_inertias.""" + # Articulation inertias shape is (N, B, 9) - flattened 3x3 matrix + return { + "inertias": torch.rand(config.num_instances, config.num_bodies, 9, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Set External Force and Torque --- +def gen_set_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_external_force_and_torque.""" + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_set_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_external_force_and_torque.""" + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Root State (Deprecated) --- + MethodBenchmarkDefinition( + name="write_root_state_to_sim", + method_name="write_root_state_to_sim", + input_generators={ + "torch_list": gen_root_state_torch_list, + "torch_tensor": gen_root_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_com_state_to_sim", + method_name="write_root_com_state_to_sim", + input_generators={ + "torch_list": gen_root_com_state_torch_list, + "torch_tensor": gen_root_com_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_link_state_to_sim", + method_name="write_root_link_state_to_sim", + input_generators={ + "torch_list": gen_root_link_state_torch_list, + "torch_tensor": gen_root_link_state_torch_tensor, + }, + category="root_state", + ), + # --- Root Pose / Velocity --- + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim", + method_name="write_root_link_pose_to_sim", + input_generators={ + "torch_list": gen_root_link_pose_torch_list, + "torch_tensor": gen_root_link_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim", + method_name="write_root_com_pose_to_sim", + input_generators={ + "torch_list": gen_root_com_pose_torch_list, + "torch_tensor": gen_root_com_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim", + method_name="write_root_link_velocity_to_sim", + input_generators={ + "torch_list": gen_root_link_velocity_torch_list, + "torch_tensor": gen_root_link_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim", + method_name="write_root_com_velocity_to_sim", + input_generators={ + "torch_list": gen_root_com_velocity_torch_list, + "torch_tensor": gen_root_com_velocity_torch_tensor, + }, + category="root_velocity", + ), + # --- Joint State --- + MethodBenchmarkDefinition( + name="write_joint_state_to_sim", + method_name="write_joint_state_to_sim", + input_generators={ + "torch_list": gen_joint_state_torch_list, + "torch_tensor": gen_joint_state_torch_tensor, + }, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_position_to_sim", + method_name="write_joint_position_to_sim", + input_generators={ + "torch_list": gen_joint_position_torch_list, + "torch_tensor": gen_joint_position_torch_tensor, + }, + category="joint_state", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_to_sim", + method_name="write_joint_velocity_to_sim", + input_generators={ + "torch_list": gen_joint_velocity_torch_list, + "torch_tensor": gen_joint_velocity_torch_tensor, + }, + category="joint_state", + ), + # --- Joint Params --- + MethodBenchmarkDefinition( + name="write_joint_stiffness_to_sim", + method_name="write_joint_stiffness_to_sim", + input_generators={ + "torch_list": gen_joint_stiffness_torch_list, + "torch_tensor": gen_joint_stiffness_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_damping_to_sim", + method_name="write_joint_damping_to_sim", + input_generators={ + "torch_list": gen_joint_damping_torch_list, + "torch_tensor": gen_joint_damping_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_position_limit_to_sim", + method_name="write_joint_position_limit_to_sim", + input_generators={ + "torch_list": gen_joint_position_limit_torch_list, + "torch_tensor": gen_joint_position_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_velocity_limit_to_sim", + method_name="write_joint_velocity_limit_to_sim", + input_generators={ + "torch_list": gen_joint_velocity_limit_torch_list, + "torch_tensor": gen_joint_velocity_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_effort_limit_to_sim", + method_name="write_joint_effort_limit_to_sim", + input_generators={ + "torch_list": gen_joint_effort_limit_torch_list, + "torch_tensor": gen_joint_effort_limit_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_armature_to_sim", + method_name="write_joint_armature_to_sim", + input_generators={ + "torch_list": gen_joint_armature_torch_list, + "torch_tensor": gen_joint_armature_torch_tensor, + }, + category="joint_params", + ), + MethodBenchmarkDefinition( + name="write_joint_friction_coefficient_to_sim", + method_name="write_joint_friction_coefficient_to_sim", + input_generators={ + "torch_list": gen_joint_friction_coefficient_torch_list, + "torch_tensor": gen_joint_friction_coefficient_torch_tensor, + }, + category="joint_params", + ), + # --- Joint Targets --- + MethodBenchmarkDefinition( + name="set_joint_position_target", + method_name="set_joint_position_target", + input_generators={ + "torch_list": gen_set_joint_position_target_torch_list, + "torch_tensor": gen_set_joint_position_target_torch_tensor, + }, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_velocity_target", + method_name="set_joint_velocity_target", + input_generators={ + "torch_list": gen_set_joint_velocity_target_torch_list, + "torch_tensor": gen_set_joint_velocity_target_torch_tensor, + }, + category="joint_targets", + ), + MethodBenchmarkDefinition( + name="set_joint_effort_target", + method_name="set_joint_effort_target", + input_generators={ + "torch_list": gen_set_joint_effort_target_torch_list, + "torch_tensor": gen_set_joint_effort_target_torch_tensor, + }, + category="joint_targets", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_set_masses_torch_list, + "torch_tensor": gen_set_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_set_coms_torch_list, + "torch_tensor": gen_set_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_set_inertias_torch_list, + "torch_tensor": gen_set_inertias_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_set_external_force_and_torque_torch_list, + "torch_tensor": gen_set_external_force_and_torque_torch_tensor, + }, + category="external_wrench", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted — joint_ids and body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from the torch_tensor generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + if "torch_tensor" not in bm.input_generators: + continue + base_gen = bm.input_generators["torch_tensor"] + generators = {} + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=args.num_joints, + device=args.device, + mode=args.mode, + ) + + # Create the test articulation + articulation, _, _ = create_test_articulation( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + num_joints=config.num_joints, + device=config.device, + ) + + print( + f"Benchmarking Articulation (PhysX) with {config.num_instances} instances, {config.num_bodies} bodies," + f" {config.num_joints} joints..." + ) + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="articulation_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, articulation) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, articulation) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_articulation_data.py b/source/isaaclab_physx/benchmark/assets/benchmark_articulation_data.py new file mode 100644 index 000000000000..f463f73ea6aa --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_articulation_data.py @@ -0,0 +1,333 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for ArticulationData class. + +This module provides a benchmarking framework to measure the performance of all functions +in the ArticulationData class. Each function is run multiple times with randomized mock data, +and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_articulation_data.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] [--num_joints J] + +Example: + python benchmark_articulation_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for ArticulationData class.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=12, help="Number of bodies") +parser.add_argument("--num_joints", type=int, default=11, help="Number of joints") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +import warp as wp +from isaaclab_physx.assets.articulation.articulation_data import ArticulationData +from isaaclab_physx.test.mock_interfaces.views import MockArticulationViewWarp + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties (for backward compatibility) - skip these +DEPRECATED_PROPERTIES = { + "default_root_state", + "root_pose_w", + "root_pos_w", + "root_quat_w", + "root_vel_w", + "root_lin_vel_w", + "root_ang_vel_w", + "root_lin_vel_b", + "root_ang_vel_b", + "body_pose_w", + "body_pos_w", + "body_quat_w", + "body_vel_w", + "body_lin_vel_w", + "body_ang_vel_w", + "body_acc_w", + "body_lin_acc_w", + "body_ang_acc_w", + "com_pos_b", + "com_quat_b", + "joint_limits", + "joint_friction", + "fixed_tendon_limit", + "applied_torque", + "computed_torque", + "joint_dynamic_friction", + "joint_effort_target", + "joint_viscous_friction", + "joint_velocity_limits", + # Also skip the combined state properties marked as deprecated + "root_state_w", + "root_link_state_w", + "root_com_state_w", + "body_state_w", + "body_link_state_w", + "body_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = { + "fixed_tendon_stiffness", + "fixed_tendon_damping", + "fixed_tendon_limit_stiffness", + "fixed_tendon_rest_length", + "fixed_tendon_offset", + "fixed_tendon_pos_limits", + "spatial_tendon_stiffness", + "spatial_tendon_damping", + "spatial_tendon_limit_stiffness", + "spatial_tendon_offset", +} + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_fixed_tendon_damping", + "default_fixed_tendon_limit", + "default_fixed_tendon_limit_stiffness", + "default_fixed_tendon_offset", + "default_fixed_tendon_pos_limits", + "default_fixed_tendon_rest_length", + "default_fixed_tendon_stiffness", + "default_inertia", + "default_joint_armature", + "default_joint_damping", + "default_joint_dynamic_friction_coeff", + "default_joint_friction", + "default_joint_friction_coeff", + "default_joint_limits", + "default_joint_pos_limits", + "default_joint_stiffness", + "default_joint_viscous_friction_coeff", + "default_mass", + "default_spatial_tendon_damping", + "default_spatial_tendon_limit_stiffness", + "default_spatial_tendon_offset", + "default_spatial_tendon_stiffness", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "body_names", + "joint_names", + "fixed_tendon_names", + "spatial_tendon_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ALL_BODY_MASK", + "ALL_JOINT_MASK", + "ENV_MASK", + "BODY_MASK", + "JOINT_MASK", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "root_link_lin_vel_w": ["root_link_vel_w"], + "root_link_ang_vel_w": ["root_link_vel_w"], + "root_link_lin_vel_b": ["root_link_vel_b"], + "root_link_ang_vel_b": ["root_link_vel_b"], + "root_com_pos_w": ["root_com_pose_w"], + "root_com_quat_w": ["root_com_pose_w"], + "root_com_lin_vel_b": ["root_com_vel_b"], + "root_com_ang_vel_b": ["root_com_vel_b"], + "root_com_lin_vel_w": ["root_com_vel_w"], + "root_com_ang_vel_w": ["root_com_vel_w"], + "root_link_pos_w": ["root_link_pose_w"], + "root_link_quat_w": ["root_link_pose_w"], + "body_link_lin_vel_w": ["body_link_vel_w"], + "body_link_ang_vel_w": ["body_link_vel_w"], + "body_link_pos_w": ["body_link_pose_w"], + "body_link_quat_w": ["body_link_pose_w"], + "body_com_pos_w": ["body_com_pose_w"], + "body_com_quat_w": ["body_com_pose_w"], + "body_com_lin_vel_w": ["body_com_vel_w"], + "body_com_ang_vel_w": ["body_com_vel_w"], + "body_com_lin_acc_w": ["body_com_acc_w"], + "body_com_ang_acc_w": ["body_com_acc_w"], + "body_com_quat_b": ["body_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(articulation_data: ArticulationData) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(articulation_data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(articulation_data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockArticulationViewWarp: + """Set up the mock environment for benchmarking.""" + mock_view = MockArticulationViewWarp( + count=config.num_instances, + num_links=config.num_bodies, + num_dofs=config.num_joints, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=args.num_joints, + device=args.device, + ) + + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Create ArticulationData instance + articulation_data = ArticulationData(mock_view, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(articulation_data) + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + mock_view.set_mock_coms( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_bodies, 7, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_inertias( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_bodies, 9, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_root_transforms( + wp.from_torch(torch.randn(cfg.num_instances, 7, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_root_velocities( + wp.from_torch(torch.randn(cfg.num_instances, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_link_transforms( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_bodies, 7, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_link_velocities( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_bodies, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_link_accelerations( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_bodies, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_dof_positions( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_joints, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_dof_velocities( + wp.from_torch(torch.randn(cfg.num_instances, cfg.num_joints, device=cfg.device), dtype=wp.float32) + ) + articulation_data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="articulation_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=articulation_data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object.py b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object.py new file mode 100644 index 000000000000..cf76cf6e8489 --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object.py @@ -0,0 +1,584 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObject class (PhysX backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the RigidObject class. Each method is benchmarked under two scenarios: + +1. **Torch List**: Inputs are PyTorch tensors with list indices. +2. **Torch Tensor**: Inputs are PyTorch tensors with tensor indices. + +Usage: + python benchmark_rigid_object.py [--num_iterations N] [--warmup_steps W] [--num_instances I] + +Example: + python benchmark_rigid_object.py --num_iterations 1000 --warmup_steps 10 + python benchmark_rigid_object.py --mode torch_list # Only run list-based benchmarks + python benchmark_rigid_object.py --mode torch_tensor # Only run tensor-based benchmarks +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark RigidObject methods (PhysX backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=1, help="Number of bodies") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + + +from isaaclab_physx.assets.rigid_object.rigid_object import RigidObject +from isaaclab_physx.assets.rigid_object.rigid_object_data import RigidObjectData +from isaaclab_physx.test.benchmark import make_tensor_env_ids +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp + +from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Suppress isaaclab logging (deprecation warnings) +logging.getLogger("isaaclab_physx").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +def create_test_rigid_object( + num_instances: int = 2, + num_bodies: int = 1, + device: str = "cuda:0", +) -> tuple[RigidObject, MockRigidBodyViewWarp, MagicMock]: + """Create a test RigidObject instance with mocked dependencies.""" + rigid_object = object.__new__(RigidObject) + + rigid_object.cfg = RigidObjectCfg( + prim_path="/World/Object", + ) + + # Create PhysX mock view + mock_view = MockRigidBodyViewWarp( + count=num_instances, + device=device, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Set up attributes required before _create_buffers + object.__setattr__(rigid_object, "_root_view", mock_view) + object.__setattr__(rigid_object, "_device", device) + + # Create RigidObjectData instance (mocks already set up at module level) + data = RigidObjectData(mock_view, device) + object.__setattr__(rigid_object, "_data", data) + + # Call _create_buffers to set up all internal buffers and wrench composers + rigid_object._create_buffers() + + return rigid_object, mock_view, None + + +# ============================================================================= +# Input Generators (Torch-only for PhysX backend) +# ============================================================================= + + +# --- Root State (Deprecated) --- +def gen_root_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM State (Deprecated) --- +def gen_root_com_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link State (Deprecated) --- +def gen_root_link_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_state_to_sim.""" + return { + "root_state": torch.rand(config.num_instances, 13, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Pose (Deprecated) --- +def gen_root_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link Pose --- +def gen_root_link_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Pose --- +def gen_root_com_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_pose_to_sim.""" + return { + "root_pose": torch.rand(config.num_instances, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Velocity (Deprecated) --- +def gen_root_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root Link Velocity --- +def gen_root_link_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_link_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_link_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_link_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Root COM Velocity --- +def gen_root_com_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list env_ids for write_root_com_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_root_com_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor env_ids for write_root_com_velocity_to_sim.""" + return { + "root_velocity": torch.rand(config.num_instances, 6, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Masses --- +def gen_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_masses.""" + # RigidObject has only 1 body, don't pass body_ids to avoid advanced indexing issues + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_masses.""" + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- CoMs --- +def gen_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_coms.""" + # RigidObject has only 1 body, don't pass body_ids to avoid advanced indexing issues + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_coms.""" + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Inertias --- +def gen_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_inertias.""" + # RigidObject has only 1 body, don't pass body_ids to avoid advanced indexing issues + return { + "inertias": torch.rand( + config.num_instances, config.num_bodies, 3, 3, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + } + + +def gen_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_inertias.""" + return { + "inertias": torch.rand( + config.num_instances, config.num_bodies, 3, 3, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- External Wrench --- +def gen_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with list ids for set_external_force_and_torque.""" + # Note: body_ids is not used by set_external_force_and_torque (it uses internal _ALL_BODY_INDICES_WP) + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + """Generate Torch inputs with tensor ids for set_external_force_and_torque.""" + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Root State (Deprecated) --- + MethodBenchmarkDefinition( + name="write_root_state_to_sim", + method_name="write_root_state_to_sim", + input_generators={ + "torch_list": gen_root_state_torch_list, + "torch_tensor": gen_root_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_com_state_to_sim", + method_name="write_root_com_state_to_sim", + input_generators={ + "torch_list": gen_root_com_state_torch_list, + "torch_tensor": gen_root_com_state_torch_tensor, + }, + category="root_state", + ), + MethodBenchmarkDefinition( + name="write_root_link_state_to_sim", + method_name="write_root_link_state_to_sim", + input_generators={ + "torch_list": gen_root_link_state_torch_list, + "torch_tensor": gen_root_link_state_torch_tensor, + }, + category="root_state", + ), + # --- Root Pose / Velocity --- + MethodBenchmarkDefinition( + name="write_root_pose_to_sim", + method_name="write_root_pose_to_sim", + input_generators={ + "torch_list": gen_root_pose_torch_list, + "torch_tensor": gen_root_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_link_pose_to_sim", + method_name="write_root_link_pose_to_sim", + input_generators={ + "torch_list": gen_root_link_pose_torch_list, + "torch_tensor": gen_root_link_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_com_pose_to_sim", + method_name="write_root_com_pose_to_sim", + input_generators={ + "torch_list": gen_root_com_pose_torch_list, + "torch_tensor": gen_root_com_pose_torch_tensor, + }, + category="root_pose", + ), + MethodBenchmarkDefinition( + name="write_root_velocity_to_sim", + method_name="write_root_velocity_to_sim", + input_generators={ + "torch_list": gen_root_velocity_torch_list, + "torch_tensor": gen_root_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_link_velocity_to_sim", + method_name="write_root_link_velocity_to_sim", + input_generators={ + "torch_list": gen_root_link_velocity_torch_list, + "torch_tensor": gen_root_link_velocity_torch_tensor, + }, + category="root_velocity", + ), + MethodBenchmarkDefinition( + name="write_root_com_velocity_to_sim", + method_name="write_root_com_velocity_to_sim", + input_generators={ + "torch_list": gen_root_com_velocity_torch_list, + "torch_tensor": gen_root_com_velocity_torch_tensor, + }, + category="root_velocity", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_masses_torch_list, + "torch_tensor": gen_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_coms_torch_list, + "torch_tensor": gen_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_inertias_torch_list, + "torch_tensor": gen_inertias_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_external_force_and_torque_torch_list, + "torch_tensor": gen_external_force_and_torque_torch_tensor, + }, + category="body_props", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted — body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from existing generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + generators = {} + if "torch_tensor" in bm.input_generators: + base_gen = bm.input_generators["torch_tensor"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + if generators: + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + mode=args.mode, + ) + + # Setup + rigid_object, mock_view, _ = create_test_rigid_object( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + device=config.device, + ) + + print(f"Benchmarking RigidObject (PhysX) with {config.num_instances} instances, {config.num_bodies} bodies...") + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="rigid_object_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, rigid_object) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, rigid_object) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection.py b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection.py new file mode 100644 index 000000000000..dc69c349adf4 --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection.py @@ -0,0 +1,487 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectCollection class (PhysX backend). + +This module provides a benchmarking framework to measure the performance of setter and writer +methods in the RigidObjectCollection class. + +Usage: + python benchmark_rigid_object_collection.py [--num_iterations N] [--num_instances I] [--num_bodies B] + +Example: + python benchmark_rigid_object_collection.py --num_iterations 1000 --num_instances 4096 --num_bodies 4 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Benchmark RigidObjectCollection methods (PhysX backend).") +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=4, help="Number of bodies per instance") +parser.add_argument("--mode", type=str, default="all", help="Benchmark mode (all, torch_list, torch_tensor)") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import logging +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + + +from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection import RigidObjectCollection +from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import RigidObjectCollectionData +from isaaclab_physx.test.benchmark import make_tensor_body_ids, make_tensor_env_ids +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp + +from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg +from isaaclab.test.benchmark import MethodBenchmarkDefinition, MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + +# Suppress isaaclab logging (deprecation warnings) +logging.getLogger("isaaclab_physx").setLevel(logging.ERROR) +logging.getLogger("isaaclab").setLevel(logging.ERROR) + + +def create_test_collection( + num_instances: int = 2, + num_bodies: int = 4, + device: str = "cuda:0", +) -> tuple[RigidObjectCollection, MockRigidBodyViewWarp]: + """Create a test RigidObjectCollection instance with mocked dependencies.""" + object_names = [f"object_{i}" for i in range(num_bodies)] + + collection = object.__new__(RigidObjectCollection) + + # Create a minimal config with dummy rigid objects + from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg + + rigid_objects = {name: RigidObjectCfg(prim_path=f"/World/{name}") for name in object_names} + collection.cfg = RigidObjectCollectionCfg(rigid_objects=rigid_objects) + + # Create PhysX mock view (total count = num_instances * num_bodies) + total_count = num_instances * num_bodies + mock_view = MockRigidBodyViewWarp( + count=total_count, + device=device, + ) + mock_view.set_random_mock_data() + mock_view._noop_setters = True + + # Set up attributes required before _create_buffers + object.__setattr__(collection, "_root_view", mock_view) + object.__setattr__(collection, "_device", device) + object.__setattr__(collection, "_body_names_list", object_names) + + # Create RigidObjectCollectionData instance + data = RigidObjectCollectionData(mock_view, num_bodies, device) + data.object_names = object_names + object.__setattr__(collection, "_data", data) + + # Call _create_buffers to set up all internal buffers and wrench composers + collection._create_buffers() + + return collection, mock_view + + +# ============================================================================= +# Input Generators +# ============================================================================= + + +def gen_body_state_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_states": torch.rand( + config.num_instances, config.num_bodies, 13, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_state_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_states": torch.rand( + config.num_instances, config.num_bodies, 13, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +def gen_body_pose_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_pose_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_poses": torch.rand(config.num_instances, config.num_bodies, 7, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +def gen_body_velocity_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_body_velocity_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "body_velocities": torch.rand( + config.num_instances, config.num_bodies, 6, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- External Force and Torque --- +def gen_external_force_and_torque_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + } + + +def gen_external_force_and_torque_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "forces": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "torques": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + } + + +# --- Masses --- +def gen_masses_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_masses_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "masses": torch.rand(config.num_instances, config.num_bodies, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- CoMs --- +def gen_coms_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_coms_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "coms": torch.rand(config.num_instances, config.num_bodies, 3, device=config.device, dtype=torch.float32), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# --- Inertias --- +def gen_inertias_torch_list(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand( + config.num_instances, config.num_bodies, 3, 3, device=config.device, dtype=torch.float32 + ), + "env_ids": list(range(config.num_instances)), + "body_ids": list(range(config.num_bodies)), + } + + +def gen_inertias_torch_tensor(config: MethodBenchmarkRunnerConfig) -> dict: + return { + "inertias": torch.rand( + config.num_instances, config.num_bodies, 3, 3, device=config.device, dtype=torch.float32 + ), + "env_ids": make_tensor_env_ids(config.num_instances, config.device), + "body_ids": make_tensor_body_ids(config.num_bodies, config.device), + } + + +# ============================================================================= +# Benchmarks +# ============================================================================= + +BENCHMARKS = [ + # --- Body State --- + MethodBenchmarkDefinition( + name="write_body_state_to_sim", + method_name="write_body_state_to_sim", + input_generators={ + "torch_list": gen_body_state_torch_list, + "torch_tensor": gen_body_state_torch_tensor, + }, + category="body_state", + ), + MethodBenchmarkDefinition( + name="write_body_link_state_to_sim", + method_name="write_body_link_state_to_sim", + input_generators={ + "torch_list": gen_body_state_torch_list, + "torch_tensor": gen_body_state_torch_tensor, + }, + category="body_state", + ), + MethodBenchmarkDefinition( + name="write_body_com_state_to_sim", + method_name="write_body_com_state_to_sim", + input_generators={ + "torch_list": gen_body_state_torch_list, + "torch_tensor": gen_body_state_torch_tensor, + }, + category="body_state", + ), + # --- Body Pose --- + MethodBenchmarkDefinition( + name="write_body_pose_to_sim", + method_name="write_body_pose_to_sim", + input_generators={ + "torch_list": gen_body_pose_torch_list, + "torch_tensor": gen_body_pose_torch_tensor, + }, + category="body_pose", + ), + MethodBenchmarkDefinition( + name="write_body_link_pose_to_sim", + method_name="write_body_link_pose_to_sim", + input_generators={ + "torch_list": gen_body_pose_torch_list, + "torch_tensor": gen_body_pose_torch_tensor, + }, + category="body_pose", + ), + MethodBenchmarkDefinition( + name="write_body_com_pose_to_sim", + method_name="write_body_com_pose_to_sim", + input_generators={ + "torch_list": gen_body_pose_torch_list, + "torch_tensor": gen_body_pose_torch_tensor, + }, + category="body_pose", + ), + # --- Body Velocity --- + MethodBenchmarkDefinition( + name="write_body_velocity_to_sim", + method_name="write_body_velocity_to_sim", + input_generators={ + "torch_list": gen_body_velocity_torch_list, + "torch_tensor": gen_body_velocity_torch_tensor, + }, + category="body_velocity", + ), + MethodBenchmarkDefinition( + name="write_body_link_velocity_to_sim", + method_name="write_body_link_velocity_to_sim", + input_generators={ + "torch_list": gen_body_velocity_torch_list, + "torch_tensor": gen_body_velocity_torch_tensor, + }, + category="body_velocity", + ), + MethodBenchmarkDefinition( + name="write_body_com_velocity_to_sim", + method_name="write_body_com_velocity_to_sim", + input_generators={ + "torch_list": gen_body_velocity_torch_list, + "torch_tensor": gen_body_velocity_torch_tensor, + }, + category="body_velocity", + ), + # --- External Force and Torque --- + MethodBenchmarkDefinition( + name="set_external_force_and_torque", + method_name="set_external_force_and_torque", + input_generators={ + "torch_list": gen_external_force_and_torque_torch_list, + "torch_tensor": gen_external_force_and_torque_torch_tensor, + }, + category="external_wrench", + ), + # --- Body Properties --- + MethodBenchmarkDefinition( + name="set_masses", + method_name="set_masses", + input_generators={ + "torch_list": gen_masses_torch_list, + "torch_tensor": gen_masses_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_coms", + method_name="set_coms", + input_generators={ + "torch_list": gen_coms_torch_list, + "torch_tensor": gen_coms_torch_tensor, + }, + category="body_props", + ), + MethodBenchmarkDefinition( + name="set_inertias", + method_name="set_inertias", + input_generators={ + "torch_list": gen_inertias_torch_list, + "torch_tensor": gen_inertias_torch_tensor, + }, + category="body_props", + ), +] + + +# ============================================================================= +# Fill-Ratio Benchmarks (5%, 95%, 100% of env_ids filled) +# ============================================================================= + +FILL_RATIOS = {"5pct": 0.05, "95pct": 0.95, "100pct": 1.0} + + +def _make_fill_ratio_generator(base_gen_fn, fill_ratio): + """Create a generator that subsets env_ids to a given fill ratio. + + Only env_ids are subsetted — body_ids remain full-range. + Data tensors keyed on env count are sliced to match. + """ + + def generator(config): + n = max(1, int(config.num_instances * fill_ratio)) + base_inputs = base_gen_fn(config) + inputs = {} + for key, val in base_inputs.items(): + if key == "env_ids": + inputs[key] = ( + torch.randperm(config.num_instances, device=config.device)[:n].sort().values.to(torch.int32) + ) + elif isinstance(val, torch.Tensor) and val.dim() >= 1 and val.shape[0] == config.num_instances: + inputs[key] = val[:n] + else: + inputs[key] = val + return inputs + + return generator + + +def _build_fill_benchmarks(): + """Auto-generate fill-ratio benchmark definitions from existing generators.""" + fill_benchmarks = [] + for bm in BENCHMARKS: + generators = {} + if "torch_tensor" in bm.input_generators: + base_gen = bm.input_generators["torch_tensor"] + for suffix, ratio in FILL_RATIOS.items(): + generators[f"tensor_{suffix}"] = _make_fill_ratio_generator(base_gen, ratio) + if generators: + fill_benchmarks.append( + MethodBenchmarkDefinition( + name=bm.name, + method_name=bm.method_name, + input_generators=generators, + category=f"{bm.category}_fill", + ) + ) + return fill_benchmarks + + +FILL_BENCHMARKS = _build_fill_benchmarks() + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + mode=args.mode, + ) + + collection, mock_view = create_test_collection( + num_instances=config.num_instances, + num_bodies=config.num_bodies, + device=config.device, + ) + + print( + f"Benchmarking RigidObjectCollection (PhysX) with {config.num_instances} instances, " + f"{config.num_bodies} bodies..." + ) + + # Create runner and run benchmarks + runner = MethodBenchmarkRunner( + benchmark_name="rigid_object_collection_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + runner.run_benchmarks(BENCHMARKS, collection) + + print("\n" + "=" * 80) + print("Fill-Ratio Benchmarks (env_ids at 5%, 95%, 100% fill)") + print("=" * 80) + + runner.run_benchmarks(FILL_BENCHMARKS, collection) + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection_data.py b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection_data.py new file mode 100644 index 000000000000..0b0327e9bf3c --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_collection_data.py @@ -0,0 +1,259 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectCollectionData class. + +This module provides a benchmarking framework to measure the performance of all functions +in the RigidObjectCollectionData class. Each function is run multiple times with randomized mock data, +and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_rigid_object_collection_data.py [--num_iterations N] [--warmup_steps W] + [--num_instances I] [--num_bodies B] + +Example: + python benchmark_rigid_object_collection_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for RigidObjectCollectionData class.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--num_bodies", type=int, default=4, help="Number of bodies per instance") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +import warp as wp +from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import RigidObjectCollectionData +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties (for backward compatibility) - skip these +DEPRECATED_PROPERTIES = { + "default_root_state", + "root_pose_w", + "root_pos_w", + "root_quat_w", + "root_vel_w", + "root_lin_vel_w", + "root_ang_vel_w", + "root_lin_vel_b", + "root_ang_vel_b", + "body_pose_w", + "body_pos_w", + "body_quat_w", + "body_vel_w", + "body_lin_vel_w", + "body_ang_vel_w", + "body_acc_w", + "body_lin_acc_w", + "body_ang_acc_w", + "com_pos_b", + "com_quat_b", + # Also skip the combined state properties marked as deprecated + "root_state_w", + "object_link_state_w", + "object_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = {} + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_inertia", + "default_mass", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "object_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ENV_MASK", + "ALL_OBJECT_MASK", + "OBJECT_MASK", + "num_bodies", + "num_instances", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "object_link_lin_vel_w": ["object_link_vel_w"], + "object_link_ang_vel_w": ["object_link_vel_w"], + "object_link_lin_vel_b": ["object_link_vel_b"], + "object_link_ang_vel_b": ["object_link_vel_b"], + "object_com_pos_w": ["object_com_pose_w"], + "object_com_quat_w": ["object_com_pose_w"], + "object_com_lin_vel_b": ["object_com_vel_b"], + "object_com_ang_vel_b": ["object_com_vel_b"], + "object_com_lin_vel_w": ["object_com_vel_w"], + "object_com_ang_vel_w": ["object_com_vel_w"], + "object_link_pos_w": ["object_link_pose_w"], + "object_link_quat_w": ["object_link_pose_w"], + "object_com_lin_acc_w": ["object_com_acc_w"], + "object_com_ang_acc_w": ["object_com_acc_w"], + "object_com_quat_b": ["object_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(data: RigidObjectCollectionData) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockRigidBodyViewWarp: + """Set up the mock environment for benchmarking.""" + # For collection, total count = num_instances * num_bodies + total_count = config.num_instances * config.num_bodies + mock_view = MockRigidBodyViewWarp( + count=total_count, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=args.num_bodies, + num_joints=0, + device=args.device, + ) + + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Create RigidObjectCollectionData instance + data = RigidObjectCollectionData(mock_view, config.num_bodies, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(data) + + total_count = config.num_instances * config.num_bodies + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + mock_view.set_mock_transforms(wp.from_torch(torch.randn(total_count, 7, device=cfg.device), dtype=wp.float32)) + mock_view.set_mock_velocities(wp.from_torch(torch.randn(total_count, 6, device=cfg.device), dtype=wp.float32)) + mock_view.set_mock_accelerations( + wp.from_torch(torch.randn(total_count, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_coms(wp.from_torch(torch.randn(total_count, 7, device=cfg.device), dtype=wp.float32)) + data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="rigid_object_collection_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_data.py b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_data.py new file mode 100644 index 000000000000..9c0d65da2da3 --- /dev/null +++ b/source/isaaclab_physx/benchmark/assets/benchmark_rigid_object_data.py @@ -0,0 +1,264 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Micro-benchmarking framework for RigidObjectData class. + +This module provides a benchmarking framework to measure the performance of all functions +in the RigidObjectData class. Each function is run multiple times with randomized mock data, +and timing statistics (mean and standard deviation) are reported. + +Usage: + python benchmark_rigid_object_data.py [--num_iterations N] [--warmup_steps W] [--num_instances I] + +Example: + python benchmark_rigid_object_data.py --num_iterations 10000 --warmup_steps 10 +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="Micro-benchmarking framework for RigidObjectData class.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) +parser.add_argument("--num_iterations", type=int, default=1000, help="Number of iterations") +parser.add_argument("--warmup_steps", type=int, default=10, help="Number of warmup steps") +parser.add_argument("--num_instances", type=int, default=4096, help="Number of instances") +parser.add_argument("--output_dir", type=str, default=".", help="Output directory for results") +parser.add_argument("--backend", type=str, default="json", choices=["json", "osmo", "omniperf"], help="Metrics backend") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=True, args=args) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import warnings +from unittest.mock import MagicMock + +import torch + +# Mock SimulationManager.get_physics_sim_view() to return a mock object with gravity +# This is needed because the Data classes call SimulationManager.get_physics_sim_view().get_gravity() +# but there's no actual physics scene when running benchmarks +_mock_physics_sim_view = MagicMock() +_mock_physics_sim_view.get_gravity.return_value = (0.0, 0.0, -9.81) + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +SimulationManager.get_physics_sim_view = MagicMock(return_value=_mock_physics_sim_view) + +import warp as wp +from isaaclab_physx.assets.rigid_object.rigid_object_data import RigidObjectData +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp + +from isaaclab.test.benchmark import MethodBenchmarkRunner, MethodBenchmarkRunnerConfig + +# Suppress deprecation warnings during benchmarking +warnings.filterwarnings("ignore", category=DeprecationWarning) +warnings.filterwarnings("ignore", category=UserWarning) + + +# ============================================================================= +# Skip Lists +# ============================================================================= + +# List of deprecated properties (for backward compatibility) - skip these +DEPRECATED_PROPERTIES = { + "default_root_state", + "root_pose_w", + "root_pos_w", + "root_quat_w", + "root_vel_w", + "root_lin_vel_w", + "root_ang_vel_w", + "root_lin_vel_b", + "root_ang_vel_b", + "body_pose_w", + "body_pos_w", + "body_quat_w", + "body_vel_w", + "body_lin_vel_w", + "body_ang_vel_w", + "body_acc_w", + "body_lin_acc_w", + "body_ang_acc_w", + "com_pos_b", + "com_quat_b", + # Also skip the combined state properties marked as deprecated + "root_state_w", + "root_link_state_w", + "root_com_state_w", + "body_state_w", + "body_link_state_w", + "body_com_state_w", +} + +# List of properties that raise NotImplementedError - skip these +NOT_IMPLEMENTED_PROPERTIES = {} + +# Removed default_* properties that raise RuntimeError +REMOVED_PROPERTIES = { + "default_inertia", + "default_mass", +} + +# Private/internal properties and methods to skip +INTERNAL_PROPERTIES = { + "_create_simulation_bindings", + "_create_buffers", + "update", + "is_primed", + "device", + "body_names", + "GRAVITY_VEC_W", + "GRAVITY_VEC_W_TORCH", + "FORWARD_VEC_B", + "FORWARD_VEC_B_TORCH", + "ALL_ENV_MASK", + "ENV_MASK", +} + +# Dependency mapping for properties +PROPERTY_DEPENDENCIES = { + "root_link_lin_vel_w": ["root_link_vel_w"], + "root_link_ang_vel_w": ["root_link_vel_w"], + "root_link_lin_vel_b": ["root_link_vel_b"], + "root_link_ang_vel_b": ["root_link_vel_b"], + "root_com_pos_w": ["root_com_pose_w"], + "root_com_quat_w": ["root_com_pose_w"], + "root_com_lin_vel_b": ["root_com_vel_b"], + "root_com_ang_vel_b": ["root_com_vel_b"], + "root_com_lin_vel_w": ["root_com_vel_w"], + "root_com_ang_vel_w": ["root_com_vel_w"], + "root_link_pos_w": ["root_link_pose_w"], + "root_link_quat_w": ["root_link_pose_w"], + "body_link_lin_vel_w": ["body_link_vel_w"], + "body_link_ang_vel_w": ["body_link_vel_w"], + "body_link_pos_w": ["body_link_pose_w"], + "body_link_quat_w": ["body_link_pose_w"], + "body_com_pos_w": ["body_com_pose_w"], + "body_com_quat_w": ["body_com_pose_w"], + "body_com_lin_vel_w": ["body_com_vel_w"], + "body_com_ang_vel_w": ["body_com_vel_w"], + "body_com_lin_acc_w": ["body_com_acc_w"], + "body_com_ang_acc_w": ["body_com_acc_w"], + "body_com_quat_b": ["body_com_pose_b"], +} + + +# ============================================================================= +# Benchmark Functions +# ============================================================================= + + +def get_benchmarkable_properties(rigid_object_data: RigidObjectData) -> list[str]: + """Get list of properties that can be benchmarked.""" + all_properties = [] + + for name in dir(rigid_object_data): + if name.startswith("_"): + continue + if name in DEPRECATED_PROPERTIES: + continue + if name in NOT_IMPLEMENTED_PROPERTIES: + continue + if name in REMOVED_PROPERTIES: + continue + if name in INTERNAL_PROPERTIES: + continue + + try: + attr = getattr(type(rigid_object_data), name, None) + if isinstance(attr, property): + all_properties.append(name) + except Exception: + pass + + return sorted(all_properties) + + +def setup_mock_environment(config: MethodBenchmarkRunnerConfig) -> MockRigidBodyViewWarp: + """Set up the mock environment for benchmarking.""" + mock_view = MockRigidBodyViewWarp( + count=config.num_instances, + device=config.device, + ) + return mock_view + + +def main(): + """Main entry point for the benchmarking script.""" + config = MethodBenchmarkRunnerConfig( + num_iterations=args.num_iterations, + warmup_steps=args.warmup_steps, + num_instances=args.num_instances, + num_bodies=1, + num_joints=0, + device=args.device, + ) + + # Setup mock environment + mock_view = setup_mock_environment(config) + mock_view.set_random_mock_data() + + # Create RigidObjectData instance + rigid_object_data = RigidObjectData(mock_view, config.device) + + # Get list of properties to benchmark + properties = get_benchmarkable_properties(rigid_object_data) + + # Generator that updates mock data and invalidates timestamp + def gen_mock_data(cfg: MethodBenchmarkRunnerConfig) -> dict: + mock_view.set_mock_transforms( + wp.from_torch(torch.randn(cfg.num_instances, 7, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_velocities( + wp.from_torch(torch.randn(cfg.num_instances, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_accelerations( + wp.from_torch(torch.randn(cfg.num_instances, 6, device=cfg.device), dtype=wp.float32) + ) + mock_view.set_mock_coms(wp.from_torch(torch.randn(cfg.num_instances, 7, device=cfg.device), dtype=wp.float32)) + rigid_object_data._sim_timestamp += 1.0 + return {} + + # Create runner + runner = MethodBenchmarkRunner( + benchmark_name="rigid_object_data_benchmark", + config=config, + backend_type=args.backend, + output_path=args.output_dir, + use_recorders=True, + ) + + # Run property benchmarks + runner.run_property_benchmarks( + target_data=rigid_object_data, + properties=properties, + gen_mock_data=gen_mock_data, + dependencies=PROPERTY_DEPENDENCIES, + category="property", + ) + + runner.finalize() + + # Close the simulation app + simulation_app.close() + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab_physx/changelog.d/.gitkeep b/source/isaaclab_physx/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_physx/changelog.d/clone-plan-visualizer-cleanup.skip b/source/isaaclab_physx/changelog.d/clone-plan-visualizer-cleanup.skip new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_physx/changelog.d/mtrepte-expand_viz_markers.skip b/source/isaaclab_physx/changelog.d/mtrepte-expand_viz_markers.skip new file mode 100644 index 000000000000..4f6915f6b47b --- /dev/null +++ b/source/isaaclab_physx/changelog.d/mtrepte-expand_viz_markers.skip @@ -0,0 +1,2 @@ +Marker visualization changes are covered by the isaaclab fragment. +Marker visualization changes are covered by the isaaclab fragment. diff --git a/source/isaaclab_physx/changelog.d/pr-5458-merge-develop.rst b/source/isaaclab_physx/changelog.d/pr-5458-merge-develop.rst new file mode 100644 index 000000000000..1fe6a600bb99 --- /dev/null +++ b/source/isaaclab_physx/changelog.d/pr-5458-merge-develop.rst @@ -0,0 +1,16 @@ +Added +^^^^^ + +* Added :class:`~isaaclab_physx.sensors.JointWrenchSensor` for reading PhysX + incoming joint reaction wrenches as split force [N] and torque [N·m] buffers. + The sensor accepts asset prim paths whose articulation root is nested below + the configured prim and converts PhysX's native body-frame wrench to the + shared child-side joint-frame convention. + +Removed +^^^^^^^ + +* Removed ``ArticulationData.body_incoming_joint_wrench_b``. Add + :class:`~isaaclab.sensors.JointWrenchSensorCfg` to the scene and read + :attr:`~isaaclab.sensors.JointWrenchSensorData.force` and + :attr:`~isaaclab.sensors.JointWrenchSensorData.torque` instead. diff --git a/source/isaaclab_physx/changelog.d/rschmitt_decouple_renderer_camera.rst b/source/isaaclab_physx/changelog.d/rschmitt_decouple_renderer_camera.rst new file mode 100644 index 000000000000..eada0bbb809c --- /dev/null +++ b/source/isaaclab_physx/changelog.d/rschmitt_decouple_renderer_camera.rst @@ -0,0 +1,4 @@ +Changed +^^^^^^^^ + +* Modified the isaac rtx renderer to use the new patterns from renderer/camera decoupling. diff --git a/source/isaaclab_physx/changelog.d/test-articulation-timeout.rst b/source/isaaclab_physx/changelog.d/test-articulation-timeout.rst new file mode 100644 index 000000000000..e0c1b96870c2 --- /dev/null +++ b/source/isaaclab_physx/changelog.d/test-articulation-timeout.rst @@ -0,0 +1,6 @@ +Fixed +^^^^^ + +* Fixed :class:`~isaaclab_physx.assets.SurfaceGripper` initialization on + non-CPU simulation backends to raise before loading the surface gripper + extension, avoiding hangs during startup. diff --git a/source/isaaclab_physx/config/extension.toml b/source/isaaclab_physx/config/extension.toml new file mode 100644 index 000000000000..5c63b0e6322f --- /dev/null +++ b/source/isaaclab_physx/config/extension.toml @@ -0,0 +1,21 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.5.29" + +# Description +title = "PhysX simulation interfaces for IsaacLab core package" +description="Extension providing IsaacLab with PhysX specific abstractions." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "physx"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_physx" diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst new file mode 100644 index 000000000000..14425eb74869 --- /dev/null +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -0,0 +1,789 @@ +Changelog +--------- + +0.5.29 (2026-04-30) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Added fused :meth:`~isaaclab_physx.assets.Articulation.write_joint_state_to_sim_index` + that writes joint position and velocity in a single kernel launch instead of two. +* Cached ``.view(wp.float32)`` results in root pose/velocity writers and wrench + composer views in ``write_data_to_sim`` to avoid per-call wrapper allocations. +* Pre-allocated pinned CPU buffers for all joint property and body property writers, + replacing per-call ``wp.clone(device="cpu")`` allocations with ``wp.copy`` into + reusable pinned memory. + + +0.5.28 (2026-04-29) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed camera observation hang when a visualizer (e.g. KitVisualizer for XR + teleop) is active and ``--enable_cameras`` is set. + :func:`~isaaclab_physx.renderers.isaac_rtx_renderer_utils.ensure_isaac_rtx_render_update` + now performs the initial ``app.update()`` on the very first call for a new + :class:`~isaaclab.sim.SimulationContext`, even when a visualizer reports that + it pumps the Kit app loop, because the visualizer has not had a chance to pump + yet at that point. + + +0.5.27 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed ``Simulation view object is invalidated and cannot be used again to call + getDofVelocities`` raised on the first ``scene.update()`` after ``sim.reset()`` + with recent Isaac Sim ``develop`` builds. Isaac Sim's + ``isaacsim.core.simulation_manager.SimulationManager`` recently became reactive + to timeline ``STOP`` events (after its ``_on_stop`` was decorated with + ``@staticmethod`` upstream), and its ``invalidate_physics()`` was clobbering + the shared ``omni.physics.tensors`` simulation view that + :class:`~isaaclab_physx.physics.PhysxManager` and PhysX articulation views + rely on. The ``isaaclab_physx`` package init now disables the original Isaac + Sim ``SimulationManager``'s default timeline/stage callbacks via + ``enable_all_default_callbacks(False)`` before swapping the module attribute, + so :class:`PhysxManager` is the single owner of the simulation lifecycle. + + +0.5.26 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed ``import isaaclab_physx`` eagerly importing ``isaacsim``, ``omni``, + and ``carb`` backend modules when used for pure-data config loading before + ``SimulationApp`` has launched. The ``SimulationManager`` patch now checks + ``sys.modules`` lazily instead of force-importing the target module, allowing + env-cfg classes that reference :class:`~isaaclab_physx.physics.PhysxCfg` to + be constructed without a running Kit instance (regression caught by + ``test_env_cfg_no_forbidden_imports``). + +Changed +^^^^^^^ + +* Migrated :func:`~isaaclab_physx.renderers.kit_viewport_utils.set_kit_renderer_camera_view` + off the deprecated ``isaacsim.core.utils.viewports.set_camera_view`` to + ``isaacsim.core.rendering_manager.ViewportManager.set_camera_view``, matching the + pattern used by the Kit perspective video helper. + + +0.5.25 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated imports of the PhysX tensors API from ``omni.physics.tensors.impl.api`` to + ``omni.physics.tensors.api`` to track the upstream Isaac Sim module relocation + (the ``impl`` submodule was removed). +* Migrated the PhysX scene data provider, PhysX asset micro-benchmarks, and cross-backend asset + interface tests off ``isaacsim.core.simulation_manager.SimulationManager`` to + :class:`~isaaclab_physx.physics.PhysxManager` (imported as ``SimulationManager`` to mirror the + Newton backend's ``NewtonManager as SimulationManager`` convention). +* Updated optional-extension enablement and Kit perspective capture helpers to use non-deprecated + Isaac Sim module paths (``isaacsim.core.experimental.utils.app`` and ``isaacsim.core.rendering_manager``). + + +0.5.24 (2026-04-24) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated :class:`~isaaclab_physx.sim.views.FabricFrameView` to match the new + :class:`~isaaclab.sim.views.BaseFrameView` ProxyArray return contract. See + the ``isaaclab`` 4.6.15 changelog for migration guidance. + + +0.5.23 (2026-04-24) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Properties on the following data classes now return + :class:`~isaaclab.utils.warp.ProxyArray` instead of raw ``wp.array``: + :class:`~isaaclab_physx.assets.articulation.ArticulationData`, + :class:`~isaaclab_physx.assets.rigid_object.RigidObjectData`, + :class:`~isaaclab_physx.assets.rigid_object_collection.RigidObjectCollectionData`, + :class:`~isaaclab_physx.assets.deformable_object.DeformableObjectData`, + :class:`~isaaclab_physx.sensors.contact_sensor.ContactSensorData`, + :class:`~isaaclab_physx.sensors.frame_transformer.FrameTransformerData`, + :class:`~isaaclab_physx.sensors.imu.ImuData`, and + :class:`~isaaclab_physx.sensors.pva.PvaData`. + Use ``.torch`` for a cached zero-copy ``torch.Tensor`` view, or ``.warp`` for + the underlying ``wp.array``. Implicit torch operations (arithmetic, + ``torch.*`` functions) work during the deprecation period but emit a warning. + + +0.5.22 (2026-04-23) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed ``RuntimeError: NewtonWarpRenderer requires a Newton model but the scene data provider + returned None`` when a Direct env (e.g. ``ShadowHandVisionEnv``, ``CartpoleCameraEnv``) + uses ``physx`` physics with the ``newton_warp`` renderer. The + :class:`~isaaclab_physx.scene_data_providers.PhysxSceneDataProvider` now falls back to a + USD-traversal Newton build when the cloner-time prebuilt artifact is absent, and stashes + the freshly built artifact on the simulation context so subsequent providers reuse it. + + +0.5.21 (2026-04-22) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_physx.sim.views.XformPrimView` providing the PhysX/Fabric + backend implementation for xform prim views. + +Changed +^^^^^^^ + +* Renamed :class:`~isaaclab_physx.sim.views.FabricXformPrimView` to + :class:`~isaaclab_physx.sim.views.FabricFrameView`. Old name is kept as a deprecated alias. + + +0.5.20 (2026-04-21) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated ``write_data_to_sim`` in :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, and :class:`~isaaclab_physx.assets.RigidObjectCollection` + to use the dual-buffer :class:`~isaaclab.utils.wrench_composer.WrenchComposer`. Composed wrenches are + applied to PhysX with ``is_global=False`` after body-frame composition. + + +0.5.19 (2026-04-20) +~~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed Newton ``shape_color`` not reflecting the post-clone USD stage when the + PhysX scene data provider builds or reloads the Newton model by calling + :func:`~isaaclab.sim.utils.newton_model_utils.replace_newton_shape_colors` on + the artifact, per-environment, and filtered Newton models in + :class:`~isaaclab_physx.scene_data_providers.PhysxSceneDataProvider`. + +0.5.18 (2026-04-16) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed flaky first-frame textured rendering by replacing the event-based RTX + streaming subscription with a synchronous + ``UsdContext.get_stage_streaming_status()`` query in + :func:`~isaaclab_physx.renderers.isaac_rtx_renderer_utils.ensure_isaac_rtx_render_update`. + + +0.5.17 (2026-04-14) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_physx.sim.schemas.DeformableBodyPropertiesCfg` with + namespace-aware property routing. Properties are organized into + ``omniphysics:``, ``physxDeformableBody:``, and ``physxCollision:`` prefixed + parent classes, allowing correct USD attribute mapping for the updated PhysX + deformable body schema. +* Added :func:`~isaaclab_physx.sim.schemas.define_deformable_body_properties` and + :func:`~isaaclab_physx.sim.schemas.modify_deformable_body_properties` to + ``isaaclab_physx.sim.schemas``, supporting both surface and volume deformable + types via the ``deformable_type`` parameter. +* Added :class:`~isaaclab_physx.sim.spawners.materials.DeformableBodyMaterialCfg` + and :class:`~isaaclab_physx.sim.spawners.materials.SurfaceDeformableBodyMaterialCfg` + with namespace-aware property routing for ``omniphysics:`` and + ``physxDeformableBody:`` material attributes. +* Added :class:`~isaaclab_physx.sim.spawners.spawner_cfg.DeformableObjectSpawnerCfg` + for configuring deformable body properties and materials when spawning. +* Added surface deformable body support to + :class:`~isaaclab_physx.assets.DeformableObject`. The asset now detects whether + the deformable is a surface or volume type based on the applied material API + and creates the appropriate PhysX tensor view + (``create_surface_deformable_body_view`` vs ``create_volume_deformable_body_view``). + +Changed +^^^^^^^ + +* Changed :attr:`~isaaclab_physx.assets.DeformableObject.root_view` return type + from ``physx.SoftBodyView`` to ``physx.DeformableBodyView`` to align with the + updated PhysX API. +* Changed :attr:`~isaaclab_physx.assets.DeformableObject.material_physx_view` + return type from ``physx.SoftBodyMaterialView`` to + ``physx.DeformableMaterialView``. +* Changed deformable body root prim discovery to check for + ``OmniPhysicsDeformableBodyAPI`` instead of ``PhysxDeformableBodyAPI``. +* Changed material prim discovery to check for ``OmniPhysicsDeformableMaterialAPI`` + instead of ``PhysxDeformableBodyMaterialAPI``. +* Changed PhysX view API calls to use updated method names: + ``get_simulation_nodal_positions``, ``set_simulation_nodal_positions``, + ``set_simulation_nodal_velocities``, ``get_simulation_nodal_kinematic_targets``, + ``set_simulation_nodal_kinematic_targets``. +* Changed property accessors to use updated PhysX view attributes: + ``max_simulation_elements_per_body``, ``max_collision_elements_per_body``, + ``max_simulation_nodes_per_body``, ``max_collision_nodes_per_body``. +* Changed kinematic target operations to raise ``ValueError`` when called on + surface deformable bodies, which do not support kinematic targets. + + +0.5.16 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Renamed the PhysX IMU sensor implementation to + :class:`~isaaclab_physx.sensors.Pva`. The ``isaaclab_physx.sensors.imu`` + module now contains a new lightweight IMU sensor that only provides angular + velocity and linear acceleration. +* Changed :class:`~isaaclab_physx.sensors.Pva` to no longer accept a + ``gravity_bias`` parameter. Linear acceleration is now pure finite + differencing of velocity without any gravity contribution. +* Changed :class:`~isaaclab_physx.sensors.Imu` to unconditionally include + gravity in accelerometer readings. The gravity vector is queried from the + PhysX simulation at initialization instead of being user-configured. + +Added +^^^^^ + +* Added :class:`~isaaclab_physx.sensors.Imu` PhysX backend for the new + lightweight IMU sensor with simplified Warp kernels that only compute + angular velocity and linear acceleration. + +Fixed +^^^^^ + +* Fixed unused ``body_pos`` variable in the IMU Warp kernel. +* Fixed ``phsyx`` typo in :class:`~isaaclab.sensors.pva.BasePva` docstring. + + +0.5.15 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :meth:`~isaaclab_physx.assets.RigidObject.set_material_properties_index`, + :meth:`~isaaclab_physx.assets.RigidObject.set_material_properties_mask`, + :meth:`~isaaclab_physx.assets.Articulation.set_material_properties_index`, + :meth:`~isaaclab_physx.assets.Articulation.set_material_properties_mask`, + :meth:`~isaaclab_physx.assets.RigidObjectCollection.set_material_properties_index`, and + :meth:`~isaaclab_physx.assets.RigidObjectCollection.set_material_properties_mask` + methods for setting collision shape material properties (friction, restitution). + These methods follow the standard ``_index``/``_mask`` pattern, providing a unified + API across PhysX and Newton backends. + + +0.5.14 (2026-04-06) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed the simulation training loop not pausing when the Kit GUI timeline is + paused. :meth:`~isaaclab_physx.physics.PhysxManager.wait_for_playing` now + blocks and keeps the GUI responsive until the timeline is resumed or stopped. +* Fixed articulation visualization freezing after pausing and unpausing the + simulation through the headed GUI in Isaac Sim 5.1+. Articulation meshes now + remain visually updated after resuming. + + +0.5.13 (2026-03-25) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed device mismatch in :meth:`~isaaclab_physx.assets.RigidObjectCollection.reshape_view_to_data_2d` + and :meth:`~isaaclab_physx.assets.RigidObjectCollection.reshape_view_to_data_3d` that caused + ``wp.clone`` to fail with CUDA errors when PhysX returns data on CPU (e.g., masses, COMs, inertias) + while the simulation runs on GPU. The strided view now correctly uses ``data.device`` instead of + ``self.device``, matching the fix already present in :class:`~isaaclab_physx.assets.RigidObjectCollectionData`. + + +0.5.12 (2026-03-16) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed ``test_body_incoming_joint_wrench_b_single_joint`` computing the expected + wrench in the parent body's frame instead of the child body's frame. The expected + wrench is now expressed in + :attr:`~isaaclab_physx.assets.ArticulationData.body_incoming_joint_wrench_b`'s + actual convention (child body frame) and body indices are resolved by name to be + robust across backends. Also corrected the docstring for + :attr:`~isaaclab_physx.assets.ArticulationData.body_incoming_joint_wrench_b` to + accurately describe the frame convention. + + +0.5.11 (2026-03-13) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed articulation root prim discovery failing when the + ``physxArticulation:articulationEnabled`` attribute is not authored on the + USD prim. The predicate now treats an unset attribute as enabled (the PhysX + default) instead of rejecting the prim. + + +0.5.10 (2026-03-13) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Removed verbose ``logger.info`` calls from + :class:`~isaaclab_physx.assets.RigidObject` and + :class:`~isaaclab_physx.assets.Articulation` initialization that logged body + names, joint names, and instance counts. Articulation joint parameter tables and + actuator group summaries are retained. + + +0.5.9 (2026-03-11) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed device mismatch in + :class:`~isaaclab_physx.assets.RigidObjectCollectionData` where + ``_reshape_view_to_data_2d`` and ``_reshape_view_to_data_3d`` created + strided pointer views with the target GPU device instead of the source + array's device. PhysX returns masses, COMs, and inertias on CPU, so the + strided view incorrectly claimed a CPU pointer lived on GPU. This caused + ``CUDA error 1: invalid argument`` during ``wp.clone`` on GPUs without + HMM (Heterogeneous Memory Management). + + +0.5.8 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Removed redundant ``ArticulationView`` from + :class:`~isaaclab_physx.scene_data_providers.PhysxSceneDataProvider`. + Creating a single ``ArticulationView`` for heterogeneous articulation types + (e.g. Robot + Cabinet) triggered PhysX "Incorrect DofIdx" errors. The + ``RigidBodyView`` already covers all body transforms including articulation + links, so the articulation view was unnecessary. Articulation paths from + prebuilt artifacts are now merged into rigid body paths for the + ``RigidBodyView``. +* Fixed pre-existing test fixture in + ``test_physx_scene_data_provider_visualizer_contract.py`` where + ``_make_provider()`` was missing the + ``_force_usd_fallback_for_newton_model_build`` attribute and the force + fallback test used an incorrect attribute name. + + +0.5.7 (2026-03-06) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Made several PhysX articulation tests more backend-agnostic by relaxing + PhysX-specific assumptions in ``test_articulation.py``. + + +0.5.6 (2026-03-03) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fix asset writer methods in :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, and + :class:`~isaaclab_physx.assets.RigidObjectCollection` to use public data + properties instead of internal timestamped buffer ``.data`` fields, removing + redundant manual timestamp updates. + + +0.5.5 (2026-03-02) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Replaced all ``wp.nonzero()`` calls in + :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, and + :class:`~isaaclab_physx.assets.RigidObjectCollection` mask methods with + ``torch.nonzero()`` via new ``_resolve_env_mask``, ``_resolve_body_mask``, + ``_resolve_joint_mask``, ``_resolve_fixed_tendon_mask``, and + ``_resolve_spatial_tendon_mask`` helpers, fixing mask-based writers that previously + raised errors at runtime. + +* Fixed device mismatch in ``RigidObjectCollection._env_body_ids_to_view_ids`` where GPU + index arrays were passed to a CPU kernel launch. Inputs are now cloned to the target + device before use. + +* Added ``_get_cpu_env_ids`` helper to :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, and + :class:`~isaaclab_physx.assets.RigidObjectCollection` to safely clone environment + indices to CPU for PhysX model-property setters. + +* Fixed ``MockArticulationViewWarp`` to support the mock test infrastructure. + + +0.5.4 (2026-03-01) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* fixed :func:`~isaaclab_physx.cloner.physx_replicate` to not exclude self replication by default. + + +0.5.3 (2026-02-27) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, + :class:`~isaaclab_physx.assets.RigidObjectCollection`, + :class:`~isaaclab_physx.assets.DeformableObject`, and + :class:`~isaaclab_physx.assets.SurfaceGripper` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype`. Validates input dimensions + and types before kernel launch to catch mismatches early. + + +0.5.2 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, + :class:`~isaaclab_physx.assets.RigidObjectCollection`, + :class:`~isaaclab_physx.assets.DeformableObject`, and + :class:`~isaaclab_physx.assets.SurfaceGripper` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype`. Validates input dimensions + and types before kernel launch to catch mismatches early. + + +0.5.1 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated ContactSensor ``body_names`` property to use ``num_sensors`` instead of + deprecated ``num_bodies``. + + +0.5.0 (2026-02-24) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Aligned asset API with the multi-backend architecture. Base class abstract methods + in :class:`~isaaclab.assets.BaseArticulation` and :class:`~isaaclab.assets.BaseRigidObject` + have been refined so that PhysX and Newton backends share a consistent interface. + +* Improved docstrings across all asset classes with precise shape and dtype annotations + for warp array properties and write methods. + +* Migrated tests to use the new ``_index`` / ``_mask`` write method APIs, removing + usage of deprecated write methods. + + +0.4.1 (2026-02-18) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed a bug in :meth:~isaaclab_physx.assets.Articulation.process_actuators_cfg where explicit actuator joints could receive non-zero PhysX stiffness/damping, causing double PD control. + + +0.4.0 (2026-02-13) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Migrated all PhysX asset classes to warp backend: + :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, + :class:`~isaaclab_physx.assets.RigidObjectCollection`, + :class:`~isaaclab_physx.assets.DeformableObject`, and + :class:`~isaaclab_physx.assets.SurfaceGripper`. + Internal state buffers now use ``wp.array`` with structured warp types + (``wp.vec3f``, ``wp.quatf``, ``wp.transformf``, ``wp.spatial_vectorf``). + +* Migrated all PhysX sensor classes to warp backend: + :class:`~isaaclab_physx.sensors.ContactSensor`, + :class:`~isaaclab_physx.sensors.Imu`, and + :class:`~isaaclab_physx.sensors.FrameTransformer`. + +* Split all write methods into ``_index`` and ``_mask`` variants for explicit + sparse-index vs. boolean-mask semantics. + +Added +^^^^^ + +* Added warp kernel modules for fused GPU computations: + + * :mod:`isaaclab_physx.assets.kernels` — shared kernels for root state extraction, + velocity transforms, and data write-back. + * :mod:`isaaclab_physx.assets.articulation.kernels` — articulation-specific kernels + for joint state, body properties, and COM computations. + * :mod:`isaaclab_physx.assets.deformable_object.kernels` — nodal state and mean + vertex computations. + * :mod:`isaaclab_physx.assets.rigid_object_collection.kernels` — 2D indexed kernels + for multi-body collections. + * :mod:`isaaclab_physx.sensors.contact_sensor.kernels` — contact force aggregation + and history buffer management. + * :mod:`isaaclab_physx.sensors.imu.kernels` — fused IMU update combining acceleration, + gyroscope, and gravity projection. + * :mod:`isaaclab_physx.sensors.frame_transformer.kernels` — frame transform computations. + +* Added warp-based mock PhysX views for unit testing: + ``MockArticulationViewWarp``, ``MockRigidBodyViewWarp``, ``MockRigidContactViewWarp``. + + +0.3.0 (2026-02-11) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Refactored :class:`~isaaclab_physx.physics.PhysxManager` to properly handle physics initialization + order. ``attach_stage()`` is now called before ``start_simulation()`` to ensure GPU buffers are + correctly allocated. +* Removed ``device`` field from :class:`~isaaclab_physx.physics.PhysxManagerCfg`. Device is now + inherited from :attr:`SimulationCfg.device`. + +Added +^^^^^ + +* Added :class:`~isaaclab_physx.physics.PhysxManager` as the concrete PhysX backend implementation + of :class:`~isaaclab.physics.PhysicsManager`. +* Added :class:`~isaaclab_physx.physics.IsaacEvents` enum for PhysX-specific simulation events. +* Added monkey-patching of ``isaacsim.core.simulation_manager.SimulationManager`` in package init + to ensure Isaac Sim uses :class:`~isaaclab_physx.physics.PhysxManager` for callback handling. + + +0.2.0 (2026-02-05) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated all PhysX benchmarks in :mod:`isaaclab_physx.benchmark` to use the new + :class:`~isaaclab.test.benchmark.BaseIsaacLabBenchmark` framework from ``isaaclab.test.benchmark``. + +* Added support for configurable output backends via ``--benchmark_backend`` argument. + Supported backends: ``json``, ``osmo``, ``omniperf``. + + +0.1.4 (2026-02-05) +~~~~~~~~~~~~~~~~~~ + +Removed +^^^^^^^ + +* Removed all the deprecated properties and shorthands in the assets. They now live in the base classes. + + +0.1.3 (2026-02-03) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.benchmark` module containing performance micro-benchmarks for + PhysX asset classes. Includes: + + * ``benchmark_articulation.py``: Benchmarks for setter/writer methods on + :class:`~isaaclab_physx.assets.Articulation` including root state, joint state, + joint parameters, and body property operations. + * ``benchmark_articulation_data.py``: Benchmarks for property accessors on + :class:`~isaaclab_physx.assets.ArticulationData` covering root link/COM properties, + joint properties, and body link/COM properties. + * ``benchmark_rigid_object.py``: Benchmarks for setter/writer methods on + :class:`~isaaclab_physx.assets.RigidObject` including root state and body property operations. + * ``benchmark_rigid_object_data.py``: Benchmarks for property accessors on + :class:`~isaaclab_physx.assets.RigidObjectData`. + * ``benchmark_rigid_object_collection.py``: Benchmarks for setter/writer methods on + :class:`~isaaclab_physx.assets.RigidObjectCollection` including body state, pose, + velocity, and property operations. + * ``benchmark_rigid_object_collection_data.py``: Benchmarks for property accessors on + :class:`~isaaclab_physx.assets.RigidObjectCollectionData`. + + All benchmarks support configurable iterations, warmup steps, instance counts, multiple + input modes (torch list, torch tensor), and output to JSON/CSV formats with hardware + information capture. + + +0.1.2 (2026-02-03) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.test.mock_interfaces` module providing mock PhysX view implementations + for unit testing without requiring Isaac Sim. Includes: + + * :class:`MockRigidBodyView`: Mock for ``physx.RigidBodyView`` with transforms, velocities, + accelerations, and mass properties. + * :class:`MockArticulationView`: Mock for ``physx.ArticulationView`` with root/link states, + DOF properties, and joint control. + * :class:`MockRigidContactView`: Mock for ``physx.RigidContactView`` with contact forces, + positions, normals, and friction data. + * Factory functions including pre-configured quadruped and humanoid views. + * Patching utilities and decorators for easy test injection. + + +0.1.0 (2026-01-28) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.sensors` module containing PhysX-specific sensor implementations: + + * :class:`~isaaclab_physx.sensors.ContactSensor` and :class:`~isaaclab_physx.sensors.ContactSensorData`: + PhysX-specific implementation for contact force sensing. Extends + :class:`~isaaclab.sensors.contact_sensor.BaseContactSensor` with PhysX tensor API for contact + force queries, contact filtering, and contact point tracking. + + * :class:`~isaaclab_physx.sensors.Imu` and :class:`~isaaclab_physx.sensors.ImuData`: + PhysX-specific implementation for inertial measurement unit simulation. Extends + :class:`~isaaclab.sensors.imu.BaseImu` with PhysX rigid body views for velocity and + acceleration computation. + + * :class:`~isaaclab_physx.sensors.FrameTransformer` and :class:`~isaaclab_physx.sensors.FrameTransformerData`: + PhysX-specific implementation for coordinate frame transformations. Extends + :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformer` with PhysX rigid body views + for efficient frame pose queries. + +* Added PhysX-specific sensor tests moved from ``isaaclab/test/sensors/``: + + * ``test_contact_sensor.py`` + * ``test_imu.py`` + * ``test_frame_transformer.py`` + * ``check_contact_sensor.py`` + * ``check_imu_sensor.py`` + +Deprecated +^^^^^^^^^^ + +* Deprecated the ``pose_w``, ``pos_w``, and ``quat_w`` properties on + :class:`~isaaclab_physx.sensors.ContactSensorData` and :class:`~isaaclab_physx.sensors.ImuData`. + These properties will be removed in a future release. Please use a dedicated sensor (e.g., + :class:`~isaaclab.sensors.FrameTransformer`) to measure sensor poses in world frame. + + +0.1.0 (2026-01-28) +~~~~~~~~~~~~~~~~~~~ + +This is the initial release of the ``isaaclab_physx`` extension, which provides PhysX-specific +implementations of Isaac Lab asset classes. This extension enables a multi-backend architecture +where simulation backend-specific code is separated from the core Isaac Lab API. + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.assets` module containing PhysX-specific asset implementations: + + * :class:`~isaaclab_physx.assets.Articulation` and :class:`~isaaclab_physx.assets.ArticulationData`: + PhysX-specific implementation for articulated rigid body systems (e.g., robots). Extends + :class:`~isaaclab.assets.BaseArticulation` with PhysX tensor API integration for efficient + GPU-accelerated simulation of multi-joint systems. + + * :class:`~isaaclab_physx.assets.RigidObject` and :class:`~isaaclab_physx.assets.RigidObjectData`: + PhysX-specific implementation for single rigid body assets. Extends + :class:`~isaaclab.assets.BaseRigidObject` with PhysX tensor API for efficient rigid body + state queries and writes. + + * :class:`~isaaclab_physx.assets.RigidObjectCollection` and :class:`~isaaclab_physx.assets.RigidObjectCollectionData`: + PhysX-specific implementation for collections of rigid objects. Extends + :class:`~isaaclab.assets.BaseRigidObjectCollection` with batched ``(env_ids, object_ids)`` + API for efficient multi-object state management. + + * :class:`~isaaclab_physx.assets.DeformableObject`, :class:`~isaaclab_physx.assets.DeformableObjectCfg`, + and :class:`~isaaclab_physx.assets.DeformableObjectData`: PhysX-specific implementation for + soft body simulation using finite element methods (FEM). Moved from ``isaaclab.assets``. + + * :class:`~isaaclab_physx.assets.SurfaceGripper` and :class:`~isaaclab_physx.assets.SurfaceGripperCfg`: + PhysX-specific implementation for surface gripper simulation using contact APIs. Moved from + ``isaaclab.assets``. + +* Added backward-compatible wrapper methods in :class:`~isaaclab_physx.assets.RigidObjectCollection` + and :class:`~isaaclab_physx.assets.RigidObjectCollectionData` that delegate to the new + ``body_*`` naming convention. + +Deprecated +^^^^^^^^^^ + +* Deprecated the ``root_physx_view`` property on :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, :class:`~isaaclab_physx.assets.RigidObjectCollection`, + and :class:`~isaaclab_physx.assets.DeformableObject` in favor of the ``root_view`` property. + The ``root_physx_view`` property will be removed in a future release. + +* Deprecated the ``object_*`` naming convention in :class:`~isaaclab_physx.assets.RigidObjectCollection` + and :class:`~isaaclab_physx.assets.RigidObjectCollectionData` in favor of ``body_*``. The following + methods and properties are deprecated and will be removed in a future release: + + **RigidObjectCollection methods:** + + * ``write_object_state_to_sim()`` → use ``write_body_state_to_sim()`` + * ``write_object_link_state_to_sim()`` → use ``write_body_link_state_to_sim()`` + * ``write_object_pose_to_sim()`` → use ``write_body_pose_to_sim()`` + * ``write_object_link_pose_to_sim()`` → use ``write_body_link_pose_to_sim()`` + * ``write_object_com_pose_to_sim()`` → use ``write_body_com_pose_to_sim()`` + * ``write_object_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_com_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_link_velocity_to_sim()`` → use ``write_body_link_velocity_to_sim()`` + * ``find_objects()`` → use ``find_bodies()`` + + **RigidObjectCollectionData properties:** + + * ``default_object_state`` → use ``default_body_state`` + * ``object_names`` → use ``body_names`` + * ``object_link_pose_w`` → use ``body_link_pose_w`` + * ``object_link_vel_w`` → use ``body_link_vel_w`` + * ``object_com_pose_w`` → use ``body_com_pose_w`` + * ``object_com_vel_w`` → use ``body_com_vel_w`` + * ``object_state_w`` → use ``body_state_w`` + * ``object_link_state_w`` → use ``body_link_state_w`` + * ``object_com_state_w`` → use ``body_com_state_w`` + * ``object_com_acc_w`` → use ``body_com_acc_w`` + * ``object_pose_w`` → use ``body_pose_w`` + * ``object_pos_w`` → use ``body_pos_w`` + * ``object_quat_w`` → use ``body_quat_w`` + * ``object_vel_w`` → use ``body_vel_w`` + * ``object_lin_vel_w`` → use ``body_lin_vel_w`` + * ``object_ang_vel_w`` → use ``body_ang_vel_w`` + * ``object_acc_w`` → use ``body_acc_w`` + * And all other ``object_*`` properties (see :ref:`migrating-to-isaaclab-3-0` for complete list). + +Migration +^^^^^^^^^ + +* See :ref:`migrating-to-isaaclab-3-0` for detailed migration instructions. diff --git a/source/isaaclab_physx/docs/README.md b/source/isaaclab_physx/docs/README.md new file mode 100644 index 000000000000..4ae826f1d6bc --- /dev/null +++ b/source/isaaclab_physx/docs/README.md @@ -0,0 +1 @@ +# Isaac Lab PhysX Simulation interfaces diff --git a/source/isaaclab_physx/isaaclab_physx/__init__.py b/source/isaaclab_physx/isaaclab_physx/__init__.py new file mode 100644 index 000000000000..b7bfc7481300 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/__init__.py @@ -0,0 +1,99 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing the PhysX simulation interfaces for IsaacLab core package.""" + +import os +import sys +import toml + +# Conveniences to other module directories via relative paths +ISAACLAB_PHYSX_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +ISAACLAB_PHYSX_METADATA = toml.load(os.path.join(ISAACLAB_PHYSX_EXT_DIR, "config", "extension.toml")) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +# Configure the module-level variables +__version__ = ISAACLAB_PHYSX_METADATA["package"]["version"] + + +def _patch_isaacsim_simulation_manager(): + """Patch Isaac Sim's ``SimulationManager`` to use :class:`PhysxManager`. + + This redirects future ``from isaacsim.core.simulation_manager import SimulationManager`` + consumers to :class:`isaaclab_physx.physics.PhysxManager`, but the original + Isaac Sim ``SimulationManager`` class has *already* registered timeline + (PLAY/STOP) and stage (OPENED/CLOSED) subscriptions during its extension + startup. Those subscriptions live on the original class, not the module + attribute, so swapping the attribute alone is not enough. + + Starting with Isaac Sim 6.0.0-alpha.180 (commit ``8df6beeb0`` on + ``develop``, "hmazhar/autofix_bugs"), the original + ``SimulationManager._on_stop``/``_on_play``/``_on_stage_*`` methods were + decorated with ``@staticmethod`` so they finally fire correctly from the + Carb event subscriptions. Before that fix they were silently broken (the + subscriptions invoked them as bound methods, so the ``event`` argument was + being passed as ``self``/``cls`` and the bodies never executed). + + The newly-working ``_on_stop`` calls + ``SimulationManager.invalidate_physics()``, which calls + ``view.invalidate()`` on its ``omni.physics.tensors`` simulation view. + Because ``omni.physics.tensors.create_simulation_view("warp", stage_id=...)`` + returns the same underlying SimulationView per stage_id, that invalidation + also wrecks the view that :class:`PhysxManager` (and any articulation + ``_root_view`` derived from it) relies on. The result is the runtime error + ``Simulation view object is invalidated and cannot be used again to call + getDofVelocities`` on the very first ``scene.update()`` after + ``sim.reset()``. + + To prevent this, we disable the original class's default callbacks here + *before* swapping the module attribute, so :class:`PhysxManager` becomes + the single owner of the simulation lifecycle. + + This function is intentionally lazy: it only patches if + ``isaacsim.core.simulation_manager`` is already present in ``sys.modules``. + In the normal production flow Kit loads that module during extension startup, + before any user script imports :mod:`isaaclab_physx`, so the condition is + true and the patch fires on time. If :mod:`isaaclab_physx` happens to be + imported for pure config loading before Kit has launched (e.g. in + ``test_env_cfg_no_forbidden_imports``), the module is absent and this + function is a no-op — which is correct, because no callbacks have been + registered yet. + """ + original_module = sys.modules.get("isaacsim.core.simulation_manager") + if original_module is None: + return + + from .physics.physx_manager import IsaacEvents, PhysxManager + + # Tear down the original Isaac Sim SimulationManager's default timeline / + # stage subscriptions so they cannot invalidate the omni.physics.tensors + # view that PhysxManager owns. ``enable_all_default_callbacks(False)`` + # covers warm_start (PLAY), on_stop (STOP), stage_open (OPENED) and + # stage_close (CLOSED). Older Isaac Sim builds may not expose this API, so + # fall back gracefully. + original_class = getattr(original_module, "SimulationManager", None) + if original_class is not None and original_class is not PhysxManager: + try: + original_class.enable_all_default_callbacks(False) + except Exception: + # Defensive: API changed or original class never finished startup. + # Manually clear the subscription handles if they exist so any + # remaining references go through the dead-callback path. + for attr in ( + "_default_callback_warm_start", + "_default_callback_on_stop", + "_default_callback_stage_open", + "_default_callback_stage_close", + ): + if hasattr(original_class, attr): + setattr(original_class, attr, None) + + original_module.SimulationManager = PhysxManager + original_module.IsaacEvents = IsaacEvents + + +_patch_isaacsim_simulation_manager() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py new file mode 100644 index 000000000000..42d690079199 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for different assets, such as rigid objects and articulations. + +An asset is a physical object that can be spawned in the simulation. The class handles both +the spawning of the asset into the USD stage as well as initialization of necessary physics +handles to interact with the asset. + +Upon construction of the asset instance, the prim corresponding to the asset is spawned into the +USD stage if the spawn configuration is not None. The spawn configuration is defined in the +:attr:`AssetBaseCfg.spawn` attribute. In case the configured :attr:`AssetBaseCfg.prim_path` is +an expression, then the prim is spawned at all the matching paths. Otherwise, a single prim is +spawned at the configured path. For more information on the spawn configuration, see the +:mod:`isaaclab.sim.spawners` module. + +The asset class also registers callbacks for the stage play/stop events. These are used to +construct the physics handles for the asset as the physics engine is only available when the +stage is playing. Additionally, the class registers a callback for debug visualization of the +asset. This can be enabled by setting the :attr:`AssetBaseCfg.debug_vis` attribute to True. + +The asset class follows the following naming convention for its methods: + +* **set_xxx()**: These are used to only set the buffers into the :attr:`data` instance. However, they + do not write the data into the simulator. The writing of data only happens when the + :meth:`write_data_to_sim` method is called. +* **write_xxx_to_sim()**: These are used to set the buffers into the :attr:`data` instance and write + the corresponding data into the simulator as well. +* **update(dt)**: These are used to update the buffers in the :attr:`data` instance. This should + be called after a simulation step is performed. + +The main reason to separate the ``set`` and ``write`` operations is to provide flexibility to the +user when they need to perform a post-processing operation of the buffers before applying them +into the simulator. A common example for this is dealing with explicit actuator models where the +specified joint targets are not directly applied to the simulator but are instead used to compute +the corresponding actuator torques. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/assets/__init__.pyi new file mode 100644 index 000000000000..dbcf33d000c6 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", + "DeformableObject", + "DeformableObjectCfg", + "DeformableObjectData", + "RigidObject", + "RigidObjectData", + "RigidObjectCollection", + "RigidObjectCollectionData", + "SurfaceGripper", + "SurfaceGripperCfg", +] + +from .articulation import Articulation, ArticulationData +from .deformable_object import DeformableObject, DeformableObjectCfg, DeformableObjectData +from .rigid_object import RigidObject, RigidObjectData +from .rigid_object_collection import RigidObjectCollection, RigidObjectCollectionData +from .surface_gripper import SurfaceGripper, SurfaceGripperCfg diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py new file mode 100644 index 000000000000..91dd3a0caa25 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid articulated assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.pyi new file mode 100644 index 000000000000..9e482491fe54 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Articulation", + "ArticulationData", +] + +from .articulation import Articulation +from .articulation_data import ArticulationData diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py new file mode 100644 index 000000000000..913914e29f30 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -0,0 +1,4708 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Flag for pyright to ignore type errors in this file. +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import logging +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp +from prettytable import PrettyTable + +from pxr import UsdPhysics + +from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims +from isaaclab.utils.string import resolve_matching_names, resolve_matching_names_values +from isaaclab.utils.types import ArticulationActions +from isaaclab.utils.version import get_isaac_sim_version, has_kit +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.assets.articulation import kernels as articulation_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .articulation_data import ArticulationData + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + +# import logger +logger = logging.getLogger(__name__) + + +class Articulation(BaseArticulation): + """An articulation asset class. + + An articulation is a collection of rigid bodies connected by joints. The joints can be either + fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. + However, the articulation class has currently been tested with revolute and prismatic joints. + The class supports both floating-base and fixed-base articulations. The type of articulation + is determined based on the root joint of the articulation. If the root joint is fixed, then + the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base + system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. + + For an asset to be considered an articulation, the root prim of the asset must have the + `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using + the reduced coordinate formulation. On playing the simulation, the physics engine parses the + articulation root prim and creates the corresponding articulation in the physics engine. The + articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. + + The articulation class also provides the functionality to augment the simulation of an articulated + system with custom actuator models. These models can either be explicit or implicit, as detailed in + the :mod:`isaaclab.actuators` module. The actuator models are specified using the + :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the + corresponding actuator models, when the simulation is played. + + During the simulation step, the articulation class first applies the actuator models to compute + the joint commands based on the user-specified targets. These joint commands are then applied + into the simulation. The joint commands can be either position, velocity, or effort commands. + As an example, the following snippet shows how this can be used for position commands: + + .. code-block:: python + + # an example instance of the articulation class + my_articulation = Articulation(cfg) + + # set joint position targets + my_articulation.set_joint_position_target(position) + # propagate the actuator models and apply the computed commands into the simulation + my_articulation.write_data_to_sim() + + # step the simulation using the simulation context + sim_context.step() + + # update the articulation state, where dt is the simulation time step + my_articulation.update(dt) + + .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html + + """ + + cfg: ArticulationCfg + """Configuration instance for the articulations.""" + + __backend_name__: str = "physx" + """The name of the backend for the articulation.""" + + actuators: dict + """Dictionary of actuator instances for the articulation. + + The keys are the actuator names and the values are the actuator instances. The actuator instances + are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` + attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. + """ + + def __init__(self, cfg: ArticulationCfg): + """Initialize the articulation. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> ArticulationData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation is a fixed-base or floating-base system.""" + return self.root_view.shared_metatype.fixed_base + + @property + def num_joints(self) -> int: + """Number of joints in articulation.""" + return self.root_view.shared_metatype.dof_count + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons in articulation.""" + return self.root_view.max_fixed_tendons + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons in articulation.""" + return self.root_view.max_spatial_tendons + + @property + def num_bodies(self) -> int: + """Number of bodies in articulation.""" + return self.root_view.shared_metatype.link_count + + @property + def joint_names(self) -> list[str]: + """Ordered names of joints in articulation.""" + return self.root_view.shared_metatype.dof_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered names of fixed tendons in articulation.""" + return self._fixed_tendon_names + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered names of spatial tendons in articulation.""" + return self._spatial_tendon_names + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in articulation.""" + return self.root_view.shared_metatype.link_names + + @property + def root_view(self) -> physx.ArticulationView: + """Root view for the asset. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the articulation. + + .. caution:: + If both `env_ids` and `env_mask` are provided, then `env_mask` takes precedence over `env_ids`. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # use ellipses object to skip initial indices. + if (env_ids is None) or (env_ids == slice(None)): + env_ids = slice(None) + # reset actuators + for actuator in self.actuators.values(): + actuator.reset(env_ids) + # reset external wrenches. + self._instantaneous_wrench_composer.reset(env_ids, env_mask) + self._permanent_wrench_composer.reset(env_ids, env_mask) + + def write_data_to_sim(self): + """Write external wrenches and joint commands to the simulation. + + If any explicit actuators are present, then the actuator models are used to compute the + joint commands. Otherwise, the joint commands are directly set into the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + get_force_data = self._get_inst_wrench_force_f32 + get_torque_data = self._get_inst_wrench_torque_f32 + else: + composer = self._permanent_wrench_composer + get_force_data = self._get_perm_wrench_force_f32 + get_torque_data = self._get_perm_wrench_torque_f32 + composer.compose_to_body_frame() + self.root_view.apply_forces_and_torques_at_position( + force_data=get_force_data(), + torque_data=get_torque_data(), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + # apply actuator models + self._apply_actuator_model() + # write actions into simulation + self.root_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES) + # position and velocity targets only for implicit actuators + if self._has_implicit_actuators: + self.root_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES) + self.root_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES) + + def update(self, dt: float): + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return resolve_matching_names(name_keys, self.body_names, preserve_order) + + def find_joints( + self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. + joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + if joint_subset is None: + joint_subset = self.joint_names + # find joints + return resolve_matching_names(name_keys, joint_subset, preserve_order) + + def find_fixed_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint + names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means + all joints in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + # tendons follow the joint names they are attached to + tendon_subsets = self.fixed_tendon_names + # find tendons + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + def find_spatial_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.spatial_tendon_names + # find tendons + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + """ + Operations - State Writers. + """ + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_link_pose_to_sim_mask(root_pose=root_pose, env_mask=env_mask) + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_link_pose_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_pose, + env_ids, + full_data, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Update the timestamps + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + # Need to invalidate the buffer to trigger the update with the new state. + self.data._body_link_pose_w.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_root_transforms(self._get_root_link_pose_w_f32(), indices=env_ids) + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids, full_data=True) + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would call + # write_root_link_pose_to_sim after this. + wp.launch( + shared_kernels.set_root_com_pose_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_pose, + self.data.body_com_pose_b, + env_ids, + full_data, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Update the timestamps + self.data._root_com_state_w.timestamp = -1.0 + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + # Need to invalidate the buffer to trigger the update with the new state. + self.data._body_link_pose_w.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_root_transforms(self._get_root_link_pose_w_f32(), indices=env_ids) + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_root_com_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids, full_data=True) + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids), 6) or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_com_velocity_to_sim_mask(root_velocity=root_velocity, env_mask=env_mask) + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or + (num_instances, 6), or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.set_root_com_velocity_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + env_ids, + self.data._num_bodies, + full_data, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Update the timestamps + self.data._root_state_w.timestamp = -1.0 + self.data._root_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_root_velocities(self._get_root_com_vel_w_f32(), indices=env_ids) + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids, full_data=True) + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or + (num_instances, 6), or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. + wp.launch( + shared_kernels.set_root_link_velocity_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pose_b, + self.data.root_link_pose_w, + env_ids, + self.data._num_bodies, + full_data, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Update the timestamps + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + self.data._root_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_root_velocities(self._get_root_link_vel_w_f32(), indices=env_ids) + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (num_instances, 6) or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_root_link_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids, full_data=True) + + def write_joint_state_to_sim_index( + self, + *, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint positions and velocities in a single fused kernel launch. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(position, (self.num_instances, self.num_joints), wp.float32, "position") + self.assert_shape_and_dtype(velocity, (self.num_instances, self.num_joints), wp.float32, "velocity") + else: + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") + wp.launch( + articulation_kernels.write_joint_state_data, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + position, + velocity, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data.joint_pos, + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + # Invalidate buffers + self.data._body_com_vel_w.timestamp = -1.0 + self.data._body_link_vel_w.timestamp = -1.0 + self.data._body_com_pose_b.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_link_pose_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_dof_positions(self.data._joint_pos.data, indices=env_ids) + self.root_view.set_dof_velocities(self.data._joint_vel.data, indices=env_ids) + + def write_joint_state_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint positions and velocities over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + position: Joint positions. Shape is (num_instances, num_joints). + velocity: Joint velocities. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks to indices (PhysX only supports index-based TensorAPI) + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + self.write_joint_state_to_sim_index( + position=position, velocity=velocity, joint_ids=joint_ids, env_ids=env_ids, full_data=True + ) + + def write_joint_position_to_sim_index( + self, + *, + position: torch.Tensor, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint positions over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(position, (self.num_instances, self.num_joints), wp.float32, "position") + else: + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + position, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data.joint_pos, + ], + device=self.device, + ) + # Need to invalidate the buffer to trigger the update with the new root pose. + self.data._body_com_vel_w.timestamp = -1.0 + self.data._body_link_vel_w.timestamp = -1.0 + self.data._body_com_pose_b.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_link_pose_w.timestamp = -1.0 + + self.data._body_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_dof_positions(self.data._joint_pos.data, indices=env_ids) + + def write_joint_position_to_sim_mask( + self, + *, + position: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint positions over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + position: Joint positions. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_joint_position_to_sim_index(position=position, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def write_joint_velocity_to_sim_index( + self, + *, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint velocities to the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(velocity, (self.num_instances, self.num_joints), wp.float32, "velocity") + else: + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + articulation_kernels.write_joint_vel_data, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + velocity, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data.joint_vel, + self.data._previous_joint_vel, + self.data.joint_acc, + ], + device=self.device, + ) + # set into simulation + self.root_view.set_dof_velocities(self.data._joint_vel.data, indices=env_ids) + + def write_joint_velocity_to_sim_mask( + self, + *, + velocity: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint velocities over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + velocity: Joint velocities. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_joint_velocity_to_sim_index(velocity=velocity, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + """ + Operations - Simulation Parameters Writers. + """ + + def write_joint_stiffness_to_sim_index( + self, + *, + stiffness: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint stiffness over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_joints), wp.float32, "stiffness") + else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_stiffness, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_stiffness, self.data._joint_stiffness) + self.root_view.set_dof_stiffnesses(self._cpu_joint_stiffness, indices=cpu_env_ids) + + def write_joint_stiffness_to_sim_mask( + self, + *, + stiffness: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint stiffness over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Joint stiffness. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the the right code path is taken inside the kernel. + self.write_joint_stiffness_to_sim_index( + stiffness=stiffness, joint_ids=joint_ids, env_ids=env_ids, full_data=True + ) + + def write_joint_damping_to_sim_index( + self, + *, + damping: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint damping over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Joint damping. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_joints), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + damping, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_damping, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + damping, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_damping, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_damping, self.data._joint_damping) + self.root_view.set_dof_dampings(self._cpu_joint_damping, indices=cpu_env_ids) + + def write_joint_damping_to_sim_mask( + self, + *, + damping: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint damping over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Joint damping. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_damping_to_sim_index(damping=damping, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def write_joint_position_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + warn_limit_violation: bool = True, + ): + """Write joint position limits over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2) or (num_instances, num_joints, 2). + In warp the expected shape is (num_instances, num_joints), with dtype wp.vec2f. + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.vec2f, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") + + clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. + if isinstance(limits, float): + raise ValueError("Joint position limits must be a tensor or array, not a float.") + wp.launch( + articulation_kernels.write_joint_limit_data_to_buffer, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + self.cfg.soft_joint_pos_limit_factor, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_pos_limits, + self.data._soft_joint_pos_limits, + self.data._default_joint_pos, + clamped_defaults, + ], + device=self.device, + ) + # Log a warning if the default joint positions are outside of the new limits. + if clamped_defaults.numpy()[0] > 0: + violation_message = ( + "Some default joint positions are outside of the range of the new joint limits. Default joint positions" + " will be clamped to be within the new joint limits." + ) + if warn_limit_violation: + logger.warning(violation_message) + else: + logger.info(violation_message) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_pos_limits, self.data._joint_pos_limits) + self.root_view.set_dof_limits(self._cpu_joint_pos_limits, indices=cpu_env_ids) + + def write_joint_position_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + warn_limit_violation: bool = True, + ): + """Write joint position limits over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint limits. Shape is (num_instances, num_joints, 2). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_position_limit_to_sim_index( + limits=limits, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=True, + warn_limit_violation=warn_limit_violation, + ) + + def write_joint_velocity_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint max velocity over selected environment indices into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.float32, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_vel_limits, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_vel_limits, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_vel_limits, self.data._joint_vel_limits) + self.root_view.set_dof_max_velocities(self._cpu_joint_vel_limits, indices=cpu_env_ids) + + def write_joint_velocity_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint max velocity over selected environment mask into the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint max velocity. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_velocity_limit_to_sim_index( + limits=limits, joint_ids=joint_ids, env_ids=env_ids, full_data=True + ) + + def write_joint_effort_limit_to_sim_index( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint effort limits over selected environment indices into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.float32, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limits, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_effort_limits, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + limits, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_effort_limits, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_effort_limits, self.data._joint_effort_limits) + self.root_view.set_dof_max_forces(self._cpu_joint_effort_limits, indices=cpu_env_ids) + + def write_joint_effort_limit_to_sim_mask( + self, + *, + limits: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint effort limits over selected environment mask into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limits: Joint torque limits. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_effort_limit_to_sim_index(limits=limits, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def write_joint_armature_to_sim_index( + self, + *, + armature: torch.Tensor | wp.array | float, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Write joint armature over selected environment indices into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + armature: Joint armature. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(armature, (self.num_instances, self.num_joints), wp.float32, "armature") + else: + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(armature, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + armature, + env_ids, + joint_ids, + ], + outputs=[ + self.data._joint_armature, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + armature, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_armature, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + if isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids, dtype=wp.int32) + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_armature, self.data._joint_armature) + self.root_view.set_dof_armatures(self._cpu_joint_armature, indices=cpu_env_ids) + + def write_joint_armature_to_sim_mask( + self, + *, + armature: torch.Tensor | wp.array | float, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + """Write joint armature over selected environment mask into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + armature: Joint armature. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_armature_to_sim_index(armature=armature, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def write_joint_friction_coefficient_to_sim_index( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array | float, + joint_dynamic_friction_coeff: torch.Tensor | wp.array | float | None = None, + joint_viscous_friction_coeff: torch.Tensor | wp.array | float | None = None, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + r"""Write joint friction coefficients over selected environment indices into the simulation. + + For Isaac Sim versions below 5.0, only the static friction coefficient is set. + This limits the resisting force or torque up to a maximum proportional to the transmitted + spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. + + For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients + are set. The model combines Coulomb (static & dynamic) friction with a viscous term: + + - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. + - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. + - Viscous friction :math:`c_v` is a velocity-proportional resistive term. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. + Same shape as above. If None, the dynamic coefficient is not updated. + joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. + Same shape as above. If None, the viscous coefficient is not updated. + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype( + joint_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_friction_coeff" + ) + else: + self.assert_shape_and_dtype( + joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff" + ) + if joint_dynamic_friction_coeff is not None: + if full_data: + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_dynamic_friction_coeff", + ) + else: + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_dynamic_friction_coeff", + ) + if joint_viscous_friction_coeff is not None: + if full_data: + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_viscous_friction_coeff", + ) + else: + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_viscous_friction_coeff", + ) + # Get the friction properties from the simulation. + friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. + wp.launch( + articulation_kernels.write_joint_friction_data_to_buffer, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + joint_friction_coeff, + joint_dynamic_friction_coeff, + joint_viscous_friction_coeff, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_friction_coeff, + self.data._joint_dynamic_friction_coeff, + self.data._joint_viscous_friction_coeff, + friction_props, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_friction_props, friction_props) + self.root_view.set_dof_friction_properties(self._cpu_joint_friction_props, indices=cpu_env_ids) + + def write_joint_friction_coefficient_to_sim_mask( + self, + *, + joint_friction_coeff: torch.Tensor | wp.array, + joint_dynamic_friction_coeff: torch.Tensor | wp.array | None = None, + joint_viscous_friction_coeff: torch.Tensor | wp.array | None = None, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ): + r"""Write joint friction coefficients over selected environment mask into the simulation. + + For Isaac Sim versions below 5.0, only the static friction coefficient is set. + This limits the resisting force or torque up to a maximum proportional to the transmitted + spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. + + For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients + are set. The model combines Coulomb (static & dynamic) friction with a viscous term: + + - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. + - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. + - Viscous friction :math:`c_v` is a velocity-proportional resistive term. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (num_instances, num_joints). + joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. + Same shape as above. If None, the dynamic coefficient is not updated. + joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. + Same shape as above. If None, the viscous coefficient is not updated. + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=joint_friction_coeff, + joint_dynamic_friction_coeff=joint_dynamic_friction_coeff, + joint_viscous_friction_coeff=joint_viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=True, + ) + + def write_joint_dynamic_friction_coefficient_to_sim_index( + self, + *, + joint_dynamic_friction_coeff: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Write joint dynamic friction coefficient over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_dynamic_friction_coeff: Joint dynamic friction coefficient. Shape is (len(env_ids), len(joint_ids)) + or (num_instances, num_joints) if full_data. + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_dynamic_friction_coeff", + ) + else: + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_dynamic_friction_coeff", + ) + # Get the friction properties from the simulation. + friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. + wp.launch( + articulation_kernels.write_joint_friction_param_to_buffer, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + joint_dynamic_friction_coeff, + env_ids, + joint_ids, + 1, + full_data, + ], + outputs=[ + self.data._joint_dynamic_friction_coeff, + friction_props, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_friction_props, friction_props) + self.root_view.set_dof_friction_properties(self._cpu_joint_friction_props, indices=cpu_env_ids) + + def write_joint_dynamic_friction_coefficient_to_sim_mask( + self, + *, + joint_dynamic_friction_coeff: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint dynamic friction coefficient over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_dynamic_friction_coeff: Joint dynamic friction coefficient. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_dynamic_friction_coefficient_to_sim_index( + joint_dynamic_friction_coeff=joint_dynamic_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=True, + ) + + def write_joint_viscous_friction_coefficient_to_sim_index( + self, + *, + joint_viscous_friction_coeff: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Write joint viscous friction coefficient over selected environment indices into the simulation. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_viscous_friction_coeff: Joint viscous friction coefficient. Shape is (len(env_ids), len(joint_ids)) + or (num_instances, num_joints) if full_data. + joint_ids: Joint indices. If None, then all joints are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + if has_kit() and get_isaac_sim_version().major < 5: + logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") + return + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_viscous_friction_coeff", + ) + else: + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_viscous_friction_coeff", + ) + # Get the friction properties from the simulation. + friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. + wp.launch( + articulation_kernels.write_joint_friction_param_to_buffer, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + joint_viscous_friction_coeff, + env_ids, + joint_ids, + 2, + full_data, + ], + outputs=[ + self.data._joint_viscous_friction_coeff, + friction_props, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_joint_friction_props, friction_props) + self.root_view.set_dof_friction_properties(self._cpu_joint_friction_props, indices=cpu_env_ids) + + def write_joint_viscous_friction_coefficient_to_sim_mask( + self, + *, + joint_viscous_friction_coeff: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write joint viscous friction coefficient over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + joint_viscous_friction_coeff: Joint viscous friction coefficient. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.write_joint_viscous_friction_coefficient_to_sim_index( + joint_viscous_friction_coeff=joint_viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=True, + ) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)) or (num_instances, num_bodies) + if full_data. + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_mass, + ], + device=self.device, + ) + + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_body_mass, self.data._body_mass) + self.root_view.set_masses(self._cpu_body_mass, indices=cpu_env_ids) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + body_ids = self._resolve_body_mask(body_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_masses_index(masses=masses, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set center of mass pose of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is (len(env_ids), len(body_ids), 7) or + (num_instances, num_bodies, 7) if full_data, or (len(env_ids), len(body_ids)) / + (num_instances, num_bodies) with dtype wp.transformf. + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_com_pose_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_com_pose_b.data, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + # Convert from wp.transformf to flat (N, M, 7) array for PhysX + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy( + self._cpu_body_coms, + self.data._body_com_pose_b.data.view(wp.float32).reshape((self.num_instances, self.num_bodies, 7)), + ) + self.root_view.set_coms(self._cpu_body_coms, indices=cpu_env_ids) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass pose of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + body_ids = self._resolve_body_mask(body_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_coms_index(coms=coms, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9) or + (num_instances, num_bodies, 9) if full_data. + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_inertia_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_inertia, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_body_inertia, self.data._body_inertia) + self.root_view.set_inertias(self._cpu_body_inertia, indices=cpu_env_ids) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + body_ids = self._resolve_body_mask(body_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_joint_position_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set joint position targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint position targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints) + if full_data. + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_pos_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_position_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint position targets into internal buffers using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint position targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_joint_position_target_index(target=target, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def set_joint_velocity_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set joint velocity targets into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints) + if full_data. + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_vel_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_velocity_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint velocity targets into internal buffers using masks. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint velocity targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_joint_velocity_target_index(target=target, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + def set_joint_effort_target_index( + self, + *, + target: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set joint efforts into internal buffers using indices. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints) + if full_data. + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + joint_ids = self._resolve_joint_ids(joint_ids) + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], joint_ids.shape[0]), + inputs=[ + target, + env_ids, + joint_ids, + full_data, + ], + outputs=[ + self.data._joint_effort_target, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the targets to the simulation. + + def set_joint_effort_target_mask( + self, + *, + target: torch.Tensor | wp.array, + joint_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set joint efforts into internal buffers using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + target: Joint effort targets. Shape is (num_instances, num_joints). + joint_mask: Joint mask. If None, then all joints are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + joint_ids = self._resolve_joint_mask(joint_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_joint_effort_target_index(target=target, joint_ids=joint_ids, env_ids=env_ids, full_data=True) + + """ + Operations - Tendons. + """ + + def set_fixed_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon stiffness into internal buffers using indices. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype( + stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "stiffness" + ) + else: + self.assert_shape_and_dtype( + stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness" + ) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_stiffness, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the stiffness to the simulation. + + def set_fixed_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers using masks. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Fixed tendon stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_stiffness_index( + stiffness=stiffness, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_fixed_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon damping into internal buffers using indices. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim_index` + function. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_fixed_tendons), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "damping") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_damping, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_damping, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the damping to the simulation. + + def set_fixed_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers using masks. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Fixed tendon damping. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_damping_index( + damping=damping, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_fixed_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using indices. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype( + limit_stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit_stiffness" + ) + else: + self.assert_shape_and_dtype( + limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness" + ) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limit_stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_limit_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_limit_stiffness, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the limit stiffness to the simulation. + + def set_fixed_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon limit stiffness into internal buffers using masks. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_limit_stiffness_index( + limit_stiffness=limit_stiffness, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_fixed_tendon_position_limit_index( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon position limit into internal buffers using indices. + + This function does not apply the tendon position limit to the simulation. It only fills the buffers with + the desired values. To apply the tendon position limit, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit: Fixed tendon position limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the position limit for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype(limit, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit") + else: + self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limit, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_pos_limits, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_pos_limits, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the position limit to the simulation. + + def set_fixed_tendon_position_limit_mask( + self, + *, + limit: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon position limit into internal buffers using masks. + + This function does not apply the tendon position limit to the simulation. It only fills the buffers with + the desired values. To apply the tendon position limit, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit: Fixed tendon position limit. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_position_limit_index( + limit=limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_fixed_tendon_rest_length_index( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon rest length into internal buffers using indices. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype( + rest_length, (self.num_instances, self.num_fixed_tendons), wp.float32, "rest_length" + ) + else: + self.assert_shape_and_dtype( + rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length" + ) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(rest_length, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + rest_length, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_rest_length, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + rest_length, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_rest_length, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the rest length to the simulation. + + def set_fixed_tendon_rest_length_mask( + self, + *, + rest_length: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon rest length into internal buffers using masks. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + rest_length: Fixed tendon rest length. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_rest_length_index( + rest_length=rest_length, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_fixed_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set fixed tendon offset into internal buffers using indices. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_fixed_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons) if full_data. + fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + if full_data: + self.assert_shape_and_dtype(offset, (self.num_instances, self.num_fixed_tendons), wp.float32, "offset") + else: + self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "offset") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(offset, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_offset, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_offset, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the offset to the simulation. + + def set_fixed_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + fixed_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set fixed tendon offset into internal buffers using masks. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_fixed_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + offset: Fixed tendon offset. Shape is (num_instances, num_fixed_tendons). + fixed_tendon_mask: Fixed tendon mask. If None, then all fixed tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + fixed_tendon_ids = self._resolve_fixed_tendon_mask(fixed_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_fixed_tendon_offset_index( + offset=offset, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids, full_data=True + ) + + def write_fixed_tendon_properties_to_sim_index( + self, + *, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using indices. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + fixed_tendon_ids: The fixed tendon indices to write the properties for. Defaults to None + (all fixed tendons). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + # Write fixed tendon properties to the simulation. + self.root_view.set_fixed_tendon_properties( + self.data.fixed_tendon_stiffness.warp, + self.data.fixed_tendon_damping.warp, + self.data.fixed_tendon_limit_stiffness.warp, + self.data.fixed_tendon_pos_limits.warp, + self.data.fixed_tendon_rest_length.warp, + self.data.fixed_tendon_offset.warp, + indices=env_ids, + ) + + def write_fixed_tendon_properties_to_sim_mask( + self, + *, + env_mask: wp.array | None = None, + ) -> None: + """Write fixed tendon properties into the simulation using masks. + + .. tip:: + For maximum performance we recommend using the mask method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + self.write_fixed_tendon_properties_to_sim_index(env_ids=env_ids) + + def set_spatial_tendon_stiffness_index( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set spatial tendon stiffness into internal buffers using indices. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons) if full_data. + spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + if full_data: + self.assert_shape_and_dtype( + stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "stiffness" + ) + else: + self.assert_shape_and_dtype( + stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness" + ) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_stiffness, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the stiffness to the simulation. + + def set_spatial_tendon_stiffness_mask( + self, + *, + stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers using masks. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + stiffness: Spatial tendon stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + spatial_tendon_ids = self._resolve_spatial_tendon_mask(spatial_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_spatial_tendon_stiffness_index( + stiffness=stiffness, spatial_tendon_ids=spatial_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_spatial_tendon_damping_index( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set spatial tendon damping into internal buffers using indices. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons) if full_data. + spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_spatial_tendons), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "damping") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_damping, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_damping, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the damping to the simulation. + + def set_spatial_tendon_damping_mask( + self, + *, + damping: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers using masks. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + damping: Spatial tendon damping. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + spatial_tendon_ids = self._resolve_spatial_tendon_mask(spatial_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_spatial_tendon_damping_index( + damping=damping, spatial_tendon_ids=spatial_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_spatial_tendon_limit_stiffness_index( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using indices. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons) if full_data. + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None + (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + if full_data: + self.assert_shape_and_dtype( + limit_stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "limit_stiffness" + ) + else: + self.assert_shape_and_dtype( + limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness" + ) + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(limit_stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_limit_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_limit_stiffness, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the limit stiffness to the simulation. + + def set_spatial_tendon_limit_stiffness_mask( + self, + *, + limit_stiffness: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers using masks. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + spatial_tendon_ids = self._resolve_spatial_tendon_mask(spatial_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_spatial_tendon_limit_stiffness_index( + limit_stiffness=limit_stiffness, spatial_tendon_ids=spatial_tendon_ids, env_ids=env_ids, full_data=True + ) + + def set_spatial_tendon_offset_index( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set spatial tendon offset into internal buffers using indices. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim_index` method. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons) if full_data. + spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve indices + env_ids = self._resolve_env_ids(env_ids) + spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + if full_data: + self.assert_shape_and_dtype(offset, (self.num_instances, self.num_spatial_tendons), wp.float32, "offset") + else: + self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "offset") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + if isinstance(offset, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_offset, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_offset, + ], + device=self.device, + ) + # Only updates internal buffers, does not apply the offset to the simulation. + + def set_spatial_tendon_offset_mask( + self, + *, + offset: float | torch.Tensor | wp.array, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set spatial tendon offset into internal buffers using masks. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim_mask` method. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + offset: Spatial tendon offset. Shape is (num_instances, num_spatial_tendons). + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + spatial_tendon_ids = self._resolve_spatial_tendon_mask(spatial_tendon_mask) + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_spatial_tendon_offset_index( + offset=offset, spatial_tendon_ids=spatial_tendon_ids, env_ids=env_ids, full_data=True + ) + + def write_spatial_tendon_properties_to_sim_index( + self, + *, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using indices. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve indices + if (env_ids is None) or (env_ids == slice(None)): + env_ids = self._ALL_INDICES + elif isinstance(env_ids, list): + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + # Write spatial tendon properties to the simulation. + self.root_view.set_spatial_tendon_properties( + self.data.spatial_tendon_stiffness.warp, + self.data.spatial_tendon_damping.warp, + self.data.spatial_tendon_limit_stiffness.warp, + self.data.spatial_tendon_offset.warp, + indices=env_ids, + ) + + def write_spatial_tendon_properties_to_sim_mask( + self, + *, + spatial_tendon_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Write spatial tendon properties into the simulation using masks. + + .. tip:: + For maximum performance we recommend using the mask method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + spatial_tendon_mask: Spatial tendon mask. If None, then all spatial tendons are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + env_ids = self._resolve_env_mask(env_mask) + # Write spatial tendon properties to the simulation. + self.write_spatial_tendon_properties_to_sim_index(env_ids=env_ids) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + + if self.cfg.articulation_root_prim_path is not None: + # The articulation root prim path is specified explicitly, so we can just use this. + root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path + else: + # No articulation root prim path was specified, so we need to search + # for it. We search for this in the first environment and then + # create a regex that matches all environments. + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + # Find all articulation root prims in the first environment. + first_env_root_prims = get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + and prim.GetAttribute("physxArticulation:articulationEnabled").Get() is not False, + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + # Now we convert the found articulation root from the first + # environment back into a regex that matches all environments. + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path + + # -- articulation + self._root_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) + + # check if the articulation was created + if self.root_view._backend is None: + raise RuntimeError(f"Failed to create articulation at: {root_prim_path_expr}. Please check PhysX logs.") + + # container for data access + self._data = ArticulationData(self.root_view, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + self._process_actuators_cfg() + self._process_tendons() + # validate configuration + self._validate_cfg() + # update the robot data + self.update(0.0) + # log joint information + self._log_articulation_info() + # Let the articulation data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _create_buffers(self): + self._ALL_INDICES = wp.array(np.arange(self.num_instances, dtype=np.int32), device=self.device) + self._ALL_JOINT_INDICES = wp.array(np.arange(self.num_joints, dtype=np.int32), device=self.device) + self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) + self._ALL_FIXED_TENDON_INDICES = wp.array(np.arange(self.num_fixed_tendons, dtype=np.int32), device=self.device) + self._ALL_SPATIAL_TENDON_INDICES = wp.array( + np.arange(self.num_spatial_tendons, dtype=np.int32), device=self.device + ) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # asset named data + self.data.joint_names = self.joint_names + self.data.body_names = self.body_names + # tendon names are set in _process_tendons function + + # -- joint commands (sent to the simulation after actuator processing) + self._joint_pos_target_sim = wp.zeros_like(self.data.joint_pos_target, device=self.device) + self._joint_vel_target_sim = wp.zeros_like(self.data.joint_pos_target, device=self.device) + self._joint_effort_target_sim = wp.zeros_like(self.data.joint_pos_target, device=self.device) + + # soft joint position limits (recommended not to be too close to limits). + wp.launch( + articulation_kernels.update_soft_joint_pos_limits, + dim=(self.num_instances, self.num_joints), + inputs=[ + self.data.joint_pos_limits, + self.cfg.soft_joint_pos_limit_factor, + ], + outputs=[ + self.data.soft_joint_pos_limits, + ], + device=self.device, + ) + + # Cached .view(wp.float32) wrappers for structured warp arrays. + # These avoid per-call wp.array metadata allocation in writers. + # Reset to None each time _create_buffers runs (during initialization). + self._root_link_pose_w_f32: wp.array | None = None + self._root_com_vel_w_f32: wp.array | None = None + self._root_link_vel_w_f32: wp.array | None = None + # Cached wrench views for write_data_to_sim + self._inst_wrench_force_f32: wp.array | None = None + self._inst_wrench_torque_f32: wp.array | None = None + self._perm_wrench_force_f32: wp.array | None = None + self._perm_wrench_torque_f32: wp.array | None = None + + # Pre-allocated pinned CPU buffers for PhysX TensorAPI writes. + # PhysX requires CPU arrays for "model" property updates (stiffness, damping, etc.). + # Pinned memory enables DMA fast path and avoids per-call malloc. + N, J, B = self.num_instances, self.num_joints, self.num_bodies + self._cpu_env_ids_all = wp.zeros(N, dtype=wp.int32, device="cpu", pinned=True) + wp.copy(self._cpu_env_ids_all, self._ALL_INDICES) + self._cpu_joint_stiffness = wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_damping = wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_pos_limits = wp.zeros((N, J, 2), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_vel_limits = wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_effort_limits = wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_armature = wp.zeros((N, J), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_joint_friction_props = wp.zeros((N, J, 3), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_body_mass = wp.zeros((N, B), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_body_coms = wp.zeros((N, B, 7), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_body_inertia = wp.zeros((N, B, 9), dtype=wp.float32, device="cpu", pinned=True) + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- root state + # Note we cast to tuple to avoid torch/numpy type mismatch. + default_root_pose = tuple(self.cfg.init_state.pos) + tuple(self.cfg.init_state.rot) + default_root_vel = tuple(self.cfg.init_state.lin_vel) + tuple(self.cfg.init_state.ang_vel) + default_root_pose = np.tile(np.array(default_root_pose, dtype=np.float32), (self.num_instances, 1)) + default_root_vel = np.tile(np.array(default_root_vel, dtype=np.float32), (self.num_instances, 1)) + self.data.default_root_pose = wp.array(default_root_pose, dtype=wp.transformf, device=self.device) + self.data.default_root_vel = wp.array(default_root_vel, dtype=wp.spatial_vectorf, device=self.device) + + # -- joint state + pos_idx_list, _, pos_val_list = resolve_matching_names_values(self.cfg.init_state.joint_pos, self.joint_names) + vel_idx_list, _, vel_val_list = resolve_matching_names_values(self.cfg.init_state.joint_vel, self.joint_names) + wp.launch( + articulation_kernels.update_default_joint_values, + dim=(self.num_instances, len(pos_idx_list)), + inputs=[ + wp.array(pos_val_list, dtype=wp.float32, device=self.device), + wp.array(pos_idx_list, dtype=wp.int32, device=self.device), + ], + outputs=[ + self.data.default_joint_pos, + ], + device=self.device, + ) + wp.launch( + articulation_kernels.update_default_joint_values, + dim=(self.num_instances, len(vel_idx_list)), + inputs=[ + wp.array(vel_val_list, dtype=wp.float32, device=self.device), + wp.array(vel_idx_list, dtype=wp.int32, device=self.device), + ], + outputs=[ + self.data.default_joint_vel, + ], + device=self.device, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + self._root_view = None + + """ + Internal helpers -- Actuators. + """ + + def _process_actuators_cfg(self): + """Process and apply articulation joint properties.""" + # create actuators + self.actuators = dict() + # flag for implicit actuators + # if this is false, we by-pass certain checks when doing actuator-related operations + self._has_implicit_actuators = False + + # iterate over all actuator configurations + for actuator_name, actuator_cfg in self.cfg.actuators.items(): + # type annotation for type checkers + actuator_cfg: ActuatorBaseCfg + # create actuator group + joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) + # check if any joints are found + if len(joint_names) == 0: + raise ValueError( + f"No joints found for actuator group: {actuator_name} with joint name expression:" + f" {actuator_cfg.joint_names_expr}." + ) + # resolve joint indices + # we pass a slice if all joints are selected to avoid indexing overhead + if len(joint_names) == self.num_joints: + joint_ids = slice(None) + else: + joint_ids = torch.tensor(joint_ids, device=self.device, dtype=torch.int32) + # create actuator collection + # note: for efficiency avoid indexing when over all indices + actuator: ActuatorBase = actuator_cfg.class_type( + cfg=actuator_cfg, + joint_names=joint_names, + joint_ids=joint_ids, + num_envs=self.num_instances, + device=self.device, + stiffness=self._data.joint_stiffness.torch[:, joint_ids], + damping=self._data.joint_damping.torch[:, joint_ids], + armature=self._data.joint_armature.torch[:, joint_ids], + friction=self._data.joint_friction_coeff.torch[:, joint_ids], + dynamic_friction=self._data.joint_dynamic_friction_coeff.torch[:, joint_ids], + viscous_friction=self._data.joint_viscous_friction_coeff.torch[:, joint_ids], + effort_limit=self._data.joint_effort_limits.torch[:, joint_ids].clone(), + velocity_limit=self._data.joint_vel_limits.torch[:, joint_ids], + ) + # store actuator group + self.actuators[actuator_name] = actuator + # Store the configured values from the actuator model + # note: this is the value configured in the actuator model (for implicit and explicit actuators) + joint_ids = actuator.joint_indices + if joint_ids == slice(None): + joint_ids = self._ALL_JOINT_INDICES + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.stiffness, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_stiffness, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.damping, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_damping, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.armature, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_armature, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.friction, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_friction_coeff, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.dynamic_friction, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_dynamic_friction_coeff, + ], + device=self.device, + ) + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(self.num_instances, joint_ids.shape[0]), + inputs=[ + actuator.viscous_friction, + self._ALL_INDICES, + joint_ids, + False, + ], + outputs=[ + self.data._joint_viscous_friction_coeff, + ], + device=self.device, + ) + # set the passed gains and limits into the simulation + if isinstance(actuator, ImplicitActuator): + self._has_implicit_actuators = True + # the gains and limits are set into the simulation since actuator model is implicit + self.write_joint_stiffness_to_sim_index(stiffness=actuator.stiffness, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim_index(damping=actuator.damping, joint_ids=actuator.joint_indices) + else: + # the gains and limits are processed by the actuator model + # we set gains to zero, and torque limit to a high value in simulation to avoid any interference + self.write_joint_stiffness_to_sim_index(stiffness=0.0, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim_index(damping=0.0, joint_ids=actuator.joint_indices) + + # Set common properties into the simulation + self.write_joint_effort_limit_to_sim_index( + limits=actuator.effort_limit_sim, joint_ids=actuator.joint_indices + ) + self.write_joint_velocity_limit_to_sim_index( + limits=actuator.velocity_limit_sim, joint_ids=actuator.joint_indices + ) + self.write_joint_armature_to_sim_index(armature=actuator.armature, joint_ids=actuator.joint_indices) + self.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=actuator.friction, joint_ids=actuator.joint_indices + ) + self.write_joint_dynamic_friction_coefficient_to_sim_index( + joint_dynamic_friction_coeff=actuator.dynamic_friction, joint_ids=actuator.joint_indices + ) + self.write_joint_viscous_friction_coefficient_to_sim_index( + joint_viscous_friction_coeff=actuator.viscous_friction, joint_ids=actuator.joint_indices + ) + + # perform some sanity checks to ensure actuators are prepared correctly + total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) + if total_act_joints != (self.num_joints - self.num_fixed_tendons): + logger.warning( + "Not all actuators are configured! Total number of actuated joints not equal to number of" + f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." + ) + + if self.cfg.actuator_value_resolution_debug_print: + t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActutatorCfg Value", "Applied"]) + for actuator_group, actuator in self.actuators.items(): + group_count = 0 + for property, resolution_details in actuator.joint_property_resolution_table.items(): + for prop_idx, resolution_detail in enumerate(resolution_details): + actuator_group_str = actuator_group if group_count == 0 else "" + property_str = property if prop_idx == 0 else "" + fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail] + t.add_row([actuator_group_str, property_str, *fmt]) + group_count += 1 + logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}") + + def _process_tendons(self): + """Process fixed and spatial tendons.""" + # create a list to store the fixed tendon names + self._fixed_tendon_names = list() + self._spatial_tendon_names = list() + # parse fixed tendons properties if they exist + if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0: + joint_paths = self.root_view.dof_paths[0] + + # iterate over all joints to find tendons attached to them + for j in range(self.num_joints): + usd_joint_path = joint_paths[j] + # check whether joint has tendons - tendon name follows the joint name it is attached to + joint = UsdPhysics.Joint.Get(self.stage, usd_joint_path) + joint_applied_str = str(joint.GetPrim().GetAppliedSchemas()) + if "PhysxTendonAxisRootAPI" in joint_applied_str: + self._fixed_tendon_names.append(usd_joint_path.split("/")[-1]) + elif ( + "PhysxTendonAttachmentRootAPI" in joint_applied_str + or "PhysxTendonAttachmentLeafAPI" in joint_applied_str + ): + self._spatial_tendon_names.append(usd_joint_path.split("/")[-1]) + + # store the fixed tendon names + self._data.fixed_tendon_names = self._fixed_tendon_names + self._data.spatial_tendon_names = self._spatial_tendon_names + + def _apply_actuator_model(self): + """Processes joint commands for the articulation by forwarding them to the actuators. + + The actions are first processed using actuator models. Depending on the robot configuration, + the actuator models compute the joint level simulation commands and sets them into the PhysX buffers. + """ + # process actions per group + for actuator in self.actuators.values(): + # prepare input for actuator model based on cached data + # TODO : A tensor dict would be nice to do the indexing of all tensors together + control_action = ArticulationActions( + joint_positions=self._data.joint_pos_target.torch[:, actuator.joint_indices], + joint_velocities=self._data.joint_vel_target.torch[:, actuator.joint_indices], + joint_efforts=self._data.joint_effort_target.torch[:, actuator.joint_indices], + joint_indices=actuator.joint_indices, + ) + # compute joint command from the actuator model + control_action = actuator.compute( + control_action, + joint_pos=self._data.joint_pos.torch[:, actuator.joint_indices], + joint_vel=self._data.joint_vel.torch[:, actuator.joint_indices], + ) + # update targets (these are set into the simulation) + joint_indices = actuator.joint_indices + if actuator.joint_indices == slice(None) or actuator.joint_indices is None: + joint_indices = self._ALL_JOINT_INDICES + if hasattr(actuator, "gear_ratio"): + gear_ratio = actuator.gear_ratio + else: + gear_ratio = None + wp.launch( + articulation_kernels.update_targets, + dim=(self.num_instances, joint_indices.shape[0]), + inputs=[ + control_action.joint_positions, + control_action.joint_velocities, + control_action.joint_efforts, + joint_indices, + ], + outputs=[ + self._joint_pos_target_sim, + self._joint_vel_target_sim, + self._joint_effort_target_sim, + ], + device=self.device, + ) + # update state of the actuator model + wp.launch( + articulation_kernels.update_actuator_state_model, + dim=(self.num_instances, joint_indices.shape[0]), + inputs=[ + actuator.computed_effort, + actuator.applied_effort, + gear_ratio, + actuator.velocity_limit, + joint_indices, + ], + outputs=[ + self._data.computed_torque, + self._data.applied_torque, + self._data.gear_ratio, + self._data.soft_joint_vel_limits, + ], + device=self.device, + ) + + """ + Internal helpers -- Debugging. + """ + + def _validate_cfg(self): + """Validate the configuration after processing. + + .. note:: + This function should be called only after the configuration has been processed and the buffers have been + created. Otherwise, some settings that are altered during processing may not be validated. + For instance, the actuator models may change the joint max velocity limits. + """ + # check that the default values are within the limits + joint_pos_limits = wp.to_torch(wp.clone(self.root_view.get_dof_limits(), device=self.device))[0] + out_of_range = self._data.default_joint_pos.torch[0] < joint_pos_limits[:, 0] + out_of_range |= self._data.default_joint_pos.torch[0] > joint_pos_limits[:, 1] + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + # throw error if any of the default joint positions are out of the limits + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default positions out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = joint_pos_limits[idx] + joint_pos = self._data.default_joint_pos.torch[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + # check that the default joint velocities are within the limits + joint_max_vel = wp.to_torch(wp.clone(self.root_view.get_dof_max_velocities(), device=self.device))[0] + out_of_range = torch.abs(self._data.default_joint_vel.torch[0]) > joint_max_vel + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default velocities out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] + joint_vel = self._data.default_joint_vel.torch[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + def _log_articulation_info(self): + """Log information about the articulation. + + .. note:: We purposefully read the values from the simulator to ensure that the values are configured as + expected. + """ + + # define custom formatters for large numbers and limit ranges + def format_large_number(_, v: float) -> str: + """Format large numbers using scientific notation.""" + if abs(v) >= 1e3: + return f"{v:.1e}" + else: + return f"{v:.3f}" + + def format_limits(_, v: tuple[float, float]) -> str: + """Format limit ranges using scientific notation.""" + if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: + return f"[{v[0]:.1e}, {v[1]:.1e}]" + else: + return f"[{v[0]:.3f}, {v[1]:.3f}]" + + # read out all joint parameters from simulation + # -- gains + # Use data properties which have already been cloned and stored during initialization + # This avoids issues with indexedarray or empty arrays from root_view + stiffnesses = self.data.joint_stiffness.torch[0].cpu().tolist() + dampings = self.data.joint_damping.torch[0].cpu().tolist() + # -- properties + armatures = self.data.joint_armature.torch[0].cpu().tolist() + # For friction, use the individual components from data + friction_coeff = self.data.joint_friction_coeff.torch[0].cpu() + dynamic_friction_coeff = self.data.joint_dynamic_friction_coeff.torch[0].cpu() + viscous_friction_coeff = self.data.joint_viscous_friction_coeff.torch[0].cpu() + static_frictions = friction_coeff.tolist() + dynamic_frictions = dynamic_friction_coeff.tolist() + viscous_frictions = viscous_friction_coeff.tolist() + # -- limits + # joint_pos_limits is vec2f array, convert to torch and extract [lower, upper] pairs + position_limits_torch = self.data.joint_pos_limits.torch[0].cpu() # shape: (num_joints, 2) + position_limits = [tuple(pos_limit.tolist()) for pos_limit in position_limits_torch] + velocity_limits = self.data.joint_vel_limits.torch[0].cpu().tolist() + effort_limits = self.data.joint_effort_limits.torch[0].cpu().tolist() + # create table for term information + joint_table = PrettyTable() + joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" + # build field names based on Isaac Sim version + field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] + field_names.extend(["Static Friction", "Dynamic Friction", "Viscous Friction"]) + field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) + joint_table.field_names = field_names + + # apply custom formatters to numeric columns + joint_table.custom_format["Stiffness"] = format_large_number + joint_table.custom_format["Damping"] = format_large_number + joint_table.custom_format["Armature"] = format_large_number + joint_table.custom_format["Static Friction"] = format_large_number + joint_table.custom_format["Dynamic Friction"] = format_large_number + joint_table.custom_format["Viscous Friction"] = format_large_number + joint_table.custom_format["Position Limits"] = format_limits + joint_table.custom_format["Velocity Limits"] = format_large_number + joint_table.custom_format["Effort Limits"] = format_large_number + + # set alignment of table columns + joint_table.align["Name"] = "l" + # add info on each term + for index, name in enumerate(self.joint_names): + # build row data based on Isaac Sim version + row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] + if has_kit() and get_isaac_sim_version().major < 5: + row_data.append(static_frictions[index]) + else: + row_data.extend([static_frictions[index], dynamic_frictions[index], viscous_frictions[index]]) + row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) + # add row to table + joint_table.add_row(row_data) + # convert table to string + logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) + + # read out all fixed tendon parameters from simulation + if self.num_fixed_tendons > 0: + # -- gains + # Use data properties which have already been cloned and stored during initialization + ft_stiffnesses = self.data.fixed_tendon_stiffness.torch[0].cpu().tolist() + ft_dampings = self.data.fixed_tendon_damping.torch[0].cpu().tolist() + # -- limits + ft_limit_stiffnesses = self.data.fixed_tendon_limit_stiffness.torch[0].cpu().tolist() + # fixed_tendon_pos_limits is vec2f array + ft_limits_torch = self.data.fixed_tendon_pos_limits.torch[0].cpu() + ft_limits = [tuple(limit.tolist()) for limit in ft_limits_torch] + ft_rest_lengths = self.data.fixed_tendon_rest_length.torch[0].cpu().tolist() + ft_offsets = self.data.fixed_tendon_offset.torch[0].cpu().tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Limits", + "Rest Length", + "Offset", + ] + tendon_table.float_format = ".3" + + # apply custom formatters to tendon table columns + tendon_table.custom_format["Stiffness"] = format_large_number + tendon_table.custom_format["Damping"] = format_large_number + tendon_table.custom_format["Limit Stiffness"] = format_large_number + tendon_table.custom_format["Limits"] = format_limits + tendon_table.custom_format["Rest Length"] = format_large_number + tendon_table.custom_format["Offset"] = format_large_number + + # add info on each term + for index in range(self.num_fixed_tendons): + tendon_table.add_row( + [ + index, + ft_stiffnesses[index], + ft_dampings[index], + ft_limit_stiffnesses[index], + ft_limits[index], + ft_rest_lengths[index], + ft_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + if self.num_spatial_tendons > 0: + # -- gains + # Use data properties which have already been cloned and stored during initialization + st_stiffnesses = self.data.spatial_tendon_stiffness.torch[0].cpu().tolist() + st_dampings = self.data.spatial_tendon_damping.torch[0].cpu().tolist() + # -- limits + st_limit_stiffnesses = self.data.spatial_tendon_limit_stiffness.torch[0].cpu().tolist() + st_offsets = self.data.spatial_tendon_offset.torch[0].cpu().tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Offset", + ] + tendon_table.float_format = ".3" + # add info on each term + for index in range(self.num_spatial_tendons): + tendon_table.add_row( + [ + index, + st_stiffnesses[index], + st_dampings[index], + st_limit_stiffnesses[index], + st_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + def _get_cpu_env_ids(self, env_ids: wp.array | torch.Tensor) -> wp.array: + """Get the CPU environment indices. + + For the full-index case (all environments), returns the pre-allocated + pinned CPU buffer. For partial indices (e.g. during partial resets), clones to CPU. + + Args: + env_ids: Environment indices. + + Returns: + A warp array of environment indices on CPU. + """ + if isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids, dtype=wp.int32) + # Fast path: if these are all indices, use pre-allocated pinned buffer + if env_ids.ptr == self._ALL_INDICES.ptr: + return self._cpu_env_ids_all + # Slow path: partial indices (reset), clone to CPU + return wp.clone(env_ids, device="cpu") + + def _resolve_env_mask(self, env_mask: wp.array | None) -> torch.Tensor | wp.array: + """ + Resolve environment mask to a torch tensor. + + Args: + env_mask: Environment mask. If None, then all indices are used. + + Returns: + A torch tensor of environment indices. + """ + # resolve masks + if env_mask is not None: + if isinstance(env_mask, wp.array): + env_mask = wp.to_torch(env_mask) + env_ids = torch.nonzero(env_mask)[:, 0].to(torch.int32) + else: + env_ids = self._ALL_INDICES + return env_ids + + def _get_root_link_pose_w_f32(self) -> wp.array: + """Get a cached float32 view of root_link_pose_w for PhysX TensorAPI. Invalidated in ``_create_buffers``.""" + if self._root_link_pose_w_f32 is None: + self._root_link_pose_w_f32 = self.data._root_link_pose_w.data.view(wp.float32) + return self._root_link_pose_w_f32 + + def _get_root_com_vel_w_f32(self) -> wp.array: + """Get a cached float32 view of root_com_vel_w for PhysX TensorAPI. Invalidated in ``_create_buffers``.""" + if self._root_com_vel_w_f32 is None: + self._root_com_vel_w_f32 = self.data._root_com_vel_w.data.view(wp.float32) + return self._root_com_vel_w_f32 + + def _get_root_link_vel_w_f32(self) -> wp.array: + """Get a cached float32 view of root_link_vel_w for PhysX TensorAPI. Invalidated in ``_create_buffers``.""" + if self._root_link_vel_w_f32 is None: + self._root_link_vel_w_f32 = self.data._root_link_vel_w.data.view(wp.float32) + return self._root_link_vel_w_f32 + + def _get_inst_wrench_force_f32(self) -> wp.array: + """Get a cached flattened float32 view of instantaneous wrench force. Invalidated in ``_create_buffers``.""" + if self._inst_wrench_force_f32 is None: + self._inst_wrench_force_f32 = self._instantaneous_wrench_composer.out_force_b.warp.flatten().view( + wp.float32 + ) + return self._inst_wrench_force_f32 + + def _get_inst_wrench_torque_f32(self) -> wp.array: + """Get a cached flattened float32 view of instantaneous wrench torque. Invalidated in ``_create_buffers``.""" + if self._inst_wrench_torque_f32 is None: + self._inst_wrench_torque_f32 = self._instantaneous_wrench_composer.out_torque_b.warp.flatten().view( + wp.float32 + ) + return self._inst_wrench_torque_f32 + + def _get_perm_wrench_force_f32(self) -> wp.array: + """Get a cached flattened float32 view of permanent wrench force. Invalidated in ``_create_buffers``.""" + if self._perm_wrench_force_f32 is None: + self._perm_wrench_force_f32 = self._permanent_wrench_composer.out_force_b.warp.flatten().view(wp.float32) + return self._perm_wrench_force_f32 + + def _get_perm_wrench_torque_f32(self) -> wp.array: + """Get a cached flattened float32 view of permanent wrench torque. Invalidated in ``_create_buffers``.""" + if self._perm_wrench_torque_f32 is None: + self._perm_wrench_torque_f32 = self._permanent_wrench_composer.out_torque_b.warp.flatten().view(wp.float32) + return self._perm_wrench_torque_f32 + + def _resolve_env_ids(self, env_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array: + """Resolve environment indices to a warp array. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_joint_mask(self, joint_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve joint mask to a torch tensor. + + Args: + joint_mask: Joint mask. If None, then all indices are used. + + Returns: + A torch tensor of joint indices. + """ + if joint_mask is not None: + if isinstance(joint_mask, wp.array): + joint_mask = wp.to_torch(joint_mask) + joint_ids = torch.nonzero(joint_mask)[:, 0].to(torch.int32) + else: + joint_ids = self._ALL_JOINT_INDICES + return joint_ids + + def _resolve_joint_ids(self, joint_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve joint indices to a warp array or tensor. + + Args: + joint_ids: Joint indices. If None, then all indices are used. + + Returns: + A warp array of joint indices or a tensor of joint indices. + """ + if isinstance(joint_ids, list): + return wp.array(joint_ids, dtype=wp.int32, device=self.device) + if (joint_ids is None) or (joint_ids == slice(None)): + return self._ALL_JOINT_INDICES + if isinstance(joint_ids, torch.Tensor): + if joint_ids.dtype == torch.int64: + joint_ids = joint_ids.to(torch.int32) + return wp.from_torch(joint_ids, dtype=wp.int32) + return joint_ids + + def _resolve_body_mask(self, body_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve body mask to a torch tensor. + + Args: + body_mask: Body mask. If None, then all indices are used. + + Returns: + A torch tensor of body indices. + """ + if body_mask is not None: + if isinstance(body_mask, wp.array): + body_mask = wp.to_torch(body_mask) + body_ids = torch.nonzero(body_mask)[:, 0].to(torch.int32) + else: + body_ids = self._ALL_BODY_INDICES + return body_ids + + def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve body indices to a warp array or tensor. + + Args: + body_ids: Body indices. If None, then all indices are used. + + Returns: + A warp array of body indices or a tensor of body indices. + """ + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if (body_ids is None) or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + def _resolve_fixed_tendon_mask(self, fixed_tendon_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve fixed tendon mask to a torch tensor. + + Args: + fixed_tendon_mask: Fixed tendon mask. If None, then all indices are used. + + Returns: + A torch tensor of fixed tendon indices. + """ + if fixed_tendon_mask is not None: + if isinstance(fixed_tendon_mask, wp.array): + fixed_tendon_mask = wp.to_torch(fixed_tendon_mask) + fixed_tendon_ids = torch.nonzero(fixed_tendon_mask)[:, 0].to(torch.int32) + else: + fixed_tendon_ids = self._ALL_FIXED_TENDON_INDICES + return fixed_tendon_ids + + def _resolve_fixed_tendon_ids( + self, tendon_ids: Sequence[int] | torch.Tensor | wp.array | None + ) -> wp.array | torch.Tensor: + """Resolve tendon indices to a warp array or tensor. + + Args: + tendon_ids: Tendon indices. If None, then all indices are used. + + Returns: + A warp array of tendon indices or a tensor of tendon indices. + """ + if isinstance(tendon_ids, list): + return wp.array(tendon_ids, dtype=wp.int32, device=self.device) + if (tendon_ids is None) or (tendon_ids == slice(None)): + return self._ALL_FIXED_TENDON_INDICES + return tendon_ids + + def _resolve_spatial_tendon_mask(self, spatial_tendon_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve spatial tendon mask to a torch tensor. + + Args: + spatial_tendon_mask: Spatial tendon mask. If None, then all indices are used. + + Returns: + A torch tensor of spatial tendon indices. + """ + if spatial_tendon_mask is not None: + if isinstance(spatial_tendon_mask, wp.array): + spatial_tendon_mask = wp.to_torch(spatial_tendon_mask) + spatial_tendon_ids = torch.nonzero(spatial_tendon_mask)[:, 0].to(torch.int32) + else: + spatial_tendon_ids = self._ALL_SPATIAL_TENDON_INDICES + return spatial_tendon_ids + + def _resolve_spatial_tendon_ids( + self, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None + ) -> wp.array | torch.Tensor: + """Resolve spatial tendon indices to a warp array or tensor. + + Args: + spatial_tendon_ids: Spatial tendon indices. If None, then all indices are used. + + Returns: + A warp array of spatial tendon indices or a tensor of spatial tendon indices. + """ + if isinstance(spatial_tendon_ids, list): + return wp.array(spatial_tendon_ids, dtype=wp.int32, device=self.device) + if (spatial_tendon_ids is None) or (spatial_tendon_ids == slice(None)): + return self._ALL_SPATIAL_TENDON_INDICES + return spatial_tendon_ids + + """ + Deprecated methods. + """ + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + warnings.warn( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_view + + def write_joint_friction_coefficient_to_sim( + self, + joint_friction_coeff: torch.Tensor | wp.array | float, + joint_dynamic_friction_coeff: torch.Tensor | wp.array | float | None = None, + joint_viscous_friction_coeff: torch.Tensor | wp.array | float | None = None, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ): + """Deprecated, same as :meth:`write_joint_friction_coefficient_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_friction_coefficient_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_friction_coefficient_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=joint_friction_coeff, + joint_dynamic_friction_coeff=joint_dynamic_friction_coeff, + joint_viscous_friction_coeff=joint_viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=full_data, + ) + + def write_joint_viscous_friction_coefficient_to_sim( + self, + joint_viscous_friction_coeff: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Deprecated, same as :meth:`write_joint_viscous_friction_coefficient_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_viscous_friction_coefficient_to_sim' will be deprecated in a future release. " + "Please use 'write_joint_viscous_friction_coefficient_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_viscous_friction_coefficient_to_sim_index( + joint_viscous_friction_coeff=joint_viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=full_data, + ) + + def write_joint_dynamic_friction_coefficient_to_sim( + self, + joint_dynamic_friction_coeff: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Deprecated, same as :meth:`write_joint_dynamic_friction_coefficient_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_dynamic_friction_coefficient_to_sim' will be deprecated in a future release. " + "Please use 'write_joint_dynamic_friction_coefficient_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_dynamic_friction_coefficient_to_sim_index( + joint_dynamic_friction_coeff=joint_dynamic_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + full_data=full_data, + ) + + def write_root_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_com_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_com_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_com_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(root_state, wp.array): + raise ValueError("The root state must be a torch tensor, not a warp array.") + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_joint_state_to_sim( + self, + position: torch.Tensor | wp.array, + velocity: torch.Tensor | wp.array, + joint_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ): + """Deprecated, same as :meth:`write_joint_state_to_sim_index`.""" + warnings.warn( + "The function 'write_joint_state_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_state_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_joint_state_to_sim_index(position=position, velocity=velocity, joint_ids=joint_ids, env_ids=env_ids) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py new file mode 100644 index 000000000000..8c2056d9cbfa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -0,0 +1,1867 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.assets.articulation import kernels as articulation_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + +# import logger +logger = logging.getLogger(__name__) + + +class ArticulationData(BaseArticulationData): + """Data container for an articulation. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + + .. note:: + **Pull-to-refresh model.** PhysX state properties are *not* automatically updated each + simulation step. Each property getter pulls fresh data from the PhysX tensor API on first + access per timestamp, then caches the result until the next step. This differs from the + Newton backend, where buffers are refreshed automatically by the simulation. + + .. note:: + **ProxyArray pointer stability.** Each :class:`ProxyArray` wrapper is created once on the + first property access and reused thereafter. This is safe because the PhysX tensor API + returns views into stable, pre-allocated GPU buffers whose device pointer does not change + across simulation steps. The ``wp.array`` Python objects returned by getters like + ``get_root_transforms()`` are new wrappers each call, but they alias the same underlying + GPU memory. Sub-view properties (``root_pos_w``, ``root_quat_w``, etc.) similarly wrap + pointer offsets into these stable buffers and are therefore also safe to cache. + """ + + __backend_name__: str = "physx" + """The name of the backend for the articulation data.""" + + def __init__(self, root_view: physx.ArticulationView, device: str): + """Initializes the articulation data. + + Args: + root_view: The root articulation view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root articulation view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: physx.ArticulationView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = gravity_dir.repeat(self._root_view.count, 1) + forward_vec = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray(wp.from_torch(gravity_dir, dtype=wp.vec3f)) + self.FORWARD_VEC_B = ProxyArray(wp.from_torch(forward_vec, dtype=wp.vec3f)) + + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the articulation data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the articulation data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the articulation data is already primed. + """ + if self._is_primed: + raise ValueError("The articulation data is already primed.") + self._is_primed = True + + def update(self, dt: float) -> None: + """Updates the data for the articulation. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + # Trigger an update of the joint acceleration buffer at a higher frequency + # since we do finite differencing. + self.joint_acc + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + """ + Defaults - Initial state. + """ + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose ``[pos, quat]`` in the local environment frame. + + The position and quaternion are of the articulation root's actor frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + """ + if self._default_root_pose_ta is None: + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + return self._default_root_pose_ta + + @default_root_pose.setter + def default_root_pose(self, value: wp.array) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_pose.assign(value) + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. + + The linear and angular velocities are of the articulation root's center of mass frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + """ + if self._default_root_vel_ta is None: + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + return self._default_root_vel_ta + + @default_root_vel.setter + def default_root_vel(self, value: wp.array) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_vel.assign(value) + + @property + def default_joint_pos(self) -> ProxyArray: + """Default joint positions of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + if self._default_joint_pos_ta is None: + self._default_joint_pos_ta = ProxyArray(self._default_joint_pos) + return self._default_joint_pos_ta + + @default_joint_pos.setter + def default_joint_pos(self, value: wp.array) -> None: + """Set the default joint positions. + + Args: + value: The default joint positions. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_pos.assign(value) + + @property + def default_joint_vel(self) -> ProxyArray: + """Default joint velocities of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + if self._default_joint_vel_ta is None: + self._default_joint_vel_ta = ProxyArray(self._default_joint_vel) + return self._default_joint_vel_ta + + @default_joint_vel.setter + def default_joint_vel(self, value: wp.array) -> None: + """Set the default joint velocities. + + Args: + value: The default joint velocities. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_vel.assign(value) + + """ + Joint commands -- Set into simulation. + """ + + @property + def joint_pos_target(self) -> ProxyArray: + """Joint position targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + if self._joint_pos_target_ta is None: + self._joint_pos_target_ta = ProxyArray(self._joint_pos_target) + return self._joint_pos_target_ta + + @property + def joint_vel_target(self) -> ProxyArray: + """Joint velocity targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + if self._joint_vel_target_ta is None: + self._joint_vel_target_ta = ProxyArray(self._joint_vel_target) + return self._joint_vel_target_ta + + @property + def joint_effort_target(self) -> ProxyArray: + """Joint effort targets commanded by the user. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + if self._joint_effort_target_ta is None: + self._joint_effort_target_ta = ProxyArray(self._joint_effort_target) + return self._joint_effort_target_ta + + """ + Joint commands -- Explicit actuators. + """ + + @property + def computed_torque(self) -> ProxyArray: + """Joint torques computed from the actuator model (before clipping). + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + This quantity is the raw torque output from the actuator mode, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + if self._computed_torque_ta is None: + self._computed_torque_ta = ProxyArray(self._computed_torque) + return self._computed_torque_ta + + @property + def applied_torque(self) -> ProxyArray: + """Joint torques applied from the actuator model (after clipping). + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. + """ + if self._applied_torque_ta is None: + self._applied_torque_ta = ProxyArray(self._applied_torque) + return self._applied_torque_ta + + """ + Joint properties + """ + + @property + def joint_stiffness(self) -> ProxyArray: + """Joint stiffness provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + if self._joint_stiffness_ta is None: + self._joint_stiffness_ta = ProxyArray(self._joint_stiffness) + return self._joint_stiffness_ta + + @property + def joint_damping(self) -> ProxyArray: + """Joint damping provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + if self._joint_damping_ta is None: + self._joint_damping_ta = ProxyArray(self._joint_damping) + return self._joint_damping_ta + + @property + def joint_armature(self) -> ProxyArray: + """Joint armature provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_armature_ta is None: + self._joint_armature_ta = ProxyArray(self._joint_armature) + return self._joint_armature_ta + + @property + def joint_friction_coeff(self) -> ProxyArray: + """Joint static friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_friction_coeff_ta is None: + self._joint_friction_coeff_ta = ProxyArray(self._joint_friction_coeff) + return self._joint_friction_coeff_ta + + @property + def joint_dynamic_friction_coeff(self) -> ProxyArray: + """Joint dynamic friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_dynamic_friction_coeff_ta is None: + self._joint_dynamic_friction_coeff_ta = ProxyArray(self._joint_dynamic_friction_coeff) + return self._joint_dynamic_friction_coeff_ta + + @property + def joint_viscous_friction_coeff(self) -> ProxyArray: + """Joint viscous friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_viscous_friction_coeff_ta is None: + self._joint_viscous_friction_coeff_ta = ProxyArray(self._joint_viscous_friction_coeff) + return self._joint_viscous_friction_coeff_ta + + @property + def joint_pos_limits(self) -> ProxyArray: + """Joint position limits provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + if self._joint_pos_limits_ta is None: + self._joint_pos_limits_ta = ProxyArray(self._joint_pos_limits) + return self._joint_pos_limits_ta + + @property + def joint_vel_limits(self) -> ProxyArray: + """Joint maximum velocity provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_vel_limits_ta is None: + self._joint_vel_limits_ta = ProxyArray(self._joint_vel_limits) + return self._joint_vel_limits_ta + + @property + def joint_effort_limits(self) -> ProxyArray: + """Joint maximum effort provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_effort_limits_ta is None: + self._joint_effort_limits_ta = ProxyArray(self._joint_effort_limits) + return self._joint_effort_limits_ta + + """ + Joint properties - Custom. + """ + + @property + def soft_joint_pos_limits(self) -> ProxyArray: + r"""Soft joint positions limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + if self._soft_joint_pos_limits_ta is None: + self._soft_joint_pos_limits_ta = ProxyArray(self._soft_joint_pos_limits) + return self._soft_joint_pos_limits_ta + + @property + def soft_joint_vel_limits(self) -> ProxyArray: + """Soft joint velocity limits for all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. For instance, in a variable gear ratio actuator model. + """ + if self._soft_joint_vel_limits_ta is None: + self._soft_joint_vel_limits_ta = ProxyArray(self._soft_joint_vel_limits) + return self._soft_joint_vel_limits_ta + + @property + def gear_ratio(self) -> ProxyArray: + """Gear ratio for relating motor torques to applied Joint torques. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._gear_ratio_ta is None: + self._gear_ratio_ta = ProxyArray(self._gear_ratio) + return self._gear_ratio_ta + + """ + Fixed tendon properties. + """ + + @property + def fixed_tendon_stiffness(self) -> ProxyArray: + """Fixed tendon stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_stiffness_ta is None: + self._fixed_tendon_stiffness_ta = ProxyArray(self._fixed_tendon_stiffness) + return self._fixed_tendon_stiffness_ta + + @property + def fixed_tendon_damping(self) -> ProxyArray: + """Fixed tendon damping provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_damping_ta is None: + self._fixed_tendon_damping_ta = ProxyArray(self._fixed_tendon_damping) + return self._fixed_tendon_damping_ta + + @property + def fixed_tendon_limit_stiffness(self) -> ProxyArray: + """Fixed tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_limit_stiffness_ta is None: + self._fixed_tendon_limit_stiffness_ta = ProxyArray(self._fixed_tendon_limit_stiffness) + return self._fixed_tendon_limit_stiffness_ta + + @property + def fixed_tendon_rest_length(self) -> ProxyArray: + """Fixed tendon rest length provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_rest_length_ta is None: + self._fixed_tendon_rest_length_ta = ProxyArray(self._fixed_tendon_rest_length) + return self._fixed_tendon_rest_length_ta + + @property + def fixed_tendon_offset(self) -> ProxyArray: + """Fixed tendon offset provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_fixed_tendons). + """ + if self._fixed_tendon_offset_ta is None: + self._fixed_tendon_offset_ta = ProxyArray(self._fixed_tendon_offset) + return self._fixed_tendon_offset_ta + + @property + def fixed_tendon_pos_limits(self) -> ProxyArray: + """Fixed tendon position limits provided to the simulation. + + Shape is (num_instances, num_fixed_tendons), dtype = wp.vec2f. In torch this resolves to + (num_instances, num_fixed_tendons, 2). + """ + if self._fixed_tendon_pos_limits_ta is None: + self._fixed_tendon_pos_limits_ta = ProxyArray(self._fixed_tendon_pos_limits) + return self._fixed_tendon_pos_limits_ta + + """ + Spatial tendon properties. + """ + + @property + def spatial_tendon_stiffness(self) -> ProxyArray: + """Spatial tendon stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_stiffness_ta is None: + self._spatial_tendon_stiffness_ta = ProxyArray(self._spatial_tendon_stiffness) + return self._spatial_tendon_stiffness_ta + + @property + def spatial_tendon_damping(self) -> ProxyArray: + """Spatial tendon damping provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_damping_ta is None: + self._spatial_tendon_damping_ta = ProxyArray(self._spatial_tendon_damping) + return self._spatial_tendon_damping_ta + + @property + def spatial_tendon_limit_stiffness(self) -> ProxyArray: + """Spatial tendon limit stiffness provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_limit_stiffness_ta is None: + self._spatial_tendon_limit_stiffness_ta = ProxyArray(self._spatial_tendon_limit_stiffness) + return self._spatial_tendon_limit_stiffness_ta + + @property + def spatial_tendon_offset(self) -> ProxyArray: + """Spatial tendon offset provided to the simulation. + + Shape is (num_instances, num_spatial_tendons), dtype = wp.float32. In torch this resolves to + (num_instances, num_spatial_tendons). + """ + if self._spatial_tendon_offset_ta is None: + self._spatial_tendon_offset_ta = ProxyArray(self._spatial_tendon_offset) + return self._spatial_tendon_offset_ta + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # set the buffer data and timestamp + self._root_link_pose_w.data = self._root_view.get_root_transforms().view(wp.transformf) + self._root_link_pose_w.timestamp = self._sim_timestamp + + if self._root_link_pose_w_ta is None: + self._root_link_pose_w_ta = ProxyArray(self._root_link_pose_w.data) + return self._root_link_pose_w_ta + + @property + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_root_link_vel_from_root_com_vel, + dim=self._num_instances, + inputs=[ + self.root_com_vel_w, + self.root_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._root_link_vel_w.data, + ], + device=self.device, + ) + self._root_link_vel_w.timestamp = self._sim_timestamp + + if self._root_link_vel_w_ta is None: + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w.data) + return self._root_link_vel_w_ta + + @property + def root_com_pose_w(self) -> ProxyArray: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + wp.launch( + shared_kernels.get_root_com_pose_from_root_link_pose, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._root_com_pose_w.data, + ], + device=self.device, + ) + self._root_com_pose_w.timestamp = self._sim_timestamp + + if self._root_com_pose_w_ta is None: + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w.data) + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_view.get_root_velocities().view(wp.spatial_vectorf) + self._root_com_vel_w.timestamp = self._sim_timestamp + + if self._root_com_vel_w_ta is None: + self._root_com_vel_w_ta = ProxyArray(self._root_com_vel_w.data) + return self._root_com_vel_w_ta + + """ + Body state properties. + """ + + @property + def body_mass(self) -> ProxyArray: + """Body mass in the world frame. + + Shape is (num_instances, num_bodies), dtype = wp.float32. In torch this resolves to (num_instances, num_bodies). + """ + self._body_mass.assign(self._root_view.get_masses()) + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Flattened body inertia in the world frame. + + Shape is (num_instances, num_bodies, 9), dtype = wp.float32. In torch this resolves to + (num_instances, num_bodies, 9). + """ + self._body_inertia.assign(self._root_view.get_inertias()) + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_link_pose_w.timestamp < self._sim_timestamp: + # perform forward kinematics (shouldn't cause overhead if it happened already) + self._physics_sim_view.update_articulations_kinematic() + # set the buffer data and timestamp + self._body_link_pose_w.data = self._root_view.get_link_transforms().view(wp.transformf) + self._body_link_pose_w.timestamp = self._sim_timestamp + + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w.data) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. + """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_link_vel_from_body_com_vel, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_com_vel_w, + self.body_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._body_link_vel_w.data, + ], + device=self.device, + ) + self._body_link_vel_w.timestamp = self._sim_timestamp + + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w.data) + return self._body_link_vel_w_ta + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_com_pose_from_body_link_pose, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._body_com_pose_w.data, + ], + device=self.device, + ) + self._body_com_pose_w.timestamp = self._sim_timestamp + + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w.data) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + """ + if self._body_com_vel_w.timestamp < self._sim_timestamp: + self._body_com_vel_w.data = self._root_view.get_link_velocities().view(wp.spatial_vectorf) + self._body_com_vel_w.timestamp = self._sim_timestamp + + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(self._body_com_vel_w.data) + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._body_com_acc_w.data = self._root_view.get_link_accelerations().view(wp.spatial_vectorf) + self._body_com_acc_w.timestamp = self._sim_timestamp + + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # set the buffer data and timestamp + self._body_com_pose_b.data.assign(self._root_view.get_coms().view(wp.transformf)) + self._body_com_pose_b.timestamp = self._sim_timestamp + + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + return self._body_com_pose_b_ta + + """ + Joint state properties. + """ + + @property + def joint_pos(self) -> ProxyArray: + """Joint positions of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_pos.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._joint_pos.data = self._root_view.get_dof_positions() + self._joint_pos.timestamp = self._sim_timestamp + if self._joint_pos_ta is None: + self._joint_pos_ta = ProxyArray(self._joint_pos.data) + return self._joint_pos_ta + + @property + def joint_vel(self) -> ProxyArray: + """Joint velocities of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_vel.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._joint_vel.data = self._root_view.get_dof_velocities() + self._joint_vel.timestamp = self._sim_timestamp + if self._joint_vel_ta is None: + self._joint_vel_ta = ProxyArray(self._joint_vel.data) + return self._joint_vel_ta + + @property + def joint_acc(self) -> ProxyArray: + """Joint acceleration of all joints. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to (num_instances, num_joints). + """ + if self._joint_acc.timestamp < self._sim_timestamp: + # note: we use finite differencing to compute acceleration + time_elapsed = self._sim_timestamp - self._joint_acc.timestamp + wp.launch( + articulation_kernels.get_joint_acc_from_joint_vel, + dim=(self._num_instances, self._num_joints), + inputs=[ + self.joint_vel, + self._previous_joint_vel, + time_elapsed, + ], + outputs=[ + self._joint_acc.data, + ], + device=self.device, + ) + self._joint_acc.timestamp = self._sim_timestamp + if self._joint_acc_ta is None: + self._joint_acc_ta = ProxyArray(self._joint_acc.data) + return self._joint_acc_ta + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3).""" + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.GRAVITY_VEC_W, self.root_link_quat_w], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + return self._projected_gravity_b_ta + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). Shape is (num_instances,), dtype = wp.float32. + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.root_heading_w, + dim=self._num_instances, + inputs=[self.FORWARD_VEC_B, self.root_link_quat_w], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + if self._heading_w_ta is None: + self._heading_w_ta = ProxyArray(self._heading_w.data) + return self._heading_w_ta + + @property + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's actor frame with respect to its actor frame. + """ + if self._root_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_lin_vel_w, self.root_link_quat_w], + outputs=[self._root_link_lin_vel_b.data], + device=self.device, + ) + self._root_link_lin_vel_b.timestamp = self._sim_timestamp + if self._root_link_lin_vel_b_ta is None: + self._root_link_lin_vel_b_ta = ProxyArray(self._root_link_lin_vel_b.data) + return self._root_link_lin_vel_b_ta + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's actor frame with respect to its actor frame. + """ + if self._root_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_ang_vel_w, self.root_link_quat_w], + outputs=[self._root_link_ang_vel_b.data], + device=self.device, + ) + self._root_link_ang_vel_b.timestamp = self._sim_timestamp + if self._root_link_ang_vel_b_ta is None: + self._root_link_ang_vel_b_ta = ProxyArray(self._root_link_ang_vel_b.data) + return self._root_link_ang_vel_b_ta + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the articulation root's center of mass frame + with respect to its actor frame. + """ + if self._root_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_lin_vel_w, self.root_link_quat_w], + outputs=[self._root_com_lin_vel_b.data], + device=self.device, + ) + self._root_com_lin_vel_b.timestamp = self._sim_timestamp + if self._root_com_lin_vel_b_ta is None: + self._root_com_lin_vel_b_ta = ProxyArray(self._root_com_lin_vel_b.data) + return self._root_com_lin_vel_b_ta + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the articulation root's center of mass frame + with respect to its actor frame. + """ + if self._root_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_ang_vel_w, self.root_link_quat_w], + outputs=[self._root_com_ang_vel_b.data], + device=self.device, + ) + self._root_com_ang_vel_b.timestamp = self._sim_timestamp + if self._root_com_ang_vel_b_ta is None: + self._root_com_ang_vel_b_ta = ProxyArray(self._root_com_ang_vel_b.data) + return self._root_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_link_pose_w + if self._root_link_pos_w_ta is None: + self._root_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_link_pos_w_ta + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation (x, y, z, w) in simulation world frame. + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_link_pose_w + if self._root_link_quat_w_ta is None: + self._root_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_link_quat_w_ta + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root linear velocity in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_link_vel_w + if self._root_link_lin_vel_w_ta is None: + self._root_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_link_lin_vel_w_ta + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_link_vel_w + if self._root_link_ang_vel_w_ta is None: + self._root_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_link_ang_vel_w_ta + + @property + def root_com_pos_w(self) -> ProxyArray: + """Root center of mass position in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the position of the center of mass frame of the root rigid body relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_com_pose_w + if self._root_com_pos_w_ta is None: + self._root_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_com_pos_w_ta + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root center of mass orientation (x, y, z, w) in simulation world frame. + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + + This quantity is the orientation of the principal axes of inertia of the root rigid body relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_com_pose_w + if self._root_com_quat_w_ta is None: + self._root_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_com_quat_w_ta + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root center of mass linear velocity in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_com_vel_w + if self._root_com_lin_vel_w_ta is None: + self._root_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_com_lin_vel_w_ta + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root center of mass angular velocity in simulation world frame. + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.root_com_vel_w + if self._root_com_ang_vel_w_ta is None: + self._root_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_com_ang_vel_w_ta + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_link_pose_w + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_link_pose_w + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' actor frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_link_vel_w + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' actor frame relative to the world. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_link_vel_w + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' center of mass frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_pose_w + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' principal axes of inertia. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_pose_w + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_vel_w + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_vel_w + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the linear acceleration of the articulation bodies' center of mass frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_acc_w + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the angular acceleration of the articulation bodies' center of mass frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_acc_w + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + + This quantity is the center of mass location relative to its body's link frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_pose_b + if self._body_com_pos_b_ta is None: + self._body_com_pos_b_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_b_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + # Access parent property to trigger its getter call (PhysX is pull-on-demand) + parent = self.body_com_pose_b + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_b_ta + + def _create_buffers(self) -> None: + super()._create_buffers() + # Initialize the lazy buffers. + self._num_instances = self._root_view.count + self._num_joints = self._root_view.shared_metatype.dof_count + self._num_bodies = self._root_view.shared_metatype.link_count + self._num_fixed_tendons = self._root_view.max_fixed_tendons + self._num_spatial_tendons = self._root_view.max_spatial_tendons + + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer((self._num_instances), self.device, wp.transformf) + self._root_link_vel_w = TimestampedBuffer((self._num_instances), self.device, wp.spatial_vectorf) + self._body_link_pose_w = TimestampedBuffer((self._num_instances, self._num_bodies), self.device, wp.transformf) + self._body_link_vel_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, wp.spatial_vectorf + ) + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer((self._num_instances, self._num_bodies), self.device, wp.transformf) + # -- com frame w.r.t. world frame + self._root_com_pose_w = TimestampedBuffer((self._num_instances), self.device, wp.transformf) + self._root_com_vel_w = TimestampedBuffer((self._num_instances), self.device, wp.spatial_vectorf) + self._body_com_pose_w = TimestampedBuffer((self._num_instances, self._num_bodies), self.device, wp.transformf) + self._body_com_vel_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, wp.spatial_vectorf + ) + self._body_com_acc_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, wp.spatial_vectorf + ) + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + self._root_link_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + self._root_com_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + self._body_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + self._body_link_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + self._body_com_state_w = TimestampedBuffer( + (self._num_instances, self._num_bodies), self.device, shared_kernels.vec13f + ) + # -- joint state + self._joint_pos = TimestampedBuffer((self._num_instances, self._num_joints), self.device, wp.float32) + self._joint_vel = TimestampedBuffer((self._num_instances, self._num_joints), self.device, wp.float32) + self._joint_acc = TimestampedBuffer((self._num_instances, self._num_joints), self.device, wp.float32) + # -- derived properties (these are cached to avoid repeated memory allocations) + self._projected_gravity_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._heading_w = TimestampedBuffer((self._num_instances), self.device, wp.float32) + self._root_link_lin_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_link_ang_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_com_lin_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_com_ang_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + + # Default root pose and velocity + self._default_root_pose = wp.zeros((self._num_instances), dtype=wp.transformf, device=self.device) + self._default_root_vel = wp.zeros((self._num_instances), dtype=wp.spatial_vectorf, device=self.device) + self._default_joint_pos = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._default_joint_vel = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + + # Initialize history for finite differencing + self._previous_joint_vel = wp.clone(self._root_view.get_dof_velocities(), device=self.device) + + # Pre-allocated buffers + # -- Joint commands (set into simulation) + self._joint_pos_target = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_vel_target = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._joint_effort_target = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + # -- Joint commands (explicit actuator model) + self._computed_torque = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + self._applied_torque = wp.zeros((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + # -- Joint properties + self._joint_stiffness = wp.clone(self._root_view.get_dof_stiffnesses(), device=self.device) + self._joint_damping = wp.clone(self._root_view.get_dof_dampings(), device=self.device) + self._joint_armature = wp.clone(self._root_view.get_dof_armatures(), device=self.device) + friction_props = wp.clone(self._root_view.get_dof_friction_properties(), device=self.device) + # Initialize output arrays + self._joint_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_dynamic_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._joint_viscous_friction_coeff = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + # Extract friction properties using kernel + wp.launch( + articulation_kernels.extract_friction_properties, + dim=(self._num_instances, self._num_joints), + inputs=[friction_props], + outputs=[ + self._joint_friction_coeff, + self._joint_dynamic_friction_coeff, + self._joint_viscous_friction_coeff, + ], + device=self.device, + ) + self._joint_pos_limits = wp.zeros((self._num_instances, self._num_joints), dtype=wp.vec2f, device=self.device) + self._joint_pos_limits.assign(self._root_view.get_dof_limits().view(wp.vec2f)) + self._joint_vel_limits = wp.clone(self._root_view.get_dof_max_velocities(), device=self.device) + self._joint_effort_limits = wp.clone(self._root_view.get_dof_max_forces(), device=self.device) + # -- Joint properties (custom) + self._soft_joint_pos_limits = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.vec2f, device=self.device + ) + self._soft_joint_vel_limits = wp.zeros( + (self._num_instances, self._num_joints), dtype=wp.float32, device=self.device + ) + self._gear_ratio = wp.ones((self._num_instances, self._num_joints), dtype=wp.float32, device=self.device) + # -- Fixed tendon properties + if self._num_fixed_tendons > 0: + self._fixed_tendon_stiffness = wp.clone(self._root_view.get_fixed_tendon_stiffnesses(), device=self.device) + self._fixed_tendon_damping = wp.clone(self._root_view.get_fixed_tendon_dampings(), device=self.device) + self._fixed_tendon_limit_stiffness = wp.clone( + self._root_view.get_fixed_tendon_limit_stiffnesses(), device=self.device + ) + self._fixed_tendon_rest_length = wp.clone( + self._root_view.get_fixed_tendon_rest_lengths(), device=self.device + ) + self._fixed_tendon_offset = wp.clone(self._root_view.get_fixed_tendon_offsets(), device=self.device) + self._fixed_tendon_pos_limits = wp.clone(self._root_view.get_fixed_tendon_limits(), device=self.device) + else: + self._fixed_tendon_stiffness = None + self._fixed_tendon_damping = None + self._fixed_tendon_limit_stiffness = None + self._fixed_tendon_rest_length = None + self._fixed_tendon_offset = None + self._fixed_tendon_pos_limits = None + # -- Spatial tendon properties + if self._num_spatial_tendons > 0: + self._spatial_tendon_stiffness = wp.clone( + self._root_view.get_spatial_tendon_stiffnesses(), device=self.device + ) + self._spatial_tendon_damping = wp.clone(self._root_view.get_spatial_tendon_dampings(), device=self.device) + self._spatial_tendon_limit_stiffness = wp.clone( + self._root_view.get_spatial_tendon_limit_stiffnesses(), device=self.device + ) + self._spatial_tendon_offset = wp.clone(self._root_view.get_spatial_tendon_offsets(), device=self.device) + else: + self._spatial_tendon_stiffness = None + self._spatial_tendon_damping = None + self._spatial_tendon_limit_stiffness = None + self._spatial_tendon_offset = None + # -- Body properties + self._body_mass = wp.clone(self._root_view.get_masses(), device=self.device) + self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device) + self._default_root_state = None + + # Initialize ProxyArray wrappers + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create pinned ProxyArray wrappers for all data buffers. + + This is called once from :meth:`_create_buffers` during initialization. + PhysX tensor API buffers have stable GPU pointers across simulation steps, + so no rebinding is needed (unlike Newton). + """ + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + # Defaults + self._default_root_pose_ta: ProxyArray | None = None + self._default_root_vel_ta: ProxyArray | None = None + self._default_joint_pos_ta: ProxyArray | None = None + self._default_joint_vel_ta: ProxyArray | None = None + # Joint commands (set into simulation) + self._joint_pos_target_ta: ProxyArray | None = None + self._joint_vel_target_ta: ProxyArray | None = None + self._joint_effort_target_ta: ProxyArray | None = None + # Joint commands (explicit actuator model) + self._computed_torque_ta: ProxyArray | None = None + self._applied_torque_ta: ProxyArray | None = None + # Joint properties + self._joint_stiffness_ta: ProxyArray | None = None + self._joint_damping_ta: ProxyArray | None = None + self._joint_armature_ta: ProxyArray | None = None + self._joint_friction_coeff_ta: ProxyArray | None = None + self._joint_dynamic_friction_coeff_ta: ProxyArray | None = None + self._joint_viscous_friction_coeff_ta: ProxyArray | None = None + self._joint_pos_limits_ta: ProxyArray | None = None + self._joint_vel_limits_ta: ProxyArray | None = None + self._joint_effort_limits_ta: ProxyArray | None = None + # Joint properties (custom) + self._soft_joint_pos_limits_ta: ProxyArray | None = None + self._soft_joint_vel_limits_ta: ProxyArray | None = None + self._gear_ratio_ta: ProxyArray | None = None + # Fixed tendon properties + self._fixed_tendon_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_damping_ta: ProxyArray | None = None + self._fixed_tendon_limit_stiffness_ta: ProxyArray | None = None + self._fixed_tendon_rest_length_ta: ProxyArray | None = None + self._fixed_tendon_offset_ta: ProxyArray | None = None + self._fixed_tendon_pos_limits_ta: ProxyArray | None = None + # Spatial tendon properties + self._spatial_tendon_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_damping_ta: ProxyArray | None = None + self._spatial_tendon_limit_stiffness_ta: ProxyArray | None = None + self._spatial_tendon_offset_ta: ProxyArray | None = None + # Root state (timestamped) + self._root_link_pose_w_ta: ProxyArray | None = None + self._root_link_vel_w_ta: ProxyArray | None = None + self._root_com_pose_w_ta: ProxyArray | None = None + self._root_com_vel_w_ta: ProxyArray | None = None + # Body state (timestamped) + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + # Body properties + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + # Joint state (timestamped) + self._joint_pos_ta: ProxyArray | None = None + self._joint_vel_ta: ProxyArray | None = None + self._joint_acc_ta: ProxyArray | None = None + # Derived properties (timestamped) + self._projected_gravity_b_ta: ProxyArray | None = None + self._heading_w_ta: ProxyArray | None = None + self._root_link_lin_vel_b_ta: ProxyArray | None = None + self._root_link_ang_vel_b_ta: ProxyArray | None = None + self._root_com_lin_vel_b_ta: ProxyArray | None = None + self._root_com_ang_vel_b_ta: ProxyArray | None = None + # Sliced properties (root link) + self._root_link_pos_w_ta: ProxyArray | None = None + self._root_link_quat_w_ta: ProxyArray | None = None + self._root_link_lin_vel_w_ta: ProxyArray | None = None + self._root_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (root com) + self._root_com_pos_w_ta: ProxyArray | None = None + self._root_com_quat_w_ta: ProxyArray | None = None + self._root_com_lin_vel_w_ta: ProxyArray | None = None + self._root_com_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body link) + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body com) + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + # Sliced properties (body com in body frame) + self._body_com_pos_b_ta: ProxyArray | None = None + self._body_com_quat_b_ta: ProxyArray | None = None + # Deprecated state-concat properties + self._default_root_state_ta: ProxyArray | None = None + self._root_state_w_ta: ProxyArray | None = None + self._root_link_state_w_ta: ProxyArray | None = None + self._root_com_state_w_ta: ProxyArray | None = None + self._body_state_w_ta: ProxyArray | None = None + self._body_link_state_w_ta: ProxyArray | None = None + self._body_com_state_w_ta: ProxyArray | None = None + + """ + Internal helpers. + """ + + def _get_pos_from_transform(self, transform: wp.array) -> wp.array: + """Generates a position array from a transform array. + + Args: + transform: The transform array. Shape is (N, 7). + + Returns: + The position array. Shape is (N, 3). + """ + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + + def _get_quat_from_transform(self, transform: wp.array) -> wp.array: + """Generates a quaternion array from a transform array. + + Args: + transform: The transform array. Shape is (N, 7). + + Returns: + The quaternion array. Shape is (N, 4). + """ + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + + def _get_lin_vel_from_spatial_vector(self, spatial_vector: wp.array) -> wp.array: + """Generates a linear velocity array from a spatial vector array. + + Args: + spatial_vector: The spatial vector array. Shape is (N, 6). + + Returns: + The linear velocity array. Shape is (N, 3). + """ + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + def _get_ang_vel_from_spatial_vector(self, spatial_vector: wp.array) -> wp.array: + """Generates an angular velocity array from a spatial vector array. + + Args: + spatial_vector: The spatial vector array. Shape is (N, 6). + + Returns: + The angular velocity array. Shape is (N, 3). + """ + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + """ + Deprecated properties. + """ + + @property + def default_root_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. + + + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular + velocities are of its center of mass frame. Shape is (num_instances, 13). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_root_state is None: + self._default_root_state = wp.zeros((self._num_instances), dtype=shared_kernels.vec13f, device=self.device) + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self._default_root_pose, + self._default_root_vel, + ], + outputs=[ + self._default_root_state, + ], + device=self.device, + ) + if self._default_root_state_ta is None: + self._default_root_state_ta = ProxyArray(self._default_root_state) + return self._default_root_state_ta + + @property + def root_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=(self._num_instances), + inputs=[ + self.root_link_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + + if self._root_state_w_ta is None: + self._root_state_w_ta = ProxyArray(self._root_state_w.data) + return self._root_state_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" + warnings.warn( + "The `root_link_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.root_link_vel_w, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + + if self._root_link_state_w_ta is None: + self._root_link_state_w_ta = ProxyArray(self._root_link_state_w.data) + return self._root_link_state_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_com_state_w` property will be deprecated in a IsaacLab 4.0. Please use `root_com_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + + if self._root_com_state_w_ta is None: + self._root_com_state_w_ta = ProxyArray(self._root_com_state_w.data) + return self._root_com_state_w_ta + + @property + def body_state_w(self) -> ProxyArray: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular + velocities are of the articulation links' center of mass frame. + """ + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_com_vel_w, + ], + outputs=[ + self._body_state_w.data, + ], + device=self.device, + ) + self._body_state_w.timestamp = self._sim_timestamp + + if self._body_state_w_ta is None: + self._body_state_w_ta = ProxyArray(self._body_state_w.data) + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + """ + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_link_vel_w, + ], + outputs=[ + self._body_link_state_w.data, + ], + device=self.device, + ) + self._body_link_state_w.timestamp = self._sim_timestamp + + if self._body_link_state_w_ta is None: + self._body_link_state_w_ta = ProxyArray(self._body_link_state_w.data) + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principal inertia. + """ + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self._num_instances, self._num_bodies), + inputs=[ + self.body_com_pose_w, + self.body_com_vel_w, + ], + outputs=[ + self._body_com_state_w.data, + ], + device=self.device, + ) + self._body_com_state_w.timestamp = self._sim_timestamp + + if self._body_com_state_w_ta is None: + self._body_com_state_w_ta = ProxyArray(self._body_com_state_w.data) + return self._body_com_state_w_ta diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/kernels.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/kernels.py new file mode 100644 index 000000000000..5686c864dd94 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/kernels.py @@ -0,0 +1,488 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + +""" +Articulation-specific warp functions. +""" + + +@wp.func +def compute_soft_joint_pos_limits_func( + joint_pos_limits: wp.vec2f, + soft_limit_factor: wp.float32, +): + """Compute the soft joint position limits. + + Args: + joint_pos_limits: The joint position limits. + soft_limit_factor: The soft limit factor. + + Returns: + The soft joint position limits. + """ + joint_pos_mean = (joint_pos_limits[0] + joint_pos_limits[1]) / 2.0 + joint_pos_range = joint_pos_limits[1] - joint_pos_limits[0] + return wp.vec2f( + joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor, + joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor, + ) + + +""" +Articulation-specific warp kernels. +""" + + +@wp.kernel +def get_joint_acc_from_joint_vel( + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + dt: wp.float32, + joint_acc: wp.array2d(dtype=wp.float32), +): + """Compute the joint acceleration from the joint velocity using finite differencing. + + This kernel computes the joint acceleration by taking the difference between the current + and previous joint velocities, divided by the time step. It also updates the previous + joint velocity buffer with the current values. + + Args: + joint_vel: Input array of current joint velocities. Shape is (num_envs, num_joints). + prev_joint_vel: Input/output array of previous joint velocities. Shape is (num_envs, num_joints). + This buffer is updated with the current joint velocities after computing acceleration. + dt: Input time step (scalar) used for finite differencing. + joint_acc: Output array where joint accelerations are written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + joint_acc[i, j] = (joint_vel[i, j] - prev_joint_vel[i, j]) / dt + prev_joint_vel[i, j] = joint_vel[i, j] + + +@wp.kernel +def write_joint_vel_data( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + from_mask: bool, + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint velocity data to the output buffers. + + This kernel writes joint velocity data from the input array to the output buffers. + It also updates the previous joint velocity buffer and resets the joint acceleration to 0.0. + + Args: + in_data: Input array containing joint velocity data. Shape is (num_envs, num_joints) or + (num_selected_envs, num_selected_joints) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into in_data. If False, in_data is indexed + directly using the thread indices. + joint_vel: Output array where joint velocities are written. Shape is (num_envs, num_joints). + prev_joint_vel: Output array where previous joint velocities are written. Shape is + (num_envs, num_joints). + joint_acc: Output array where joint accelerations are reset to 0.0. Shape is + (num_envs, num_joints). + """ + i, j = wp.tid() + if from_mask: + joint_vel[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + prev_joint_vel[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + else: + joint_vel[env_ids[i], joint_ids[j]] = in_data[i, j] + prev_joint_vel[env_ids[i], joint_ids[j]] = in_data[i, j] + joint_acc[env_ids[i], joint_ids[j]] = 0.0 + + +@wp.kernel +def write_joint_state_data( + pos_data: wp.array2d(dtype=wp.float32), + vel_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + full_data: bool, + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + prev_joint_vel: wp.array2d(dtype=wp.float32), + joint_acc: wp.array2d(dtype=wp.float32), +): + """Write joint position and velocity data in a single kernel launch. + + Args: + pos_data: Input joint positions. Shape is (num_envs, num_joints) if full_data, + otherwise (num_selected_envs, num_selected_joints). + vel_data: Input joint velocities. Shape is (num_envs, num_joints) if full_data, + otherwise (num_selected_envs, num_selected_joints). + env_ids: Environment indices. Shape is (num_selected_envs,). + joint_ids: Joint indices. Shape is (num_selected_joints,). + full_data: If True, data has full (num_envs, num_joints) shape and env_ids/joint_ids + index into it. If False, data is pre-sliced and indexed by thread position. + joint_pos: Output joint positions. Shape is (num_envs, num_joints). + joint_vel: Output joint velocities. Shape is (num_envs, num_joints). + prev_joint_vel: Output previous joint velocities. Shape is (num_envs, num_joints). + joint_acc: Output joint accelerations (reset to 0). Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + if full_data: + p = pos_data[env_ids[i], joint_ids[j]] + v = vel_data[env_ids[i], joint_ids[j]] + else: + p = pos_data[i, j] + v = vel_data[i, j] + joint_pos[env_ids[i], joint_ids[j]] = p + joint_vel[env_ids[i], joint_ids[j]] = v + prev_joint_vel[env_ids[i], joint_ids[j]] = v + joint_acc[env_ids[i], joint_ids[j]] = 0.0 + + +@wp.kernel +def write_joint_limit_data_to_buffer( + in_data: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + from_mask: bool, + joint_pos_limits: wp.array2d(dtype=wp.vec2f), + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), + default_joint_pos: wp.array2d(dtype=wp.float32), + clamped_defaults: wp.array(dtype=wp.int32), +): + """Write joint limit data to the output buffers and compute soft limits. + + This kernel writes joint position limits from the input array to the output buffer, + computes soft joint position limits, and clamps default joint positions if they + fall outside the limits. + + Args: + in_data: Input array containing joint position limits as vec2f (lower, upper). + Shape is (num_envs, num_joints) or (num_selected_envs, num_selected_joints) + depending on from_mask. + soft_limit_factor: Input scalar factor for computing soft limits (typically 0.0-1.0). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into in_data. If False, in_data is indexed + directly using the thread indices. + joint_pos_limits: Output array where joint position limits are written. Shape is + (num_envs, num_joints). + soft_joint_pos_limits: Output array where soft joint position limits are written. + Shape is (num_envs, num_joints). + default_joint_pos: Input/output array of default joint positions. If any values fall + outside the limits, they are clamped. Shape is (num_envs, num_joints). + clamped_defaults: Output 1-element array flag indicating whether any default joint + positions were clamped. Non-zero if any clamping occurred. Shape is (1,). + """ + i, j = wp.tid() + if from_mask: + joint_pos_limits[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + else: + joint_pos_limits[env_ids[i], joint_ids[j]] = in_data[i, j] + if ( + default_joint_pos[env_ids[i], joint_ids[j]] < joint_pos_limits[env_ids[i], joint_ids[j]][0] + ) or default_joint_pos[env_ids[i], joint_ids[j]] > joint_pos_limits[env_ids[i], joint_ids[j]][1]: + wp.atomic_add(clamped_defaults, 0, 1) + default_joint_pos[env_ids[i], joint_ids[j]] = wp.clamp( + default_joint_pos[env_ids[i], joint_ids[j]], + joint_pos_limits[env_ids[i], joint_ids[j]][0], + joint_pos_limits[env_ids[i], joint_ids[j]][1], + ) + soft_joint_pos_limits[env_ids[i], joint_ids[j]] = compute_soft_joint_pos_limits_func( + joint_pos_limits[env_ids[i], joint_ids[j]], soft_limit_factor + ) + + +@wp.kernel +def write_joint_friction_data_to_buffer( + in_friction: wp.array2d(dtype=wp.float32), + in_dynamic_friction: wp.array2d(dtype=wp.float32), + in_viscous_friction: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_friction: wp.array2d(dtype=wp.float32), + out_dynamic_friction: wp.array2d(dtype=wp.float32), + out_viscous_friction: wp.array2d(dtype=wp.float32), + friction_props: wp.array3d(dtype=wp.float32), +): + """Write joint friction data to the output buffers. + + This kernel writes joint friction coefficients from input arrays to output buffers + and updates the friction properties array used by the physics simulation. + + Args: + in_friction: Input array containing joint friction coefficients. Shape is + (num_envs, num_joints) or (num_selected_envs, num_selected_joints) depending + on from_mask. Can be None if not provided. + in_dynamic_friction: Input array containing joint dynamic friction coefficients. + Shape is (num_envs, num_joints) or (num_selected_envs, num_selected_joints). + Can be None if not provided. + in_viscous_friction: Input array containing joint viscous friction coefficients. + Shape is (num_envs, num_joints) or (num_selected_envs, num_selected_joints). + Can be None if not provided. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into input arrays. If False, input arrays + are indexed directly using the thread indices. + out_friction: Output array where joint friction coefficients are written. Shape is + (num_envs, num_joints). + out_dynamic_friction: Output array where joint dynamic friction coefficients are written. + Shape is (num_envs, num_joints). + out_viscous_friction: Output array where joint viscous friction coefficients are written. + Shape is (num_envs, num_joints). + friction_props: Output array where friction properties are written for the physics + simulation. Shape is (num_envs, num_joints, 3) where the last dimension contains + [friction, dynamic_friction, viscous_friction]. + """ + i, j = wp.tid() + # First update the output buffers + if from_mask: + out_friction[env_ids[i], joint_ids[j]] = in_friction[env_ids[i], joint_ids[j]] + if in_dynamic_friction: + out_dynamic_friction[env_ids[i], joint_ids[j]] = in_dynamic_friction[env_ids[i], joint_ids[j]] + if in_viscous_friction: + out_viscous_friction[env_ids[i], joint_ids[j]] = in_viscous_friction[env_ids[i], joint_ids[j]] + else: + out_friction[env_ids[i], joint_ids[j]] = in_friction[i, j] + if in_dynamic_friction: + out_dynamic_friction[env_ids[i], joint_ids[j]] = in_dynamic_friction[i, j] + if in_viscous_friction: + out_viscous_friction[env_ids[i], joint_ids[j]] = in_viscous_friction[i, j] + # Then update the friction properties + friction_props[env_ids[i], joint_ids[j], 0] = out_friction[env_ids[i], joint_ids[j]] + if in_dynamic_friction: + friction_props[env_ids[i], joint_ids[j], 1] = out_dynamic_friction[env_ids[i], joint_ids[j]] + if in_viscous_friction: + friction_props[env_ids[i], joint_ids[j], 2] = out_viscous_friction[env_ids[i], joint_ids[j]] + + +@wp.kernel +def write_joint_friction_param_to_buffer( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + buffer_index: wp.int32, + from_mask: bool, + out_data: wp.array2d(dtype=wp.float32), + out_buffer: wp.array3d(dtype=wp.float32), +): + """Write a joint friction parameter to the output buffers. + + This kernel writes a single joint friction parameter (e.g., dynamic or viscous friction) + from the input array to both a 2D output array and a specific slice of a 3D buffer array. + + Args: + in_data: Input array containing joint friction parameter values. Shape is + (num_envs, num_joints) or (num_selected_envs, num_selected_joints) depending + on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + buffer_index: Input scalar index specifying which slice of the 3D buffer to write to. + Typically 0 for friction, 1 for dynamic friction, or 2 for viscous friction. + from_mask: Input flag indicating whether to use masked indexing. If True, indices from + env_ids and joint_ids are used to index into in_data. If False, in_data is indexed + directly using the thread indices. + out_data: Output array where friction parameter values are written. Shape is + (num_envs, num_joints). + out_buffer: Output 3D array where friction parameter values are written to the specified + slice. Shape is (num_envs, num_joints, num_friction_params). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + out_buffer[env_ids[i], joint_ids[j], buffer_index] = in_data[env_ids[i], joint_ids[j]] + else: + out_data[env_ids[i], joint_ids[j]] = in_data[i, j] + out_buffer[env_ids[i], joint_ids[j], buffer_index] = in_data[i, j] + + +@wp.kernel +def float_data_to_buffer_with_indices( + in_data: wp.float32, + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + out_data: wp.array2d(dtype=wp.float32), +): + """Write a scalar float value to a 2D buffer at specified indices. + + This kernel broadcasts a single scalar float value to all specified (env_id, joint_id) + locations in the output buffer. + + Args: + in_data: Input scalar float value to broadcast. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint indices to write to. Shape is (num_selected_joints,). + out_data: Output array where the scalar value is written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out_data[env_ids[i], joint_ids[j]] = in_data + + +@wp.kernel +def update_soft_joint_pos_limits( + joint_pos_limits: wp.array2d(dtype=wp.vec2f), + soft_limit_factor: wp.float32, + soft_joint_pos_limits: wp.array2d(dtype=wp.vec2f), +): + """Update soft joint position limits based on hard limits and a soft limit factor. + + This kernel computes soft joint position limits from hard joint position limits using + a soft limit factor. Soft limits are typically used to provide a safety margin before + reaching the hard limits. + + Args: + joint_pos_limits: Input array of hard joint position limits as vec2f (lower, upper). + Shape is (num_envs, num_joints). + soft_limit_factor: Input scalar factor for computing soft limits (typically 0.0-1.0). + A value of 1.0 means soft limits equal hard limits, while smaller values create + a tighter range. + soft_joint_pos_limits: Output array where soft joint position limits are written. + Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + soft_joint_pos_limits[i, j] = compute_soft_joint_pos_limits_func(joint_pos_limits[i, j], soft_limit_factor) + + +@wp.kernel +def update_default_joint_values( + source: wp.array(dtype=wp.float32), + ids: wp.array(dtype=wp.int32), + target: wp.array2d(dtype=wp.float32), +): + """Update default joint values from a source array using joint indices. + + This kernel writes values from a 1D source array to specific joint indices in a 2D + target array for all environments. + + Args: + source: Input array containing joint values to write. Shape is (num_joints,). + ids: Input array of joint indices specifying which joints to update. Shape is + (num_selected_joints,). + target: Output array where joint values are written. Shape is (num_envs, num_joints). + Values are written to target[i, ids[j]] for all environments i. + """ + i, j = wp.tid() + target[i, ids[j]] = source[j] + + +@wp.kernel +def update_targets( + source_joint_positions: wp.array2d(dtype=wp.float32), + source_joint_velocities: wp.array2d(dtype=wp.float32), + source_joint_efforts: wp.array2d(dtype=wp.float32), + joint_indices: wp.array(dtype=wp.int32), + target_joint_positions: wp.array2d(dtype=wp.float32), + target_joint_velocities: wp.array2d(dtype=wp.float32), + target_joint_efforts: wp.array2d(dtype=wp.float32), +): + """Update joint target values from source arrays using joint indices. + + This kernel copies joint positions, velocities, and efforts from source arrays to + target arrays, remapping joint indices using the provided joint_indices array. + Only non-None source arrays are processed. + + Args: + source_joint_positions: Input array of source joint positions. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_joint_velocities: Input array of source joint velocities. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_joint_efforts: Input array of source joint efforts. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + joint_indices: Input array of joint indices for remapping. Shape is + (num_selected_joints,). Specifies which joints in the target arrays to update. + target_joint_positions: Output array where joint positions are written. Shape is + (num_envs, num_joints). + target_joint_velocities: Output array where joint velocities are written. Shape is + (num_envs, num_joints). + target_joint_efforts: Output array where joint efforts are written. Shape is + (num_envs, num_joints). + """ + i, j = wp.tid() + if source_joint_positions: + target_joint_positions[i, joint_indices[j]] = source_joint_positions[i, j] + if source_joint_velocities: + target_joint_velocities[i, joint_indices[j]] = source_joint_velocities[i, j] + if source_joint_efforts: + target_joint_efforts[i, joint_indices[j]] = source_joint_efforts[i, j] + + +@wp.kernel +def update_actuator_state_model( + source_computed_effort: wp.array2d(dtype=wp.float32), + source_applied_effort: wp.array2d(dtype=wp.float32), + source_gear_ratio: wp.array2d(dtype=wp.float32), + source_vel_limits: wp.array2d(dtype=wp.float32), + joint_indices: wp.array(dtype=wp.int32), + target_computed_effort: wp.array2d(dtype=wp.float32), + target_applied_effort: wp.array2d(dtype=wp.float32), + target_gear_ratio: wp.array2d(dtype=wp.float32), + target_soft_joint_vel_limits: wp.array2d(dtype=wp.float32), +): + """Update actuator state model parameters from source arrays using joint indices. + + This kernel copies actuator state model parameters (computed effort, applied effort, + gear ratio, and velocity limits) from source arrays to target arrays, remapping + joint indices using the provided joint_indices array. + + Args: + source_computed_effort: Input array of source computed effort values. Shape is + (num_envs, num_selected_joints). + source_applied_effort: Input array of source applied effort values. Shape is + (num_envs, num_selected_joints). + source_gear_ratio: Input array of source gear ratio values. Shape is + (num_envs, num_selected_joints). Can be None if not provided. + source_vel_limits: Input array of source velocity limit values. Shape is + (num_envs, num_selected_joints). + joint_indices: Input array of joint indices for remapping. Shape is + (num_selected_joints,). Specifies which joints in the target arrays to update. + target_computed_effort: Output array where computed effort values are written. + Shape is (num_envs, num_joints). + target_applied_effort: Output array where applied effort values are written. + Shape is (num_envs, num_joints). + target_gear_ratio: Output array where gear ratio values are written. Shape is + (num_envs, num_joints). + target_soft_joint_vel_limits: Output array where soft joint velocity limits are + written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + target_computed_effort[i, joint_indices[j]] = source_computed_effort[i, j] + target_applied_effort[i, joint_indices[j]] = source_applied_effort[i, j] + target_soft_joint_vel_limits[i, joint_indices[j]] = source_vel_limits[i, j] + if source_gear_ratio: + target_gear_ratio[i, joint_indices[j]] = source_gear_ratio[i, j] + + +@wp.kernel +def extract_friction_properties( + friction_props: wp.array3d(dtype=wp.float32), + out_friction: wp.array2d(dtype=wp.float32), + out_dynamic_friction: wp.array2d(dtype=wp.float32), + out_viscous_friction: wp.array2d(dtype=wp.float32), +): + """Extract friction properties from a 3D array into separate 2D arrays. + + This kernel extracts the three friction components (static friction, dynamic friction, + and viscous friction) from a 3D friction properties array into three separate 2D arrays. + + Args: + friction_props: Input 3D array containing friction properties. Shape is + (num_envs, num_joints, 3) where the last dimension contains + [friction, dynamic_friction, viscous_friction]. + out_friction: Output array where static friction coefficients are written. + Shape is (num_envs, num_joints). + out_dynamic_friction: Output array where dynamic friction coefficients are written. + Shape is (num_envs, num_joints). + out_viscous_friction: Output array where viscous friction coefficients are written. + Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + out_friction[i, j] = friction_props[i, j, 0] + out_dynamic_friction[i, j] = friction_props[i, j, 1] + out_viscous_friction[i, j] = friction_props[i, j, 2] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py new file mode 100644 index 000000000000..525b6ae5fd98 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for deformable object assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/assets/deformable_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.pyi similarity index 79% rename from source/isaaclab/isaaclab/assets/deformable_object/__init__.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.pyi index 503a238935bd..5ed56fc4f384 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.pyi @@ -3,7 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Sub-module for deformable object assets.""" +__all__ = [ + "DeformableObject", + "DeformableObjectCfg", + "DeformableObjectData", +] from .deformable_object import DeformableObject from .deformable_object_cfg import DeformableObjectCfg diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py new file mode 100644 index 000000000000..a240cb8203b5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -0,0 +1,806 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp + +import omni.physics.tensors.api as physx +from pxr import UsdShade + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.assets.asset_base import AssetBase +from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .deformable_object_data import DeformableObjectData +from .kernels import ( + compute_nodal_state_w, + set_kinematic_flags_to_one, + vec6f, + write_nodal_vec3f_to_buffer, + write_nodal_vec4f_to_buffer, +) + +if TYPE_CHECKING: + from .deformable_object_cfg import DeformableObjectCfg + +# import logger +logger = logging.getLogger(__name__) + + +class DeformableObject(AssetBase): + """A deformable object asset class. + + Deformable objects are assets that can be deformed in the simulation. They are typically used for + soft bodies, such as stuffed animals and food items. + + Unlike rigid object assets, deformable objects have a more complex structure and require additional + handling for simulation. The simulation of deformable objects follows a finite element approach, where + the object is discretized into a mesh of nodes and elements. The nodes are connected by elements, which + define the material properties of the object. The nodes can be moved and deformed, and the elements + respond to these changes. + + The state of a deformable object comprises of its nodal positions and velocities, and not the object's root + position and orientation. The nodal positions and velocities are in the simulation frame. + + Volume deformables can be `partially kinematic`_, where some nodes are driven by kinematic targets, and the rest are + simulated. The kinematic targets are the desired positions of the nodes, and the simulation drives the nodes + towards these targets. This is useful for partial control of the object, such as moving a stuffed animal's + head while the rest of the body is simulated. + + .. attention:: + This class is experimental and subject to change due to changes on the underlying PhysX API on which + it depends. We will try to maintain backward compatibility as much as possible but some changes may be + necessary. + + .. _partially kinematic: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/SoftBodies.html#kinematic-soft-bodies + """ + + cfg: DeformableObjectCfg + """Configuration instance for the deformable object.""" + + def __init__(self, cfg: DeformableObjectCfg): + """Initialize the deformable object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + # Register custom vec6f type for nodal state validation. + self._DTYPE_TO_TORCH_TRAILING_DIMS = {**self._DTYPE_TO_TORCH_TRAILING_DIMS, vec6f: (6,)} + # initialize deformable type to None, should be set to either surface or volume on initialization + self._deformable_type: str | None = None + + """ + Properties + """ + + @property + def data(self) -> DeformableObjectData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single deformable body. + """ + return 1 + + @property + def root_view(self) -> physx.DeformableBodyView: + """Deformable body view for the asset. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_physx_view + + @property + def root_physx_view(self) -> physx.DeformableBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + logger.warning( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." + ) + return self.root_view + + @property + def material_physx_view(self) -> physx.DeformableMaterialView | None: + """Deformable material view for the asset (PhysX). + + This view is optional and may not be available if the material is not bound to the deformable body. + If the material is not available, then the material properties will be set to default values. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._material_physx_view + + @property + def max_sim_elements_per_body(self) -> int: + """The maximum number of simulation mesh elements per deformable body.""" + return self.root_view.max_simulation_elements_per_body + + @property + def max_collision_elements_per_body(self) -> int: + """The maximum number of collision mesh elements per deformable body.""" + return self.root_view.max_collision_elements_per_body + + @property + def max_sim_vertices_per_body(self) -> int: + """The maximum number of simulation mesh vertices per deformable body.""" + return self.root_view.max_simulation_nodes_per_body + + @property + def max_collision_vertices_per_body(self) -> int: + """The maximum number of collision mesh vertices per deformable body.""" + return self.root_view.max_collision_nodes_per_body + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the deformable object. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # TODO: Should we reset the kinematic targets when resetting the object? + # This is not done in the current implementation. We assume users will reset the kinematic targets. + pass + + def write_data_to_sim(self): + pass + + def update(self, dt: float): + self._data.update(dt) + + """ + Operations - Write to simulation. + """ + + def write_nodal_state_to_sim_index( + self, + nodal_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the nodal state over selected environment indices into the simulation. + + The nodal state comprises of the nodal positions and velocities. Since these are nodes, the velocity only has + a translational component. All the quantities are in the simulation frame. + + Args: + nodal_state: Nodal state in simulation frame. + Shape is (len(env_ids), max_sim_vertices_per_body, 6) or (num_instances, max_sim_vertices_per_body, 6). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # Convert warp to torch if needed + if isinstance(nodal_state, wp.array): + nodal_state = wp.to_torch(nodal_state) + # set into simulation + self.write_nodal_pos_to_sim_index(nodal_state[..., :3], env_ids=env_ids, full_data=full_data) + self.write_nodal_velocity_to_sim_index(nodal_state[..., 3:], env_ids=env_ids, full_data=full_data) + + def write_nodal_state_to_sim_mask( + self, + nodal_state: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the nodal state over selected environment mask into the simulation. + + The nodal state comprises of the nodal positions and velocities. Since these are nodes, the velocity only has + a translational component. All the quantities are in the simulation frame. + + Args: + nodal_state: Nodal state in simulation frame. + Shape is (num_instances, max_sim_vertices_per_body, 6). + env_mask: Environment mask. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = wp.nonzero(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_nodal_state_to_sim_index(nodal_state, env_ids=env_ids, full_data=True) + + def write_nodal_pos_to_sim_index( + self, + nodal_pos: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the nodal positions over selected environment indices into the simulation. + + The nodal position comprises of individual nodal positions of the simulation mesh for the deformable body. + The positions are in the simulation frame. + + Args: + nodal_pos: Nodal positions in simulation frame. + Shape is (len(env_ids), max_sim_vertices_per_body, 3) or (num_instances, max_sim_vertices_per_body, 3). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve env_ids + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype( + nodal_pos, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos" + ) + else: + self.assert_shape_and_dtype( + nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos" + ) + # convert torch to warp if needed + if isinstance(nodal_pos, torch.Tensor): + nodal_pos = wp.from_torch(nodal_pos.contiguous(), dtype=wp.vec3f) + # write into internal buffer via kernel + wp.launch( + write_nodal_vec3f_to_buffer, + dim=(env_ids.shape[0], self.max_sim_vertices_per_body), + inputs=[nodal_pos, env_ids, full_data], + outputs=[self._data._nodal_pos_w.data], + device=self.device, + ) + # update timestamp + self._data._nodal_pos_w.timestamp = self._data._sim_timestamp + # invalidate dependent buffers + self._data._nodal_state_w.timestamp = -1.0 + self._data._root_pos_w.timestamp = -1.0 + # set into simulation + self.root_view.set_simulation_nodal_positions(self._get_nodal_pos_w_f32(), indices=env_ids) + + def write_nodal_pos_to_sim_mask( + self, + nodal_pos: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the nodal positions over selected environment mask into the simulation. + + The nodal position comprises of individual nodal positions of the simulation mesh for the deformable body. + The positions are in the simulation frame. + + Args: + nodal_pos: Nodal positions in simulation frame. + Shape is (num_instances, max_sim_vertices_per_body, 3). + env_mask: Environment mask. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = wp.nonzero(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_nodal_pos_to_sim_index(nodal_pos, env_ids=env_ids, full_data=True) + + def write_nodal_velocity_to_sim_index( + self, + nodal_vel: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the nodal velocity over selected environment indices into the simulation. + + The nodal velocity comprises of individual nodal velocities of the simulation mesh for the deformable + body. Since these are nodes, the velocity only has a translational component. The velocities are in the + simulation frame. + + Args: + nodal_vel: Nodal velocities in simulation frame. + Shape is (len(env_ids), max_sim_vertices_per_body, 3) or (num_instances, max_sim_vertices_per_body, 3). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve env_ids + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype( + nodal_vel, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel" + ) + else: + self.assert_shape_and_dtype( + nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel" + ) + # convert torch to warp if needed + if isinstance(nodal_vel, torch.Tensor): + nodal_vel = wp.from_torch(nodal_vel.contiguous(), dtype=wp.vec3f) + # write into internal buffer via kernel + wp.launch( + write_nodal_vec3f_to_buffer, + dim=(env_ids.shape[0], self.max_sim_vertices_per_body), + inputs=[nodal_vel, env_ids, full_data], + outputs=[self._data._nodal_vel_w.data], + device=self.device, + ) + # update timestamp + self._data._nodal_vel_w.timestamp = self._data._sim_timestamp + # invalidate dependent buffers + self._data._nodal_state_w.timestamp = -1.0 + self._data._root_vel_w.timestamp = -1.0 + # set into simulation + self.root_view.set_simulation_nodal_velocities(self._get_nodal_vel_w_f32(), indices=env_ids) + + def write_nodal_velocity_to_sim_mask( + self, + nodal_vel: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the nodal velocity over selected environment mask into the simulation. + + The nodal velocity comprises of individual nodal velocities of the simulation mesh for the deformable + body. Since these are nodes, the velocity only has a translational component. The velocities are in the + simulation frame. + + Args: + nodal_vel: Nodal velocities in simulation frame. + Shape is (num_instances, max_sim_vertices_per_body, 3). + env_mask: Environment mask. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = wp.nonzero(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_nodal_velocity_to_sim_index(nodal_vel, env_ids=env_ids, full_data=True) + + def write_nodal_kinematic_target_to_sim_index( + self, + targets: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the kinematic targets of the simulation mesh for the deformable bodies using indices. + + The kinematic targets comprise of individual nodal positions of the simulation mesh for the deformable body + and a flag indicating whether the node is kinematically driven or not. The positions are in the simulation + frame. + + .. note:: + The flag is set to 0.0 for kinematically driven nodes and 1.0 for free nodes. + + Args: + targets: The kinematic targets comprising of nodal positions and flags. + Shape is (len(env_ids), max_sim_vertices_per_body, 4) or (num_instances, max_sim_vertices_per_body, 4). + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + if self._deformable_type != "volume": + raise ValueError("Kinematic targets can only be set for volume deformable bodies.") + + # resolve env_ids + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype( + targets, (self.num_instances, self.max_sim_vertices_per_body), wp.vec4f, "targets" + ) + else: + self.assert_shape_and_dtype( + targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets" + ) + # convert torch to warp if needed, ensuring 2D (num_envs, V, 4) -> (num_envs, V) vec4f + if isinstance(targets, torch.Tensor): + if targets.dim() == 2: + targets = targets.unsqueeze(0) + targets = wp.from_torch(targets.contiguous(), dtype=wp.vec4f) + # write into internal buffer via kernel + wp.launch( + write_nodal_vec4f_to_buffer, + dim=(env_ids.shape[0], self.max_sim_vertices_per_body), + inputs=[targets, env_ids, full_data], + outputs=[self._data.nodal_kinematic_target], + device=self.device, + ) + # set into simulation + self.root_view.set_simulation_nodal_kinematic_targets( + self._data.nodal_kinematic_target.warp.view(wp.float32), indices=env_ids + ) + + def write_nodal_kinematic_target_to_sim_mask( + self, + targets: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the kinematic targets of the simulation mesh for the deformable bodies using mask. + + The kinematic targets comprise of individual nodal positions of the simulation mesh for the deformable body + and a flag indicating whether the node is kinematically driven or not. The positions are in the simulation + frame. + + .. note:: + The flag is set to 0.0 for kinematically driven nodes and 1.0 for free nodes. + + Args: + targets: The kinematic targets comprising of nodal positions and flags. + Shape is (num_instances, max_sim_vertices_per_body, 4). + env_mask: Environment mask. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = wp.nonzero(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids, full_data=True) + + """ + Operations - Deprecated wrappers. + """ + + def write_nodal_state_to_sim( + self, + nodal_state: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated. Please use :meth:`write_nodal_state_to_sim_index` instead.""" + warnings.warn( + "The method 'write_nodal_state_to_sim' is deprecated. Please use 'write_nodal_state_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_nodal_state_to_sim_index(nodal_state, env_ids=env_ids) + + def write_nodal_kinematic_target_to_sim( + self, + targets: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated. Please use :meth:`write_nodal_kinematic_target_to_sim_index` instead.""" + warnings.warn( + "The method 'write_nodal_kinematic_target_to_sim' is deprecated." + " Please use 'write_nodal_kinematic_target_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids) + + def write_nodal_pos_to_sim( + self, + nodal_pos: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated. Please use :meth:`write_nodal_pos_to_sim_index` instead.""" + warnings.warn( + "The method 'write_nodal_pos_to_sim' is deprecated. Please use 'write_nodal_pos_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_nodal_pos_to_sim_index(nodal_pos, env_ids=env_ids) + + def write_nodal_velocity_to_sim( + self, + nodal_vel: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Deprecated. Please use :meth:`write_nodal_velocity_to_sim_index` instead.""" + warnings.warn( + "The method 'write_nodal_velocity_to_sim' is deprecated." + " Please use 'write_nodal_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_nodal_velocity_to_sim_index(nodal_vel, env_ids=env_ids) + + """ + Operations - Helper. + """ + + def transform_nodal_pos( + self, nodal_pos: torch.tensor, pos: torch.Tensor | None = None, quat: torch.Tensor | None = None + ) -> torch.Tensor: + """Transform the nodal positions based on the pose transformation. + + This function computes the transformation of the nodal positions based on the pose transformation. + It multiplies the nodal positions with the rotation matrix of the pose and adds the translation. + Internally, it calls the :meth:`isaaclab.utils.math.transform_points` function. + + Args: + nodal_pos: The nodal positions in the simulation frame. Shape is (N, max_sim_vertices_per_body, 3). + pos: The position transformation. Shape is (N, 3). + Defaults to None, in which case the position is assumed to be zero. + quat: The orientation transformation as quaternion (x, y, z, w). Shape is (N, 4). + Defaults to None, in which case the orientation is assumed to be identity. + + Returns: + The transformed nodal positions. Shape is (N, max_sim_vertices_per_body, 3). + """ + # offset the nodal positions to center them around the origin + mean_nodal_pos = nodal_pos.mean(dim=1, keepdim=True) + nodal_pos = nodal_pos - mean_nodal_pos + # transform the nodal positions based on the pose around the origin + return math_utils.transform_points(nodal_pos, pos, quat) + mean_nodal_pos + + """ + Internal helper. + """ + + def _get_nodal_pos_w_f32(self) -> wp.array: + """Get a cached float32 view of nodal_pos_w for PhysX TensorAPI. + + Safe because ``DeformableObjectData`` copies into a stable pre-allocated + buffer via ``wp.copy`` (the pointer never changes). + Invalidated in ``_create_buffers``. + """ + if self._nodal_pos_w_f32 is None: + self._nodal_pos_w_f32 = self._data._nodal_pos_w.data.view(wp.float32) + return self._nodal_pos_w_f32 + + def _get_nodal_vel_w_f32(self) -> wp.array: + """Get a cached float32 view of nodal_vel_w for PhysX TensorAPI. + + Safe because ``DeformableObjectData`` copies into a stable pre-allocated + buffer via ``wp.copy`` (the pointer never changes). + Invalidated in ``_create_buffers``. + """ + if self._nodal_vel_w_f32 is None: + self._nodal_vel_w_f32 = self._data._nodal_vel_w.data.view(wp.float32) + return self._nodal_vel_w_f32 + + def _resolve_env_ids(self, env_ids): + """Resolve environment indices to a warp array. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find deformable root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: "OmniPhysicsDeformableBodyAPI" in prim.GetAppliedSchemas(), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a deformable body when resolving '{self.cfg.prim_path}'." + " Please ensure that the prim has 'OmniPhysicsDeformableBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single deformable body when resolving '{self.cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one deformable body in the prim path tree." + ) + # we only need the first one from the list + root_prim = root_prims[0] + + # find deformable material prims + material_prim = None + # obtain material prim from the root prim + # note: here we assume that all the root prims have their material prims at similar paths + # and we only need to find the first one. This may not be the case for all scenarios. + # However, the checks in that case get cumbersome and are not included here. + if root_prim.HasAPI(UsdShade.MaterialBindingAPI): + # check the materials that are bound with the purpose 'physics' + material_paths = UsdShade.MaterialBindingAPI(root_prim).GetDirectBindingRel("physics").GetTargets() + # iterate through targets and find the deformable body material + if len(material_paths) > 0: + for mat_path in material_paths: + mat_prim = root_prim.GetStage().GetPrimAtPath(mat_path) + if "OmniPhysicsDeformableMaterialAPI" in mat_prim.GetAppliedSchemas(): + material_prim = mat_prim + # determine deformable material type + if "PhysxSurfaceDeformableMaterialAPI" in mat_prim.GetAppliedSchemas(): + self._deformable_type = "surface" + elif "PhysxDeformableMaterialAPI" in mat_prim.GetAppliedSchemas(): + self._deformable_type = "volume" + break + + if material_prim is None: + logger.warning( + f"Failed to find a deformable material binding for '{root_prim.GetPath().pathString}'." + " The material properties will be set to default values and are not modifiable " + "at runtime. If you want to modify the material properties, please ensure that the material is " + "bound to the deformable body." + ) + + # fall back to prim hierarchy heuristic when material type detection was inconclusive + if self._deformable_type is None: + # volume deformables must have a tetmesh in the hierarchy + has_tetmesh = ( + len(sim_utils.get_all_matching_child_prims(root_prim.GetPath(), lambda p: p.GetTypeName() == "TetMesh")) + > 0 + ) + if has_tetmesh: + self._deformable_type = "volume" + else: + # surface deformables must have a mesh in the hierarchy + has_mesh = ( + len( + sim_utils.get_all_matching_child_prims(root_prim.GetPath(), lambda p: p.GetTypeName() == "Mesh") + ) + > 0 + ) + if has_mesh: + self._deformable_type = "surface" + + # resolve root path back into regex expression + # -- root prim expression + root_prim_path = root_prim.GetPath().pathString + root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] + # -- object view + if self._deformable_type == "surface": + # surface deformable + self._root_physx_view = self._physics_sim_view.create_surface_deformable_body_view( + root_prim_path_expr.replace(".*", "*") + ) + elif self._deformable_type == "volume": + # volume deformable + self._root_physx_view = self._physics_sim_view.create_volume_deformable_body_view( + root_prim_path_expr.replace(".*", "*") + ) + else: + raise RuntimeError( + f"Failed to determine deformable material type for '{root_prim.GetPath().pathString}'." + " Please ensure that the material has either 'PhysxSurfaceDeformableMaterialAPI' or " + "'PhysxDeformableMaterialAPI' applied, or that a valid tetmesh is found under the root prim." + ) + + # Return if the asset is not found + if self._root_physx_view._backend is None: + raise RuntimeError(f"Failed to create deformable body at: {self.cfg.prim_path}. Please check PhysX logs.") + # Check validity of deformables in view + if not self._root_physx_view.check(): + logger.warning(f"Deformable body view is not valid for: {self.cfg.prim_path}. Please check PhysX logs.") + + # resolve material path back into regex expression + if material_prim is not None: + # -- material prim expression + material_prim_path = material_prim.GetPath().pathString + # check if the material prim is under the template prim + # if not then we are assuming that the single material prim is used for all the deformable bodies + if template_prim_path in material_prim_path: + material_prim_path_expr = self.cfg.prim_path + material_prim_path[len(template_prim_path) :] + else: + material_prim_path_expr = material_prim_path + # -- material view + self._material_physx_view = self._physics_sim_view.create_deformable_material_view( + material_prim_path_expr.replace(".*", "*") + ) + else: + self._material_physx_view = None + + # log information about the deformable body + logger.info(f"Deformable body initialized at: {root_prim_path_expr}") + logger.info(f"Number of instances: {self.num_instances}") + logger.info(f"Number of bodies: {self.num_bodies}") + if self._material_physx_view is not None: + logger.info(f"Deformable material initialized at: {material_prim_path_expr}") + logger.info(f"Number of instances: {self._material_physx_view.count}") + else: + logger.info("No deformable material found. Material properties will be set to default values.") + + # container for data access + self._data = DeformableObjectData(self.root_view, self.device) + + # create buffers + self._create_buffers() + # update the deformable body data + self.update(0.0) + + # Initialize debug visualization handle + if self._debug_vis_handle is None: + # set initial state of debug visualization + self.set_debug_vis(self.cfg.debug_vis) + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_INDICES = wp.array(np.arange(self.num_instances, dtype=np.int32), device=self.device) + + # Cached .view(wp.float32) wrappers for structured warp arrays. + # Safe because DeformableObjectData uses wp.copy into stable buffers. + self._nodal_pos_w_f32: wp.array | None = None + self._nodal_vel_w_f32: wp.array | None = None + + # default state + # we use the initial nodal positions at spawn time as the default state + # note: these are all in the simulation frame + nodal_positions_raw = self.root_view.get_simulation_nodal_positions() # (N, V, 3) float32 + nodal_positions = nodal_positions_raw.view(wp.vec3f).reshape( + (self.num_instances, self.max_sim_vertices_per_body) + ) + nodal_velocities = wp.zeros( + (self.num_instances, self.max_sim_vertices_per_body), dtype=wp.vec3f, device=self.device + ) + # compute default nodal state as vec6f + default_nodal_state = wp.zeros( + (self.num_instances, self.max_sim_vertices_per_body), dtype=vec6f, device=self.device + ) + wp.launch( + compute_nodal_state_w, + dim=(self.num_instances, self.max_sim_vertices_per_body), + inputs=[nodal_positions, nodal_velocities], + outputs=[default_nodal_state], + device=self.device, + ) + self._data.default_nodal_state_w = ProxyArray(default_nodal_state) + + # kinematic targets (only for volume deformables, surface deformables do not support kinematic targets) + if self._deformable_type == "volume": + # kinematic targets — allocate our own buffer and copy from PhysX + kinematic_raw = self.root_view.get_simulation_nodal_kinematic_targets() # (N, V, 4) float32 + kinematic_view = kinematic_raw.view(wp.vec4f).reshape((self.num_instances, self.max_sim_vertices_per_body)) + kinematic_target = wp.zeros( + (self.num_instances, self.max_sim_vertices_per_body), dtype=wp.vec4f, device=self.device + ) + wp.copy(kinematic_target, kinematic_view) + # set all nodes as non-kinematic targets by default (flag = 1.0) + wp.launch( + set_kinematic_flags_to_one, + dim=(self.num_instances * self.max_sim_vertices_per_body,), + inputs=[kinematic_target.reshape((self.num_instances * self.max_sim_vertices_per_body,))], + device=self.device, + ) + self._data.nodal_kinematic_target = ProxyArray(kinematic_target) + + """ + Internal simulation callbacks. + """ + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + if not hasattr(self, "target_visualizer"): + self.target_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.target_visualizer.set_visibility(True) + else: + if hasattr(self, "target_visualizer"): + self.target_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # check where to visualize, kinematic targets only supported for volume deformables + num_enabled = 0 + if self._deformable_type == "volume": + kinematic_target_torch = self.data.nodal_kinematic_target.torch + targets_enabled = kinematic_target_torch[:, :, 3] == 0.0 + num_enabled = int(torch.sum(targets_enabled).item()) + # get positions if any targets are enabled + if num_enabled == 0: + # create a marker below the ground + positions = torch.tensor([[0.0, 0.0, -10.0]], device=self.device) + else: + positions = kinematic_target_torch[targets_enabled][..., :3] + # show target visualizer + self.target_visualizer.visualize(positions) + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + self._root_physx_view = None diff --git a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py similarity index 75% rename from source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py index 9ef06d1b54c3..7dbf060c27b6 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py @@ -5,25 +5,28 @@ from __future__ import annotations +from typing import TYPE_CHECKING + +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from isaaclab.markers import VisualizationMarkersCfg from isaaclab.markers.config import DEFORMABLE_TARGET_MARKER_CFG from isaaclab.utils import configclass -from ..asset_base_cfg import AssetBaseCfg -from .deformable_object import DeformableObject +if TYPE_CHECKING: + from .deformable_object import DeformableObject @configclass class DeformableObjectCfg(AssetBaseCfg): """Configuration parameters for a deformable object.""" - class_type: type = DeformableObject + class_type: type[DeformableObject] | str = "{DIR}.deformable_object:DeformableObject" visualizer_cfg: VisualizationMarkersCfg = DEFORMABLE_TARGET_MARKER_CFG.replace( prim_path="/Visuals/DeformableTarget" ) """The configuration object for the visualization markers. Defaults to DEFORMABLE_TARGET_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py new file mode 100644 index 000000000000..30307c1b08ff --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py @@ -0,0 +1,210 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import weakref + +import warp as wp + +import omni.physics.tensors.api as physx + +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.warp import ProxyArray + +from .kernels import compute_mean_vec3f_over_vertices, compute_nodal_state_w, vec6f + + +class DeformableObjectData: + """Data container for a deformable object. + + This class contains the data for a deformable object in the simulation. The data includes the nodal states of + the root deformable body in the object. The data is stored in the simulation world frame unless otherwise specified. + + A deformable object in PhysX uses two tetrahedral meshes to represent the object: + + 1. **Simulation mesh**: This mesh is used for the simulation and is the one that is deformed by the solver. + 2. **Collision mesh**: This mesh only needs to match the surface of the simulation mesh and is used for + collision detection. + + The APIs exposed provides the data for both the simulation and collision meshes. These are specified + by the `sim` and `collision` prefixes in the property names. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + def __init__(self, root_view: physx.DeformableBodyView, device: str): + """Initializes the deformable object data. + + Args: + root_view: The root deformable body view of the object. + device: The device used for processing. + """ + # Set the parameters + self.device = device + # Set the root deformable body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: physx.DeformableBodyView = weakref.proxy(root_view) + + # Store dimensions + self._num_instances = root_view.count + self._max_sim_vertices = root_view.max_simulation_nodes_per_body + self._max_sim_elements = root_view.max_simulation_elements_per_body + self._max_collision_elements = root_view.max_collision_elements_per_body + + # Set initial time stamp + self._sim_timestamp = 0.0 + + # Initialize the lazy buffers. + # -- node state in simulation world frame + self._nodal_pos_w = TimestampedBuffer((self._num_instances, self._max_sim_vertices), device, wp.vec3f) + self._nodal_vel_w = TimestampedBuffer((self._num_instances, self._max_sim_vertices), device, wp.vec3f) + self._nodal_state_w = TimestampedBuffer((self._num_instances, self._max_sim_vertices), device, vec6f) + # -- derived: root pos/vel + self._root_pos_w = TimestampedBuffer((self._num_instances,), device, wp.vec3f) + self._root_vel_w = TimestampedBuffer((self._num_instances,), device, wp.vec3f) + + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + self._nodal_pos_w_ta: ProxyArray | None = None + self._nodal_vel_w_ta: ProxyArray | None = None + self._nodal_state_w_ta: ProxyArray | None = None + self._root_pos_w_ta: ProxyArray | None = None + self._root_vel_w_ta: ProxyArray | None = None + + def update(self, dt: float): + """Updates the data for the deformable object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + ## + # Defaults. + ## + + default_nodal_state_w: ProxyArray = None + """Default nodal state ``[nodal_pos, nodal_vel]`` in simulation world frame. + Shape is (num_instances, max_sim_vertices_per_body) with dtype vec6f. + """ + + ## + # Kinematic commands + ## + + nodal_kinematic_target: ProxyArray = None + """Simulation mesh kinematic targets for the deformable bodies. + Shape is (num_instances, max_sim_vertices_per_body) with dtype vec4f. + + The kinematic targets are used to drive the simulation mesh vertices to the target positions. + The targets are stored as (x, y, z, is_not_kinematic) where "is_not_kinematic" is a binary + flag indicating whether the vertex is kinematic or not. The flag is set to 0 for kinematic vertices + and 1 for non-kinematic vertices. + """ + + ## + # Properties. + ## + + @property + def nodal_pos_w(self) -> ProxyArray: + """Nodal positions in simulation world frame. Shape is (num_instances, max_sim_vertices_per_body) vec3f.""" + if self._nodal_pos_w.timestamp < self._sim_timestamp: + # get_simulation_nodal_positions() returns (N, V, 3) float32 — view as (N, V) vec3f + src = ( + self._root_view.get_simulation_nodal_positions() + .view(wp.vec3f) + .reshape((self._num_instances, self._max_sim_vertices)) + ) + wp.copy(self._nodal_pos_w.data, src) + self._nodal_pos_w.timestamp = self._sim_timestamp + # Rebind ProxyArray since .data was replaced with a new wp.array + if self._nodal_pos_w_ta is not None: + self._nodal_pos_w_ta = ProxyArray(self._nodal_pos_w.data) + if self._nodal_pos_w_ta is None: + self._nodal_pos_w_ta = ProxyArray(self._nodal_pos_w.data) + return self._nodal_pos_w_ta + + @property + def nodal_vel_w(self) -> ProxyArray: + """Nodal velocities in simulation world frame. Shape is (num_instances, max_sim_vertices_per_body) vec3f.""" + if self._nodal_vel_w.timestamp < self._sim_timestamp: + src = ( + self._root_view.get_simulation_nodal_velocities() + .view(wp.vec3f) + .reshape((self._num_instances, self._max_sim_vertices)) + ) + wp.copy(self._nodal_vel_w.data, src) + self._nodal_vel_w.timestamp = self._sim_timestamp + # Rebind ProxyArray since .data was replaced with a new wp.array + if self._nodal_vel_w_ta is not None: + self._nodal_vel_w_ta = ProxyArray(self._nodal_vel_w.data) + if self._nodal_vel_w_ta is None: + self._nodal_vel_w_ta = ProxyArray(self._nodal_vel_w.data) + return self._nodal_vel_w_ta + + @property + def nodal_state_w(self) -> ProxyArray: + """Nodal state ``[nodal_pos, nodal_vel]`` in simulation world frame. + Shape is (num_instances, max_sim_vertices_per_body) vec6f. + """ + if self._nodal_state_w.timestamp < self._sim_timestamp: + wp.launch( + compute_nodal_state_w, + dim=(self._num_instances, self._max_sim_vertices), + inputs=[self.nodal_pos_w, self.nodal_vel_w], + outputs=[self._nodal_state_w.data], + device=self.device, + ) + self._nodal_state_w.timestamp = self._sim_timestamp + if self._nodal_state_w_ta is None: + self._nodal_state_w_ta = ProxyArray(self._nodal_state_w.data) + return self._nodal_state_w_ta + + ## + # Derived properties. + ## + + @property + def root_pos_w(self) -> ProxyArray: + """Root position from nodal positions of the simulation mesh for the deformable bodies in simulation + world frame. Shape is (num_instances, 3). + + This quantity is computed as the mean of the nodal positions. + """ + if self._root_pos_w.timestamp < self._sim_timestamp: + wp.launch( + compute_mean_vec3f_over_vertices, + dim=(self._num_instances,), + inputs=[self.nodal_pos_w, self._max_sim_vertices], + outputs=[self._root_pos_w.data], + device=self.device, + ) + self._root_pos_w.timestamp = self._sim_timestamp + if self._root_pos_w_ta is None: + self._root_pos_w_ta = ProxyArray(self._root_pos_w.data) + return self._root_pos_w_ta + + @property + def root_vel_w(self) -> ProxyArray: + """Root velocity from vertex velocities for the deformable bodies in simulation world frame. + Shape is (num_instances, 3). + + This quantity is computed as the mean of the nodal velocities. + """ + if self._root_vel_w.timestamp < self._sim_timestamp: + wp.launch( + compute_mean_vec3f_over_vertices, + dim=(self._num_instances,), + inputs=[self.nodal_vel_w, self._max_sim_vertices], + outputs=[self._root_vel_w.data], + device=self.device, + ) + self._root_vel_w.timestamp = self._sim_timestamp + if self._root_vel_w_ta is None: + self._root_vel_w_ta = ProxyArray(self._root_vel_w.data) + return self._root_vel_w_ta diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/kernels.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/kernels.py new file mode 100644 index 000000000000..bce21adc1521 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/kernels.py @@ -0,0 +1,110 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + +vec6f = wp.types.vector(length=6, dtype=wp.float32) + + +@wp.kernel +def write_nodal_vec3f_to_buffer( + data: wp.array2d(dtype=wp.vec3f), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.vec3f), +): + """Write nodal vec3f data (positions or velocities) to a buffer at specified indices. + + Args: + data: Input array of nodal vec3f data. Shape is (num_envs, num_vertices) or + (num_selected_envs, num_vertices) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where data is written. Shape is (num_envs, num_vertices). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], j] = data[env_ids[i], j] + else: + out_data[env_ids[i], j] = data[i, j] + + +@wp.kernel +def write_nodal_vec4f_to_buffer( + data: wp.array2d(dtype=wp.vec4f), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.vec4f), +): + """Write nodal vec4f data (kinematic targets) to a buffer at specified indices. + + Args: + data: Input array of nodal vec4f data. Shape is (num_envs, num_vertices) or + (num_selected_envs, num_vertices) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where data is written. Shape is (num_envs, num_vertices). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], j] = data[env_ids[i], j] + else: + out_data[env_ids[i], j] = data[i, j] + + +@wp.kernel +def compute_nodal_state_w( + nodal_pos: wp.array2d(dtype=wp.vec3f), + nodal_vel: wp.array2d(dtype=wp.vec3f), + nodal_state: wp.array2d(dtype=vec6f), +): + """Concatenate nodal positions and velocities into a 6-element state vector. + + Args: + nodal_pos: Input array of nodal positions. Shape is (num_instances, num_vertices). + nodal_vel: Input array of nodal velocities. Shape is (num_instances, num_vertices). + nodal_state: Output array where concatenated state vectors are written. + Shape is (num_instances, num_vertices). + """ + i, j = wp.tid() + p = nodal_pos[i, j] + v = nodal_vel[i, j] + nodal_state[i, j] = vec6f(p[0], p[1], p[2], v[0], v[1], v[2]) + + +@wp.kernel +def compute_mean_vec3f_over_vertices( + data: wp.array2d(dtype=wp.vec3f), + num_vertices: int, + result: wp.array(dtype=wp.vec3f), +): + """Compute the mean of vec3f data over the vertex dimension. + + Args: + data: Input array of vec3f data. Shape is (num_instances, num_vertices). + num_vertices: Number of vertices per instance. + result: Output array where mean values are written. Shape is (num_instances,). + """ + i = wp.tid() + acc = wp.vec3f(0.0, 0.0, 0.0) + for j in range(num_vertices): + acc = acc + data[i, j] + result[i] = acc / float(num_vertices) + + +@wp.kernel +def set_kinematic_flags_to_one( + data: wp.array(dtype=wp.vec4f), +): + """Set the w-component (kinematic flag) of all vec4f entries to 1.0. + + This is used to initialize all vertices as non-kinematic (free) nodes. + + Args: + data: Input/output array of vec4f kinematic targets. Shape is (N*V,). + """ + i = wp.tid() + v = data[i] + data[i] = wp.vec4f(v[0], v[1], v[2], 1.0) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/kernels.py b/source/isaaclab_physx/isaaclab_physx/assets/kernels.py new file mode 100644 index 000000000000..d54f395739ae --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/kernels.py @@ -0,0 +1,888 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + +vec13f = wp.types.vector(length=13, dtype=wp.float32) + +""" +Shared @wp.func helpers. +""" + + +@wp.func +def get_link_vel_from_root_com_vel_func( + com_vel: wp.spatial_vectorf, + link_pose: wp.transformf, + body_com_pose: wp.transformf, +): + """Compute link velocity from center-of-mass velocity. + + Transforms a COM spatial velocity into a link-frame velocity by projecting + the angular velocity contribution from the COM offset relative to the link frame. + + Args: + com_vel: COM spatial velocity (angular, linear). + link_pose: Link pose in world frame. + body_com_pose: COM pose in body (link) frame. + + Returns: + Link spatial velocity (angular, linear). + """ + projected_vel = wp.cross( + wp.spatial_bottom(com_vel), + wp.quat_rotate(wp.transform_get_rotation(link_pose), -wp.transform_get_translation(body_com_pose)), + ) + return wp.spatial_vector(wp.spatial_top(com_vel) + projected_vel, wp.spatial_bottom(com_vel)) + + +@wp.func +def get_com_pose_from_link_pose_func( + link_pose: wp.transformf, + body_com_pose: wp.transformf, +): + """Compute COM pose in world frame from link pose and body-frame COM offset. + + Args: + link_pose: Link pose in world frame. + body_com_pose: COM pose in body (link) frame. + + Returns: + COM pose in world frame. + """ + return link_pose * body_com_pose + + +@wp.func +def concat_pose_and_vel_to_state_func( + pose: wp.transformf, + vel: wp.spatial_vectorf, +) -> vec13f: + """Concatenate a pose and velocity into a 13-element state vector. + + The state vector layout is [pos(3), quat(4), ang_vel(3), lin_vel(3)]. + + Args: + pose: Pose as a transform (position + quaternion). + vel: Spatial velocity (angular, linear). + + Returns: + 13-element state vector. + """ + return vec13f( + pose[0], pose[1], pose[2], pose[3], pose[4], pose[5], pose[6], vel[0], vel[1], vel[2], vel[3], vel[4], vel[5] + ) + + +@wp.func +def compute_heading_w_func( + forward_vec: wp.vec3f, + quat: wp.quatf, +): + """Compute heading angle (yaw) in world frame from a forward vector and orientation. + + Rotates the forward vector by the quaternion and computes atan2(y, x). + + Args: + forward_vec: Forward direction vector in body frame. + quat: Orientation quaternion. + + Returns: + Heading angle in radians. + """ + forward_w = wp.quat_rotate(quat, forward_vec) + return wp.atan2(forward_w[1], forward_w[0]) + + +@wp.func +def set_state_transforms_func( + state: vec13f, + transform: wp.transformf, +) -> vec13f: + """Set the pose portion (first 7 elements) of a 13-element state vector. + + Overwrites elements [0..6] (position + quaternion) with the given transform, + leaving the velocity portion [7..12] unchanged. + + Args: + state: 13-element state vector to modify. + transform: New pose (position + quaternion). + + Returns: + Updated 13-element state vector. + """ + state[0] = transform[0] + state[1] = transform[1] + state[2] = transform[2] + state[3] = transform[3] + state[4] = transform[4] + state[5] = transform[5] + state[6] = transform[6] + return state + + +@wp.func +def set_state_velocities_func( + state: vec13f, + velocity: wp.spatial_vectorf, +) -> vec13f: + """Set the velocity portion (last 6 elements) of a 13-element state vector. + + Overwrites elements [7..12] (angular + linear velocity) with the given spatial velocity, + leaving the pose portion [0..6] unchanged. + + Args: + state: 13-element state vector to modify. + velocity: New spatial velocity (angular, linear). + + Returns: + Updated 13-element state vector. + """ + state[7] = velocity[0] + state[8] = velocity[1] + state[9] = velocity[2] + state[10] = velocity[3] + state[11] = velocity[4] + state[12] = velocity[5] + return state + + +@wp.func +def get_link_velocity_in_com_frame_func( + link_velocity_w: wp.spatial_vectorf, + link_pose_w: wp.transformf, + body_com_pose_b: wp.transformf, +): + """Compute COM velocity from link velocity by accounting for the COM offset. + + Transforms a link-frame spatial velocity into a COM-frame velocity by adding + the cross-product contribution of the COM offset rotated into the world frame. + + Args: + link_velocity_w: Link spatial velocity in world frame (angular, linear). + link_pose_w: Link pose in world frame. + body_com_pose_b: COM pose in body (link) frame. + + Returns: + COM spatial velocity in world frame (angular, linear). + """ + return wp.spatial_vector( + wp.spatial_top(link_velocity_w) + + wp.cross( + wp.spatial_bottom(link_velocity_w), + wp.quat_rotate(wp.transform_get_rotation(link_pose_w), wp.transform_get_translation(body_com_pose_b)), + ), + wp.spatial_bottom(link_velocity_w), + ) + + +@wp.func +def get_com_pose_in_link_frame_func( + com_pose_w: wp.transformf, + com_pose_b: wp.transformf, +): + """Compute link pose in world frame from COM pose by inverting the body-frame COM offset. + + This is the inverse of ``get_com_pose_from_link_pose_func``. Given the COM pose in + world frame and the COM offset in body frame, it recovers the link pose in world frame. + + Args: + com_pose_w: COM pose in world frame. + com_pose_b: COM pose in body (link) frame. + + Returns: + Link pose in world frame. + """ + T2 = wp.transform( + wp.quat_rotate( + wp.quat_inverse(wp.transform_get_rotation(com_pose_b)), -wp.transform_get_translation(com_pose_b) + ), + wp.quat_inverse(wp.transform_get_rotation(com_pose_b)), + ) + link_pose_w = com_pose_w * T2 + return link_pose_w + + +""" +Root-level @wp.kernel (1D — used by RigidObject + Articulation). +""" + + +@wp.kernel +def get_root_link_vel_from_root_com_vel( + com_vel: wp.array(dtype=wp.spatial_vectorf), + link_pose: wp.array(dtype=wp.transformf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + link_vel: wp.array(dtype=wp.spatial_vectorf), +): + """Compute root link velocity from root center-of-mass velocity. + + This kernel transforms the root COM velocity into link-frame velocity by projecting + the angular velocity contribution from the COM offset. + + Args: + com_vel: Input array of root COM spatial velocities. Shape is (num_envs,). + link_pose: Input array of root link poses in world frame. Shape is (num_envs,). + body_com_pose_b: Input array of body COM poses in body frame. Shape is (num_envs, num_bodies). + Only the first body (index 0) is used for the root. + link_vel: Output array where root link velocities are written. Shape is (num_envs,). + """ + i = wp.tid() + link_vel[i] = get_link_vel_from_root_com_vel_func(com_vel[i], link_pose[i], body_com_pose_b[i, 0]) + + +@wp.kernel +def get_root_com_pose_from_root_link_pose( + link_pose: wp.array(dtype=wp.transformf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + com_pose_w: wp.array(dtype=wp.transformf), +): + """Compute root COM pose from root link pose. + + This kernel transforms the root link pose to the root COM pose using the body COM offset. + + Args: + link_pose: Input array of root link poses in world frame. Shape is (num_envs,). + body_com_pose_b: Input array of body COM poses in body frame. Shape is (num_envs, num_bodies). + Only the first body (index 0) is used for the root. + com_pose_w: Output array where root COM poses are written. Shape is (num_envs,). + """ + i = wp.tid() + com_pose_w[i] = get_com_pose_from_link_pose_func(link_pose[i], body_com_pose_b[i, 0]) + + +@wp.kernel +def concat_root_pose_and_vel_to_state( + pose: wp.array(dtype=wp.transformf), + vel: wp.array(dtype=wp.spatial_vectorf), + state: wp.array(dtype=vec13f), +): + """Concatenate root pose and velocity into a 13-element state vector. + + This kernel combines a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) into a single 13-element state vector. + + Args: + pose: Input array of root poses in world frame. Shape is (num_envs,). + vel: Input array of root spatial velocities. Shape is (num_envs,). + state: Output array where concatenated state vectors are written. Shape is (num_envs,). + """ + i = wp.tid() + state[i] = concat_pose_and_vel_to_state_func(pose[i], vel[i]) + + +@wp.kernel +def split_state_to_root_pose_and_vel( + state: wp.array2d(dtype=wp.float32), + pose: wp.array(dtype=wp.transformf), + vel: wp.array(dtype=wp.spatial_vectorf), +): + """Split a 13-element state vector into root pose and velocity. + + This kernel extracts a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) from a 13-element state vector. + + Args: + state: Input array of root states. Shape is (num_envs, 13). + pose: Output array where root poses are written. Shape is (num_envs,). + vel: Output array where root spatial velocities are written. Shape is (num_envs,). + """ + i = wp.tid() + # Extract pose: [pos(3), quat(4)] = state[0:7] + pose[i] = wp.transform( + wp.vec3f(state[i, 0], state[i, 1], state[i, 2]), wp.quatf(state[i, 3], state[i, 4], state[i, 5], state[i, 6]) + ) + # Extract velocity: [ang_vel(3), lin_vel(3)] = state[7:13] + vel[i] = wp.spatial_vector( + wp.vec3f(state[i, 7], state[i, 8], state[i, 9]), # angular velocity + wp.vec3f(state[i, 10], state[i, 11], state[i, 12]), # linear velocity + ) + + +""" +Body-level @wp.kernel (2D — used by Articulation + RigidObjectCollection). +""" + + +@wp.kernel +def get_body_link_vel_from_body_com_vel( + body_com_vel: wp.array2d(dtype=wp.spatial_vectorf), + body_link_pose: wp.array2d(dtype=wp.transformf), + body_com_pose: wp.array2d(dtype=wp.transformf), + body_link_vel: wp.array2d(dtype=wp.spatial_vectorf), +): + """Compute body link velocities from body COM velocities for all bodies. + + This kernel transforms COM velocities into link-frame velocities by projecting + the angular velocity contribution from the COM offset, for each body in each environment. + + Args: + body_com_vel: Input array of body COM spatial velocities. Shape is (num_envs, num_bodies). + body_link_pose: Input array of body link poses in world frame. Shape is (num_envs, num_bodies). + body_com_pose: Input array of body COM poses in body frame. Shape is (num_envs, num_bodies). + body_link_vel: Output array where body link velocities are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + body_link_vel[i, j] = get_link_vel_from_root_com_vel_func( + body_com_vel[i, j], body_link_pose[i, j], body_com_pose[i, j] + ) + + +@wp.kernel +def get_body_com_pose_from_body_link_pose( + body_link_pose: wp.array2d(dtype=wp.transformf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + body_com_pose_w: wp.array2d(dtype=wp.transformf), +): + """Compute body COM poses from body link poses for all bodies. + + This kernel transforms link poses to COM poses using the body COM offset in the body frame. + + Args: + body_link_pose: Input array of body link poses in world frame. Shape is (num_envs, num_bodies). + body_com_pose_b: Input array of body COM poses in body frame. Shape is (num_envs, num_bodies). + body_com_pose_w: Output array where body COM poses in world frame are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + body_com_pose_w[i, j] = get_com_pose_from_link_pose_func(body_link_pose[i, j], body_com_pose_b[i, j]) + + +@wp.kernel +def concat_body_pose_and_vel_to_state( + pose: wp.array2d(dtype=wp.transformf), + vel: wp.array2d(dtype=wp.spatial_vectorf), + state: wp.array2d(dtype=vec13f), +): + """Concatenate body pose and velocity into 13-element state vectors for all bodies. + + This kernel combines a 7-element pose (pos + quat) and a 6-element velocity + (angular + linear) into a single 13-element state vector, for each body in each environment. + + Args: + pose: Input array of body poses in world frame. Shape is (num_envs, num_bodies). + vel: Input array of body spatial velocities. Shape is (num_envs, num_bodies). + state: Output array where concatenated state vectors are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + state[i, j] = concat_pose_and_vel_to_state_func(pose[i, j], vel[i, j]) + + +""" +Derived property kernels. +""" + + +@wp.kernel +def quat_apply_inverse_1D_kernel( + gravity: wp.array(dtype=wp.vec3f), + quat: wp.array(dtype=wp.quatf), + projected_gravity: wp.array(dtype=wp.vec3f), +): + """Apply inverse quaternion rotation to gravity vectors (1D). + + This kernel rotates gravity vectors into the local frame of each environment + using the inverse of the provided quaternion. + + Args: + gravity: Input array of gravity vectors in world frame. Shape is (num_envs,). + quat: Input array of quaternions representing orientations. Shape is (num_envs,). + projected_gravity: Output array where projected gravity vectors are written. + Shape is (num_envs,). + """ + i = wp.tid() + projected_gravity[i] = wp.quat_rotate_inv(quat[i], gravity[i]) + + +@wp.kernel +def root_heading_w( + forward_vec: wp.array(dtype=wp.vec3f), + quat: wp.array(dtype=wp.quatf), + heading_w: wp.array(dtype=wp.float32), +): + """Compute root heading angle in the world frame. + + This kernel computes the heading angle (yaw) by rotating the forward vector + by the root quaternion and computing atan2 of the resulting x and y components. + + Args: + forward_vec: Input array of forward direction vectors. Shape is (num_envs,). + quat: Input array of root quaternions. Shape is (num_envs,). + heading_w: Output array where heading angles (radians) are written. Shape is (num_envs,). + """ + i = wp.tid() + heading_w[i] = compute_heading_w_func(forward_vec[i], quat[i]) + + +@wp.kernel +def quat_apply_inverse_2D_kernel( + vec: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + result: wp.array2d(dtype=wp.vec3f), +): + """Apply inverse quaternion rotation to vectors (2D). + + This kernel rotates vectors into the local frame of each body in each environment + using the inverse of the provided quaternion. + + Args: + vec: Input array of vectors in world frame. Shape is (num_envs, num_bodies). + quat: Input array of quaternions representing orientations. Shape is (num_envs, num_bodies). + result: Output array where rotated vectors are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + result[i, j] = wp.quat_rotate_inv(quat[i, j], vec[i, j]) + + +@wp.kernel +def body_heading_w( + forward_vec: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + heading_w: wp.array2d(dtype=wp.float32), +): + """Compute body heading angles in the world frame for all bodies. + + This kernel computes heading angles (yaw) by rotating forward vectors + by body quaternions and computing atan2 of the resulting x and y components. + + Args: + forward_vec: Input array of forward direction vectors. Shape is (num_envs, num_bodies). + quat: Input array of body quaternions. Shape is (num_envs, num_bodies). + heading_w: Output array where heading angles (radians) are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + heading_w[i, j] = compute_heading_w_func(forward_vec[i, j], quat[i, j]) + + +""" +Root-level write kernels (1D — used by RigidObject + Articulation). +""" + + +@wp.kernel +def set_root_link_pose_to_sim( + data: wp.array(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root link pose data to simulation buffers. + + Args: + data: Input array of root link poses. Shape is (num_envs,) or (num_selected_envs,) + depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. If True, env_ids + are used to index into data. If False, data is indexed sequentially. + root_link_pose_w: Output array where root link poses are written. Shape is (num_envs,). + """ + # If from mask, then we get complete data. Otherwise, we get partial data. + i = wp.tid() + if from_mask: + root_link_pose_w[env_ids[i]] = data[env_ids[i]] + else: + root_link_pose_w[env_ids[i]] = data[i] + + +@wp.kernel +def set_root_com_pose_to_sim( + data: wp.array(dtype=wp.transformf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + root_com_pose_w: wp.array(dtype=wp.transformf), + root_link_pose_w: wp.array(dtype=wp.transformf), +): + """Write root COM pose data to simulation buffers. + + This kernel writes root COM poses from the input array to the output buffers + and computes the corresponding link pose from the COM pose. + + Args: + data: Input array of root COM poses. Shape is (num_envs,) or (num_selected_envs,) + depending on from_mask. + body_com_pose_b: Input array of body COM poses in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. If True, env_ids + are used to index into data. If False, data is indexed sequentially. + root_com_pose_w: Output array where root COM poses are written. Shape is (num_envs,). + root_link_pose_w: Output array where root link poses (derived from COM) are written. + Shape is (num_envs,). + """ + i = wp.tid() + # If from mask, then we get complete data. Otherwise, we get partial data. + if from_mask: + root_com_pose_w[env_ids[i]] = data[env_ids[i]] + else: + root_com_pose_w[env_ids[i]] = data[i] + # Get the com pose in the link frame + root_link_pose_w[env_ids[i]] = get_com_pose_in_link_frame_func( + root_com_pose_w[env_ids[i]], body_com_pose_b[env_ids[i], 0] + ) + + +@wp.kernel +def set_root_com_velocity_to_sim( + data: wp.array(dtype=wp.spatial_vectorf), + env_ids: wp.array(dtype=wp.int32), + num_bodies: wp.int32, + from_mask: bool, + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root COM velocity data to simulation buffers. + + This kernel writes root COM velocities from the input array to the output buffers + and zeros out the body acceleration buffer to prevent reporting stale values. + + Args: + data: Input array of root COM spatial velocities. Shape is (num_envs,) or + (num_selected_envs,) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + num_bodies: Input scalar number of bodies per environment. + from_mask: Input flag indicating whether to use masked indexing. If True, env_ids + are used to index into data. If False, data is indexed sequentially. + root_com_velocity_w: Output array where root COM velocities are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. Shape is + (num_envs, num_bodies). + """ + i = wp.tid() + # If from mask, then we get complete data. Otherwise, we get partial data. + if from_mask: + root_com_velocity_w[env_ids[i]] = data[env_ids[i]] + else: + root_com_velocity_w[env_ids[i]] = data[i] + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[env_ids[i], j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_root_link_velocity_to_sim( + data: wp.array(dtype=wp.spatial_vectorf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + link_pose_w: wp.array(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + num_bodies: wp.int32, + from_mask: bool, + root_link_velocity_w: wp.array(dtype=wp.spatial_vectorf), + root_com_velocity_w: wp.array(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write root link velocity data to simulation buffers. + + This kernel writes root link velocities from the input array to the output buffers, + computes the corresponding COM velocity from the link velocity, and zeros out the + body acceleration buffer. + + Args: + data: Input array of root link spatial velocities. Shape is (num_envs,) or + (num_selected_envs,) depending on from_mask. + body_com_pose_b: Input array of body COM poses in body frame. Shape is + (num_envs, num_bodies). Only the first body (index 0) is used for the root. + link_pose_w: Input array of root link poses in world frame. Shape is (num_envs,). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + num_bodies: Input scalar number of bodies per environment. + from_mask: Input flag indicating whether to use masked indexing. If True, env_ids + are used to index into data. If False, data is indexed sequentially. + root_link_velocity_w: Output array where root link velocities are written. + Shape is (num_envs,). + root_com_velocity_w: Output array where root COM velocities (derived from link) + are written. Shape is (num_envs,). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + """ + # If from mask, then we get complete data. Otherwise, we get partial data. + i = wp.tid() + if from_mask: + root_link_velocity_w[env_ids[i]] = data[env_ids[i]] + else: + root_link_velocity_w[env_ids[i]] = data[i] + # Get the link velocity in the com frame + root_com_velocity_w[env_ids[i]] = get_link_velocity_in_com_frame_func( + root_link_velocity_w[env_ids[i]], link_pose_w[env_ids[i]], body_com_pose_b[env_ids[i], 0] + ) + # Make the acceleration zero to prevent reporting old values + for j in range(num_bodies): + body_acc_w[env_ids[i], j] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +""" +Body-level write kernels (2D — used by RigidObjectCollection). +""" + + +@wp.kernel +def set_body_link_pose_to_sim( + data: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_link_pose_w: wp.array2d(dtype=wp.transformf), +): + """Write body link pose data to simulation buffers. + + Args: + data: Input array of body link poses. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_link_pose_w: Output array where body link poses are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + if from_mask: + body_link_pose_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + else: + body_link_pose_w[env_ids[i], body_ids[j]] = data[i, j] + + +@wp.kernel +def set_body_com_pose_to_sim( + data: wp.array2d(dtype=wp.transformf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_com_pose_w: wp.array2d(dtype=wp.transformf), + body_link_pose_w: wp.array2d(dtype=wp.transformf), +): + """Write body COM pose data to simulation buffers. + + This kernel writes body COM poses from the input array to the output buffers + and computes the corresponding link poses from the COM poses, for each body + in each environment. + + Args: + data: Input array of body COM poses. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + body_com_pose_b: Input array of body COM poses in body frame. Shape is + (num_envs, num_bodies). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_com_pose_w: Output array where body COM poses are written. + Shape is (num_envs, num_bodies). + body_link_pose_w: Output array where body link poses (derived from COM) are written. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + if from_mask: + body_com_pose_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + else: + body_com_pose_w[env_ids[i], body_ids[j]] = data[i, j] + # Get the link pose from com pose + body_link_pose_w[env_ids[i], body_ids[j]] = get_com_pose_in_link_frame_func( + body_com_pose_w[env_ids[i], body_ids[j]], body_com_pose_b[env_ids[i], body_ids[j]] + ) + + +@wp.kernel +def set_body_com_velocity_to_sim( + data: wp.array2d(dtype=wp.spatial_vectorf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_com_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write body COM velocity data to simulation buffers. + + This kernel writes body COM velocities from the input array to the output buffers + and zeros out the body acceleration buffer, for each body in each environment. + + Args: + data: Input array of body COM spatial velocities. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_com_velocity_w: Output array where body COM velocities are written. + Shape is (num_envs, num_bodies). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + if from_mask: + body_com_velocity_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + else: + body_com_velocity_w[env_ids[i], body_ids[j]] = data[i, j] + # Make the acceleration zero to prevent reporting old values + body_acc_w[env_ids[i], body_ids[j]] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +@wp.kernel +def set_body_link_velocity_to_sim( + data: wp.array2d(dtype=wp.spatial_vectorf), + body_com_pose_b: wp.array2d(dtype=wp.transformf), + body_link_pose_w: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + body_link_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_com_velocity_w: wp.array2d(dtype=wp.spatial_vectorf), + body_acc_w: wp.array2d(dtype=wp.spatial_vectorf), +): + """Write body link velocity data to simulation buffers. + + This kernel writes body link velocities from the input array to the output buffers, + computes the corresponding COM velocities from the link velocities, and zeros out + the body acceleration buffer. + + Args: + data: Input array of body link spatial velocities. Shape is (num_envs, num_bodies) + or (num_selected_envs, num_selected_bodies) depending on from_mask. + body_com_pose_b: Input array of body COM poses in body frame. Shape is + (num_envs, num_bodies). + body_link_pose_w: Input array of body link poses in world frame. Shape is + (num_envs, num_bodies). + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + body_link_velocity_w: Output array where body link velocities are written. + Shape is (num_envs, num_bodies). + body_com_velocity_w: Output array where body COM velocities (derived from link) + are written. Shape is (num_envs, num_bodies). + body_acc_w: Output array where body accelerations are zeroed. + Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + if from_mask: + body_link_velocity_w[env_ids[i], body_ids[j]] = data[env_ids[i], body_ids[j]] + else: + body_link_velocity_w[env_ids[i], body_ids[j]] = data[i, j] + # Get the link velocity in the com frame + body_com_velocity_w[env_ids[i], body_ids[j]] = get_link_velocity_in_com_frame_func( + body_link_velocity_w[env_ids[i], body_ids[j]], + body_link_pose_w[env_ids[i], body_ids[j]], + body_com_pose_b[env_ids[i], body_ids[j]], + ) + # Make the acceleration zero to prevent reporting old values + body_acc_w[env_ids[i], body_ids[j]] = wp.spatial_vectorf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) + + +""" +Generic buffer-writing kernels (used by Articulation + RigidObject + RigidObjectCollection). +""" + + +@wp.kernel +def write_2d_data_to_buffer_with_indices( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + joint_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.float32), +): + """Write 2D float data to a buffer at specified indices. + + This kernel copies float data from an input array to an output buffer at the specified + environment and joint/body indices. + + Args: + in_data: Input array containing float data. Shape is (num_envs, num_joints) or + (num_selected_envs, num_selected_joints) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + joint_ids: Input array of joint/body indices to write to. Shape is (num_selected_joints,). + from_mask: Input flag indicating whether to use masked indexing. If True, env_ids + and joint_ids are used to index into in_data. If False, in_data is indexed + directly using the thread indices. + out_data: Output array where data is written. Shape is (num_envs, num_joints). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], joint_ids[j]] = in_data[env_ids[i], joint_ids[j]] + else: + out_data[env_ids[i], joint_ids[j]] = in_data[i, j] + + +@wp.kernel +def write_body_inertia_to_buffer( + in_data: wp.array3d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array3d(dtype=wp.float32), +): + """Write body inertia data to a buffer at specified indices. + + This kernel copies 3x3 inertia tensor data (stored as 9 floats) from an input array + to an output buffer at the specified environment and body indices. + + Args: + in_data: Input array containing inertia data. Shape is (num_envs, num_bodies, 9) or + (num_selected_envs, num_selected_bodies, 9) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where inertia data is written. Shape is (num_envs, num_bodies, 9). + """ + i, j = wp.tid() + if from_mask: + for k in range(9): + out_data[env_ids[i], body_ids[j], k] = in_data[env_ids[i], body_ids[j], k] + else: + for k in range(9): + out_data[env_ids[i], body_ids[j], k] = in_data[i, j, k] + + +@wp.kernel +def write_single_body_inertia_to_buffer( + in_data: wp.array2d(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.float32), +): + """Write body inertia data to a buffer at specified indices. + + This kernel copies 3x3 inertia tensor data (stored as 9 floats) from an input array + to an output buffer at the specified environment and body indices. + + Args: + in_data: Input array containing inertia data. Shape is (num_envs, 9) or + (num_selected_envs, 9) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where inertia data is written. Shape is (num_envs, 9). + """ + i = wp.tid() + if from_mask: + for k in range(9): + out_data[env_ids[i], k] = in_data[env_ids[i], k] + else: + for k in range(9): + out_data[env_ids[i], k] = in_data[i, k] + + +@wp.kernel +def write_body_com_pose_to_buffer( + in_data: wp.array2d(dtype=wp.transformf), + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out_data: wp.array2d(dtype=wp.transformf), +): + """Write body COM pose data to a buffer at specified indices. + + This kernel copies body COM pose data from an input array to an output buffer at the + specified environment and body indices. + + Args: + in_data: Input array containing body COM poses. Shape is (num_envs, num_bodies) or + (num_selected_envs, num_selected_bodies) depending on from_mask. + env_ids: Input array of environment indices to write to. Shape is (num_selected_envs,). + body_ids: Input array of body indices to write to. Shape is (num_selected_bodies,). + from_mask: Input flag indicating whether to use masked indexing. + out_data: Output array where body COM poses are written. Shape is (num_envs, num_bodies). + """ + i, j = wp.tid() + if from_mask: + out_data[env_ids[i], body_ids[j]] = in_data[env_ids[i], body_ids[j]] + else: + out_data[env_ids[i], body_ids[j]] = in_data[i, j] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py new file mode 100644 index 000000000000..89af788c3f4f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid object assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.pyi new file mode 100644 index 000000000000..1c96a5aa4550 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RigidObject", + "RigidObjectData", +] + +from .rigid_object import RigidObject +from .rigid_object_data import RigidObjectData diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py new file mode 100644 index 000000000000..930b8836859a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -0,0 +1,1197 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp + +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.assets.rigid_object.base_rigid_object import BaseRigidObject +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .rigid_object_data import RigidObjectData + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + + from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg + + +class RigidObject(BaseRigidObject): + """A rigid object asset class. + + Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects + such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. + + For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid body. On playing the + simulation, the physics engine will automatically register the rigid body and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. + + .. note:: + + For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view + class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), + we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes + in Isaac Sim perform additional USD-related operations which are slow and also not required. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "physx" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> RigidObjectData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single rigid body. + """ + return 1 + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object.""" + prim_paths = self.root_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def root_view(self): + """Root view for the asset. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the rigid object. + + Args: + env_ids: Environment indices. If None, then all indices are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve all indices + if (env_ids is None) or (env_ids == slice(None)): + env_ids = slice(None) + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + get_force_data = self._get_inst_wrench_force_f32 + get_torque_data = self._get_inst_wrench_torque_f32 + else: + composer = self._permanent_wrench_composer + get_force_data = self._get_perm_wrench_force_f32 + get_torque_data = self._get_perm_wrench_torque_f32 + composer.compose_to_body_frame() + self.root_view.apply_forces_and_torques_at_position( + force_data=get_force_data(), + torque_data=get_torque_data(), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the rigid body based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + """ + Operations - Write to simulation. + """ + + def write_root_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) + or (len(env_ids),) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids) + + def write_root_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root pose over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_link_pose_to_sim_mask(root_pose=root_pose, env_mask=env_mask) + + def write_root_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) + or (len(env_ids),) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + self.write_root_com_velocity_to_sim_mask(root_velocity=root_velocity, env_mask=env_mask) + + def write_root_link_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + wp.launch( + shared_kernels.set_root_link_pose_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_pose, + env_ids, + full_data, + ], + outputs=[ + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._root_com_pose_w.timestamp = -1.0 + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + self.data._root_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_transforms(self._get_root_link_pose_w_f32(), indices=env_ids) + + def write_root_link_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_root_link_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids, full_data=True) + + def write_root_com_pose_to_sim_index( + self, + *, + root_pose: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7), + or (len(env_ids),) / (num_instances,) with dtype wp.transformf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + wp.launch( + shared_kernels.set_root_com_pose_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_pose, + self.data.body_com_pose_b, + env_ids, + full_data, + ], + outputs=[ + self.data.root_com_pose_w, + self.data.root_link_pose_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._root_com_state_w.timestamp = -1.0 + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_transforms(self._get_root_link_pose_w_f32(), indices=env_ids) + + def write_root_com_pose_to_sim_mask( + self, + *, + root_pose: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass pose over selected environment mask into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (num_instances, 7) + or (num_instances,) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_root_com_pose_to_sim_index(root_pose=root_pose, env_ids=env_ids, full_data=True) + + def write_root_com_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. + Shape is (len(env_ids), 6) or (num_instances, 6), + or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + wp.launch( + shared_kernels.set_root_com_velocity_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + env_ids, + 1, # num_bodies is always 1 for RigidObject + full_data, + ], + outputs=[ + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._root_link_vel_w.timestamp = -1.0 + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_com_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_velocities(self._get_root_com_vel_w_f32(), indices=env_ids) + + def write_root_com_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's center of mass rather than the root's frame. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_root_com_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids, full_data=True) + + def write_root_link_velocity_to_sim_index( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root frame velocities in simulation world frame. + Shape is (len(env_ids), 6) or (num_instances, 6), + or (len(env_ids),) / (num_instances,) with dtype wp.spatial_vectorf. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + # Access body_com_pose_b and root_link_pose_w properties to ensure they are current. + wp.launch( + shared_kernels.set_root_link_velocity_to_sim, + dim=env_ids.shape[0], + inputs=[ + root_velocity, + self.data.body_com_pose_b, + self.data.root_link_pose_w, + env_ids, + 1, # num_bodies is always 1 for RigidObject + full_data, + ], + outputs=[ + self.data.root_link_vel_w, + self.data.root_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._root_link_state_w.timestamp = -1.0 + self.data._root_state_w.timestamp = -1.0 + self.data._root_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_velocities(self._get_root_com_vel_w_f32(), indices=env_ids) + + def write_root_link_velocity_to_sim_mask( + self, + *, + root_velocity: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set the root link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the root's frame rather than the root's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (num_instances, 6) + or (num_instances,) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + self.write_root_link_velocity_to_sim_index(root_velocity=root_velocity, env_ids=env_ids, full_data=True) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)) or (num_instances, num_bodies) + if full_data. + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_mass, + ], + device=self.device, + ) + + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_body_mass, self.data._body_mass) + self.root_view.set_masses(self._cpu_body_mass, indices=cpu_env_ids) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_masses_index(masses=masses, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set center of mass pose of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is (len(env_ids), len(body_ids), 7) or + (num_instances, num_bodies, 7) if full_data. + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_com_pose_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_com_pose_b.data, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_body_coms, self.data._body_com_pose_b.data) + self.root_view.set_coms(self._cpu_body_coms, indices=cpu_env_ids) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass pose of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is (num_instances, num_bodies, 7). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_coms_index(coms=coms, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9) or + (num_instances, num_bodies, 9) if full_data. + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_inertia_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + self._ALL_BODY_INDICES, + full_data, + ], + outputs=[ + self.data._body_inertia, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + cpu_env_ids = self._get_cpu_env_ids(env_ids) + wp.copy(self._cpu_body_inertia, self.data._body_inertia) + self.root_view.set_inertias(self._cpu_body_inertia.flatten(), indices=cpu_env_ids) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_mask: Body mask. If None, then all bodies are used. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" + f" located at: '{articulation_prims}' under '{template_prim_path}'. Please disable the articulation" + " root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] + # -- object view + self._root_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*")) + + # check if the rigid body was created + if self.root_view._backend is None: + raise RuntimeError(f"Failed to create rigid body at: {self.cfg.prim_path}. Please check PhysX logs.") + + # container for data access + self._data = RigidObjectData(self.root_view, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + # Let the rigid object data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_INDICES = wp.array(np.arange(self.num_instances, dtype=np.int32), device=self.device) + self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.body_names = self.body_names + + # Cached .view(wp.float32) wrappers for structured warp arrays. + # These avoid per-call wp.array metadata allocation in writers. + # Reset to None each time _create_buffers runs (during initialization). + self._root_link_pose_w_f32: wp.array | None = None + self._root_com_vel_w_f32: wp.array | None = None + # Cached wrench views for write_data_to_sim + self._inst_wrench_force_f32: wp.array | None = None + self._inst_wrench_torque_f32: wp.array | None = None + self._perm_wrench_force_f32: wp.array | None = None + self._perm_wrench_torque_f32: wp.array | None = None + + # Pre-allocated pinned CPU buffers for PhysX TensorAPI writes. + # PhysX requires CPU arrays for "model" property updates (masses, coms, inertias). + # Pinned memory enables DMA fast path and avoids per-call malloc. + N, B = self.num_instances, self.num_bodies + self._cpu_env_ids_all = wp.zeros(N, dtype=wp.int32, device="cpu", pinned=True) + wp.copy(self._cpu_env_ids_all, self._ALL_INDICES) + self._cpu_body_mass = wp.zeros((N, B), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_body_coms = wp.zeros((N, B, 7), dtype=wp.float32, device="cpu", pinned=True) + self._cpu_body_inertia = wp.zeros((N, B, 9), dtype=wp.float32, device="cpu", pinned=True) + + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + # default state + # -- root state + # note: we cast to tuple to avoid torch/numpy type mismatch. + default_root_pose = tuple(self.cfg.init_state.pos) + tuple(self.cfg.init_state.rot) + default_root_vel = tuple(self.cfg.init_state.lin_vel) + tuple(self.cfg.init_state.ang_vel) + default_root_pose = np.tile(np.array(default_root_pose, dtype=np.float32), (self.num_instances, 1)) + default_root_vel = np.tile(np.array(default_root_vel, dtype=np.float32), (self.num_instances, 1)) + self._data.default_root_pose = wp.array(default_root_pose, dtype=wp.transformf, device=self.device) + self._data.default_root_vel = wp.array(default_root_vel, dtype=wp.spatial_vectorf, device=self.device) + + def _resolve_env_mask(self, env_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve environment mask to a torch tensor. + + Args: + env_mask: Environment mask. If None, then all indices are used. + + Returns: + A torch tensor of environment indices. + """ + if env_mask is not None: + if isinstance(env_mask, wp.array): + env_mask = wp.to_torch(env_mask) + env_ids = torch.nonzero(env_mask)[:, 0].to(torch.int32) + else: + env_ids = self._ALL_INDICES + return env_ids + + def _resolve_body_mask(self, body_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve body mask to a torch tensor. + + Args: + body_mask: Body mask. If None, then all indices are used. + + Returns: + A torch tensor of body indices. + """ + if body_mask is not None: + if isinstance(body_mask, wp.array): + body_mask = wp.to_torch(body_mask) + body_ids = torch.nonzero(body_mask)[:, 0].to(torch.int32) + else: + body_ids = self._ALL_BODY_INDICES + return body_ids + + def _get_cpu_env_ids(self, env_ids: wp.array | torch.Tensor) -> wp.array: + """Get CPU environment indices. Uses pre-allocated pinned buffer for full-index case. + + Args: + env_ids: Environment indices. + + Returns: + A warp array of environment indices on CPU. + """ + if isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids, dtype=wp.int32) + # Fast path: if these are all indices, use pre-allocated pinned buffer + if env_ids.ptr == self._ALL_INDICES.ptr: + return self._cpu_env_ids_all + # Slow path: partial indices (reset), clone to CPU + return wp.clone(env_ids, device="cpu") + + def _get_root_link_pose_w_f32(self) -> wp.array: + """Get a cached float32 view of root_link_pose_w for PhysX TensorAPI. Invalidated in ``_create_buffers``.""" + if self._root_link_pose_w_f32 is None: + self._root_link_pose_w_f32 = self.data._root_link_pose_w.data.view(wp.float32) + return self._root_link_pose_w_f32 + + def _get_root_com_vel_w_f32(self) -> wp.array: + """Get a cached float32 view of root_com_vel_w for PhysX TensorAPI. Invalidated in ``_create_buffers``.""" + if self._root_com_vel_w_f32 is None: + self._root_com_vel_w_f32 = self.data._root_com_vel_w.data.view(wp.float32) + return self._root_com_vel_w_f32 + + def _get_inst_wrench_force_f32(self) -> wp.array: + """Get a cached flattened float32 view of instantaneous wrench force. Invalidated in ``_create_buffers``.""" + if self._inst_wrench_force_f32 is None: + self._inst_wrench_force_f32 = self._instantaneous_wrench_composer.out_force_b.warp.flatten().view( + wp.float32 + ) + return self._inst_wrench_force_f32 + + def _get_inst_wrench_torque_f32(self) -> wp.array: + """Get a cached flattened float32 view of instantaneous wrench torque. Invalidated in ``_create_buffers``.""" + if self._inst_wrench_torque_f32 is None: + self._inst_wrench_torque_f32 = self._instantaneous_wrench_composer.out_torque_b.warp.flatten().view( + wp.float32 + ) + return self._inst_wrench_torque_f32 + + def _get_perm_wrench_force_f32(self) -> wp.array: + """Get a cached flattened float32 view of permanent wrench force. Invalidated in ``_create_buffers``.""" + if self._perm_wrench_force_f32 is None: + self._perm_wrench_force_f32 = self._permanent_wrench_composer.out_force_b.warp.flatten().view(wp.float32) + return self._perm_wrench_force_f32 + + def _get_perm_wrench_torque_f32(self) -> wp.array: + """Get a cached flattened float32 view of permanent wrench torque. Invalidated in ``_create_buffers``.""" + if self._perm_wrench_torque_f32 is None: + self._perm_wrench_torque_f32 = self._permanent_wrench_composer.out_torque_b.warp.flatten().view(wp.float32) + return self._perm_wrench_torque_f32 + + def _resolve_env_ids(self, env_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve environment indices to a warp array or tensor. + + Args: + env_ids: Environment indices. If None, then all indices are used. + + Returns: + A warp array of environment indices or a tensor of environment indices. + """ + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve body indices to a warp array or tensor. + + Args: + body_ids: Body indices. If None, then all indices are used. + + Returns: + A warp array of body indices or a tensor of body indices. + """ + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if (body_ids is None) or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_view = None + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + warnings.warn( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_view + + def write_root_state_to_sim( + self, root_state: torch.Tensor | wp.array, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None + ): + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim( + self, root_state: torch.Tensor | wp.array, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None + ): + """Deprecated, same as :meth:`write_root_com_pose_to_sim_index` and + :meth:`write_root_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_com_pose_to_sim_index' and 'write_root_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_root_com_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim( + self, root_state: torch.Tensor | wp.array, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None + ): + """Deprecated, same as :meth:`write_root_link_pose_to_sim_index` and + :meth:`write_root_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_root_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_root_link_pose_to_sim_index' and 'write_root_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py new file mode 100644 index 000000000000..a8e674dc6bf8 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -0,0 +1,1122 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectData(BaseRigidObjectData): + """Data container for a rigid object. + + This class contains the data for a rigid object in the simulation. The data includes the state of + the root rigid body and the state of all the bodies in the object. The data is stored in the simulation + world frame unless otherwise specified. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + + .. note:: + **Pull-to-refresh model.** Properties pull fresh data from the PhysX tensor API on first + access per timestamp and cache the result. This differs from Newton, where buffers are + refreshed automatically by the simulation. + + .. note:: + **ProxyArray pointer stability.** Each :class:`ProxyArray` wrapper is created once and + reused because the PhysX tensor API returns views into stable, pre-allocated GPU buffers + whose device pointer does not change across simulation steps. + """ + + __backend_name__: str = "physx" + """The name of the backend for the rigid object data.""" + + def __init__(self, root_view: physx.RigidBodyView, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: physx.RigidBodyView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + self._num_instances = self._root_view.count + + # Obtain global physics sim view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = gravity_dir.repeat(self._root_view.count, 1) + forward_vec = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray(wp.from_torch(gravity_dir, dtype=wp.vec3f)) + self.FORWARD_VEC_B = ProxyArray(wp.from_torch(forward_vec, dtype=wp.vec3f)) + + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the rigid object data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the rigid object data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._is_primed = value + + def update(self, dt: float) -> None: + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + """ + Defaults. + """ + + @property + def default_root_pose(self) -> ProxyArray: + """Default root pose ``[pos, quat]`` in local environment frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The position and quaternion are of the rigid body's actor frame. + """ + if self._default_root_pose_ta is None: + self._default_root_pose_ta = ProxyArray(self._default_root_pose) + return self._default_root_pose_ta + + @default_root_pose.setter + def default_root_pose(self, value: wp.array) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_pose.assign(value) + + @property + def default_root_vel(self) -> ProxyArray: + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + The linear and angular velocities are of the rigid body's center of mass frame. + """ + if self._default_root_vel_ta is None: + self._default_root_vel_ta = ProxyArray(self._default_root_vel) + return self._default_root_vel_ta + + @default_root_vel.setter + def default_root_vel(self, value: wp.array) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_vel.assign(value) + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> ProxyArray: + """Root link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + This quantity is the pose of the actor frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + self._root_link_pose_w.data = self._root_view.get_transforms().view(wp.transformf) + self._root_link_pose_w.timestamp = self._sim_timestamp + + if self._root_link_pose_w_ta is None: + self._root_link_pose_w_ta = ProxyArray(self._root_link_pose_w.data) + return self._root_link_pose_w_ta + + @property + def root_link_vel_w(self) -> ProxyArray: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity and compute link velocity + wp.launch( + shared_kernels.get_root_link_vel_from_root_com_vel, + dim=self._num_instances, + inputs=[ + self.root_com_vel_w, + self.root_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._root_link_vel_w.data, + ], + device=self.device, + ) + self._root_link_vel_w.timestamp = self._sim_timestamp + + if self._root_link_vel_w_ta is None: + self._root_link_vel_w_ta = ProxyArray(self._root_link_vel_w.data) + return self._root_link_vel_w_ta + + @property + def root_com_pose_w(self) -> ProxyArray: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + This quantity is the pose of the center of mass frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + wp.launch( + shared_kernels.get_root_com_pose_from_root_link_pose, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._root_com_pose_w.data, + ], + device=self.device, + ) + self._root_com_pose_w.timestamp = self._sim_timestamp + + if self._root_com_pose_w_ta is None: + self._root_com_pose_w_ta = ProxyArray(self._root_com_pose_w.data) + return self._root_com_pose_w_ta + + @property + def root_com_vel_w(self) -> ProxyArray: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances,), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_view.get_velocities().view(wp.spatial_vectorf) + self._root_com_vel_w.timestamp = self._sim_timestamp + + if self._root_com_vel_w_ta is None: + self._root_com_vel_w_ta = ProxyArray(self._root_com_vel_w.data) + return self._root_com_vel_w_ta + + """ + Body state properties. + """ + + @property + def body_mass(self) -> ProxyArray: + """Mass of all bodies in the simulation world frame. + + Shape is (num_instances, 1), dtype = wp.float32. + In torch this resolves to (num_instances, 1). + """ + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Inertia of all bodies in the simulation world frame. + + Shape is (num_instances, 1, 9), dtype = wp.float32. + In torch this resolves to (num_instances, 1, 9). + """ + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + parent = self.root_link_pose_w + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(parent.warp.reshape((self._num_instances, 1))) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + parent = self.root_link_vel_w + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(parent.warp.reshape((self._num_instances, 1))) + return self._body_link_vel_w_ta + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + parent = self.root_com_pose_w + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(parent.warp.reshape((self._num_instances, 1))) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + parent = self.root_com_vel_w + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(parent.warp.reshape((self._num_instances, 1))) + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + + Shape is (num_instances, 1), dtype = wp.spatial_vectorf. In torch this resolves to (num_instances, 1, 6). + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + self._body_com_acc_w.data = ( + self._root_view.get_accelerations().view(wp.spatial_vectorf).reshape((self._num_instances, 1)) + ) + self._body_com_acc_w.timestamp = self._sim_timestamp + + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + + Shape is (num_instances, 1), dtype = wp.transformf. In torch this resolves to (num_instances, 1, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # read data from simulation + self._body_com_pose_b.data.assign( + self._root_view.get_coms().view(wp.transformf).reshape((self._num_instances, 1)) + ) + self._body_com_pose_b.timestamp = self._sim_timestamp + + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + return self._body_com_pose_b_ta + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.GRAVITY_VEC_W, self.root_link_quat_w], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + return self._projected_gravity_b_ta + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + + Shape is (num_instances,), dtype = wp.float32. In torch this resolves to (num_instances,). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.root_heading_w, + dim=self._num_instances, + inputs=[self.FORWARD_VEC_B, self.root_link_quat_w], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + if self._heading_w_ta is None: + self._heading_w_ta = ProxyArray(self._heading_w.data) + return self._heading_w_ta + + @property + def root_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._root_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_lin_vel_w, self.root_link_quat_w], + outputs=[self._root_link_lin_vel_b.data], + device=self.device, + ) + self._root_link_lin_vel_b.timestamp = self._sim_timestamp + if self._root_link_lin_vel_b_ta is None: + self._root_link_lin_vel_b_ta = ProxyArray(self._root_link_lin_vel_b.data) + return self._root_link_lin_vel_b_ta + + @property + def root_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._root_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_link_ang_vel_w, self.root_link_quat_w], + outputs=[self._root_link_ang_vel_b.data], + device=self.device, + ) + self._root_link_ang_vel_b.timestamp = self._sim_timestamp + if self._root_link_ang_vel_b_ta is None: + self._root_link_ang_vel_b_ta = ProxyArray(self._root_link_ang_vel_b.data) + return self._root_link_ang_vel_b_ta + + @property + def root_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._root_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_lin_vel_w, self.root_link_quat_w], + outputs=[self._root_com_lin_vel_b.data], + device=self.device, + ) + self._root_com_lin_vel_b.timestamp = self._sim_timestamp + if self._root_com_lin_vel_b_ta is None: + self._root_com_lin_vel_b_ta = ProxyArray(self._root_com_lin_vel_b.data) + return self._root_com_lin_vel_b_ta + + @property + def root_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._root_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_1D_kernel, + dim=self._num_instances, + inputs=[self.root_com_ang_vel_w, self.root_link_quat_w], + outputs=[self._root_com_ang_vel_b.data], + device=self.device, + ) + self._root_com_ang_vel_b.timestamp = self._sim_timestamp + if self._root_com_ang_vel_b_ta is None: + self._root_com_ang_vel_b_ta = ProxyArray(self._root_com_ang_vel_b.data) + return self._root_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> ProxyArray: + """Root link position in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + parent = self.root_link_pose_w + if self._root_link_pos_w_ta is None: + self._root_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_link_pos_w_ta + + @property + def root_link_quat_w(self) -> ProxyArray: + """Root link orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + This quantity is the orientation of the actor frame of the root rigid body. + """ + parent = self.root_link_pose_w + if self._root_link_quat_w_ta is None: + self._root_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_link_quat_w_ta + + @property + def root_link_lin_vel_w(self) -> ProxyArray: + """Root linear velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + parent = self.root_link_vel_w + if self._root_link_lin_vel_w_ta is None: + self._root_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_link_lin_vel_w_ta + + @property + def root_link_ang_vel_w(self) -> ProxyArray: + """Root link angular velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + parent = self.root_link_vel_w + if self._root_link_ang_vel_w_ta is None: + self._root_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_link_ang_vel_w_ta + + @property + def root_com_pos_w(self) -> ProxyArray: + """Root center of mass position in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the position of the center of mass frame of the root rigid body relative to the world. + """ + parent = self.root_com_pose_w + if self._root_com_pos_w_ta is None: + self._root_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._root_com_pos_w_ta + + @property + def root_com_quat_w(self) -> ProxyArray: + """Root center of mass orientation (x, y, z, w) in simulation world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + This quantity is the orientation of the principal axes of inertia of the root rigid body relative to the world. + """ + parent = self.root_com_pose_w + if self._root_com_quat_w_ta is None: + self._root_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._root_com_quat_w_ta + + @property + def root_com_lin_vel_w(self) -> ProxyArray: + """Root center of mass linear velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + parent = self.root_com_vel_w + if self._root_com_lin_vel_w_ta is None: + self._root_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._root_com_lin_vel_w_ta + + @property + def root_com_ang_vel_w(self) -> ProxyArray: + """Root center of mass angular velocity in simulation world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + parent = self.root_com_vel_w + if self._root_com_ang_vel_w_ta is None: + self._root_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._root_com_ang_vel_w_ta + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear velocity of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular velocity of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the position of the rigid bodies' center of mass frame. + """ + parent = self.body_com_pose_w + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the principal axes of inertia of the rigid bodies. + """ + parent = self.body_com_pose_w + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + + Shape is (num_instances, 1), dtype = wp.vec3f. In torch this resolves to (num_instances, 1, 3). + This quantity is the center of mass location relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_pos_b_ta is None: + self._body_com_pos_b_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_b_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + + Shape is (num_instances, 1), dtype = wp.quatf. In torch this resolves to (num_instances, 1, 4). + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_b_ta + + def _create_buffers(self) -> None: + super()._create_buffers() + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer((self._num_instances), self.device, wp.transformf) + self._root_link_vel_w = TimestampedBuffer((self._num_instances), self.device, wp.spatial_vectorf) + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer((self._num_instances, 1), self.device, wp.transformf) + # -- com frame w.r.t. world frame + self._root_com_pose_w = TimestampedBuffer((self._num_instances), self.device, wp.transformf) + self._root_com_vel_w = TimestampedBuffer((self._num_instances), self.device, wp.spatial_vectorf) + self._body_com_acc_w = TimestampedBuffer((self._num_instances, 1), self.device, wp.spatial_vectorf) + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + self._root_link_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + self._root_com_state_w = TimestampedBuffer((self._num_instances), self.device, shared_kernels.vec13f) + # -- derived properties (these are cached to avoid repeated memory allocations) + self._projected_gravity_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._heading_w = TimestampedBuffer((self._num_instances), self.device, wp.float32) + self._root_link_lin_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_link_ang_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_com_lin_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + self._root_com_ang_vel_b = TimestampedBuffer((self._num_instances), self.device, wp.vec3f) + + # -- Default state + self._default_root_pose = wp.zeros((self._num_instances), dtype=wp.transformf, device=self.device) + self._default_root_vel = wp.zeros((self._num_instances), dtype=wp.spatial_vectorf, device=self.device) + self._default_root_state = None + + # -- Body properties + self._body_mass = wp.clone(self._root_view.get_masses(), device=self.device) + self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device).reshape( + (self._num_instances, 1, 9) + ) + + # Initialize ProxyArray wrappers + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create pinned ProxyArray wrappers for all data buffers. + + This is called once from :meth:`_create_buffers` during initialization. + PhysX tensor API buffers have stable GPU pointers across simulation steps, + so no rebinding is needed (unlike Newton). + """ + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + # Defaults + self._default_root_pose_ta: ProxyArray | None = None + self._default_root_vel_ta: ProxyArray | None = None + # Root state (timestamped) + self._root_link_pose_w_ta: ProxyArray | None = None + self._root_link_vel_w_ta: ProxyArray | None = None + self._root_com_pose_w_ta: ProxyArray | None = None + self._root_com_vel_w_ta: ProxyArray | None = None + # Body properties + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + # Body state (reshaped from root) + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + # Derived properties (timestamped) + self._projected_gravity_b_ta: ProxyArray | None = None + self._heading_w_ta: ProxyArray | None = None + self._root_link_lin_vel_b_ta: ProxyArray | None = None + self._root_link_ang_vel_b_ta: ProxyArray | None = None + self._root_com_lin_vel_b_ta: ProxyArray | None = None + self._root_com_ang_vel_b_ta: ProxyArray | None = None + # Sliced properties (root link) + self._root_link_pos_w_ta: ProxyArray | None = None + self._root_link_quat_w_ta: ProxyArray | None = None + self._root_link_lin_vel_w_ta: ProxyArray | None = None + self._root_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (root com) + self._root_com_pos_w_ta: ProxyArray | None = None + self._root_com_quat_w_ta: ProxyArray | None = None + self._root_com_lin_vel_w_ta: ProxyArray | None = None + self._root_com_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body link) + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body com) + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + # Sliced properties (body com in body frame) + self._body_com_pos_b_ta: ProxyArray | None = None + self._body_com_quat_b_ta: ProxyArray | None = None + # Deprecated state-concat properties + self._default_root_state_ta: ProxyArray | None = None + self._root_state_w_ta: ProxyArray | None = None + self._root_link_state_w_ta: ProxyArray | None = None + self._root_com_state_w_ta: ProxyArray | None = None + self._body_state_w_ta: ProxyArray | None = None + self._body_link_state_w_ta: ProxyArray | None = None + self._body_com_state_w_ta: ProxyArray | None = None + + """ + Internal helpers. + """ + + def _get_pos_from_transform(self, transform: wp.array) -> ProxyArray: + """Generates a position array from a transform array.""" + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + + def _get_quat_from_transform(self, transform: wp.array) -> ProxyArray: + """Generates a quaternion array from a transform array.""" + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + + def _get_lin_vel_from_spatial_vector(self, spatial_vector: wp.array) -> ProxyArray: + """Generates a linear velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + def _get_ang_vel_from_spatial_vector(self, spatial_vector: wp.array) -> ProxyArray: + """Generates an angular velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + """ + Deprecated properties. + """ + + @property + def default_root_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, 13). + """ + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_root_state is None: + self._default_root_state = wp.zeros((self._num_instances), dtype=shared_kernels.vec13f, device=self.device) + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self._default_root_pose, + self._default_root_vel, + ], + outputs=[ + self._default_root_state, + ], + device=self.device, + ) + if self._default_root_state_ta is None: + self._default_root_state_ta = ProxyArray(self._default_root_state) + return self._default_root_state_ta + + @property + def root_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + + if self._root_state_w_ta is None: + self._root_state_w_ta = ProxyArray(self._root_state_w.data) + return self._root_state_w_ta + + @property + def root_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" + warnings.warn( + "The `root_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pose_w` and " + "`root_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.root_link_vel_w, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + + if self._root_link_state_w_ta is None: + self._root_link_state_w_ta = ProxyArray(self._root_link_state_w.data) + return self._root_link_state_w_ta + + @property + def root_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" + warnings.warn( + "The `root_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `root_com_pose_w` and " + "`root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + + if self._root_com_state_w_ta is None: + self._root_com_state_w_ta = ProxyArray(self._root_com_state_w.data) + return self._root_com_state_w_ta + + @property + def body_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + # Access internal buffer directly to avoid cascading deprecation warnings from root_state_w + if self._root_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_state_w.data, + ], + device=self.device, + ) + self._root_state_w.timestamp = self._sim_timestamp + if self._body_state_w_ta is None: + self._body_state_w_ta = ProxyArray(self._root_state_w.data.reshape((self._num_instances, 1))) + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + # Access internal buffer directly to avoid cascading deprecation warnings from root_link_state_w + if self._root_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_link_pose_w, + self.root_link_vel_w, + ], + outputs=[ + self._root_link_state_w.data, + ], + device=self.device, + ) + self._root_link_state_w.timestamp = self._sim_timestamp + if self._body_link_state_w_ta is None: + self._body_link_state_w_ta = ProxyArray(self._root_link_state_w.data.reshape((self._num_instances, 1))) + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + # Access internal buffer directly to avoid cascading deprecation warnings from root_com_state_w + if self._root_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_root_pose_and_vel_to_state, + dim=self._num_instances, + inputs=[ + self.root_com_pose_w, + self.root_com_vel_w, + ], + outputs=[ + self._root_com_state_w.data, + ], + device=self.device, + ) + self._root_com_state_w.timestamp = self._sim_timestamp + if self._body_com_state_w_ta is None: + self._body_com_state_w_ta = ProxyArray(self._root_com_state_w.data.reshape((self._num_instances, 1))) + return self._body_com_state_w_ta diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py new file mode 100644 index 000000000000..bb3636b55d8a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid object collection.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.pyi new file mode 100644 index 000000000000..8b12ec95e7a2 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RigidObjectCollection", + "RigidObjectCollectionData", +] + +from .rigid_object_collection import RigidObjectCollection +from .rigid_object_collection_data import RigidObjectCollectionData diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/kernels.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/kernels.py new file mode 100644 index 000000000000..ce89604fa272 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/kernels.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def resolve_view_ids( + env_ids: wp.array(dtype=wp.int32), + body_ids: wp.array(dtype=wp.int32), + num_query_envs: wp.int32, + num_total_envs: wp.int32, + view_ids: wp.array(dtype=wp.int32), +) -> None: + """Resolve flat view indices from environment and body index pairs. + + This kernel computes flat PhysX view indices from (env_id, body_id) pairs. The view + ordering is body-major: view_id = body_id * num_total_envs + env_id. The output array + is laid out in column-major order over the (env, body) grid. + + Args: + env_ids: Input array of environment indices. Shape is (num_query_envs,). + body_ids: Input array of body indices. Shape is (num_query_bodies,). + num_query_envs: Input scalar number of queried environments. + num_total_envs: Input scalar total number of environments in the simulation. + view_ids: Output array where flat view indices are written. Shape is + (num_query_bodies * num_query_envs,). + """ + i, j = wp.tid() + view_ids[j * num_query_envs + i] = body_ids[j] * num_total_envs + env_ids[i] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py new file mode 100644 index 000000000000..6d07ddbf1bc1 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -0,0 +1,1457 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import torch +import warp as wp + +import omni.physics.tensors.api as physx +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection import BaseRigidObjectCollection +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .kernels import resolve_view_ids +from .rigid_object_collection_data import RigidObjectCollectionData + +if TYPE_CHECKING: + from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectCollection(BaseRigidObjectCollection): + """A rigid object collection class. + + This class represents a collection of rigid objects in the simulation, where the state of the + rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. + + For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the + simulation, the physics engine will automatically register the rigid bodies and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. + + Rigid objects in the collection are uniquely identified via the key of the dictionary + :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the + :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. + This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by + the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid + object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary + could contain the same rigid object multiple times, leading to ambiguity. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCollectionCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "physx" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCollectionCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. + # check that the config is valid + cfg.validate() + # store inputs + self.cfg = cfg.copy() + # flag for whether the asset is initialized + self._is_initialized = False + # spawn the rigid objects + for rigid_body_cfg in self.cfg.rigid_objects.values(): + # spawn the asset + if rigid_body_cfg.spawn is not None: + rigid_body_cfg.spawn.func( + rigid_body_cfg.prim_path, + rigid_body_cfg.spawn, + translation=rigid_body_cfg.init_state.pos, + orientation=rigid_body_cfg.init_state.rot, + ) + # check that spawn was successful + matching_prims = sim_utils.find_matching_prims(rigid_body_cfg.prim_path) + if len(matching_prims) == 0: + raise RuntimeError(f"Could not find prim with path {rigid_body_cfg.prim_path}.") + # stores object names + self._body_names_list = [] + + # register various callback functions + self._register_callbacks() + self._debug_vis_handle = None + + """ + Properties + """ + + @property + def data(self) -> RigidObjectCollectionData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count // self.num_bodies + + @property + def num_bodies(self) -> int: + """Number of bodies in the rigid object collection.""" + return len(self.body_names) + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object collection.""" + return self._body_names_list + + @property + def root_view(self): + """Root view for the rigid object collection. + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset( + self, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + env_mask: wp.array | None = None, + object_mask: wp.array | None = None, + ) -> None: + """Resets all internal buffers of selected environments and objects. + + Args: + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if object_ids is None: + object_ids = self._ALL_BODY_INDICES + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + composer = self._instantaneous_wrench_composer + composer.add_raw_buffers_from(self._permanent_wrench_composer) + else: + composer = self._permanent_wrench_composer + composer.compose_to_body_frame() + self.root_view.apply_forces_and_torques_at_position( + force_data=self.reshape_data_to_view_2d(composer.out_force_b.warp, device=self.device).view(wp.float32), + torque_data=self.reshape_data_to_view_2d(composer.out_torque_b.warp, device=self.device).view( + wp.float32 + ), + position_data=None, + indices=self._env_body_ids_to_view_ids( + self._ALL_ENV_INDICES, self._ALL_BODY_INDICES, device=self.device + ), + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str]]: + """Find bodies in the rigid body collection based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + return torch.tensor(obj_ids, device=self.device, dtype=torch.int32), obj_names + + """ + Operations - Write to simulation. + """ + + def write_body_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the body pose over selected environment and body indices into the simulation. + + The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7) + or (len(env_ids), len(body_ids)) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_body_link_pose_to_sim_index(body_poses=body_poses, env_ids=env_ids, body_ids=body_ids) + + def write_body_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set the body pose over selected environment mask into the simulation. + + The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + self.write_body_link_pose_to_sim_index( + body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set the body velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_body_com_velocity_to_sim_index(body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids) + + def write_body_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set the body velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body velocities in simulation frame. + Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + self.write_body_com_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_link_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body link pose over selected environment and body indices into the simulation. + + The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body link poses in simulation frame. + Shape is (len(env_ids), len(body_ids), 7) or (num_instances, num_bodies, 7), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + wp.launch( + shared_kernels.set_body_link_pose_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_poses, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_link_pose_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device=self.device) + self.root_view.set_transforms( + self.reshape_data_to_view_2d(self.data._body_link_pose_w.data, device=self.device).view(wp.float32), + indices=view_ids, + ) + + def write_body_link_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body link pose over selected environment mask into the simulation. + + The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body link poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + self.write_body_link_pose_to_sim_index( + body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_com_pose_to_sim_index( + self, + *, + body_poses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body center of mass pose over selected environment and body indices into the simulation. + + The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body center of mass poses in simulation frame. + Shape is (len(env_ids), len(body_ids), 7) or (num_instances, num_bodies, 7), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.transformf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + wp.launch( + shared_kernels.set_body_com_pose_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_poses, + self.data.body_com_pose_b, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_com_pose_w, + self.data.body_link_pose_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device=self.device) + self.root_view.set_transforms( + self.reshape_data_to_view_2d(self.data._body_link_pose_w.data, device=self.device).view(wp.float32), + indices=view_ids, + ) + + def write_body_com_pose_to_sim_mask( + self, + *, + body_poses: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body center of mass pose over selected environment mask into the simulation. + + The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principal axes of inertia. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_poses: Body center of mass poses in simulation frame. Shape is (num_instances, num_bodies, 7) + or (num_instances, num_bodies) with dtype wp.transformf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + self.write_body_com_pose_to_sim_index(body_poses=body_poses, env_ids=env_ids, body_ids=body_ids, full_data=True) + + def write_body_com_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body center of mass velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body center of mass velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) + else: + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) + wp.launch( + shared_kernels.set_body_com_velocity_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_velocities, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_vel_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device=self.device) + self.root_view.set_velocities( + self.reshape_data_to_view_2d(self.data._body_com_vel_w.data, device=self.device).view(wp.float32), + indices=view_ids, + ) + + def write_body_com_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body center of mass velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's center of mass rather than the body's frame. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body center of mass velocities in simulation frame. + Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + self.write_body_com_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + def write_body_link_velocity_to_sim_index( + self, + *, + body_velocities: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set the body link velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's frame rather than the body's center of mass. + + .. note:: + This method expects partial data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body link velocities in simulation frame. + Shape is (len(env_ids), len(body_ids), 6) or (num_instances, num_bodies, 6), + or (len(env_ids), len(body_ids)) / (num_instances, num_bodies) with dtype wp.spatial_vectorf. + body_ids: Body indices. If None, then all indices are used. + env_ids: Environment indices. If None, then all indices are used. + full_data: Whether to expect full data. Defaults to False. + """ + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) + else: + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) + # Access body_com_pose_b and body_link_pose_w to ensure they are current. + wp.launch( + shared_kernels.set_body_link_velocity_to_sim, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + body_velocities, + self.data.body_com_pose_b, + self.data.body_link_pose_w, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data.body_link_vel_w, + self.data.body_com_vel_w, + self.data.body_com_acc_w, + ], + device=self.device, + ) + # Invalidate dependent timestamps + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device=self.device) + self.root_view.set_velocities( + self.reshape_data_to_view_2d(self.data._body_com_vel_w.data, device=self.device).view(wp.float32), + indices=view_ids, + ) + + def write_body_link_velocity_to_sim_mask( + self, + *, + body_velocities: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + body_ids: Sequence[int] | torch.Tensor | wp.array | slice | None = None, + ) -> None: + """Set the body link velocity over selected environment mask into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + + .. note:: + This sets the velocity of the body's frame rather than the body's center of mass. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + body_velocities: Body link velocities in simulation frame. Shape is (num_instances, num_bodies, 6) + or (num_instances, num_bodies) with dtype wp.spatial_vectorf. + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + body_ids: Body indices. If None, then all indices are used. + """ + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + self.write_body_link_velocity_to_sim_index( + body_velocities=body_velocities, env_ids=env_ids, body_ids=body_ids, full_data=True + ) + + """ + Operations - Setters. + """ + + def set_masses_index( + self, + *, + masses: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set masses of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is ``(len(env_ids), len(body_ids))`` + or ``(num_instances, num_bodies)`` if full_data. + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + masses, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_mass, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + # Convert from instance order (num_instances, num_bodies) to view order (num_bodies*num_instances, 1) for PhysX. + mass_view_order = self.reshape_data_to_view_2d(self.data._body_mass, device="cpu") # -> (B*I, 1) + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device="cpu") + self.root_view.set_masses(mass_view_order, indices=view_ids) + + def set_masses_mask( + self, + *, + masses: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set masses of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + masses: Masses of all bodies. Shape is ``(num_instances, num_bodies)``. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_masses_index(masses=masses, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_coms_index( + self, + *, + coms: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set center of mass pose of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is ``(len(env_ids), len(body_ids), 7)`` + or ``(num_instances, num_bodies, 7)`` if full_data. + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_com_pose_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + coms, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_com_pose_b.data, + ], + device=self.device, + ) + # Invalidate the cached buffer + self.data._body_com_pose_b.timestamp = self.data._sim_timestamp + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + # Convert from instance order (num_instances, num_bodies, 7) to view order (num_bodies*num_instances, 7) for + # PhysX. + com_view_order = self.reshape_data_to_view_2d(self.data._body_com_pose_b.data, device="cpu") # (B*I, 7) + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device="cpu") + self.root_view.set_coms(com_view_order, indices=view_ids) + + def set_coms_mask( + self, + *, + coms: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set center of mass pose of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + coms: Center of mass pose of all bodies. Shape is ``(num_instances, num_bodies, 7)``. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_coms_index(coms=coms, body_ids=body_ids, env_ids=env_ids, full_data=True) + + def set_inertias_index( + self, + *, + inertias: torch.Tensor | wp.array, + body_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + full_data: bool = False, + ) -> None: + """Set inertias of all bodies using indices. + + .. note:: + This method expects partial data or full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is ``(len(env_ids), len(body_ids), 9)`` + or ``(num_instances, num_bodies, 9)`` if full_data. + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + full_data: Whether to expect full data. Defaults to False. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + body_ids = self._resolve_body_ids(body_ids) + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_body_inertia_to_buffer, + dim=(env_ids.shape[0], body_ids.shape[0]), + inputs=[ + inertias, + env_ids, + body_ids, + full_data, + ], + outputs=[ + self.data._body_inertia, + ], + device=self.device, + ) + # Set into simulation, note that when updating "model" properties with PhysX we need to do it on CPU. + # Convert from instance order (num_instances, num_bodies) to view order for PhysX. + inertia_view_order = self.reshape_data_to_view_2d(self.data._body_inertia, device="cpu") + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids, device="cpu") + self.root_view.set_inertias(inertia_view_order, indices=view_ids) + + def set_inertias_mask( + self, + *, + inertias: torch.Tensor | wp.array, + body_mask: wp.array | None = None, + env_mask: wp.array | None = None, + ) -> None: + """Set inertias of all bodies using masks. + + .. note:: + This method expects full data. + + .. tip:: + For maximum performance we recommend using the index method. This is because in PhysX, the tensor API + is only supporting indexing, hence masks need to be converted to indices. + + Args: + inertias: Inertias of all bodies. Shape is ``(num_instances, num_bodies, 9)``. + body_mask: Body mask. If None, then all bodies are updated. Shape is (num_bodies,). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # Resolve masks. + if env_mask is not None: + env_ids = self._resolve_env_mask(env_mask) + else: + env_ids = self._ALL_ENV_INDICES + if body_mask is not None: + body_ids = self._resolve_body_mask(body_mask) + else: + body_ids = self._ALL_BODY_INDICES + # Set full data to True to ensure the right code path is taken inside the kernel. + self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True) + + """ + Helper functions. + """ + + def reshape_view_to_data_2d(self, data: wp.array, device: str = "cpu") -> wp.array: + """Reshapes and arranges the data from the physics view to (num_instances, num_bodies, data_size). + + The view returns data ordered as: ``(num_bodies * num_instances,)`` + ``[body0_env0, body0_env1, ..., body1_env0, body1_env1, ...]`` + + This function returns the data arranged as:: + + [[env_0_body_0, env_0_body_1, ...], [env_1_body_0, env_1_body_1, ...], ...] + + The shape of the returned data is ``(num_instances, num_bodies)``. + + Args: + data: The data from the physics view. Shape is (num_instances * num_bodies). + + Returns: + The reshaped data. Shape is (num_instances, num_bodies). + """ + element_size = wp.types.type_size_in_bytes(data.dtype) + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_instances, self.num_bodies), + dtype=data.dtype, + strides=(element_size, self.num_instances * element_size), + # PhysX returns some data on CPU, use self.device may cause a device mismatch error + device=data.device, + ) + # Clone to make contiguous + return wp.clone(strided_view, device=device) + + def reshape_view_to_data_3d(self, data: wp.array, data_dim: int, device: str = "cpu") -> wp.array: + """Reshapes and arranges 3D view data to (num_instances, num_bodies, data_dim). + + The view returns data ordered as ``(num_bodies * num_instances, data_dim)``:: + + [[body0_env0_data_0, body0_env0_data_1, ...], [body0_env1_data_0, body0_env1_data_1, ...], ...] + + This function returns the data arranged as ``(num_instances, num_bodies, data_dim)``:: + + [ + [[env_0_body_0_data_0, env_0_body_0_data_1, ...], [env_0_body_1_data_0, env_0_body_1_data_1, ...], ...], + [[env_1_body_0_data_0, env_1_body_0_data_1, ...], [env_1_body_1_data_0, env_1_body_1_data_1, ...], ...], + ..., + ] + + Args: + data: The data from the physics view. Shape is (num_bodies * num_instances, data_dim). + data_dim: The trailing dimension size. + + Returns: + The reshaped data. Shape is (num_instances, num_bodies, data_dim). + """ + element_size = wp.types.type_size_in_bytes(data.dtype) + row_size = element_size * data_dim + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_instances, self.num_bodies, data_dim), + dtype=data.dtype, + strides=(row_size, self.num_instances * row_size, element_size), + # PhysX returns some data on CPU, use self.device may cause a device mismatch error + device=data.device, + ) + return wp.clone(strided_view, device=device) + + def reshape_data_to_view_2d(self, data: wp.array, device: str = "cpu") -> wp.array: + """Reshapes and arranges the data to the be consistent with data from the :attr:`root_view`. + + Our internal methods consume and return data arranged as: + [[env_0_body_0, env_0_body_1, ...], + [env_1_body_0, env_1_body_1, ...], + ...] + The view needs data ordered as: (num_bodies * num_instances,) + [body0_env0, body0_env1, ..., body1_env0, body1_env1, ...] + + Args: + data: The data to be formatted for the view. Shape is (num_instances, num_bodies). + + Returns: + The data formatted for the view. Shape is (num_bodies * num_instances,). + """ + element_size = wp.types.type_size_in_bytes(data.dtype) + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_bodies, self.num_instances), + dtype=data.dtype, + strides=(element_size, self.num_bodies * element_size), + device=data.device, + ) + # Clone to make contiguous (now row-major num_bodies x num_instances), then flatten + return wp.clone(strided_view, device=device).reshape((self.num_bodies * self.num_instances,)) + + def reshape_data_to_view_3d(self, data: wp.array, data_dim: int, device: str = "cpu") -> wp.array: + """Reshapes and arranges 3D data to (num_bodies * num_instances, data_dim). + + Our internal methods consume and return data arranged as ``(num_instances, num_bodies, data_dim)``:: + + [ + [[env_0_body_0_data_0, env_0_body_0_data_1, ...], [env_0_body_1_data_0, env_0_body_1_data_1, ...], ...], + [[env_1_body_0_data_0, env_1_body_0_data_1, ...], [env_1_body_1_data_0, env_1_body_1_data_1, ...], ...], + ..., + ] + + The view needs data ordered as ``(num_bodies * num_instances, data_dim)``:: + + [[body0_env0_data_0, body0_env0_data_1, ...], [body0_env1_data_0, body0_env1_data_1, ...], ...] + + Args: + data: The data to be formatted for the view. Shape is (num_instances, num_bodies, data_dim). + data_dim: The trailing dimension size. + + Returns: + The data formatted for the view. Shape is (num_bodies * num_instances, data_dim). + """ + element_size = wp.types.type_size_in_bytes(data.dtype) + row_size = element_size * data_dim + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_bodies, self.num_instances, data_dim), + dtype=data.dtype, + strides=(row_size, self.num_bodies * row_size, element_size), + device=data.device, + ) + # Clone to make contiguous (now row-major num_bodies x num_instances x data_dim), then flatten + return wp.clone(strided_view, device=device).reshape((self.num_bodies * self.num_instances, data_dim)) + + """ + Internal helper. + """ + + def _resolve_env_ids(self, env_ids) -> wp.array: + """Resolve environment indices to a warp array.""" + if (env_ids is None) or (env_ids == slice(None)): + return self._ALL_ENV_INDICES + if isinstance(env_ids, torch.Tensor): + if env_ids.dtype == torch.int64: + env_ids = env_ids.to(torch.int32) + return wp.from_torch(env_ids, dtype=wp.int32) + if isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self.device) + return env_ids + + def _resolve_body_ids(self, body_ids) -> wp.array: + """Resolve body indices to a warp array.""" + if isinstance(body_ids, list): + return wp.array(body_ids, dtype=wp.int32, device=self.device) + if body_ids is None or (body_ids == slice(None)): + return self._ALL_BODY_INDICES + if isinstance(body_ids, slice): + return wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 + ) + if isinstance(body_ids, torch.Tensor): + if body_ids.dtype == torch.int64: + body_ids = body_ids.to(torch.int32) + return wp.from_torch(body_ids, dtype=wp.int32) + return body_ids + + def _resolve_env_mask(self, env_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve environment mask to indices via torch.nonzero.""" + if env_mask is not None: + if isinstance(env_mask, wp.array): + env_mask = wp.to_torch(env_mask) + env_ids = torch.nonzero(env_mask)[:, 0].to(torch.int32) + else: + env_ids = self._ALL_ENV_INDICES + return env_ids + + def _resolve_body_mask(self, body_mask: wp.array | None) -> torch.Tensor | wp.array: + """Resolve body mask to indices via torch.nonzero.""" + if body_mask is not None: + if isinstance(body_mask, wp.array): + body_mask = wp.to_torch(body_mask) + body_ids = torch.nonzero(body_mask)[:, 0].to(torch.int32) + else: + body_ids = self._ALL_BODY_INDICES + return body_ids + + def _get_cpu_env_ids(self, env_ids: wp.array | torch.Tensor) -> wp.array: + """Get CPU environment indices.""" + if isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids, dtype=wp.int32) + return wp.clone(env_ids, device="cpu") + + def _initialize_impl(self): + # clear body names list to prevent double counting on re-initialization + self._body_names_list.clear() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + root_prim_path_exprs = [] + for name, rigid_body_cfg in self.cfg.rigid_objects.items(): + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(rigid_body_cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{rigid_body_cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{rigid_body_cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{rigid_body_cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + # check that no rigid object has an articulation root API, which decreases simulation performance + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{rigid_body_cfg.prim_path}' in the rigid object" + f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." + " Please disable the articulation root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = rigid_body_cfg.prim_path + root_prim_path[len(template_prim_path) :] + root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) + + self._body_names_list.append(name) + + # -- object view + self._root_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) + + # check if the rigid body was created + if self._root_view._backend is None: + raise RuntimeError("Failed to create rigid body collection. Please check PhysX logs.") + + # log information about the rigid body + logger.info(f"Number of instances: {self.num_instances}") + logger.info(f"Number of distinct bodies: {self.num_bodies}") + logger.info(f"Body names: {self.body_names}") + + # container for data access + self._data = RigidObjectCollectionData(self.root_view, self.num_bodies, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + + def _create_buffers(self): + # constants + self._ALL_ENV_INDICES = wp.array( + np.arange(self.num_instances, dtype=np.int32), device=self.device, dtype=wp.int32 + ) + self._ALL_BODY_INDICES = wp.array( + np.arange(self.num_bodies, dtype=np.int32), device=self.device, dtype=wp.int32 + ) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.body_names = self.body_names + + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + # default state + # -- body state + default_body_poses = [] + default_body_vels = [] + for rigid_object_cfg in self.cfg.rigid_objects.values(): + default_body_pose = tuple(rigid_object_cfg.init_state.pos) + tuple(rigid_object_cfg.init_state.rot) + default_body_vel = tuple(rigid_object_cfg.init_state.lin_vel) + tuple(rigid_object_cfg.init_state.ang_vel) + default_body_pose = np.tile(np.array(default_body_pose, dtype=np.float32), (self.num_instances, 1)) + default_body_vel = np.tile(np.array(default_body_vel, dtype=np.float32), (self.num_instances, 1)) + default_body_poses.append(default_body_pose) + default_body_vels.append(default_body_vel) + # Stack: each has shape (num_instances, data_size) -> (num_instances, num_bodies, data_size) + default_body_poses = np.stack(default_body_poses, axis=1) + default_body_vels = np.stack(default_body_vels, axis=1) + self.data.default_body_pose = wp.array(default_body_poses, dtype=wp.transformf, device=self.device) + self.data.default_body_vel = wp.array(default_body_vels, dtype=wp.spatial_vectorf, device=self.device) + + def _env_body_ids_to_view_ids( + self, env_ids: torch.Tensor | wp.array, body_ids: torch.Tensor | wp.array, device: str = "cuda:0" + ) -> wp.array: + """Converts environment and body indices to indices consistent with data from :attr:`root_view`. + + Args: + env_ids: Environment indices. + body_ids: Body indices. + + Returns: + The view indices. + """ + # the order is body_0/env_0, body_0/env_1, body_0/env_..., body_1/env_0, body_1/env_1, ... + # return a flat tensor of indices + # ensure env_ids and body_ids are on the target device + if isinstance(env_ids, torch.Tensor): + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + if isinstance(body_ids, torch.Tensor): + body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) + if env_ids.device != device: + env_ids = wp.clone(env_ids, device=device) + if body_ids.device != device: + body_ids = wp.clone(body_ids, device=device) + num_query_envs = env_ids.shape[0] + view_ids = wp.zeros(num_query_envs * body_ids.shape[0], dtype=wp.int32, device=device) + wp.launch( + resolve_view_ids, + dim=(num_query_envs, body_ids.shape[0]), + inputs=[env_ids, body_ids, num_query_envs, self.num_instances], + outputs=[view_ids], + device=device, + ) + return view_ids + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event) -> None: + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_view = None + + def _on_prim_deletion(self, prim_path: str) -> None: + """Invalidates and deletes the callbacks when the prim is deleted. + + Args: + prim_path: The path to the prim that is being deleted. + + .. note:: + This function is called when the prim is deleted. + """ + if prim_path == "/": + self._clear_callbacks() + return + for prim_path_expr in [obj.prim_path for obj in self.cfg.rigid_objects.values()]: + result = re.match( + pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path + ) + if result: + self._clear_callbacks() + return + + """ + Deprecated properties and methods. + """ + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + warnings.warn( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self.root_view + + def write_body_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_link_pose_to_sim_index` and + :meth:`write_body_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_link_pose_to_sim_index' and 'write_body_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_link_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) + + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_com_pose_to_sim_index` and + :meth:`write_body_com_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_com_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_com_pose_to_sim_index' and 'write_body_com_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_com_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) + + def write_body_link_state_to_sim( + self, + body_states: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated, same as :meth:`write_body_link_pose_to_sim_index` and + :meth:`write_body_link_velocity_to_sim_index`.""" + warnings.warn( + "The function 'write_body_link_state_to_sim' will be deprecated in a future release. Please" + " use 'write_body_link_pose_to_sim_index' and 'write_body_link_velocity_to_sim_index' instead.", + DeprecationWarning, + stacklevel=2, + ) + self.write_body_link_pose_to_sim_index(body_poses=body_states[:, :, :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_link_velocity_to_sim_index( + body_velocities=body_states[:, :, 7:], env_ids=env_ids, body_ids=body_ids + ) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py new file mode 100644 index 000000000000..cee0d4de01dd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -0,0 +1,981 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import logging +import warnings +import weakref +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData +from isaaclab.utils.buffers import TimestampedBufferWarp as TimestampedBuffer +from isaaclab.utils.math import normalize +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.assets import kernels as shared_kernels +from isaaclab_physx.physics import PhysxManager as SimulationManager + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectCollectionData(BaseRigidObjectCollectionData): + """Data container for a rigid object collection. + + This class contains the data for a rigid object collection in the simulation. The data includes the state of + all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. + The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + + .. note:: + **Pull-to-refresh model.** Properties pull fresh data from the PhysX tensor API on first + access per timestamp and cache the result. This differs from Newton, where buffers are + refreshed automatically by the simulation. + + .. note:: + **ProxyArray pointer stability.** Each :class:`ProxyArray` wrapper is created once and + reused because the PhysX tensor API returns views into stable, pre-allocated GPU buffers + whose device pointer does not change across simulation steps. + """ + + __backend_name__: str = "physx" + """The name of the backend for the rigid object collection data.""" + + def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body collection view. + num_bodies: The number of bodies in the collection. + device: The device used for processing. + """ + super().__init__(root_view, num_bodies, device) + self.num_bodies = num_bodies + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: physx.RigidBodyView = weakref.proxy(root_view) + self.num_instances = self._root_view.count // self.num_bodies + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # Obtain global physics sim view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = ProxyArray( + wp.from_torch(gravity_dir.repeat(self.num_instances, self.num_bodies, 1), dtype=wp.vec3f) + ) + self.FORWARD_VEC_B = ProxyArray( + wp.from_torch( + torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self.num_instances, self.num_bodies, 1), + dtype=wp.vec3f, + ) + ) + + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the rigid object collection data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the rigid object collection data is fully instantiated and ready to use. + + .. note:: + Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._is_primed = value + + def update(self, dt: float) -> None: + """Updates the data for the rigid object collection. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + """ + Defaults. + """ + + @property + def default_body_pose(self) -> ProxyArray: + """Default body pose ``[pos, quat]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + """ + if self._default_body_pose_ta is None: + self._default_body_pose_ta = ProxyArray(self._default_body_pose) + return self._default_body_pose_ta + + @default_body_pose.setter + def default_body_pose(self, value: wp.array) -> None: + """Set the default body pose. + + Args: + value: The default body pose. Shape is (num_instances, num_bodies, 7). + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._default_body_pose.assign(value) + + @property + def default_body_vel(self) -> ProxyArray: + """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. + + The linear and angular velocities are of the rigid body's center of mass frame. + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + """ + if self._default_body_vel_ta is None: + self._default_body_vel_ta = ProxyArray(self._default_body_vel) + return self._default_body_vel_ta + + @default_body_vel.setter + def default_body_vel(self, value: wp.array) -> None: + """Set the default body velocity. + + Args: + value: The default body velocity. Shape is (num_instances, num_bodies, 6). + + Raises: + ValueError: If the rigid object collection data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object collection data is already primed.") + self._default_body_vel.assign(value) + + """ + Body state properties. + """ + + @property + def body_link_pose_w(self) -> ProxyArray: + """Body link pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation and reshape + pose = self._reshape_view_to_data_2d(self._root_view.get_transforms().view(wp.transformf)) + # set the buffer data and timestamp + self._body_link_pose_w.data = pose + self._body_link_pose_w.timestamp = self._sim_timestamp + # Rebind ProxyArray since reshape creates a new wp.array each time + if self._body_link_pose_w_ta is not None: + self._body_link_pose_w_ta = ProxyArray(pose) + self._body_link_pos_w_ta = None + self._body_link_quat_w_ta = None + + if self._body_link_pose_w_ta is None: + self._body_link_pose_w_ta = ProxyArray(self._body_link_pose_w.data) + return self._body_link_pose_w_ta + + @property + def body_link_vel_w(self) -> ProxyArray: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_link_vel_from_body_com_vel, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_com_vel_w, + self.body_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._body_link_vel_w.data, + ], + device=self.device, + ) + self._body_link_vel_w.timestamp = self._sim_timestamp + + if self._body_link_vel_w_ta is None: + self._body_link_vel_w_ta = ProxyArray(self._body_link_vel_w.data) + return self._body_link_vel_w_ta + + @property + def body_com_pose_w(self) -> ProxyArray: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.get_body_com_pose_from_body_link_pose, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_com_pose_b, + ], + outputs=[ + self._body_com_pose_w.data, + ], + device=self.device, + ) + self._body_com_pose_w.timestamp = self._sim_timestamp + + if self._body_com_pose_w_ta is None: + self._body_com_pose_w_ta = ProxyArray(self._body_com_pose_w.data) + return self._body_com_pose_w_ta + + @property + def body_com_vel_w(self) -> ProxyArray: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + if self._body_com_vel_w.timestamp < self._sim_timestamp: + vel = self._reshape_view_to_data_2d(self._root_view.get_velocities().view(wp.spatial_vectorf)) + self._body_com_vel_w.data = vel + self._body_com_vel_w.timestamp = self._sim_timestamp + # Rebind ProxyArray since reshape creates a new wp.array each time + if self._body_com_vel_w_ta is not None: + self._body_com_vel_w_ta = ProxyArray(vel) + self._body_com_lin_vel_w_ta = None + self._body_com_ang_vel_w_ta = None + + if self._body_com_vel_w_ta is None: + self._body_com_vel_w_ta = ProxyArray(self._body_com_vel_w.data) + return self._body_com_vel_w_ta + + @property + def body_com_acc_w(self) -> ProxyArray: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.spatial_vectorf. In torch this resolves to + (num_instances, num_bodies, 6). + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + acc = self._reshape_view_to_data_2d(self._root_view.get_accelerations().view(wp.spatial_vectorf)) + self._body_com_acc_w.data = acc + self._body_com_acc_w.timestamp = self._sim_timestamp + # Rebind ProxyArray since reshape creates a new wp.array each time + if self._body_com_acc_w_ta is not None: + self._body_com_acc_w_ta = ProxyArray(acc) + self._body_com_lin_acc_w_ta = None + self._body_com_ang_acc_w_ta = None + if self._body_com_acc_w_ta is None: + self._body_com_acc_w_ta = ProxyArray(self._body_com_acc_w.data) + return self._body_com_acc_w_ta + + @property + def body_com_pose_b(self) -> ProxyArray: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + + Shape is (num_instances, num_bodies), dtype = wp.transformf. In torch this resolves to + (num_instances, num_bodies, 7). + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # obtain the coms + poses = self._reshape_view_to_data_2d(self._root_view.get_coms().view(wp.transformf)) + # read data from simulation + self._body_com_pose_b.data.assign(poses) + self._body_com_pose_b.timestamp = self._sim_timestamp + + if self._body_com_pose_b_ta is None: + self._body_com_pose_b_ta = ProxyArray(self._body_com_pose_b.data) + return self._body_com_pose_b_ta + + @property + def body_mass(self) -> ProxyArray: + """Mass of all bodies in the simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.float32. + In torch this resolves to (num_instances, num_bodies). + """ + if self._body_mass_ta is None: + self._body_mass_ta = ProxyArray(self._body_mass) + return self._body_mass_ta + + @property + def body_inertia(self) -> ProxyArray: + """Inertia of all bodies in the simulation world frame. + + Shape is (num_instances, num_bodies, 9), dtype = wp.float32. + In torch this resolves to (num_instances, num_bodies, 9). + """ + if self._body_inertia_ta is None: + self._body_inertia_ta = ProxyArray(self._body_inertia) + return self._body_inertia_ta + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> ProxyArray: + """Projection of the gravity direction on base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + """ + if self._projected_gravity_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.GRAVITY_VEC_W, self.body_link_quat_w], + outputs=[self._projected_gravity_b.data], + device=self.device, + ) + self._projected_gravity_b.timestamp = self._sim_timestamp + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b.data) + return self._projected_gravity_b_ta + + @property + def heading_w(self) -> ProxyArray: + """Yaw heading of the base frame (in radians). + + Shape is (num_instances, num_bodies), dtype = wp.float32. In torch this resolves to (num_instances, num_bodies). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + if self._heading_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.body_heading_w, + dim=(self.num_instances, self.num_bodies), + inputs=[self.FORWARD_VEC_B, self.body_link_quat_w], + outputs=[self._heading_w.data], + device=self.device, + ) + self._heading_w.timestamp = self._sim_timestamp + if self._heading_w_ta is None: + self._heading_w_ta = ProxyArray(self._heading_w.data) + return self._heading_w_ta + + @property + def body_link_lin_vel_b(self) -> ProxyArray: + """Root link linear velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._body_link_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_link_lin_vel_w, self.body_link_quat_w], + outputs=[self._body_link_lin_vel_b.data], + device=self.device, + ) + self._body_link_lin_vel_b.timestamp = self._sim_timestamp + if self._body_link_lin_vel_b_ta is None: + self._body_link_lin_vel_b_ta = ProxyArray(self._body_link_lin_vel_b.data) + return self._body_link_lin_vel_b_ta + + @property + def body_link_ang_vel_b(self) -> ProxyArray: + """Root link angular velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + if self._body_link_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_link_ang_vel_w, self.body_link_quat_w], + outputs=[self._body_link_ang_vel_b.data], + device=self.device, + ) + self._body_link_ang_vel_b.timestamp = self._sim_timestamp + if self._body_link_ang_vel_b_ta is None: + self._body_link_ang_vel_b_ta = ProxyArray(self._body_link_ang_vel_b.data) + return self._body_link_ang_vel_b_ta + + @property + def body_com_lin_vel_b(self) -> ProxyArray: + """Root center of mass linear velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._body_com_lin_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_com_lin_vel_w, self.body_link_quat_w], + outputs=[self._body_com_lin_vel_b.data], + device=self.device, + ) + self._body_com_lin_vel_b.timestamp = self._sim_timestamp + if self._body_com_lin_vel_b_ta is None: + self._body_com_lin_vel_b_ta = ProxyArray(self._body_com_lin_vel_b.data) + return self._body_com_lin_vel_b_ta + + @property + def body_com_ang_vel_b(self) -> ProxyArray: + """Root center of mass angular velocity in base frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + if self._body_com_ang_vel_b.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.quat_apply_inverse_2D_kernel, + dim=(self.num_instances, self.num_bodies), + inputs=[self.body_com_ang_vel_w, self.body_link_quat_w], + outputs=[self._body_com_ang_vel_b.data], + device=self.device, + ) + self._body_com_ang_vel_b.timestamp = self._sim_timestamp + if self._body_com_ang_vel_b_ta is None: + self._body_com_ang_vel_b_ta = ProxyArray(self._body_com_ang_vel_b.data) + return self._body_com_ang_vel_b_ta + + """ + Sliced properties. + """ + + @property + def body_link_pos_w(self) -> ProxyArray: + """Positions of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_pos_w_ta is None: + self._body_link_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_link_pos_w_ta + + @property + def body_link_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_pose_w + if self._body_link_quat_w_ta is None: + self._body_link_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_link_quat_w_ta + + @property + def body_link_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_lin_vel_w_ta is None: + self._body_link_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_link_lin_vel_w_ta + + @property + def body_link_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the rigid bodies' actor frame relative to the world. + """ + parent = self.body_link_vel_w + if self._body_link_ang_vel_w_ta is None: + self._body_link_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_link_ang_vel_w_ta + + @property + def body_com_pos_w(self) -> ProxyArray: + """Positions of all bodies' center of mass in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the position of the rigid bodies' center of mass frame. + """ + parent = self.body_com_pose_w + if self._body_com_pos_w_ta is None: + self._body_com_pos_w_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_w_ta + + @property + def body_com_quat_w(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the principal axes of inertia of the rigid bodies. + """ + parent = self.body_com_pose_w + if self._body_com_quat_w_ta is None: + self._body_com_quat_w_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_w_ta + + @property + def body_com_lin_vel_w(self) -> ProxyArray: + """Linear velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_lin_vel_w_ta is None: + self._body_com_lin_vel_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_vel_w_ta + + @property + def body_com_ang_vel_w(self) -> ProxyArray: + """Angular velocity of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + parent = self.body_com_vel_w + if self._body_com_ang_vel_w_ta is None: + self._body_com_ang_vel_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_vel_w_ta + + @property + def body_com_lin_acc_w(self) -> ProxyArray: + """Linear acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_lin_acc_w_ta is None: + self._body_com_lin_acc_w_ta = ProxyArray(self._get_lin_vel_from_spatial_vector(parent.warp)) + return self._body_com_lin_acc_w_ta + + @property + def body_com_ang_acc_w(self) -> ProxyArray: + """Angular acceleration of all bodies in simulation world frame. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + parent = self.body_com_acc_w + if self._body_com_ang_acc_w_ta is None: + self._body_com_ang_acc_w_ta = ProxyArray(self._get_ang_vel_from_spatial_vector(parent.warp)) + return self._body_com_ang_acc_w_ta + + @property + def body_com_pos_b(self) -> ProxyArray: + """Center of mass position of all of the bodies in their respective link frames. + + Shape is (num_instances, num_bodies), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_bodies, 3). + This quantity is the center of mass location relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_pos_b_ta is None: + self._body_com_pos_b_ta = ProxyArray(self._get_pos_from_transform(parent.warp)) + return self._body_com_pos_b_ta + + @property + def body_com_quat_b(self) -> ProxyArray: + """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their + respective link frames. + + Shape is (num_instances, num_bodies), dtype = wp.quatf. In torch this resolves to + (num_instances, num_bodies, 4). + This quantity is the orientation of the principal axes of inertia relative to its body's link frame. + """ + parent = self.body_com_pose_b + if self._body_com_quat_b_ta is None: + self._body_com_quat_b_ta = ProxyArray(self._get_quat_from_transform(parent.warp)) + return self._body_com_quat_b_ta + + def _create_buffers(self) -> None: + super()._create_buffers() + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._body_link_pose_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.transformf) + self._body_link_vel_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, wp.spatial_vectorf + ) + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.transformf) + # -- com frame w.r.t. world frame + self._body_com_pose_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.transformf) + self._body_com_vel_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.spatial_vectorf) + self._body_com_acc_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.spatial_vectorf) + # -- combined state (these are cached as they concatenate) + self._body_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + self._body_link_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + self._body_com_state_w = TimestampedBuffer( + (self.num_instances, self.num_bodies), self.device, shared_kernels.vec13f + ) + + # -- Default state + self._default_body_pose = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.transformf, device=self.device + ) + self._default_body_vel = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.spatial_vectorf, device=self.device + ) + self._default_body_state = None + + # -- Derived properties + self._projected_gravity_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._heading_w = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.float32) + self._body_link_lin_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_link_ang_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_com_lin_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + self._body_com_ang_vel_b = TimestampedBuffer((self.num_instances, self.num_bodies), self.device, wp.vec3f) + + # -- Body properties (stored in instance order: num_instances, num_bodies[, data_dim]) + # Masses: view returns (B*I, 1) in view order. _reshape_view_to_data gives (I, B) in instance order. + self._body_mass = self._reshape_view_to_data_2d(self._root_view.get_masses()).reshape( + (self.num_instances, self.num_bodies) + ) + # Inertias: view returns (B*I, 9) in view order. Need (I, B, 9) in instance order. + # _reshape_view_to_data only handles single-element dtypes, so we use _reshape_view_to_data_3d. + self._body_inertia = self._reshape_view_to_data_3d(self._root_view.get_inertias(), 9) + + # Initialize ProxyArray wrappers + self._pin_proxy_arrays() + + def _pin_proxy_arrays(self) -> None: + """Create pinned ProxyArray wrappers for all data buffers. + + This is called once from :meth:`_create_buffers` during initialization. + PhysX tensor API buffers have stable GPU pointers across simulation steps, + so no rebinding is needed (unlike Newton). + """ + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + # Defaults + self._default_body_pose_ta: ProxyArray | None = None + self._default_body_vel_ta: ProxyArray | None = None + # Body state (timestamped) + self._body_link_pose_w_ta: ProxyArray | None = None + self._body_link_vel_w_ta: ProxyArray | None = None + self._body_com_pose_w_ta: ProxyArray | None = None + self._body_com_vel_w_ta: ProxyArray | None = None + self._body_com_acc_w_ta: ProxyArray | None = None + self._body_com_pose_b_ta: ProxyArray | None = None + # Body properties + self._body_mass_ta: ProxyArray | None = None + self._body_inertia_ta: ProxyArray | None = None + # Derived properties (timestamped) + self._projected_gravity_b_ta: ProxyArray | None = None + self._heading_w_ta: ProxyArray | None = None + self._body_link_lin_vel_b_ta: ProxyArray | None = None + self._body_link_ang_vel_b_ta: ProxyArray | None = None + self._body_com_lin_vel_b_ta: ProxyArray | None = None + self._body_com_ang_vel_b_ta: ProxyArray | None = None + # Sliced properties (body link) + self._body_link_pos_w_ta: ProxyArray | None = None + self._body_link_quat_w_ta: ProxyArray | None = None + self._body_link_lin_vel_w_ta: ProxyArray | None = None + self._body_link_ang_vel_w_ta: ProxyArray | None = None + # Sliced properties (body com) + self._body_com_pos_w_ta: ProxyArray | None = None + self._body_com_quat_w_ta: ProxyArray | None = None + self._body_com_lin_vel_w_ta: ProxyArray | None = None + self._body_com_ang_vel_w_ta: ProxyArray | None = None + self._body_com_lin_acc_w_ta: ProxyArray | None = None + self._body_com_ang_acc_w_ta: ProxyArray | None = None + # Sliced properties (body com in body frame) + self._body_com_pos_b_ta: ProxyArray | None = None + self._body_com_quat_b_ta: ProxyArray | None = None + # Deprecated state-concat properties + self._default_body_state_ta: ProxyArray | None = None + self._body_state_w_ta: ProxyArray | None = None + self._body_link_state_w_ta: ProxyArray | None = None + self._body_com_state_w_ta: ProxyArray | None = None + + """ + Helpers. + """ + + def _reshape_view_to_data_2d(self, data: wp.array) -> wp.array: + """Reshapes and arranges the data from the physics view to (num_instances, num_bodies, data_size). + + Args: + data: The data from the physics view. Shape is (num_instances * num_bodies, data_size). + + Returns: + The reshaped data. Shape is (num_instances, num_bodies, data_size). + """ + # The view returns data ordered as (body0_env0, body0_env1, ..., body1_env0, body1_env1, ...) + # i.e. shape (num_bodies, num_instances) when reshaped. + # We need (num_instances, num_bodies) so we create a strided view with transposed strides. + # Use data.device for the strided view (PhysX returns masses/coms/inertias on CPU), + # then clone to self.device (handles both contiguity and device transfer). + element_size = wp.types.type_size_in_bytes(data.dtype) + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_instances, self.num_bodies), + dtype=data.dtype, + strides=(element_size, self.num_instances * element_size), + device=data.device, + ) + return wp.clone(strided_view, self.device) + + def _reshape_view_to_data_3d(self, data: wp.array, data_dim: int) -> wp.array: + """Reshapes and arranges 2D view data to (num_instances, num_bodies, data_dim). + + Args: + data: The data from the physics view. Shape is (num_instances * num_bodies, data_dim). + data_dim: The trailing dimension size. + + Returns: + The reshaped data. Shape is (num_instances, num_bodies, data_dim). + """ + # The view returns data ordered as (body0_env0, body0_env1, ..., body1_env0, body1_env1, ...) + # We need (num_instances, num_bodies, data_dim), so stride through the flat float32 data. + # Use data.device for the strided view (PhysX returns masses/coms/inertias on CPU), + # then clone to self.device (handles both contiguity and device transfer). + element_size = wp.types.type_size_in_bytes(wp.float32) + row_size = element_size * data_dim + strided_view = wp.array( + ptr=data.ptr, + shape=(self.num_instances, self.num_bodies, data_dim), + dtype=wp.float32, + strides=(row_size, self.num_instances * row_size, element_size), + device=data.device, + ) + return wp.clone(strided_view, self.device) + + def _get_pos_from_transform(self, transform: wp.array) -> ProxyArray: + """Generates a position array from a transform array.""" + return wp.array( + ptr=transform.ptr, + shape=transform.shape, + dtype=wp.vec3f, + strides=transform.strides, + device=self.device, + ) + + def _get_quat_from_transform(self, transform: wp.array) -> ProxyArray: + """Generates a quaternion array from a transform array.""" + return wp.array( + ptr=transform.ptr + 3 * 4, + shape=transform.shape, + dtype=wp.quatf, + strides=transform.strides, + device=self.device, + ) + + def _get_lin_vel_from_spatial_vector(self, spatial_vector: wp.array) -> ProxyArray: + """Generates a linear velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + def _get_ang_vel_from_spatial_vector(self, spatial_vector: wp.array) -> ProxyArray: + """Generates an angular velocity array from a spatial vector array.""" + return wp.array( + ptr=spatial_vector.ptr + 3 * 4, + shape=spatial_vector.shape, + dtype=wp.vec3f, + strides=spatial_vector.strides, + device=self.device, + ) + + """ + Deprecated properties. + """ + + @property + def default_body_state(self) -> ProxyArray: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, num_bodies, 13). + """ + warnings.warn( + "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_body_pose and default_body_vel properties instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._default_body_state is None: + self._default_body_state = wp.zeros( + (self.num_instances, self.num_bodies), dtype=shared_kernels.vec13f, device=self.device + ) + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self._default_body_pose, + self._default_body_vel, + ], + outputs=[ + self._default_body_state, + ], + device=self.device, + ) + if self._default_body_state_ta is None: + self._default_body_state_ta = ProxyArray(self._default_body_state) + return self._default_body_state_ta + + @property + def body_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_com_vel_w, + ], + outputs=[ + self._body_state_w.data, + ], + device=self.device, + ) + self._body_state_w.timestamp = self._sim_timestamp + + if self._body_state_w_ta is None: + self._body_state_w_ta = ProxyArray(self._body_state_w.data) + return self._body_state_w_ta + + @property + def body_link_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" + warnings.warn( + "The `body_link_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` and " + "`body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_link_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_link_pose_w, + self.body_link_vel_w, + ], + outputs=[ + self._body_link_state_w.data, + ], + device=self.device, + ) + self._body_link_state_w.timestamp = self._sim_timestamp + + if self._body_link_state_w_ta is None: + self._body_link_state_w_ta = ProxyArray(self._body_link_state_w.data) + return self._body_link_state_w_ta + + @property + def body_com_state_w(self) -> ProxyArray: + """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" + warnings.warn( + "The `body_com_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_pose_w` and " + "`body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._body_com_state_w.timestamp < self._sim_timestamp: + wp.launch( + shared_kernels.concat_body_pose_and_vel_to_state, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self.body_com_pose_w, + self.body_com_vel_w, + ], + outputs=[ + self._body_com_state_w.data, + ], + device=self.device, + ) + self._body_com_state_w.timestamp = self._sim_timestamp + + if self._body_com_state_w_ta is None: + self._body_com_state_w_ta = ProxyArray(self._body_com_state_w.data) + return self._body_com_state_w_ta diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py new file mode 100644 index 000000000000..38e37d548cc7 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for surface_gripper assets.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.pyi similarity index 82% rename from source/isaaclab/isaaclab/assets/surface_gripper/__init__.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.pyi index 3786976617c3..dd6d3a55a08f 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.pyi @@ -3,7 +3,10 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Sub-module for surface_gripper assets.""" +__all__ = [ + "SurfaceGripper", + "SurfaceGripperCfg", +] from .surface_gripper import SurfaceGripper from .surface_gripper_cfg import SurfaceGripperCfg diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py similarity index 54% rename from source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index 2742e9baeb4c..6662582dac7c 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -9,13 +9,15 @@ import warnings from typing import TYPE_CHECKING +import numpy as np import torch +import warp as wp -from isaacsim.core.utils.extensions import enable_extension +from isaacsim.core.experimental.utils.app import enable_extension import isaaclab.sim as sim_utils from isaaclab.assets import AssetBase -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.version import get_isaac_sim_version, has_kit if TYPE_CHECKING: from isaacsim.robot.surface_gripper import GripperView @@ -25,6 +27,32 @@ # import logger logger = logging.getLogger(__name__) +# -- Warp kernels -- + + +@wp.kernel +def write_scalar_at_indices( + data: wp.array(dtype=wp.float32), + env_ids: wp.array(dtype=wp.int32), + from_mask: bool, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + if from_mask: + out[env_ids[i]] = data[env_ids[i]] + else: + out[env_ids[i]] = data[i] + + +@wp.kernel +def fill_scalar_at_indices( + value: wp.float32, + env_ids: wp.array(dtype=wp.int32), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + out[env_ids[i]] = value + class SurfaceGripper(AssetBase): """A surface gripper actuator class. @@ -40,12 +68,12 @@ class SurfaceGripper(AssetBase): properties of the grippers at runtime. Finally, the :func:`set_grippers_command` function should be used to set the desired command for the grippers. - Note: + .. note:: The :func:`set_grippers_command` function does not write to the simulation. The simulation automatically calls :func:`write_data_to_sim` function to write the command to the simulation. Similarly, the update function is called automatically for every simulation step, and does not need to be called by the user. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ @@ -60,7 +88,7 @@ def __init__(self, cfg: SurfaceGripperCfg): self._cfg = cfg.copy() # checks for Isaac Sim v5.0 to ensure that the surface gripper is supported - if get_isaac_sim_version().major < 5: + if has_kit() and get_isaac_sim_version().major < 5: raise NotImplementedError( "SurfaceGrippers are only supported by IsaacSim 5.0 and newer. Current version is" f" '{get_isaac_sim_version()}'. Please update to IsaacSim 5.0 or newer to use this feature." @@ -90,7 +118,7 @@ def num_instances(self) -> int: return self._num_envs @property - def state(self) -> torch.Tensor: + def state(self) -> wp.array: """Returns the gripper state buffer. The gripper state is a list of integers: @@ -101,7 +129,7 @@ def state(self) -> torch.Tensor: return self._gripper_state @property - def command(self) -> torch.Tensor: + def command(self) -> wp.array: """Returns the gripper command buffer. The gripper command is a list of floats: @@ -117,9 +145,150 @@ def gripper_view(self) -> GripperView: return self._gripper_view """ - Operations + Operations - _index / _mask API """ + def set_grippers_command_index( + self, states: torch.Tensor | wp.array, env_ids: wp.array | None = None, full_data: bool = False + ) -> None: + """Set the internal gripper command buffer. This function does not write to the simulation. + + Possible values for the gripper command are: + - [-1, -0.3] --> Open + - ]-0.3, 0.3[ --> Do nothing + - [0.3, 1] --> Close + + Args: + states: A tensor/array of floats representing the gripper command. + env_ids: Environment indices. Defaults to None (all environments). + full_data: Whether ``states`` is indexed by ``env_ids`` (True) or is compact (False). + """ + if env_ids is None: + env_ids = self._ALL_INDICES + if full_data: + self.assert_shape_and_dtype(states, (self.num_instances,), wp.float32, "states") + else: + self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32, "states") + + # Convert torch input to warp + if isinstance(states, torch.Tensor): + states = wp.from_torch(states.to(torch.float32).contiguous(), dtype=wp.float32) + + wp.launch( + write_scalar_at_indices, + dim=env_ids.shape[0], + inputs=[states, env_ids, full_data], + outputs=[self._gripper_command], + device=self._device, + ) + + def set_grippers_command_mask(self, states: torch.Tensor | wp.array, env_mask: torch.Tensor | None = None) -> None: + """Set the internal gripper command buffer using a boolean mask. + + Args: + states: A tensor/array of floats representing the gripper command (full size). + env_mask: Boolean mask of shape (num_envs,). Defaults to None (all environments). + """ + if env_mask is not None: + env_ids = wp.from_torch(torch.argwhere(env_mask).view(-1).to(torch.int32), dtype=wp.int32) + else: + env_ids = self._ALL_INDICES + self.set_grippers_command_index(states, env_ids, full_data=True) + + def set_grippers_command(self, states: torch.Tensor, indices: torch.Tensor | None = None) -> None: + """Set the internal gripper command buffer. This function does not write to the simulation. + + .. deprecated:: v2.0.0 + Use :meth:`set_grippers_command_index` instead. + + Args: + states: A tensor of integers representing the gripper command. + indices: A tensor of integers representing the indices. Defaults to None. + """ + env_ids = self._resolve_env_ids(indices) + self.set_grippers_command_index(states, env_ids) + + def update_gripper_properties_index( + self, + max_grip_distance: wp.array | None = None, + coaxial_force_limit: wp.array | None = None, + shear_force_limit: wp.array | None = None, + retry_interval: wp.array | None = None, + env_ids: wp.array | None = None, + full_data: bool = False, + ) -> None: + """Update the gripper properties. + + Args: + max_grip_distance: The maximum grip distance. Shape (num_envs,) or (len(env_ids),). + coaxial_force_limit: The coaxial force limit. Shape (num_envs,) or (len(env_ids),). + shear_force_limit: The shear force limit. Shape (num_envs,) or (len(env_ids),). + retry_interval: The retry interval. Shape (num_envs,) or (len(env_ids),). + env_ids: Environment indices. Defaults to None (all environments). + full_data: Whether input arrays are indexed by ``env_ids`` (True) or compact (False). + """ + if env_ids is None: + env_ids = self._ALL_INDICES + + for prop_name, prop_data, prop_buf in [ + ("max_grip_distance", max_grip_distance, self._max_grip_distance), + ("coaxial_force_limit", coaxial_force_limit, self._coaxial_force_limit), + ("shear_force_limit", shear_force_limit, self._shear_force_limit), + ("retry_interval", retry_interval, self._retry_interval), + ]: + if prop_data is not None: + if full_data: + self.assert_shape_and_dtype(prop_data, (self.num_instances,), wp.float32, prop_name) + else: + self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32, prop_name) + wp.launch( + write_scalar_at_indices, + dim=env_ids.shape[0], + inputs=[prop_data, env_ids, full_data], + outputs=[prop_buf], + device=self._device, + ) + + # Convert to list for the GripperView API + indices_list = wp.to_torch(env_ids).tolist() + self._gripper_view.set_surface_gripper_properties( + max_grip_distance=wp.to_torch(self._max_grip_distance).tolist(), + coaxial_force_limit=wp.to_torch(self._coaxial_force_limit).tolist(), + shear_force_limit=wp.to_torch(self._shear_force_limit).tolist(), + retry_interval=wp.to_torch(self._retry_interval).tolist(), + indices=indices_list, + ) + + def update_gripper_properties_mask( + self, + max_grip_distance: wp.array | None = None, + coaxial_force_limit: wp.array | None = None, + shear_force_limit: wp.array | None = None, + retry_interval: wp.array | None = None, + env_mask: torch.Tensor | None = None, + ) -> None: + """Update the gripper properties using a boolean mask. + + Args: + max_grip_distance: The maximum grip distance. Shape (num_envs,). + coaxial_force_limit: The coaxial force limit. Shape (num_envs,). + shear_force_limit: The shear force limit. Shape (num_envs,). + retry_interval: The retry interval. Shape (num_envs,). + env_mask: Boolean mask of shape (num_envs,). Defaults to None (all environments). + """ + if env_mask is not None: + env_ids = wp.from_torch(torch.argwhere(env_mask).view(-1).to(torch.int32), dtype=wp.int32) + else: + env_ids = self._ALL_INDICES + self.update_gripper_properties_index( + max_grip_distance=max_grip_distance, + coaxial_force_limit=coaxial_force_limit, + shear_force_limit=shear_force_limit, + retry_interval=retry_interval, + env_ids=env_ids, + full_data=True, + ) + def update_gripper_properties( self, max_grip_distance: torch.Tensor | None = None, @@ -130,34 +299,32 @@ def update_gripper_properties( ) -> None: """Update the gripper properties. + .. deprecated:: v2.0.0 + Use :meth:`update_gripper_properties_index` instead. + Args: - max_grip_distance: The maximum grip distance of the gripper. Should be a tensor of shape (num_envs,). - coaxial_force_limit: The coaxial force limit of the gripper. Should be a tensor of shape (num_envs,). - shear_force_limit: The shear force limit of the gripper. Should be a tensor of shape (num_envs,). - retry_interval: The retry interval of the gripper. Should be a tensor of shape (num_envs,). - indices: The indices of the grippers to update the properties for. Can be a tensor of any shape. + max_grip_distance: The maximum grip distance. Shape (num_envs,). + coaxial_force_limit: The coaxial force limit. Shape (num_envs,). + shear_force_limit: The shear force limit. Shape (num_envs,). + retry_interval: The retry interval. Shape (num_envs,). + indices: The indices of the grippers to update. Defaults to None. """ - - if indices is None: - indices = self._ALL_INDICES - - indices_as_list = indices.tolist() - - if max_grip_distance is not None: - self._max_grip_distance[indices] = max_grip_distance - if coaxial_force_limit is not None: - self._coaxial_force_limit[indices] = coaxial_force_limit - if shear_force_limit is not None: - self._shear_force_limit[indices] = shear_force_limit - if retry_interval is not None: - self._retry_interval[indices] = retry_interval - - self._gripper_view.set_surface_gripper_properties( - max_grip_distance=self._max_grip_distance.tolist(), - coaxial_force_limit=self._coaxial_force_limit.tolist(), - shear_force_limit=self._shear_force_limit.tolist(), - retry_interval=self._retry_interval.tolist(), - indices=indices_as_list, + env_ids = self._resolve_env_ids(indices) + + # Convert torch inputs to warp + def _to_wp(t): + if t is None: + return None + if isinstance(t, torch.Tensor): + return wp.from_torch(t.to(torch.float32).contiguous(), dtype=wp.float32) + return t + + self.update_gripper_properties_index( + max_grip_distance=_to_wp(max_grip_distance), + coaxial_force_limit=_to_wp(coaxial_force_limit), + shear_force_limit=_to_wp(shear_force_limit), + retry_interval=_to_wp(retry_interval), + env_ids=env_ids, ) def update(self, dt: float) -> None: @@ -165,21 +332,23 @@ def update(self, dt: float) -> None: This function is called every simulation step. The data fetched from the gripper view is a list of strings containing 3 possible states: - - "Open" --> 0 - - "Closing" --> 1 - - "Closed" --> 2 + + - "Open" --> 0 + - "Closing" --> 1 + - "Closed" --> 2 To make this more neural network friendly, we convert the list of strings to a list of floats: - - "Open" --> -1.0 - - "Closing" --> 0.0 - - "Closed" --> 1.0 - Note: + - "Open" --> -1.0 + - "Closing" --> 0.0 + - "Closed" --> 1.0 + + .. note:: We need to do this conversion for every single step of the simulation because the gripper can lose contact with the object if some conditions are met: such as if a large force is applied to the gripped object. """ state_list: list[int] = self._gripper_view.get_surface_gripper_status() - self._gripper_state = torch.tensor(state_list, dtype=torch.float32, device=self._device) - 1.0 + self._gripper_state = wp.array([float(s) - 1.0 for s in state_list], dtype=wp.float32, device=self._device) def write_data_to_sim(self) -> None: """Write the gripper command to the SurfaceGripperView. @@ -191,52 +360,72 @@ def write_data_to_sim(self) -> None: The Do nothing command is not applied, and is only used to indicate whether the gripper state has changed. """ + # Convert to torch at the GripperView boundary (zero-copy on CPU) + command_torch = wp.to_torch(self._gripper_command) # Remove the SurfaceGripper indices that have a commanded value of 2 - indices = ( - torch.argwhere(torch.logical_or(self._gripper_command < -0.3, self._gripper_command > 0.3)) - .to(torch.int32) - .tolist() - ) + indices = torch.argwhere(torch.logical_or(command_torch < -0.3, command_torch > 0.3)).to(torch.int32).tolist() # Write to the SurfaceGripperView if there are any indices to write to if len(indices) > 0: - self._gripper_view.apply_gripper_action(self._gripper_command.tolist(), indices) - - def set_grippers_command(self, states: torch.Tensor, indices: torch.Tensor | None = None) -> None: - """Set the internal gripper command buffer. This function does not write to the simulation. + self._gripper_view.apply_gripper_action(command_torch.tolist(), indices) - Possible values for the gripper command are: - - [-1, -0.3] --> Open - - ]-0.3, 0.3[ --> Do nothing - - [0.3, 1] --> Close + def reset_index(self, env_ids: wp.array | None = None) -> None: + """Reset the gripper command buffer. Args: - states: A tensor of integers representing the gripper command. Shape must match that of indices. - indices: A tensor of integers representing the indices of the grippers to set the command for. Defaults - to None, in which case all grippers are set. + env_ids: Environment indices. Defaults to None (all environments). """ - if indices is None: - indices = self._ALL_INDICES + if env_ids is None: + env_ids = self._ALL_INDICES + + # Reset the selected grippers to an open status + wp.launch( + fill_scalar_at_indices, + dim=env_ids.shape[0], + inputs=[wp.float32(-1.0), env_ids], + outputs=[self._gripper_command], + device=self._device, + ) + self.write_data_to_sim() + # Sets the gripper last command to be 0.0 (do nothing) + wp.launch( + fill_scalar_at_indices, + dim=env_ids.shape[0], + inputs=[wp.float32(0.0), env_ids], + outputs=[self._gripper_command], + device=self._device, + ) + # Force set the state to open. It will read open in the next update call. + wp.launch( + fill_scalar_at_indices, + dim=env_ids.shape[0], + inputs=[wp.float32(-1.0), env_ids], + outputs=[self._gripper_state], + device=self._device, + ) + + def reset_mask(self, env_mask: torch.Tensor | None = None) -> None: + """Reset the gripper command buffer using a boolean mask. - self._gripper_command[indices] = states + Args: + env_mask: Boolean mask of shape (num_envs,). Defaults to None (all environments). + """ + if env_mask is not None: + env_ids = wp.from_torch(torch.argwhere(env_mask).view(-1).to(torch.int32), dtype=wp.int32) + else: + env_ids = self._ALL_INDICES + self.reset_index(env_ids) def reset(self, indices: torch.Tensor | None = None) -> None: """Reset the gripper command buffer. + .. deprecated:: v2.0.0 + Use :meth:`reset_index` instead. + Args: - indices: A tensor of integers representing the indices of the grippers to reset the command for. Defaults - to None, in which case all grippers are reset. + indices: A tensor of integers representing the indices of the grippers to reset. Defaults to None. """ - # Would normally set the buffer to 0, for now we won't do that - if indices is None: - indices = self._ALL_INDICES - - # Reset the selected grippers to an open status - self._gripper_command[indices] = -1.0 - self.write_data_to_sim() - # Sets the gripper last command to be 0.0 (do nothing) - self._gripper_command[indices] = 0 - # Force set the state to open. It will read open in the next update call. - self._gripper_state[indices] = -1.0 + env_ids = self._resolve_env_ids(indices) + self.reset_index(env_ids) """ Initialization. @@ -249,14 +438,11 @@ def _initialize_impl(self) -> None: ValueError: If the simulation backend is not CPU. RuntimeError: If the Simulation Context is not initialized or if gripper prims are not found. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ - enable_extension("isaacsim.robot.surface_gripper") - from isaacsim.robot.surface_gripper import GripperView - # Check that we are using the CPU backend. if self._device != "cpu": raise Exception( @@ -264,6 +450,9 @@ def _initialize_impl(self) -> None: " `--device cpu` to run the simulation on CPU." ) + enable_extension("isaacsim.robot.surface_gripper") + from isaacsim.robot.surface_gripper import GripperView + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) template_prim = sim_utils.find_first_matching_prim(self._cfg.prim_path) if template_prim is None: @@ -304,17 +493,15 @@ def _initialize_impl(self) -> None: # Process the configuration self._process_cfg() - # Initialize gripper view and set properties. Note we do not set the properties through the gripper view - # to avoid having to convert them to list of floats here. Instead, we do it in the update_gripper_properties - # function which does this conversion internally. + # Initialize gripper view and set properties. self._gripper_view = GripperView( self._prim_expr, ) - self.update_gripper_properties( - max_grip_distance=self._max_grip_distance.clone(), - coaxial_force_limit=self._coaxial_force_limit.clone(), - shear_force_limit=self._shear_force_limit.clone(), - retry_interval=self._retry_interval.clone(), + self.update_gripper_properties_index( + max_grip_distance=wp.clone(self._max_grip_distance), + coaxial_force_limit=wp.clone(self._coaxial_force_limit), + shear_force_limit=wp.clone(self._shear_force_limit), + retry_interval=wp.clone(self._retry_interval), ) # log information about the surface gripper @@ -322,18 +509,18 @@ def _initialize_impl(self) -> None: logger.info(f"Number of instances: {self._num_envs}") # Reset grippers - self.reset() + self.reset_index() def _create_buffers(self) -> None: """Create the buffers for storing the gripper state, command, and properties.""" - self._gripper_state = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) - self._gripper_command = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) - self._ALL_INDICES = torch.arange(self._num_envs, device=self._device, dtype=torch.long) + self._gripper_state = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._gripper_command = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._ALL_INDICES = wp.array(np.arange(self._num_envs, dtype=np.int32), device=self._device) - self._max_grip_distance = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) - self._coaxial_force_limit = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) - self._shear_force_limit = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) - self._retry_interval = torch.zeros(self._num_envs, device=self._device, dtype=torch.float32) + self._max_grip_distance = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._coaxial_force_limit = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._shear_force_limit = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) + self._retry_interval = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device) def _process_cfg(self) -> None: """Process the configuration for the gripper properties.""" @@ -380,15 +567,35 @@ def _process_cfg(self) -> None: Helper functions. """ + def _resolve_env_ids(self, env_ids) -> wp.array: + """Resolve environment indices to a warp array. + + Args: + env_ids: Environment indices. Can be None, a slice, a list, or a torch.Tensor. + + Returns: + A warp array of int32 indices. + """ + if env_ids is None or env_ids == slice(None): + return self._ALL_INDICES + elif isinstance(env_ids, list): + return wp.array(env_ids, dtype=wp.int32, device=self._device) + elif isinstance(env_ids, torch.Tensor): + return wp.from_torch(env_ids.to(torch.int32).contiguous(), dtype=wp.int32) + return env_ids + def parse_gripper_parameter( self, cfg_value: float | int | tuple | None, default_value: float | int | tuple | None, ndim: int = 0 - ) -> torch.Tensor: + ) -> wp.array: """Parse the gripper parameter. Args: cfg_value: The value to parse. Can be a float, int, tuple, or None. default_value: The default value to use if cfg_value is None. Can be a float, int, tuple, or None. ndim: The number of dimensions of the parameter. Defaults to 0. + + Returns: + A warp array of float32 values. """ # Adjust the buffer size based on the number of dimensions if ndim == 0: @@ -424,4 +631,5 @@ def parse_gripper_parameter( else: raise ValueError("The parameter value is None and no default value is provided.") - return param + # Convert to warp + return wp.from_torch(param, dtype=wp.float32) diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py similarity index 78% rename from source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py index 6648e183e2fb..7b2c3c5493fe 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py @@ -4,11 +4,13 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING +from typing import TYPE_CHECKING +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from isaaclab.utils import configclass -from ..asset_base_cfg import AssetBaseCfg -from .surface_gripper import SurfaceGripper +if TYPE_CHECKING: + from .surface_gripper import SurfaceGripper @configclass @@ -30,4 +32,4 @@ class SurfaceGripperCfg(AssetBaseCfg): retry_interval: float | None = None """The amount of time the gripper will spend trying to grasp an object.""" - class_type: type = SurfaceGripper + class_type: type["SurfaceGripper"] | str = "{DIR}.surface_gripper:SurfaceGripper" diff --git a/source/isaaclab_physx/isaaclab_physx/cloner/__init__.py b/source/isaaclab_physx/isaaclab_physx/cloner/__init__.py new file mode 100644 index 000000000000..e14e0f6d52c5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/cloner/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/cloner/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/cloner/__init__.pyi new file mode 100644 index 000000000000..ecc5b6e3923e --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/cloner/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "physx_replicate", +] + +from .physx_replicate import physx_replicate diff --git a/source/isaaclab_physx/isaaclab_physx/cloner/physx_replicate.py b/source/isaaclab_physx/isaaclab_physx/cloner/physx_replicate.py new file mode 100644 index 000000000000..d90d413bffa2 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/cloner/physx_replicate.py @@ -0,0 +1,111 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +from omni.physx import get_physx_replicator_interface +from pxr import Usd, UsdUtils + + +def physx_replicate( + stage: Usd.Stage, + sources: list[str], # e.g. ["/World/Template/A", "/World/Template/B"] + destinations: list[str], # e.g. ["/World/envs/env_{}/Robot", "/World/envs/env_{}/Object"] + env_ids: torch.Tensor, # env_ids + mapping: torch.Tensor, # (num_sources, num_envs) bool; True -> place sources[i] into world=j + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + use_fabric: bool = False, + device: str = "cpu", + exclude_self_replication: bool = True, +) -> None: + """Replicate prims via PhysX replicator with per-row mapping. + + Builds per-source destination lists from ``mapping`` and calls PhysX ``replicate``. + Rows covering all environments use ``useEnvIds=True``; partial rows use ``False``. + The replicator is registered for the call and then unregistered. + + ``attach_fn`` excludes ``/World/template`` and ``/World/envs`` so that PhysX does + not independently parse prims that the replicator will handle. The source prim + receives its physics body as a side-effect of ``rep.replicate()`` (which always + parses the source internally), so every source must appear in at least one + ``replicate`` call. + + When ``exclude_self_replication`` is True (default), each source environment is + removed from its own replication targets so the replicator only creates bodies at + non-self destinations. If removing self would leave the world list empty (i.e. the + source maps only to its own environment), self is kept so that ``rep.replicate()`` + is still called and the source prim gets its physics body. + + Args: + stage: USD stage. + sources: Source prim paths (``S``). + destinations: Destination templates (``S``) with ``"{}"`` for env index. + env_ids: Environment indices (``[E]``). + mapping: Bool/int mask (``[S, E]``) selecting envs per source. + positions: Optional positions (unused, for API compatibility). + quaternions: Optional orientations (unused, for API compatibility). + use_fabric: Use Fabric for replication. + device: Torch device for determining replication mode. + exclude_self_replication: If True, skip replicating a source prim onto itself + when the source also maps to other environments. Default is True. + Self-only sources always keep self so that ``rep.replicate()`` fires. + + Returns: + None + """ + # Note: positions and quaternions are unused by PhysX replicator + # They are included for API compatibility with other backends (e.g., Newton) + del positions, quaternions + + stage_id = UsdUtils.StageCache.Get().Insert(stage).ToLongInt() + current_worlds: list[int] = [] + current_template: str = "" + num_envs = mapping.size(1) + + if num_envs > 1: + # Pre-compute effective world lists after self-exclusion. + # Self is only removed when the source also maps to other environments; + # if it is the sole destination we must keep it so that rep.replicate() + # is still called (the source gets its physics body from that call). + effective_worlds: list[list[int]] = [] + for i, src in enumerate(sources): + worlds = env_ids[mapping[i]].tolist() + if exclude_self_replication: + pre, _, suf = destinations[i].partition("{}") + self_id = src.removeprefix(pre).removesuffix(suf) + if self_id.isdigit(): + filtered = [w for w in worlds if w != int(self_id)] + worlds = filtered if filtered else worlds + effective_worlds.append(worlds) + + def attach_fn(_stage_id: int): + return ["/World/template", "/World/envs"] + + def rename_fn(_replicate_path: str, i: int): + return current_template.format(current_worlds[i]) + + def attach_end_fn(_stage_id: int): + nonlocal current_template + rep = get_physx_replicator_interface() + for i, src in enumerate(sources): + current_template = destinations[i] + current_worlds[:] = effective_worlds[i] + if not current_worlds: + continue + rep.replicate( + _stage_id, + src, + len(current_worlds), + # TODO: envIds needs to support heterogeneous setup. for now, we rely on USD collision filtering + useEnvIds=False, # (len(current_worlds) == num_envs - 1) and device != "cpu", + useFabricForReplication=use_fabric, + ) + # unregister only AFTER all replicate() calls completed + rep.unregister_replicator(_stage_id) + + get_physx_replicator_interface().register_replicator(stage_id, attach_fn, attach_end_fn, rename_fn) diff --git a/source/isaaclab_physx/isaaclab_physx/physics/__init__.py b/source/isaaclab_physx/isaaclab_physx/physics/__init__.py new file mode 100644 index 000000000000..7254081c805a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/physics/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Implementation backends for simulation interfaces.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/physics/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/physics/__init__.pyi new file mode 100644 index 000000000000..9eeb559c2927 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/physics/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "PhysxManager", + "IsaacEvents", + "PhysxCfg", +] + +from .physx_manager import PhysxManager, IsaacEvents +from .physx_manager_cfg import PhysxCfg diff --git a/source/isaaclab_physx/isaaclab_physx/physics/physx_manager.py b/source/isaaclab_physx/isaaclab_physx/physics/physx_manager.py new file mode 100644 index 000000000000..4fb55064f072 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/physics/physx_manager.py @@ -0,0 +1,752 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX Manager for Isaac Lab. + +This module manages PhysX physics simulation lifecycle, configuration, callbacks, and physics views. +""" + +from __future__ import annotations + +import glob +import logging +import os +import re +import time +from collections.abc import Callable +from datetime import datetime +from enum import Enum +from typing import TYPE_CHECKING, Any, ClassVar + +import torch + +import carb +import omni.kit.app +import omni.physics.tensors +import omni.physx +import omni.timeline +import omni.usd +from pxr import Sdf, UsdUtils + +import isaaclab.sim as sim_utils +from isaaclab.physics import CallbackHandle, PhysicsEvent, PhysicsManager +from isaaclab.utils.string import to_camel_case + +if TYPE_CHECKING: + from isaaclab.sim.simulation_context import SimulationContext + + from .physx_cfg import PhysxCfg + +__all__ = ["IsaacEvents", "PhysxManager"] + +logger = logging.getLogger(__name__) + + +class IsaacEvents(Enum): + """Events dispatched during simulation lifecycle. + + Note: This enum is kept for backward compatibility. New code should use + PhysicsEvent from physics_manager for cross-backend compatibility. + """ + + PHYSICS_WARMUP = "isaac.physics_warmup" + SIMULATION_VIEW_CREATED = "isaac.simulation_view_created" + PHYSICS_READY = "isaac.physics_ready" + POST_RESET = "isaac.post_reset" + PRIM_DELETION = "isaac.prim_deletion" + PRE_PHYSICS_STEP = "isaac.pre_physics_step" + POST_PHYSICS_STEP = "isaac.post_physics_step" + TIMELINE_STOP = "isaac.timeline_stop" + + +_PHYSICS_EVENT_TO_ISAAC_EVENT: dict[PhysicsEvent, IsaacEvents] = { + PhysicsEvent.MODEL_INIT: IsaacEvents.PHYSICS_WARMUP, + PhysicsEvent.PHYSICS_READY: IsaacEvents.PHYSICS_READY, + PhysicsEvent.STOP: IsaacEvents.TIMELINE_STOP, +} +_PHYSICS_EVENT_VALUE_TO_ISAAC_EVENT: dict[str, IsaacEvents] = { + event.value: isaac_event for event, isaac_event in _PHYSICS_EVENT_TO_ISAAC_EVENT.items() +} + + +class AnimationRecorder: + """Handles animation recording using PhysX PVD interface.""" + + def __init__(self, sim_context: SimulationContext): + self._sim = sim_context + self._enabled = bool(sim_context.get_setting("/isaaclab/anim_recording/enabled")) + self._started_at: float | None = None + self._physx_pvd = None + + if self._enabled: + self._start_time = sim_context.get_setting("/isaaclab/anim_recording/start_time") + self._stop_time = sim_context.get_setting("/isaaclab/anim_recording/stop_time") + self._setup_output_dir() + + def _setup_output_dir(self) -> None: + """Initialize recording directory and PVD interface.""" + from omni.physxpvd.bindings import _physxPvd + + repo_path = os.path.join(carb.tokens.get_tokens_interface().resolve("${app}"), "..") + timestamp = datetime.now().strftime("%Y_%m_%d_%H%M%S") + self._output_dir = os.path.join(repo_path, "anim_recordings", timestamp).replace("\\", "/").rstrip("/") + "/" + os.makedirs(self._output_dir, exist_ok=True) + + self._physx_pvd = _physxPvd.acquire_physx_pvd_interface() + self._sim.set_setting("/persistent/physics/omniPvdOvdRecordingDirectory", self._output_dir) + self._sim.set_setting("/physics/omniPvdOutputEnabled", True) + + @property + def enabled(self) -> bool: + return self._enabled + + def update(self) -> bool: + """Update recording state. Returns True if recording finished.""" + if not self._enabled: + return False + if self._started_at is None: + self._started_at = time.time() + if time.time() - self._started_at > self._stop_time: + self._finish() + return True + return False + + def _finish(self) -> None: + """Finalize and export the recording.""" + logger.warning("[AnimationRecorder] Finishing recording. This may take a few minutes.") + + physx = omni.physx.get_physx_simulation_interface() + physx.detach_stage() + + stage_path = os.path.join(self._output_dir, "stage_simulation.usdc") + sim_utils.save_stage(stage_path, save_and_reload_in_place=False) + + ovd_files = [f for f in glob.glob(os.path.join(self._output_dir, "*.ovd")) if not f.endswith("tmp.ovd")] + if ovd_files and self._physx_pvd: + input_ovd = max(ovd_files, key=os.path.getctime) + self._physx_pvd.ovd_to_usd_over_with_layer_creation( + input_ovd, + stage_path, + self._output_dir, + "baked_animation_recording.usda", + self._start_time, + self._stop_time, + True, + False, + ) + self._update_usda_start_time(os.path.join(self._output_dir, "baked_animation_recording.usda")) + + self._sim.set_setting("/physics/omniPvdOutputEnabled", False) + + def _update_usda_start_time(self, file_path: str) -> None: + """Patch the start time in the exported USDA file.""" + with open(file_path) as f: + content = f.read() + match = re.search(r"timeCodesPerSecond\s*=\s*(\d+)", content) + if match: + fps = int(match.group(1)) + new_start = int(self._start_time * fps) + content = re.sub(r"startTimeCode\s*=\s*\d+", f"startTimeCode = {new_start}", content) + with open(file_path, "w") as f: + f.write(content) + + +class PhysxManager(PhysicsManager): + """Manages PhysX physics simulation lifecycle. + + Lifecycle: initialize() -> reset() -> step() (repeated) -> close() + """ + + _cfg: ClassVar[PhysxCfg | None] = None + + _timeline: ClassVar[omni.timeline.ITimeline] = omni.timeline.get_timeline_interface() + _event_bus: ClassVar[carb.eventdispatcher.IEventDispatcher] = carb.eventdispatcher.get_eventdispatcher() + _physx: ClassVar[omni.physx.IPhysx] = omni.physx.get_physx_interface() + _physx_sim: ClassVar[omni.physx.IPhysxSimulation] = omni.physx.get_physx_simulation_interface() + + _view: ClassVar[omni.physics.tensors.SimulationView | None] = None + _view_warp: ClassVar[omni.physics.tensors.SimulationView | None] = None + _warmup_needed: ClassVar[bool] = True + _view_created: ClassVar[bool] = False + _assets_loaded: ClassVar[bool] = True + _stage_id: ClassVar[int] = -1 + _subscriptions: ClassVar[dict[str, Any]] = {} + _fabric: ClassVar[Any] = None + _update_fabric: ClassVar[Callable[[float, float], None] | None] = None + _anim_recorder: ClassVar[AnimationRecorder | None] = None + _callback_exception: ClassVar[Exception | None] = None + + class _SimManagerStub: + """No-op stub for Isaac Sim APIs expecting simulation_manager_interface.""" + + def reset(self) -> None: + pass + + def get_simulation_time(self) -> float: + return omni.physx.get_physx_interface().get_simulation_time() + + def is_simulating(self) -> bool: + return omni.physx.get_physx_interface().is_simulating() + + def __getattr__(self, name: str) -> Callable[..., Any]: + return lambda *a, **kw: None + + # field stubs for Isaac Sim APIs expecting simulation_manager_interface + _simulation_manager_interface: ClassVar[_SimManagerStub] = _SimManagerStub() + _physics_scene_apis: ClassVar[dict[str, Any]] = {} + _message_bus = _event_bus + + @classmethod + def initialize(cls, sim_context: SimulationContext) -> None: + """Initialize the physics manager.""" + from isaaclab.sim.utils.stage import get_current_stage_id + + super().initialize(sim_context) + cls._stage_id = get_current_stage_id() + + cls._setup_subscriptions() + cls._configure_physics() + cls._load_fabric() + cls._anim_recorder = AnimationRecorder(sim_context) + + # force update cycle to apply dt + sim = PhysicsManager._sim + sim.set_setting("/app/player/playSimulations", False) # type: ignore[union-attr] + omni.kit.app.get_app().update() + sim.set_setting("/app/player/playSimulations", True) # type: ignore[union-attr] + + @classmethod + def reset(cls, soft: bool = False) -> None: + """Reset the physics simulation.""" + if not soft: + # Ensure views are created (warmup only happens once per stage) + if cls._view is None: + cls._warmup_and_create_views() + # Deterministic lifecycle dispatch for backend-agnostic callbacks. + # This avoids relying on asynchronous event-bus ordering during env construction. + cls.dispatch_event(PhysicsEvent.PHYSICS_READY, payload={}) + # Legacy IsaacEvents dispatch for callbacks registered directly on IsaacEvents. + cls._event_bus.dispatch_event(IsaacEvents.PHYSICS_READY.value, payload={}) + + device = PhysicsManager._device + if "cuda" in device: + torch.cuda.set_device(device) + + if cls._view is not None: + cls._view._backend.initialize_kinematic_bodies() + + cls.raise_callback_exception_if_any() + + @classmethod + def forward(cls) -> None: + """Update articulation kinematics and fabric for rendering.""" + sim = PhysicsManager._sim + if cls._fabric is not None and cls._update_fabric is not None: + if cls._view is not None and sim is not None and sim.is_playing(): + cls._view.update_articulations_kinematic() + cls._update_fabric(0.0, 0.0) + + @classmethod + def step(cls) -> None: + """Step the physics simulation.""" + sim = PhysicsManager._sim + if sim is None: + return + + if cls._anim_recorder and cls._anim_recorder.enabled and cls._anim_recorder.update(): + logger.warning("Animation recording finished. Shutting down.") + omni.kit.app.get_app().shutdown() + return + + cls._physx_sim.simulate(sim.cfg.dt, 0.0) + cls._physx_sim.fetch_results() + + device = PhysicsManager._device + if "cuda" in device: + torch.cuda.set_device(device) + + cls.raise_callback_exception_if_any() + + @classmethod + def play(cls) -> None: + """Start or resume the timeline.""" + cls._timeline.play() + # Pump events so timeline callbacks fire synchronously + omni.kit.app.get_app().update() + + @classmethod + def pause(cls) -> None: + """Pause the timeline.""" + cls._timeline.pause() + # Pump events so timeline callbacks fire synchronously + omni.kit.app.get_app().update() + + @classmethod + def stop(cls) -> None: + """Stop the timeline.""" + cls._timeline.stop() + # Pump events so timeline callbacks fire synchronously + omni.kit.app.get_app().update() + + @classmethod + def wait_for_playing(cls) -> None: + """Block until the timeline is playing, keeping the GUI responsive. + + After resume, forces a fabric re-sync so articulation meshes unfreeze. + See: https://github.com/isaac-sim/IsaacLab/issues/4279 + """ + if cls._timeline.is_playing(): + return + app = omni.kit.app.get_app() + while not cls._timeline.is_playing(): + app.update() + if cls._timeline.is_stopped(): + break + # Force fabric to re-sync articulation transforms after resume. + # detach/attach resets the FabricManager, then we immediately push + # current poses so the first render after resume shows correct state. + if not cls._timeline.is_stopped(): + cls._re_sync_fabric() + if cls._view is not None: + cls._view.update_articulations_kinematic() + if cls._update_fabric is not None: + cls._update_fabric(0.0, 0.0) + + @classmethod + def close(cls) -> None: + """Clean up physics resources.""" + # Detach PhysX from the stage FIRST to prevent shape/actor cleanup errors + # This disconnects PhysX from USD before any deletion events are fired + if cls._physx_sim is not None: + cls._physx_sim.detach_stage() + # Pump the app to flush pending PhysX cleanup operations + omni.kit.app.get_app().update() + + # Now invalidate views (they're already disconnected from PhysX) + cls._invalidate_views() + cls._subscriptions.clear() + + # Notify listeners that prims are being deleted (safe now since PhysX is detached) + cls._event_bus.dispatch_event(IsaacEvents.PRIM_DELETION.value, payload={"prim_path": "/"}) + + cls._fabric = None + cls._update_fabric = None + cls._anim_recorder = None + cls._warmup_needed = True + cls._view_created = False + cls._assets_loaded = True + cls._callback_exception = None + + super().close() + + @classmethod + def get_physics_sim_view(cls) -> omni.physics.tensors.SimulationView | None: + return cls._view + + @classmethod + def get_physics_sim_device(cls) -> str: + """Get the physics simulation device (Isaac Sim compatibility alias).""" + return PhysicsManager.get_device() + + @classmethod + def assets_loading(cls) -> bool: + return not cls._assets_loaded + + @classmethod + def store_callback_exception(cls, exception: Exception) -> None: + """Store an exception from a callback to be raised later. + + Omniverse event systems catch exceptions internally. Use this to store + exceptions that should be surfaced after the event dispatch completes. + """ + if cls._callback_exception is None: + cls._callback_exception = exception + + @classmethod + def raise_callback_exception_if_any(cls) -> None: + """Raise any stored callback exception and clear it. + + Call this after operations that may trigger callbacks (reset, step, etc.) + to propagate exceptions from Omniverse event callbacks. + """ + if cls._callback_exception is not None: + exc = cls._callback_exception + cls._callback_exception = None + raise exc + + @classmethod + def register_callback( + cls, + callback: Callable, + event: PhysicsEvent | IsaacEvents, + order: int = 0, + name: str | None = None, + wrap_weak_ref: bool = True, + ) -> CallbackHandle: + """Register a callback. Accepts both PhysicsEvent and IsaacEvents.""" + if isinstance(event, IsaacEvents): + cid = cls._callback_id + cls._callback_id += 1 + cb = cls._wrap_weak_ref(callback) if wrap_weak_ref else callback + sub = cls._subscribe_isaac(cb, event, order, name) + cls._callbacks[cid] = (event, cb, order, name, sub) + return CallbackHandle(cid, cls) + return super().register_callback(callback, event, order, name, wrap_weak_ref) + + @classmethod + def _subscribe_to_event( + cls, callback_id: int, callback: Callable, event: PhysicsEvent, order: int, name: str | None + ) -> Any: + """Subscribe to PhysX events. Maps PhysicsEvent → IsaacEvents.""" + isaac_event = _PHYSICS_EVENT_TO_ISAAC_EVENT.get(event) + if isaac_event is None: + isaac_event = _PHYSICS_EVENT_VALUE_TO_ISAAC_EVENT.get(getattr(event, "value", event)) + return cls._subscribe_isaac(callback, isaac_event, order, name) if isaac_event else None + + @classmethod + def _unsubscribe_from_event(cls, callback_id: int, event: PhysicsEvent | IsaacEvents, subscription: Any) -> None: + """Unsubscribe from PhysX/Isaac events.""" + if subscription is not None and hasattr(subscription, "unsubscribe"): + subscription.unsubscribe() + + @classmethod + def _subscribe_isaac(cls, callback: Callable, event: IsaacEvents, order: int, name: str | None) -> Any: + """Subscribe to an IsaacEvents event.""" + + def guarded(cb: Callable) -> Callable: + def wrapper(dt: float) -> Any: + return cb(dt) if cls._view_created else None + + return wrapper + + if event in ( + IsaacEvents.PHYSICS_WARMUP, + IsaacEvents.PHYSICS_READY, + IsaacEvents.POST_RESET, + IsaacEvents.SIMULATION_VIEW_CREATED, + IsaacEvents.PRIM_DELETION, + ): + return cls._event_bus.observe_event(event_name=event.value, order=order, on_event=callback) + elif event == IsaacEvents.POST_PHYSICS_STEP: + return cls._physx.subscribe_physics_on_step_events(guarded(callback), pre_step=False, order=order) + elif event == IsaacEvents.PRE_PHYSICS_STEP: + return cls._physx.subscribe_physics_on_step_events(guarded(callback), pre_step=True, order=order) + elif event == IsaacEvents.TIMELINE_STOP: + return cls._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.STOP), callback, order=order, name=name + ) + return None + + @classmethod + def _setup_subscriptions(cls) -> None: + """Subscribe to timeline events.""" + if "play" in cls._subscriptions: + return + stream = cls._timeline.get_timeline_event_stream() + cls._subscriptions["play"] = stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), cls._on_play + ) + cls._subscriptions["stop"] = stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.STOP), cls._on_stop + ) + if "stage_open" not in cls._subscriptions: + ctx = omni.usd.get_context() + cls._subscriptions["stage_open"] = cls._event_bus.observe_event( + event_name=ctx.stage_event_name(omni.usd.StageEventType.OPENED), on_event=cls._on_stage_open + ) + + @classmethod + def _configure_physics(cls) -> None: + """Apply all physics settings.""" + # Access base class variables since that's where initialize() sets them + sim = PhysicsManager._sim + cfg = PhysicsManager._cfg + if sim is None or cfg is None: + return + + device = sim.device + + # global settings (via SettingsManager) + sim.set_setting("/persistent/omnihydra/useSceneGraphInstancing", True) # type: ignore[union-attr] + sim.set_setting("/physics/physxDispatcher", True) # type: ignore[union-attr] + sim.set_setting("/physics/disableContactProcessing", True) # type: ignore[union-attr] + sim.set_setting("/physics/collisionConeCustomGeometry", False) # type: ignore[union-attr] + sim.set_setting("/physics/collisionCylinderCustomGeometry", False) # type: ignore[union-attr] + sim.set_setting("/physics/autoPopupSimulationOutputWindow", False) # type: ignore[union-attr] + + # device setup (set on PhysicsManager so PhysicsManager.get_device() works) + is_gpu = "cuda" in device + if is_gpu: + parts = device.split(":") + cuda_device = sim.get_setting("/physics/cudaDevice") # type: ignore[union-attr] + device_id = int(parts[1]) if len(parts) > 1 else max(0, int(cuda_device) if cuda_device is not None else 0) + sim.set_setting("/physics/cudaDevice", device_id) # type: ignore[union-attr] + sim.set_setting("/physics/suppressReadback", True) # type: ignore[union-attr] + PhysicsManager._device = f"cuda:{device_id}" + else: + sim.set_setting("/physics/cudaDevice", -1) # type: ignore[union-attr] + sim.set_setting("/physics/suppressReadback", False) # type: ignore[union-attr] + PhysicsManager._device = "cpu" + + # physx scene api (use sim.cfg for shared parameters like physics_prim_path, dt, physics_material) + # apply schema and set attributes by name + sim_cfg = sim.cfg + stage = sim.stage + scene_prim = stage.GetPrimAtPath(sim_cfg.physics_prim_path) + if "PhysxSceneAPI" not in scene_prim.GetAppliedSchemas(): + scene_prim.AddAppliedSchema("PhysxSceneAPI") + + # timestep and frame rate + steps_per_sec = int(1.0 / sim_cfg.dt) + sim_utils.safe_set_attribute_on_usd_prim( + scene_prim, "physxScene:timeStepsPerSecond", steps_per_sec, camel_case=False + ) + render_interval = max(sim_cfg.render_interval, 1) + sim.set_setting("/persistent/simulation/minFrameRate", steps_per_sec // render_interval) # type: ignore[union-attr] + + # gpu dynamics + sim_utils.safe_set_attribute_on_usd_prim( + scene_prim, "physxScene:broadphaseType", "GPU" if is_gpu else "MBP", camel_case=False + ) + sim_utils.safe_set_attribute_on_usd_prim(scene_prim, "physxScene:enableGPUDynamics", is_gpu, camel_case=False) + + # ccd (not supported on gpu) + enable_ccd = cfg.enable_ccd and not is_gpu + if cfg.enable_ccd and is_gpu: + logger.warning("CCD disabled when GPU dynamics is enabled.") + sim_utils.safe_set_attribute_on_usd_prim(scene_prim, "physxScene:enableCCD", enable_ccd, camel_case=False) + + # solver + sim_utils.safe_set_attribute_on_usd_prim( + scene_prim, "physxScene:solverType", "TGS" if cfg.solver_type == 1 else "PGS", camel_case=False + ) + scene_prim.CreateAttribute("physxScene:solveArticulationContactLast", Sdf.ValueTypeNames.Bool).Set( + cfg.solve_articulation_contact_last + ) + + # scene query support: forward SimulationCfg value and override for GUI + if hasattr(sim_cfg, "enable_scene_query_support"): + cfg.enable_scene_query_support = sim_cfg.enable_scene_query_support + if bool(sim.get_setting("/isaaclab/has_gui")): + cfg.enable_scene_query_support = True + + # apply remaining cfg attributes to scene (physxScene:*) + skip = { + "solver_type", + "enable_ccd", + "solve_articulation_contact_last", + "dt", + "device", + "render_interval", + "gravity", + "physics_prim_path", + "use_fabric", + "physics_material", + "class_type", + } + for key, value in cfg.to_dict().items(): # type: ignore + if key not in skip: + attr_name = "bounce_threshold" if key == "bounce_threshold_velocity" else key + sim_utils.safe_set_attribute_on_usd_prim( + scene_prim, + f"physxScene:{to_camel_case(attr_name, 'cC')}", + value, + camel_case=False, + ) + + # default physics material (from SimulationCfg, or create default if None) + physics_material = sim_cfg.physics_material + if physics_material is None: + from isaaclab.sim.spawners.materials import RigidBodyMaterialCfg + + physics_material = RigidBodyMaterialCfg() + mat_path = f"{sim_cfg.physics_prim_path}/defaultMaterial" + physics_material.func(mat_path, physics_material) + sim_utils.bind_physics_material(sim_cfg.physics_prim_path, mat_path) + + # warnings + if cfg.solver_type == 1 and not cfg.enable_external_forces_every_iteration: + logger.warning("TGS solver with enable_external_forces_every_iteration=False may cause noisy velocities.") + if not cfg.enable_stabilization and sim_cfg.dt > 0.0333: + logger.warning("Large timestep without stabilization may cause physics issues.") + + @classmethod + def _load_fabric(cls) -> None: + """Load fabric interface if enabled.""" + sim = PhysicsManager._sim + cfg = PhysicsManager._cfg + if sim is None or cfg is None: + return + + use_fabric = sim.cfg.use_fabric + ext_mgr = omni.kit.app.get_app().get_extension_manager() + + # enable/disable fabric extension + if use_fabric: + if not ext_mgr.is_extension_enabled("omni.physx.fabric"): + ext_mgr.set_extension_enabled_immediate("omni.physx.fabric", True) + from omni.physxfabric import get_physx_fabric_interface + + cls._fabric = get_physx_fabric_interface() + cls._update_fabric = getattr(cls._fabric, "force_update", cls._fabric.update) + else: + if ext_mgr.is_extension_enabled("omni.physx.fabric"): + ext_mgr.set_extension_enabled_immediate("omni.physx.fabric", False) + cls._fabric = None + cls._update_fabric = None + + # disable usd sync when fabric is enabled (via SettingsManager) + for key in [ + "updateToUsd", + "updateParticlesToUsd", + "updateVelocitiesToUsd", + "updateForceSensorsToUsd", + "updateResidualsToUsd", + ]: + sim.set_setting(f"/physics/{key}", not use_fabric) # type: ignore[union-attr] + sim.set_setting("/isaaclab/fabric_enabled", use_fabric) # type: ignore[union-attr] + sim.set_setting("/physics/visualizationDisplaySimulationOutput", False) # type: ignore[union-attr] + + @classmethod + def _re_sync_fabric(cls) -> None: + """Force the PhysX fabric extension to re-synchronize after a pause/resume transition. + + Starting with PhysX fabric 107.3.21 (Isaac Sim 5.1), the FabricManager skips writing + initial articulation poses to fabric on subsequent resumes, causing articulation meshes + to freeze visually while physics continues to run. + + The workaround detaches and re-attaches the USD stage on the fabric interface, forcing + the FabricManager to fully reinitialize and write transforms into fabric so that Hydra + picks them up. + """ + if cls._fabric is None: + return + sim = PhysicsManager._sim + if sim is None: + return + stage = sim.stage + if stage is None: + return + stage_id = UsdUtils.StageCache.Get().GetId(stage).ToLongInt() + if stage_id <= 0: + return + try: + cls._fabric.detach_stage() + except Exception: + logger.warning("Failed to detach fabric stage during re-sync. Articulation visuals may be stale.") + return + try: + cls._fabric.attach_stage(stage_id) + except Exception: + logger.error( + "Could not re-attach fabric stage. Articulation visuals will be broken until next reset.", + exc_info=True, + ) + + @classmethod + def _warmup_and_create_views(cls) -> None: + """Warm-start physics and create simulation views.""" + if not cls._warmup_needed: + return + + # Get stage ID first (needed for both warmup and view creation) + from isaaclab.sim.utils.stage import get_current_stage_id + + stage_id = get_current_stage_id() + + is_gpu = "cuda" in PhysicsManager.get_device() + + # Attach stage to PhysX BEFORE loading/starting - only needed for GPU pipeline. + # For CPU, the old SimulationManager never called attach_stage() explicitly. + # Calling attach_stage() + force_load_physics_from_usd() together causes a + # double-initialization that corrupts the CPU broadphase (MBP) collision setup, + # causing objects to fall through surfaces non-deterministically. + if is_gpu: + cls._physx_sim.attach_stage(stage_id) + + # warmup physx + cls._physx.force_load_physics_from_usd() + cls._physx.start_simulation() + cls._physx.update_simulation(cls.get_physics_dt(), 0.0) + cls._physx_sim.fetch_results() + cls._event_bus.dispatch_event(IsaacEvents.PHYSICS_WARMUP.value, payload={}) + cls._warmup_needed = False + + if cls._view_created: + return + + # Create tensor views + cls._view = omni.physics.tensors.create_simulation_view("warp", stage_id=stage_id) + cls._view_warp = omni.physics.tensors.create_simulation_view("warp", stage_id=stage_id) + + if cls._view: + cls._view.set_subspace_roots("/") + if cls._view_warp: + cls._view_warp.set_subspace_roots("/") + + # Final update after view creation + cls._physx.update_simulation(cls.get_physics_dt(), 0.0) + cls._view_created = True + + cls._event_bus.dispatch_event(IsaacEvents.SIMULATION_VIEW_CREATED.value, payload={}) + cls.dispatch_event(PhysicsEvent.PHYSICS_READY, payload={}) + cls._event_bus.dispatch_event(IsaacEvents.PHYSICS_READY.value, payload={}) + + @classmethod + def _invalidate_views(cls) -> None: + """Invalidate and clear simulation views.""" + for view in (cls._view, cls._view_warp): + if view: + view.invalidate() + cls._view = None + cls._view_warp = None + cls._view_created = False + + @classmethod + def _on_play(cls, event: Any) -> None: + sim = PhysicsManager._sim + if sim is not None and sim.get_setting("/app/player/playSimulations"): # type: ignore[union-attr] + cls._warmup_and_create_views() + + @classmethod + def _on_stop(cls, event: Any) -> None: + cls._warmup_needed = True + cls._invalidate_views() + + @classmethod + def _on_stage_open(cls, event: Any) -> None: + from isaaclab.sim.utils.stage import get_current_stage, get_current_stage_id + + # Guard against stage open events when stage is not yet valid + stage = get_current_stage() + if stage is None or not stage.GetRootLayer(): + return + + try: + new_stage_id = get_current_stage_id() + except Exception: + # Stage may not be ready for caching yet + return + + if new_stage_id == cls._stage_id: + return + + cls._stage_id = new_stage_id + cls._callbacks.clear() + cls._assets_loaded = True + + def on_loading(e: Any) -> None: + cls._assets_loaded = False + + def on_loaded(e: Any) -> None: + cls._assets_loaded = True + + ctx = omni.usd.get_context() + cls._subscriptions["assets_loading"] = cls._event_bus.observe_event( + event_name=ctx.stage_event_name(omni.usd.StageEventType.ASSETS_LOADING), on_event=on_loading + ) + cls._subscriptions["assets_loaded"] = cls._event_bus.observe_event( + event_name=ctx.stage_event_name(omni.usd.StageEventType.ASSETS_LOADED), on_event=on_loaded + ) diff --git a/source/isaaclab_physx/isaaclab_physx/physics/physx_manager_cfg.py b/source/isaaclab_physx/isaaclab_physx/physics/physx_manager_cfg.py new file mode 100644 index 000000000000..301ffa3c72ed --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/physics/physx_manager_cfg.py @@ -0,0 +1,227 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for PhysX physics manager.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +from isaaclab.physics import PhysicsCfg +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from .physx_manager import PhysxManager + + +@configclass +class PhysxCfg(PhysicsCfg): + """Configuration for PhysX physics manager. + + This configuration includes all PhysX-specific settings including solver + parameters, scene configuration, and GPU buffer sizes. For more information, + see the `PhysX 5 SDK documentation`_. + + PhysX 5 supports GPU-accelerated physics simulation. This is enabled by default, + but can be disabled by setting :attr:`device` to ``cpu``. Unlike CPU PhysX, the + GPU simulation feature is unable to dynamically grow all the buffers. Therefore, + it is necessary to provide a reasonable estimate of the buffer sizes for GPU + features. If insufficient buffer sizes are provided, the simulation will fail + with errors and lead to adverse behaviors. The buffer sizes can be adjusted + through the ``gpu_*`` parameters. + + .. _PhysX 5 SDK documentation: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/_api_build/classPxSceneDesc.html + """ + + # ------------------------------------------------------------------ + # PhysX Scene Settings + # ------------------------------------------------------------------ + + class_type: type[PhysxManager] | str = "{DIR}.physx_manager:PhysxManager" + """The class type of the PhysxManager.""" + + # ------------------------------------------------------------------ + # Solver Settings + # ------------------------------------------------------------------ + + solver_type: Literal[0, 1] = 1 + """The type of solver to use. Default is 1 (TGS). + + Available solvers: + + * :obj:`0`: PGS (Projective Gauss-Seidel) + * :obj:`1`: TGS (Temporal Gauss-Seidel) + """ + + solve_articulation_contact_last: bool = False + """Changes the ordering inside the articulation solver. Default is False. + + PhysX employs a strict ordering for handling constraints in an articulation. The outcome of + each constraint resolution modifies the joint and associated link speeds. However, the default + ordering may not be ideal for gripping scenarios because the solver favours the constraint + types that are resolved last. This is particularly true of stiff constraint systems that are hard + to resolve without resorting to vanishingly small simulation timesteps. + + With dynamic contact resolution being such an important part of gripping, it may make + more sense to solve dynamic contact towards the end of the solver rather than at the + beginning. This parameter modifies the default ordering to enable this change. + + For more information, please check `here `__. + + .. versionadded:: v2.3 + This parameter is only available with Isaac Sim 5.1. + """ + + min_position_iteration_count: int = 1 + """Minimum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 1. + + .. note:: + + Each physics actor in Omniverse specifies its own solver iteration count. The solver takes + the number of iterations specified by the actor with the highest iteration and clamps it to + the range ``[min_position_iteration_count, max_position_iteration_count]``. + """ + + max_position_iteration_count: int = 255 + """Maximum number of solver position iterations (rigid bodies, cloth, particles etc.). Default is 255. + + .. note:: + + Each physics actor in Omniverse specifies its own solver iteration count. The solver takes + the number of iterations specified by the actor with the highest iteration and clamps it to + the range ``[min_position_iteration_count, max_position_iteration_count]``. + """ + + min_velocity_iteration_count: int = 0 + """Minimum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 0. + + .. note:: + + Each physics actor in Omniverse specifies its own solver iteration count. The solver takes + the number of iterations specified by the actor with the highest iteration and clamps it to + the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``. + """ + + max_velocity_iteration_count: int = 255 + """Maximum number of solver velocity iterations (rigid bodies, cloth, particles etc.). Default is 255. + + .. note:: + + Each physics actor in Omniverse specifies its own solver iteration count. The solver takes + the number of iterations specified by the actor with the highest iteration and clamps it to + the range ``[min_velocity_iteration_count, max_velocity_iteration_count]``. + """ + + enable_scene_query_support: bool = False + """Enable/disable scene query support for collision shapes. Default is False. + + This flag allows performing collision queries (raycasts, sweeps, and overlaps) on actors and + attached shapes in the scene. This is useful for implementing custom collision detection logic + outside of the physics engine. + + If set to False, the physics engine does not create the scene query manager and the scene query + functionality will not be available. However, this provides some performance speed-up. + + Note: + This flag is overridden to True + when running the simulation with the GUI enabled. This is to allow certain GUI features + to work properly. + """ + + enable_ccd: bool = False + """Enable a second broad-phase pass that makes it possible to prevent objects from tunneling through each other. + Default is False.""" + + enable_stabilization: bool = False + """Enable/disable additional stabilization pass in solver. Default is False. + + .. note:: + + We recommend setting this flag to true only when the simulation step size is large + (i.e., less than 30 Hz or more than 0.0333 seconds). + + .. warning:: + + Enabling this flag may lead to incorrect contact forces report from the contact sensor. + """ + + enable_external_forces_every_iteration: bool = False + """Enable/disable external forces every position iteration in the TGS solver. Default is False. + + When using the TGS solver (:attr:`solver_type` is 1), this flag allows enabling external forces + every solver position iteration. This can help improve the accuracy of velocity updates. + Consider enabling this flag if the velocities generated by the simulation are noisy. + Increasing the number of velocity iterations, together with this flag, can help improve + the accuracy of velocity updates. + + .. note:: + + This flag is ignored when using the PGS solver (:attr:`solver_type` is 0). + """ + + enable_enhanced_determinism: bool = False + """Enable/disable improved determinism at the expense of performance. Defaults to False. + + For more information on PhysX determinism, please check `here`_. + + .. _here: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/docs/RigidBodyDynamics.html#enhanced-determinism + """ + + bounce_threshold_velocity: float = 0.5 + """Relative velocity threshold for contacts to bounce (in m/s). Default is 0.5 m/s.""" + + friction_offset_threshold: float = 0.04 + """Threshold for contact point to experience friction force (in m). Default is 0.04 m.""" + + friction_correlation_distance: float = 0.025 + """Distance threshold for merging contacts into a single friction anchor point (in m). Default is 0.025 m.""" + + # ------------------------------------------------------------------ + # GPU Buffer Settings + # ------------------------------------------------------------------ + + gpu_max_rigid_contact_count: int = 2**23 + """Size of rigid contact stream buffer allocated in pinned host memory. Default is 2 ** 23.""" + + gpu_max_rigid_patch_count: int = 5 * 2**15 + """Size of the rigid contact patch stream buffer allocated in pinned host memory. Default is 5 * 2 ** 15.""" + + gpu_found_lost_pairs_capacity: int = 2**21 + """Capacity of found and lost buffers allocated in GPU global memory. Default is 2 ** 21. + + This is used for the found/lost pair reports in the BP. + """ + + gpu_found_lost_aggregate_pairs_capacity: int = 2**25 + """Capacity of found and lost buffers in aggregate system allocated in GPU global memory. + Default is 2 ** 25. + + This is used for the found/lost pair reports in AABB manager. + """ + + gpu_total_aggregate_pairs_capacity: int = 2**21 + """Capacity of total number of aggregate pairs allocated in GPU global memory. Default is 2 ** 21.""" + + gpu_collision_stack_size: int = 2**26 + """Size of the collision stack buffer allocated in pinned host memory. Default is 2 ** 26.""" + + gpu_heap_capacity: int = 2**26 + """Initial capacity of the GPU and pinned host memory heaps. Additional memory will be allocated + if more memory is required. Default is 2 ** 26.""" + + gpu_temp_buffer_capacity: int = 2**24 + """Capacity of temp buffer allocated in pinned host memory. Default is 2 ** 24.""" + + gpu_max_num_partitions: int = 8 + """Limitation for the partitions in the GPU dynamics pipeline. Default is 8. + + This variable must be power of 2. A value greater than 32 is currently not supported. Range: (1, 32) + """ + + gpu_max_soft_body_contacts: int = 2**20 + """Size of soft body contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" + + gpu_max_particle_contacts: int = 2**20 + """Size of particle contacts stream buffer allocated in pinned host memory. Default is 2 ** 20.""" diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/__init__.py b/source/isaaclab_physx/isaaclab_physx/renderers/__init__.py new file mode 100644 index 000000000000..e07cebbea77f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX renderer backends (Isaac RTX / Omniverse Replicator).""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/renderers/__init__.pyi new file mode 100644 index 000000000000..2b2097eb47bd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "IsaacRtxRenderer", + "IsaacRtxRendererCfg", +] + +from .isaac_rtx_renderer import IsaacRtxRenderer +from .isaac_rtx_renderer_cfg import IsaacRtxRendererCfg diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py new file mode 100644 index 000000000000..242ac3729d0b --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py @@ -0,0 +1,412 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Isaac RTX renderer using Omniverse Replicator for tiled camera rendering.""" + +from __future__ import annotations + +import json +import logging +import math +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +import numpy as np +import torch +import warp as wp +from packaging import version + +from pxr import Sdf + +from isaaclab.app.settings_manager import get_settings_manager +from isaaclab.renderers import BaseRenderer, RenderBufferKind, RenderBufferSpec +from isaaclab.renderers.camera_render_spec import CameraRenderSpec +from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.warp.kernels import reshape_tiled_image + +from .isaac_rtx_renderer_utils import ensure_isaac_rtx_render_update, ensure_rtx_hydra_engine_attached + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from isaaclab.sensors.camera.camera_data import CameraData + +from .isaac_rtx_renderer_cfg import IsaacRtxRendererCfg + +# RTX simple-shading constants. +# +# Simple shading is driven by Kit's RTX "Minimal" render mode via the +# ``/rtx/minimal/mode`` carb setting (key ``omni:rtx:minimal:mode``), with +# integer values: +# 0 = No Rendering (black output; only other AOVs are produced) +# 1 = Constant Diffuse (single constant color for all surfaces) +# 2 = Texture Diffuse (diffuse shading using texture colors) +# 3 = Diffuse/Glossy/Emission (full material shading) +# +# The public data-type names we expose (``simple_shading_*``) are kept stable +# for backwards compatibility and map onto the Kit integer values below. +SIMPLE_SHADING_AOV = "SimpleShadingSD" +SIMPLE_SHADING_MODES = { + "simple_shading_constant_diffuse": 1, + "simple_shading_diffuse_mdl": 2, + "simple_shading_full_mdl": 3, +} +SIMPLE_SHADING_MODE_SETTING = "/rtx/minimal/mode" + + +def _camera_semantic_filter_predicate(semantic_filter: str | list[str]) -> str: + """Build the instance-mapping semantics predicate from :attr:`isaaclab.sensors.camera.CameraCfg.semantic_filter`. + + Replicator's semantic/instance segmentation annotators consume this via the synthetic-data pipeline. + """ + if isinstance(semantic_filter, list): + return ":*; ".join(semantic_filter) + ":*" + return semantic_filter + + +@dataclass +class IsaacRtxRenderData: + """Render data for Isaac RTX renderer.""" + + annotators: dict[str, Any] + render_product_paths: list[str] + output_data: dict[str, torch.Tensor] | None = None + spec: CameraRenderSpec | None = None + renderer_info: dict[str, Any] = field(default_factory=dict) + + +class IsaacRtxRenderer(BaseRenderer): + """Isaac RTX backend using Omniverse Replicator for tiled camera rendering. + + Requires Isaac Sim. + """ + + def __init__(self, cfg: IsaacRtxRendererCfg): + self.cfg = cfg + # RTX rendering requires the app to be launched with ``--enable_cameras``. + if not get_settings_manager().get("/isaaclab/cameras_enabled"): + raise RuntimeError( + "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" + " rendering." + ) + ensure_rtx_hydra_engine_attached() + # ``/isaaclab/render/rtx_sensors`` is owned by ``Camera.__init__`` (must be set pre-``sim.reset()``). + + def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: + """Publish the per-output Replicator layout this RTX backend writes. + + ``ALBEDO`` and the three ``SIMPLE_SHADING_*`` outputs require Isaac Sim 6.0+ + and are omitted on older versions. The three segmentation outputs report + ``RenderBufferSpec(4, uint8)`` when the matching ``self.cfg.colorize_*`` flag is + set, otherwise ``RenderBufferSpec(1, int32)``. + """ + sim_major = get_isaac_sim_version().major + + specs: dict[RenderBufferKind, RenderBufferSpec] = { + # Replicator's native layout for color output is rgba/uint8; + # ``Camera`` aliases ``rgb`` as a view into ``rgba`` storage. + RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), + RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), + RenderBufferKind.DISTANCE_TO_IMAGE_PLANE: RenderBufferSpec(1, torch.float32), + RenderBufferKind.DISTANCE_TO_CAMERA: RenderBufferSpec(1, torch.float32), + RenderBufferKind.NORMALS: RenderBufferSpec(3, torch.float32), + RenderBufferKind.MOTION_VECTORS: RenderBufferSpec(2, torch.float32), + } + + if sim_major >= 6: + specs[RenderBufferKind.ALBEDO] = RenderBufferSpec(4, torch.uint8) + for shading_type in SIMPLE_SHADING_MODES: + specs[RenderBufferKind(shading_type)] = RenderBufferSpec(3, torch.uint8) + + seg_specs = ( + (RenderBufferKind.SEMANTIC_SEGMENTATION, self.cfg.colorize_semantic_segmentation), + (RenderBufferKind.INSTANCE_SEGMENTATION_FAST, self.cfg.colorize_instance_segmentation), + (RenderBufferKind.INSTANCE_ID_SEGMENTATION_FAST, self.cfg.colorize_instance_id_segmentation), + ) + for name, colorize in seg_specs: + specs[name] = RenderBufferSpec(4, torch.uint8) if colorize else RenderBufferSpec(1, torch.int32) + + return specs + + def prepare_stage(self, stage: Any, num_envs: int) -> None: + """No-op for Isaac RTX - uses USD scene directly without export. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.prepare_stage`.""" + pass + + def create_render_data(self, spec: CameraRenderSpec) -> IsaacRtxRenderData: + """Create render product and annotators for the tiled camera. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.create_render_data`.""" + import omni.replicator.core as rep + from omni.syntheticdata import SyntheticData + from pxr import UsdGeom + + from isaaclab.sim.utils.stage import get_current_stage + + settings = get_settings_manager() + isaac_sim_version = get_isaac_sim_version() + + if isaac_sim_version.major >= 6: + needs_color_render = "rgb" in spec.cfg.data_types or "rgba" in spec.cfg.data_types + if not needs_color_render: + settings.set_bool("/rtx/sdg/force/disableColorRender", True) + if settings.get("/isaaclab/has_gui"): + settings.set_bool("/rtx/sdg/force/disableColorRender", False) + else: + if "albedo" in spec.cfg.data_types: + logger.warning( + "Albedo annotator is only supported in Isaac Sim 6.0+. The albedo data type will be ignored." + ) + if any(dt in SIMPLE_SHADING_MODES for dt in spec.cfg.data_types): + logger.warning( + "Simple shading annotators are only supported in Isaac Sim 6.0+." + " The simple shading data types will be ignored." + ) + + # HACK: Isaac Sim 4.5 has a bug in Camera that breaks segmentation + # outputs for instanceable assets. Disable instancing as a workaround. + stage = get_current_stage() + if isaac_sim_version == version.parse("4.5") and ( + "semantic_segmentation" in spec.cfg.data_types or "instance_segmentation_fast" in spec.cfg.data_types + ): + logger.warning( + "Isaac Sim 4.5 introduced a bug in Camera when outputting instance and semantic" + " segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets" + " will be disabled in the current workflow and may lead to longer load times and increased memory" + " usage." + ) + with Sdf.ChangeBlock(): + for prim in stage.Traverse(): + prim.SetInstanceable(False) + + # Get camera prim paths from sensor view + cam_prim_paths = list(spec.camera_prim_paths) + for cam_prim_path in cam_prim_paths: + cam_prim = stage.GetPrimAtPath(cam_prim_path) + if not cam_prim.IsA(UsdGeom.Camera): + raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") + + # Create replicator tiled render product + rp = rep.create.render_product_tiled(cameras=cam_prim_paths, tile_resolution=(spec.cfg.width, spec.cfg.height)) + render_product_paths = [rp.path] + + # Synthetic-data instance mapping filter for segmentation; before annotator attach. + SyntheticData.Get().set_instance_mapping_semantic_filter( + _camera_semantic_filter_predicate(self.cfg.semantic_filter) + ) + + # Register simple shading if needed + if any(data_type in SIMPLE_SHADING_MODES for data_type in spec.cfg.data_types): + rep.AnnotatorRegistry.register_annotator_from_aov( + aov=SIMPLE_SHADING_AOV, output_data_type=np.uint8, output_channels=4 + ) + # Set simple shading mode (if requested) before rendering + simple_shading_mode = self._resolve_simple_shading_mode(spec) + if simple_shading_mode is not None: + get_settings_manager().set_int(SIMPLE_SHADING_MODE_SETTING, simple_shading_mode) + + # Define annotators based on requested data types + annotators = {} + for annotator_type in spec.cfg.data_types: + if annotator_type == "rgba" or annotator_type == "rgb": + annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=spec.device, do_array_copy=False) + annotators["rgba"] = annotator + elif annotator_type == "albedo": + # TODO: this is a temporary solution because replicator has not exposed the annotator yet + # once it's exposed, we can remove this + rep.AnnotatorRegistry.register_annotator_from_aov( + aov="DiffuseAlbedoSD", output_data_type=np.uint8, output_channels=4 + ) + annotator = rep.AnnotatorRegistry.get_annotator( + "DiffuseAlbedoSD", device=spec.device, do_array_copy=False + ) + annotators["albedo"] = annotator + elif annotator_type in SIMPLE_SHADING_MODES: + annotator = rep.AnnotatorRegistry.get_annotator( + SIMPLE_SHADING_AOV, device=spec.device, do_array_copy=False + ) + annotators[annotator_type] = annotator + elif annotator_type == "depth" or annotator_type == "distance_to_image_plane": + # keep depth for backwards compatibility + annotator = rep.AnnotatorRegistry.get_annotator( + "distance_to_image_plane", device=spec.device, do_array_copy=False + ) + annotators[annotator_type] = annotator + # note: we are verbose here to make it easier to understand the code. + # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. + # if colorize is false, the data is returned as a uint32 image with ids as values. + else: + init_params = None + if annotator_type == "semantic_segmentation": + init_params = { + "colorize": self.cfg.colorize_semantic_segmentation, + "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), + } + elif annotator_type == "instance_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_segmentation} + elif annotator_type == "instance_id_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} + + annotator = rep.AnnotatorRegistry.get_annotator( + annotator_type, init_params, device=spec.device, do_array_copy=False + ) + annotators[annotator_type] = annotator + + # Attach annotators to render product + for annotator in annotators.values(): + annotator.attach(render_product_paths) + + return IsaacRtxRenderData( + annotators=annotators, + render_product_paths=render_product_paths, + spec=spec, + ) + + def _resolve_simple_shading_mode(self, spec: CameraRenderSpec) -> int | None: + """Resolve the requested simple shading mode from data types.""" + requested = [dt for dt in spec.cfg.data_types if dt in SIMPLE_SHADING_MODES] + if not requested: + return None + if len(requested) > 1: + logger.warning( + "Multiple simple shading modes requested (%s). Using '%s' only.", + requested, + requested[0], + ) + return SIMPLE_SHADING_MODES[requested[0]] + + def set_outputs(self, render_data: IsaacRtxRenderData, output_data: dict[str, torch.Tensor]): + """Store reference to output buffers for writing during render. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.set_outputs`.""" + render_data.output_data = output_data + + def update_transforms(self) -> None: + """No-op for Isaac RTX - uses USD scene directly. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.update_transforms`.""" + pass + + def update_camera( + self, + render_data: IsaacRtxRenderData, + positions: torch.Tensor, + orientations: torch.Tensor, + intrinsics: torch.Tensor, + ): + """No-op for Replicator - uses USD camera prims directly. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.update_camera`.""" + pass + + def render(self, render_data: IsaacRtxRenderData): + """Extract data from annotators and write to output buffers. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.render`.""" + spec = render_data.spec + output_data = render_data.output_data + if output_data is None or spec is None: + return + + # Ensure the RTX renderer has been pumped so annotator buffers are fresh. + # This is a no-op if another camera instance already triggered the update + # for the current physics step, or if a visualizer already pumped it. + ensure_isaac_rtx_render_update() + + view_count = spec.view_count + cfg = spec.cfg + device = spec.device + + def tiling_grid_shape(): + cols = math.ceil(math.sqrt(view_count)) + rows = math.ceil(view_count / cols) + return (cols, rows) + + num_tiles_x = tiling_grid_shape()[0] + + # Extract the flattened image buffer + for data_type, annotator in render_data.annotators.items(): + # check whether returned data is a dict (used for segmentation) + output = annotator.get_data() + if isinstance(output, dict): + tiled_data_buffer = output["data"] + render_data.renderer_info[data_type] = output["info"] + else: + tiled_data_buffer = output + + # convert data buffer to warp array + if isinstance(tiled_data_buffer, np.ndarray): + # Let warp infer the dtype from numpy array instead of hardcoding uint8 + # Different annotators return different dtypes: RGB(uint8), depth(float32), segmentation(uint32) + tiled_data_buffer = wp.array(tiled_data_buffer, device=device) + else: + tiled_data_buffer = tiled_data_buffer.to(device=device) + + # process data for different segmentation types + # Note: Replicator returns raw buffers of dtype uint32 for segmentation types + # so we need to convert them to uint8 4 channel images for colorized types + if ( + (data_type == "semantic_segmentation" and self.cfg.colorize_semantic_segmentation) + or (data_type == "instance_segmentation_fast" and self.cfg.colorize_instance_segmentation) + or (data_type == "instance_id_segmentation_fast" and self.cfg.colorize_instance_id_segmentation) + ): + tiled_data_buffer = wp.array( + ptr=tiled_data_buffer.ptr, shape=(*tiled_data_buffer.shape, 4), dtype=wp.uint8, device=device + ) + + # For motion vectors, use specialized kernel that reads 4 channels but only writes 2 + # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/2003) + if data_type == "motion_vectors": + tiled_data_buffer = tiled_data_buffer[:, :, :2].contiguous() + + # For normals, we only require the first three channels of the tiled buffer + # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/4239) + if data_type == "normals": + tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() + if data_type in SIMPLE_SHADING_MODES: + tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() + + wp.launch( + kernel=reshape_tiled_image, + dim=(view_count, cfg.height, cfg.width), + inputs=[ + tiled_data_buffer.flatten(), + wp.from_torch(output_data[data_type]), + *list(output_data[data_type].shape[1:]), + num_tiles_x, + ], + device=device, + ) + + # alias rgb as first 3 channels of rgba + if data_type == "rgba" and "rgb" in cfg.data_types: + output_data["rgb"] = output_data["rgba"][..., :3] + + # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. + # However, the replicator depth clipping is applied w.r.t. to the image plane which may result + # in values larger than the clipping range in the output. We apply an additional clipping to + # ensure values are within the clipping range for all the annotators. + if data_type == "distance_to_camera": + output_data[data_type][output_data[data_type] > cfg.spawn.clipping_range[1]] = torch.inf + + # apply defined clipping behavior + if ( + data_type in ("distance_to_camera", "distance_to_image_plane", "depth") + and self.cfg.depth_clipping_behavior != "none" + ): + output_data[data_type][torch.isinf(output_data[data_type])] = ( + 0.0 if self.cfg.depth_clipping_behavior == "zero" else cfg.spawn.clipping_range[1] + ) + + def read_output(self, render_data: IsaacRtxRenderData, camera_data: CameraData) -> None: + """Populate per-output metadata collected during render(). Pixel data already written in render(). + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.read_output`.""" + for output_name, info in render_data.renderer_info.items(): + if info is not None: + camera_data.info[output_name] = info + + def cleanup(self, render_data: IsaacRtxRenderData | None): + """Detach annotators from render product. + See :meth:`~isaaclab.renderers.base_renderer.BaseRenderer.cleanup`.""" + if render_data: + for annotator in render_data.annotators.values(): + annotator.detach(render_data.render_product_paths) + render_data.spec = None diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_cfg.py b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_cfg.py new file mode 100644 index 000000000000..de5d433e867f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_cfg.py @@ -0,0 +1,92 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Isaac RTX (Replicator) Renderer.""" + +from typing import Literal + +from isaaclab.renderers.renderer_cfg import RendererCfg +from isaaclab.utils import configclass + + +@configclass +class IsaacRtxRendererCfg(RendererCfg): + """Configuration for Isaac RTX renderer using Omniverse Replicator. + + Holds the Replicator/RTX-specific knobs (semantic segmentation, instance + segmentation, semantic filtering, depth clipping) used by the RTX rendering + pipeline. + """ + + renderer_type: str = "isaac_rtx" + """Type identifier for Isaac RTX renderer.""" + + semantic_filter: str | list[str] = "*:*" + """A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``. + + If a string, it should be a disjunctive normal form of (semantic type, labels). For examples: + + * ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``: + All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC". + Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE". + * ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA" + + If a list of strings, each string should be a semantic type. The segmentation for prims with + semantics of the specified types will be retrieved. For example, if the list is ["class"], only + the segmentation for prims with semantics of type "class" will be retrieved. + + .. seealso:: + + For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. + + .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering + """ + + colorize_semantic_segmentation: bool = True + """Whether to colorize the semantic segmentation images. Defaults to True. + + If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + colorize_instance_id_segmentation: bool = True + """Whether to colorize the instance ID segmentation images. Defaults to True. + + If True, instance id segmentation is converted to an image where instance IDs are mapped to colors. + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + colorize_instance_segmentation: bool = True + """Whether to colorize the instance segmentation images. Defaults to True. + + If True, instance segmentation is converted to an image where instance IDs are mapped to colors. + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + semantic_segmentation_mapping: dict = {} + """Dictionary mapping semantics to specific colours + + Eg. + + .. code-block:: python + + { + "class:cube_1": (255, 36, 66, 255), + "class:cube_2": (255, 184, 48, 255), + "class:cube_3": (55, 255, 139, 255), + "class:table": (255, 237, 218, 255), + "class:ground": (100, 100, 100, 255), + "class:robot": (61, 178, 255, 255), + } + + """ + + depth_clipping_behavior: Literal["max", "zero", "none"] = "none" + """Clipping behavior for the camera for values exceed the maximum value. Defaults to "none". + + - ``"max"``: Values are clipped to the maximum value. + - ``"zero"``: Values are clipped to zero. + - ``"none"``: No clipping is applied. Values will be returned as ``inf``. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_utils.py b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_utils.py new file mode 100644 index 000000000000..bdc5596b717f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer_utils.py @@ -0,0 +1,181 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for Isaac RTX renderer integration.""" + +from __future__ import annotations + +import logging +import time +from typing import Any + +import omni.usd + +import isaaclab.sim as sim_utils + +logger = logging.getLogger(__name__) + +# Module-level dedup stamp: tracks the last (sim instance, physics step, render generation) at +# which Kit's ``app.update()`` was pumped. Keyed on ``id(sim)`` so that a +# new ``SimulationContext`` (e.g. in a new test) automatically invalidates +# any stale stamp from a previous instance. +_last_render_update_key: tuple[int, int, int] = (0, -1, -1) + +_STREAMING_WAIT_TIMEOUT_S: float = 30.0 + + +def _get_stage_streaming_busy() -> bool: + """Synchronously query whether RTX stage streaming is still in progress.""" + import omni.usd + + usd_context = omni.usd.get_context() + if usd_context is None: + return False + return usd_context.get_stage_streaming_status() + + +def _wait_for_streaming_complete() -> None: + """Pump ``app.update()`` until RTX streaming reports idle or timeout. + + After streaming finishes a final ``app.update()`` is issued so that the + frame captured by downstream annotators reflects the newly loaded textures. + """ + import omni.kit.app + + start = time.monotonic() + while _get_stage_streaming_busy() and (time.monotonic() - start) < _STREAMING_WAIT_TIMEOUT_S: + omni.kit.app.get_app().update() + + elapsed = time.monotonic() - start + if _get_stage_streaming_busy(): + logger.warning( + "RTX streaming did not complete within %.1f s – proceeding anyway.", + _STREAMING_WAIT_TIMEOUT_S, + ) + elif elapsed > 0.01: + logger.info("RTX streaming completed in %.2f s.", elapsed) + + omni.kit.app.get_app().update() + + +def ensure_rtx_hydra_engine_attached() -> None: + """Attach the RTX Hydra engine to the USD context if not already attached. + + Headless app files such as ``isaaclab.python.headless.rendering.kit`` intentionally + omit ``omni.kit.viewport.window`` to avoid pulling in the ``omni.ui``-based viewport + stack. However, ``ViewportWindow`` is normally responsible for calling + :func:`omni.usd.create_hydra_engine` at startup; without it the RTX Hydra engine is + never bound to the :class:`omni.usd.UsdContext`, and the first Replicator tiled + render product runs against a cold pipeline. On some GPUs this manifests as + ``cudaErrorIllegalAddress`` inside ``omni.rtx`` (CUDA ``freeAsync``) and/or all + tiles rendering as black. + + This helper replicates only the activation step ``ViewportWindow`` performs, + without creating a UI or a window. It is idempotent: when the engine is already + attached (e.g. GUI runs that do load ``omni.kit.viewport.window``, or a previous + call already attached it) the function is a no-op. Failures are logged as errors + and do not propagate, so non-RTX contexts (e.g. unit tests importing this module + without a running Kit app) continue to work. + """ + try: + ctx = omni.usd.get_context() + if ctx is None: + return + if "rtx" in ctx.get_attached_hydra_engine_names(): + return + omni.usd.create_hydra_engine("rtx", ctx) + except Exception as e: # noqa: BLE001 + logger.error("RTX Hydra engine attach failed: %s", e) + + +def ensure_isaac_rtx_render_update() -> None: + """Ensure the Isaac RTX renderer has been pumped for the current sim step. + + This keeps the Kit-specific ``app.update()`` logic inside the renderers + package rather than in the backend-agnostic ``SimulationContext``. + + Safe to call from multiple ``Camera`` instances per step — + only the first call triggers ``app.update()``. Subsequent calls are no-ops + because the module-level ``_last_render_update_key`` already matches the + current ``(id(sim), step_count, render_generation)`` tuple. + + The key is a ``(sim_instance_id, step_count, render_generation)`` tuple so that: + - creating a new ``SimulationContext`` invalidates stale stamps, and + - render/reset transitions that do not advance physics step count still force a fresh update. + + After the initial ``app.update()`` the streaming subsystem is queried + synchronously via ``UsdContext.get_stage_streaming_status()``. If textures + are still loading, additional ``app.update()`` calls are pumped until the + subsystem reports idle (or a timeout is reached). + + No-op conditions: + * Already called this step (dedup across camera instances). + * A visualizer already pumps ``app.update()`` (e.g. KitVisualizer). + * Rendering is not active. + """ + global _last_render_update_key + + sim = sim_utils.SimulationContext.instance() + if sim is None: + return + + render_generation = getattr(sim, "render_generation", getattr(sim, "_render_generation", 0)) + key = (id(sim), sim._physics_step_count, render_generation) + if _last_render_update_key == key: + return # Already pumped this step (by another camera or a visualizer) + + # If a visualizer already pumps the Kit app loop, mark as done and skip. + # However, on the very first call for a new SimulationContext, the visualizer + # has not had a chance to pump yet (sim.render() was never called), so we + # must perform the initial app.update() ourselves to populate annotator buffers. + first_call_for_sim = _last_render_update_key[0] != id(sim) + if not first_call_for_sim and any(viz.pumps_app_update() for viz in sim.visualizers): + _last_render_update_key = key + return + + if not sim.is_rendering: + return + + # Sync physics results → Fabric so RTX sees updated positions. + # physics_manager.step() only runs simulate()/fetch_results() and does NOT + # call _update_fabric(), so without this the render would lag one frame behind. + sim.physics_manager.forward() + + import omni.kit.app + + sim.set_setting("/app/player/playSimulations", False) + omni.kit.app.get_app().update() + + if _get_stage_streaming_busy(): + _wait_for_streaming_complete() + + sim.set_setting("/app/player/playSimulations", True) + + _last_render_update_key = key + + +def pump_kit_app_for_headless_video_render_if_needed(sim: Any) -> None: + """Pump Kit app-loop for headless rgb-array rendering when needed. + + Isaac Sim / RTX specific; kept out of backend-agnostic :class:`~isaaclab.sim.SimulationContext`. + """ + if not bool(sim.get_setting("/isaaclab/video/enabled")): + return + + from isaaclab.utils.version import has_kit + + if not has_kit(): + return + if any(viz.pumps_app_update() for viz in sim.visualizers): + return + try: + ensure_isaac_rtx_render_update() + except (ImportError, AttributeError, ModuleNotFoundError) as exc: + logger.debug("[isaac_rtx] Skipping Kit app-loop pump in render() (non-Kit env): %s", exc) + except Exception as exc: + logger.warning( + "[isaac_rtx] Kit app-loop pump failed in render() — video frames may be stale or black: %s", + exc, + ) diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/kit_viewport_utils.py b/source/isaaclab_physx/isaaclab_physx/renderers/kit_viewport_utils.py new file mode 100644 index 000000000000..f371aac7c603 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/renderers/kit_viewport_utils.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit / Omniverse viewport helpers (Isaac Sim specific). + +These live in :mod:`isaaclab_physx` so :class:`~isaaclab.sim.SimulationContext` stays +backend-agnostic. +""" + +from __future__ import annotations + +import logging + +logger = logging.getLogger(__name__) + + +def set_kit_renderer_camera_view( + eye: tuple[float, float, float] | list[float], + target: tuple[float, float, float] | list[float], + camera_prim_path: str = "/OmniverseKit_Persp", +) -> None: + """Set camera view for the renderer/viewport camera only. + + This does not broadcast to visualizers. + """ + try: + from isaacsim.core.rendering_manager import ViewportManager + + ViewportManager.set_camera_view( + str(camera_prim_path), + eye=list(eye), + target=list(target), + ) + except (ImportError, ModuleNotFoundError) as exc: + logger.debug("[kit_viewport] Renderer camera update skipped (no Kit): %s", exc) + except Exception as exc: + logger.warning("[kit_viewport] Renderer camera update failed: %s", exc) diff --git a/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.py b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.py new file mode 100644 index 000000000000..0a3fe4d79fb4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX scene data provider backends.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.pyi new file mode 100644 index 000000000000..32c6f9c07335 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "PhysxSceneDataProvider", +] + +from .physx_scene_data_provider import PhysxSceneDataProvider diff --git a/source/isaaclab_physx/isaaclab_physx/scene_data_providers/physx_scene_data_provider.py b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/physx_scene_data_provider.py new file mode 100644 index 000000000000..9a88660da498 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/scene_data_providers/physx_scene_data_provider.py @@ -0,0 +1,727 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX scene data provider for Omni/PhysX backend.""" + +from __future__ import annotations + +import logging +import re +import time +from collections import deque +from typing import Any + +import torch +import warp as wp + +from pxr import UsdGeom, UsdPhysics + +from isaaclab.physics.base_scene_data_provider import BaseSceneDataProvider +from isaaclab.sim.utils.newton_model_utils import replace_newton_shape_colors + +logger = logging.getLogger(__name__) + + +@wp.kernel(enable_backward=False) +def _set_body_q_kernel( + positions: wp.array(dtype=wp.vec3), + orientations: wp.array(dtype=wp.quatf), + body_q: wp.array(dtype=wp.transformf), +): + """Write pose arrays into Newton ``body_q`` in one-to-one index order.""" + i = wp.tid() + body_q[i] = wp.transformf(positions[i], orientations[i]) + + +class PhysxSceneDataProvider(BaseSceneDataProvider): + """Scene data provider for Omni PhysX backend. + + Supports: + - body poses via PhysX tensor views, with FrameView fallback + - camera poses & intrinsics + - USD stage handles + - Newton model/state (built locally from the scene's per-group :class:`ClonePlan` map + when required) + """ + + # ---- Environment discovery / metadata ------------------------------------------------- + + def get_num_envs(self) -> int: + """Return env count from stage discovery, cached once available.""" + if self._num_envs is not None and self._num_envs > 0: + return self._num_envs + discovered_num_envs = self._determine_num_envs_in_scene() + if discovered_num_envs > 0: + self._num_envs = discovered_num_envs + return discovered_num_envs + return 0 + + def _determine_num_envs_in_scene(self) -> int: + """Infer env count from /World/envs/env_ prims.""" + if self._stage is None: + return 0 + + max_env_id = -1 + env_name_re = re.compile(r"^env_(\d+)$") + + envs_root = self._stage.GetPrimAtPath("/World/envs") + if envs_root.IsValid(): + for child in envs_root.GetChildren(): + match = env_name_re.match(child.GetName()) + if match: + max_env_id = max(max_env_id, int(match.group(1))) + return max_env_id + 1 if max_env_id >= 0 else 0 + + def __init__(self, stage, simulation_context) -> None: + """Initialize the PhysX scene data provider. + + Args: + stage: USD stage handle. + simulation_context: Active simulation context. + """ + from isaaclab_physx.physics import PhysxManager as SimulationManager + + self._simulation_context = simulation_context + self._stage = stage + self._physics_sim_view = SimulationManager.get_physics_sim_view() + self._rigid_body_view = None + self._xform_views: dict[str, Any] = {} + self._xform_view_failures: set[str] = set() + self._view_body_index_map: dict[str, list[int]] = {} + self._warned_once: set[str] = set() + + # Single source of truth: discovered from stage and cached once available. + self._num_envs: int | None = None + + # Determine if newton model sync is required for selected renderers and visualizers + requirements = self._simulation_context.get_scene_data_requirements() + self._needs_newton_sync = bool(requirements.requires_newton_model) + + # Fixed metadata for visualizers. get_metadata() returns this plus num_envs so visualizers + # can .get("num_envs", 0), .get("physics_backend", ...) etc. without the provider exposing many methods. + self._metadata = {"physics_backend": "omni"} + if self._stage is None: + raise RuntimeError( + "[PhysxSceneDataProvider] USD stage is None and not available from simulation_context. " + "Ensure the simulation context has a valid stage when using OV/Newton/Rerun/Viser visualizers." + ) + self._num_envs_at_last_newton_build: int | None = None # for _refresh_newton_model_if_needed + + self._device = getattr(self._simulation_context, "device", "cuda:0") + self._newton_model = None + self._newton_state = None + self._rigid_body_paths: list[str] = [] + # Paths used to create PhysX views. May include articulation roots for coverage. + self._rigid_body_view_paths: list[str] = [] + + # Reused pose buffers (MR perf): avoid per-call allocations in _read_poses_from_best_source. + self._pose_buf_num_bodies = 0 + self._positions_buf = None + self._orientations_buf = None + self._covered_buf = None + self._xform_mask_buf = None + # View index order as device tensors for vectorized scatter in _apply_view_poses. + self._view_order_tensors: dict[str, Any] = {} + # Last load outcome (tests / debug): "built" | "missing" | "error". + self._last_newton_model_build_source: str | None = None + self._last_newton_model_build_elapsed_ms: float | None = None + + if self._needs_newton_sync: + self._build_newton_model_from_clone_plans() + self._setup_rigid_body_view() + + # ---- Newton model + PhysX view setup -------------------------------------------------- + + def _wildcard_env_paths(self, paths: list[str]) -> list[str]: + """Convert /World/envs/env_0 paths to a wildcard pattern when possible.""" + wildcard_paths = [ + path.replace("/World/envs/env_0", "/World/envs/env_*") for path in paths if "/World/envs/env_0" in path + ] + return list(dict.fromkeys(wildcard_paths)) if wildcard_paths else paths + + def _refresh_newton_model_if_needed(self) -> None: + """Reload Newton model/state and PhysX views when the discovered env count changes.""" + num_envs = self.get_num_envs() + if num_envs <= 0: + return + + needs_rebuild = self._newton_model is None or self._newton_state is None + needs_rebuild = needs_rebuild or (self._num_envs_at_last_newton_build != num_envs) + if needs_rebuild: + self._build_newton_model_from_clone_plans() + self._setup_rigid_body_view() + + def _build_newton_model_from_clone_plans(self) -> None: + """Build Newton model and state from the scene's per-group :class:`ClonePlan` map. + + Reads plans :meth:`InteractiveScene.clone_environments` publishes on + :class:`SimulationContext`, derives the flat ``(sources, destinations, mask)`` shape + :func:`isaaclab_newton.cloner.newton_visualizer_prebuild` expects, and caches the + resulting model/state. Per-prototype source paths recover as + ``dest_template.format()``; per-env positions are + read off ``xformOp:translate`` on the env-level prims derived from the same template. + Pre-condition violations raise :class:`RuntimeError` (logged as ``"missing"``); + ``isaaclab_newton`` being absent (optional dep) maps to ``"missing"`` via the + import's own exception types; unexpected failures fall through to ``"error"``. + """ + start_t = time.perf_counter() + source = "missing" + try: + plans = self._simulation_context.get_clone_plans() + if not plans: + raise RuntimeError("No clone plans on simulation context.") + from isaaclab_newton.cloner.newton_replicate import newton_visualizer_prebuild + + # Flatten per-group plans into one (sources, destinations, mask) bundle. Source + # paths recover via ``dest_template.format()``; + # all-False rows are dropped (possible when ``num_prototypes > num_envs``). + plan_list = list(plans.values()) + num_envs = plan_list[0].clone_mask.size(1) + if any(p.clone_mask.size(1) != num_envs for p in plan_list): + raise RuntimeError(f"Clone plans disagree on num_envs: {[p.clone_mask.size(1) for p in plan_list]}") + sources, destinations, mask_rows = [], [], [] + for p in plan_list: + for i in range(p.clone_mask.size(0)): + nz = p.clone_mask[i].nonzero(as_tuple=False) + if nz.numel() == 0: + continue + sources.append(p.dest_template.format(int(nz[0].item()))) + destinations.append(p.dest_template) + mask_rows.append(p.clone_mask[i : i + 1]) + if not sources: + raise RuntimeError("All clone-plan prototype rows are empty.") + mask = torch.cat(mask_rows, dim=0) + + # Env-level path template = dest_template up to the first ``{}``. Per-env world + # positions: xformOp:translate read off each env prim; missing prims fall through. + env_path_template = plan_list[0].dest_template.split("{}")[0] + "{}" + positions = torch.zeros((num_envs, 3), dtype=torch.float32, device=self._device) + for i in range(num_envs): + prim = self._stage.GetPrimAtPath(env_path_template.format(i)) + if prim.IsValid() and (v := prim.GetAttribute("xformOp:translate").Get()) is not None: + positions[i] = torch.tensor([v[0], v[1], v[2]], device=self._device) + + model, state = newton_visualizer_prebuild( + stage=self._stage, + sources=sources, + destinations=destinations, + env_ids=torch.arange(num_envs, dtype=torch.long, device=mask.device), + mapping=mask, + positions=positions, + device=self._device, + up_axis=UsdGeom.GetStageUpAxis(self._stage), + ) + if model is None or state is None: + raise RuntimeError("newton_visualizer_prebuild returned None.") + + self._newton_model, self._newton_state = model, state + replace_newton_shape_colors(self._newton_model, self._stage) + # Newton renamed ``*_key`` → ``*_label`` mid-development; fall back so we work either way. + # ``dict.fromkeys`` preserves order while deduping — articulation roots can overlap rigid bodies. + label_or_key = lambda kind: list(getattr(model, f"{kind}_label", None) or getattr(model, f"{kind}_key", [])) # noqa: E731 + self._rigid_body_paths = label_or_key("body") + self._rigid_body_view_paths = list(dict.fromkeys(self._rigid_body_paths + label_or_key("articulation"))) + # Reset cached views/buffers; rebuilt lazily by ``_setup_rigid_body_view``. + self._xform_views.clear() + self._view_order_tensors.clear() + self._view_body_index_map = {} + self._pose_buf_num_bodies = 0 + self._positions_buf = self._orientations_buf = self._covered_buf = self._xform_mask_buf = None + self._num_envs_at_last_newton_build = num_envs + source = "built" + except (ImportError, ModuleNotFoundError) as exc: + logger.warning("[PhysxSceneDataProvider] isaaclab_newton not available: %s", exc) + self._clear_newton_model_state() + except RuntimeError as exc: + logger.error("[PhysxSceneDataProvider] %s", exc) + self._clear_newton_model_state() + except Exception as exc: + source = "error" + logger.error("[PhysxSceneDataProvider] Failed to build Newton model from clone plans: %s", exc) + self._clear_newton_model_state() + finally: + self._last_newton_model_build_elapsed_ms = (time.perf_counter() - start_t) * 1000.0 + self._last_newton_model_build_source = source + logger.debug( + "[PhysxSceneDataProvider] Newton model build source=%s elapsed_ms=%.2f", + source, + self._last_newton_model_build_elapsed_ms, + ) + + def _clear_newton_model_state(self) -> None: + """Clear cached Newton model, state, and rigid-body path lists.""" + self._newton_model = None + self._newton_state = None + self._rigid_body_paths = [] + self._rigid_body_view_paths = [] + self._num_envs_at_last_newton_build = None + + def _setup_rigid_body_view(self) -> None: + """Create PhysX RigidBodyView from Newton's body paths. + + Uses body paths extracted from Newton model to create PhysX tensor API view + for reading rigid body transforms. + """ + if self._physics_sim_view is None: + return + paths = self._rigid_body_view_paths or self._rigid_body_paths + if not paths: + return + # Defensive: only pass true rigid-body prims into PhysX RigidBodyView. + # Some prebuilt artifacts carry articulation root paths for coverage, but + # those roots are not guaranteed to be rigid-body prims and can trip native + # view creation paths on some tasks. + rigid_paths: list[str] = [] + dropped_non_rigid = 0 + for path in paths: + prim = self._stage.GetPrimAtPath(path) if self._stage is not None else None + if prim and prim.IsValid() and prim.HasAPI(UsdPhysics.RigidBodyAPI): + rigid_paths.append(path) + else: + dropped_non_rigid += 1 + if not rigid_paths: + self._warn_once( + "rigid-view-no-rigid-paths", + "[PhysxSceneDataProvider] No rigid-body prim paths available for RigidBodyView creation.", + level=logging.WARNING, + ) + return + try: + paths_to_use = self._wildcard_env_paths(rigid_paths) + self._rigid_body_view = self._physics_sim_view.create_rigid_body_view(paths_to_use) + self._cache_view_index_map(self._rigid_body_view, "rigid_body_view") + except Exception as exc: + logger.warning(f"[PhysxSceneDataProvider] Failed to create RigidBodyView: {exc}") + self._rigid_body_view = None + + # ---- Pose/velocity read pipeline ------------------------------------------------------ + + def _warn_once(self, key: str, message: str, *args, level=logging.WARNING) -> None: + """Log a warning only once for a given key.""" + if key in self._warned_once: + return + self._warned_once.add(key) + logger.log(level, message, *args) + + def _get_view_world_poses(self, view: Any): + """Read world poses from a PhysX view.""" + if view is None: + return None, None + + result = view.get_transforms() + if isinstance(result, tuple) and len(result) == 2: + return result + if hasattr(result, "shape"): + return result[:, :3], result[:, 3:7] + + import warp as wp + + result_t = wp.to_torch(result) + return result_t[:, :3], result_t[:, 3:7] + + def _cache_view_index_map(self, view, key: str) -> None: + """Map PhysX view indices to Newton body_key ordering.""" + prim_paths = getattr(view, "prim_paths", None) + if not prim_paths or not self._rigid_body_paths: + return + + # Build map: (env_id, relative_path) -> view_index to align view order. + view_map: dict[tuple[int | None, str], int] = {} + for view_idx, path in enumerate(prim_paths): + env_id, rel = self._split_env_relative_path(path) + view_map[(env_id, rel)] = view_idx + + # Build reordering: newton_body_index -> view_index so we can scatter + # PhysX view outputs into Newton body ordering. + order: list[int | None] = [None] * len(self._rigid_body_paths) + for body_idx, path in enumerate(self._rigid_body_paths): + env_id, rel = self._split_env_relative_path(path) + view_idx = view_map.get((env_id, rel)) + if view_idx is None: + view_idx = view_map.get((None, rel)) # Try without env_id + order[body_idx] = view_idx + + if all(idx is not None for idx in order): + self._view_body_index_map[key] = order # type: ignore[arg-type] + # Cache as device tensor for vectorized scatter in _apply_view_poses. + import torch + + self._view_order_tensors[key] = torch.tensor(order, dtype=torch.long, device=self._device) + + def _split_env_relative_path(self, path: str) -> tuple[int | None, str]: + """Extract (env_id, relative_path) from a prim path.""" + match = re.search(r"/World/envs/env_(\d+)(/.*)", path) + return (int(match.group(1)), match.group(2)) if match else (None, path) + + def _get_view_velocities(self, view): + """Read linear/angular velocities from a PhysX view.""" + if view is None: + return None, None + + try: + # Canonical API for PhysX tensor views. + result = view.get_velocities() + if isinstance(result, tuple) and len(result) == 2: + return result + if hasattr(result, "shape") and result.shape[-1] == 6: + return result[..., :3], result[..., 3:6] + except (AttributeError, RuntimeError, TypeError) as exc: + logger.debug("[PhysxSceneDataProvider] get_velocities() unavailable/failed for %s: %s", type(view), exc) + return None, None + + def _apply_view_poses(self, view: Any, view_key: str, positions: Any, orientations: Any, covered: Any) -> int: + """Fill poses from a PhysX view for bodies not yet covered.""" + import torch + import warp as wp + + if view is None: + return 0 + + pos, quat = self._get_view_world_poses(view) + if pos is None or quat is None: + return 0 + + order = self._view_body_index_map.get(view_key) + if not order: + return 0 + + # Normalize returned arrays to torch tensors across backends (torch/warp/other). + if not isinstance(pos, torch.Tensor): + try: + pos = wp.to_torch(pos) + except Exception: + pos = torch.as_tensor(pos) + if not isinstance(quat, torch.Tensor): + try: + quat = wp.to_torch(quat) + except Exception: + quat = torch.as_tensor(quat) + + pos = pos.to(device=self._device, dtype=torch.float32) + quat = quat.to(device=self._device, dtype=torch.float32) + + # Vectorized scatter when we have a cached order tensor (view fully covers bodies). + order_t = self._view_order_tensors.get(view_key) + if order_t is not None: + uncovered_mask = ~covered + if uncovered_mask.any(): + newton_indices = uncovered_mask.nonzero(as_tuple=True)[0] + view_indices = order_t[newton_indices] + positions[newton_indices] = pos[view_indices] + orientations[newton_indices] = quat[view_indices] + covered[newton_indices] = True + return newton_indices.numel() + return 0 + + # Per-index path when the view does not fully cover bodies or the order cache is missing. + count = 0 + for newton_idx, view_idx in enumerate(order): + if view_idx is not None and not covered[newton_idx]: + positions[newton_idx] = pos[view_idx] + orientations[newton_idx] = quat[view_idx] + covered[newton_idx] = True + count += 1 + + return count + + def _apply_xform_poses(self, positions: Any, orientations: Any, covered: Any, xform_mask: Any) -> int: + """Fill remaining body poses using ``XformPrimView`` for prims not covered by the rigid-body view.""" + import torch + + from isaaclab.sim.views import FrameView + + uncovered = torch.where(~covered)[0].cpu().tolist() + if not uncovered: + return 0 + + # Query each uncovered prim path directly from USD. + count = 0 + for idx in uncovered: + path = self._rigid_body_paths[idx] + try: + if path not in self._xform_views: + self._xform_views[path] = FrameView( + path, device=self._device, stage=self._stage, validate_xform_ops=False + ) + + pos_w, quat_w = self._xform_views[path].get_world_poses() + if pos_w is not None and quat_w is not None: + positions[idx] = pos_w.torch.to(device=self._device, dtype=torch.float32).squeeze() + orientations[idx] = quat_w.torch.to(device=self._device, dtype=torch.float32).squeeze() + covered[idx] = True + xform_mask[idx] = True + count += 1 + except Exception: + self._xform_view_failures.add(path) + continue + + if len(self._xform_view_failures) > 0: + self._warn_once( + "xform-fallback-failures", + "[PhysxSceneDataProvider] XformPrimView reads failed for %d body paths.", + len(self._xform_view_failures), + level=logging.DEBUG, + ) + return count + + def _convert_xform_quats(self, orientations: Any, xform_mask: Any) -> Any: + """Return quaternions in xyzw convention. + + PhysX views, FrameView, and resolve_prim_pose() in Isaac Lab all use xyzw. + Keeping this helper as a no-op preserves a single conversion point if conventions + ever diverge again. + """ + return orientations + + def _read_poses_from_best_source(self) -> tuple[Any, Any, str, Any] | None: + """Merge pose data from rigid-body and xform views.""" + if self._newton_state is None or not self._rigid_body_paths: + return None + + import torch + + num_bodies = len(self._rigid_body_paths) + if num_bodies != self._newton_state.body_q.shape[0]: + self._warn_once( + "body-count-mismatch", + "[PhysxSceneDataProvider] Body count mismatch: body_key=%d, state=%d", + num_bodies, + int(self._newton_state.body_q.shape[0]), + ) + return None + + # Reuse buffers when size unchanged to avoid per-call allocations (MR perf). + if num_bodies != self._pose_buf_num_bodies or self._positions_buf is None: + self._pose_buf_num_bodies = num_bodies + self._positions_buf = torch.zeros((num_bodies, 3), dtype=torch.float32, device=self._device) + self._orientations_buf = torch.zeros((num_bodies, 4), dtype=torch.float32, device=self._device) + self._covered_buf = torch.zeros(num_bodies, dtype=torch.bool, device=self._device) + self._xform_mask_buf = torch.zeros(num_bodies, dtype=torch.bool, device=self._device) + else: + self._covered_buf.zero_() + self._xform_mask_buf.zero_() + + positions = self._positions_buf + orientations = self._orientations_buf + covered = self._covered_buf + xform_mask = self._xform_mask_buf + + rigid_count = self._apply_view_poses(self._rigid_body_view, "rigid_body_view", positions, orientations, covered) + xform_count = self._apply_xform_poses(positions, orientations, covered, xform_mask) + if rigid_count == 0: + self._warn_once( + "rigid-source-unused", + ( + "[PhysxSceneDataProvider] RigidBodyView returned no transforms; " + "filled from XformPrimView where needed." + ), + level=logging.DEBUG, + ) + + if not covered.all(): + self._warn_once( + "pose-read-incomplete", + "[PhysxSceneDataProvider] Failed to read %d/%d body poses.", + int((~covered).sum().item()), + num_bodies, + ) + return None + + active = sum([rigid_count > 0, xform_count > 0]) + source = ( + "merged" if active > 1 else ("rigid_body_view" if rigid_count else "xform_view" if xform_count else "none") + ) + return positions, orientations, source, xform_mask + + def _get_set_body_q_kernel(self): + """Return module-level Warp kernel for writing transforms to Newton state.""" + return _set_body_q_kernel + + # ---- Newton state sync ---------------------------------------------------------------- + + def update(self) -> None: + """Sync PhysX transforms into the full Newton state (one kernel launch).""" + if not self._needs_newton_sync or self._newton_state is None: + return + + try: + # Re-check env count in case stage population completed after provider construction. + self._refresh_newton_model_if_needed() + + result = self._read_poses_from_best_source() + if result is None: + return + + positions, orientations, _, xform_mask = result + orientations_xyzw = self._convert_xform_quats(orientations.reshape(-1, 4), xform_mask) + + positions_wp = wp.from_torch(positions.reshape(-1, 3), dtype=wp.vec3) + orientations_wp = wp.from_torch(orientations_xyzw, dtype=wp.quatf) + + set_body_q = self._get_set_body_q_kernel() + if set_body_q is None or positions_wp.shape[0] != self._newton_state.body_q.shape[0]: + return + wp.launch( + set_body_q, + dim=positions_wp.shape[0], + inputs=[positions_wp, orientations_wp, self._newton_state.body_q], + device=self._device, + ) + except Exception as exc: + self._warn_once( + "newton-sync-update-failed", + "[PhysxSceneDataProvider] Failed to sync transforms to Newton state: %s", + exc, + ) + + def get_newton_model(self) -> Any | None: + """Return Newton model when sync is enabled. + + Returns: + Newton model object, or ``None`` when unavailable. + """ + return self._newton_model if self._needs_newton_sync else None + + def get_newton_state(self) -> Any | None: + """Return full Newton state when sync is enabled.""" + if not self._needs_newton_sync or self._newton_state is None: + return None + return self._newton_state + + # ---- Public provider API --------------------------------------------------------------- + + def get_usd_stage(self) -> Any: + """Return USD stage handle. + + Returns: + USD stage object. + """ + if self._stage is not None: + return self._stage + return getattr(self._simulation_context, "stage", None) + + def get_camera_transforms(self) -> dict[str, Any] | None: + """Return per-camera, per-environment transforms. + + Returns: + Dictionary containing camera order, positions, orientations, and environment count, + or ``None`` when unavailable. + """ + if self._stage is None: + return None + + import isaaclab.sim as isaaclab_sim + + env_pattern = re.compile(r"(?P/World/envs/env_)(?P\d+)(?P/.*)") + shared_paths: list[str] = [] + instances: dict[str, list[tuple[int, str]]] = {} + num_envs = -1 + + # Breadth-first walk so we discover camera prims across the full stage. + stage_prims = deque([self._stage.GetPseudoRoot()]) + while stage_prims: + prim = stage_prims.popleft() + prim_path = prim.GetPath().pathString + + world_id = 0 + template_path = prim_path + if match := env_pattern.match(prim_path): + # Normalize per-env path to a shared template key (env_%d/...) so + # visualizers can query one camera path for all env instances. + world_id = int(match.group("id")) + template_path = match.group("root") + "%d" + match.group("path") + if world_id > num_envs: + num_envs = world_id + + imageable = UsdGeom.Imageable(prim) + if imageable and imageable.ComputeVisibility() == UsdGeom.Tokens.invisible: + continue + + if prim.IsA(UsdGeom.Camera): + if template_path not in instances: + instances[template_path] = [] + instances[template_path].append((world_id, prim_path)) + if template_path not in shared_paths: + shared_paths.append(template_path) + + if hasattr(UsdGeom, "TraverseInstanceProxies"): + child_prims = prim.GetFilteredChildren(UsdGeom.TraverseInstanceProxies()) + else: + child_prims = prim.GetChildren() + if child_prims: + stage_prims.extend(child_prims) + + num_envs += 1 + positions: list[list[list[float] | None]] = [] + orientations: list[list[list[float] | None]] = [] + + for template_path in shared_paths: + per_world_pos: list[list[float] | None] = [None] * num_envs + per_world_ori: list[list[float] | None] = [None] * num_envs + for world_id, prim_path in instances.get(template_path, []): + if world_id < 0 or world_id >= num_envs: + continue + prim = self._stage.GetPrimAtPath(prim_path) + if not prim.IsValid(): + continue + pos, ori = isaaclab_sim.resolve_prim_pose(prim) + per_world_pos[world_id] = [float(pos[0]), float(pos[1]), float(pos[2])] + per_world_ori[world_id] = [float(ori[0]), float(ori[1]), float(ori[2]), float(ori[3])] + positions.append(per_world_pos) + orientations.append(per_world_ori) + + return {"order": shared_paths, "positions": positions, "orientations": orientations, "num_envs": num_envs} + + def get_metadata(self) -> dict[str, Any]: + """Return provider metadata for visualizers and renderers. + + Returns: + Metadata dictionary with backend and environment count. + """ + out = dict(self._metadata) + out["num_envs"] = self.get_num_envs() + return out + + def get_transforms(self) -> dict[str, Any] | None: + """Return merged body transforms from available PhysX views. + + Returns: + Dictionary with positions/orientations, or ``None`` when unavailable. + """ + try: + result = self._read_poses_from_best_source() + if result is None: + return None + + positions, orientations, _, xform_mask = result + orientations_xyzw = self._convert_xform_quats(orientations, xform_mask) + return {"positions": positions, "orientations": orientations_xyzw} + except Exception as exc: + self._warn_once( + "get-transforms-failed", + "[PhysxSceneDataProvider] get_transforms() failed: %s", + exc, + ) + return None + + def get_velocities(self) -> dict[str, Any] | None: + """Return linear/angular velocities from available PhysX views. + + Returns: + Dictionary with linear/angular velocities, or ``None`` when unavailable. + """ + for source, view in (("rigid_body_view", self._rigid_body_view),): + linear, angular = self._get_view_velocities(view) + if linear is not None and angular is not None: + return {"linear": linear, "angular": angular, "source": source} + return None + + def get_contacts(self) -> dict[str, Any] | None: + """Return contact data for PhysX provider. + + Returns: + ``None`` because contacts are not currently implemented in this provider. + """ + return None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py new file mode 100644 index 000000000000..676c5d23d67c --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing PhysX-specific sensor implementations.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.pyi new file mode 100644 index 000000000000..e536b281952b --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ContactSensor", + "ContactSensorData", + "ContactSensorCfg", + "FrameTransformer", + "FrameTransformerData", + "Imu", + "ImuData", + "JointWrenchSensor", + "JointWrenchSensorData", + "Pva", + "PvaData", +] + +from .contact_sensor import ContactSensor, ContactSensorData, ContactSensorCfg +from .frame_transformer import FrameTransformer, FrameTransformerData +from .imu import Imu, ImuData +from .joint_wrench import JointWrenchSensor, JointWrenchSensorData +from .pva import Pva, PvaData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py new file mode 100644 index 000000000000..f77a181913f1 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX rigid contact sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.pyi new file mode 100644 index 000000000000..668ecdcec870 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ContactSensor", + "ContactSensorData", + "ContactSensorCfg", +] + +from .contact_sensor import ContactSensor +from .contact_sensor_data import ContactSensorData +from .contact_sensor_cfg import ContactSensorCfg diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py new file mode 100644 index 000000000000..c38a0fffe281 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -0,0 +1,506 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +import omni.physics.tensors.api as physx + +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.contact_sensor import BaseContactSensor +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .contact_sensor_data import ContactSensorData +from .kernels import ( + compute_first_transition_kernel, + reset_contact_sensor_kernel, + split_flat_pose_to_pos_quat, + unpack_contact_buffer_data, + update_net_forces_kernel, +) + +if TYPE_CHECKING: + from isaaclab.sensors.contact_sensor import ContactSensorCfg + + +class ContactSensor(BaseContactSensor): + """A PhysX contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + Please check the documentation on `RigidContact`_ for more details. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + + .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html + .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.experimental.prims/docs/index.html + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + # Enable contact processing + get_settings_manager().set_bool("/physics/disableContactProcessing", False) + + # Create empty variables for storing output data + self._data: ContactSensorData = ContactSensorData() + # initialize self._body_physx_view for running in extension mode + self._body_physx_view = None + # Warp env index array (set in _initialize_impl) + self._ALL_ENV_INDICES: wp.array | None = None + self._ALL_ENV_MASK: wp.array | None = None + self._reset_mask: wp.array | None = None + + # check if max_contact_data_count_per_prim is set + if self.cfg.max_contact_data_count_per_prim is None: + self.cfg.max_contact_data_count_per_prim = 4 + + # check if force_threshold is set + if self.cfg.force_threshold is None: + self.cfg.force_threshold = 1.0 + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self.body_physx_view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of bodies : {self.num_bodies}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self.body_physx_view.count + + @property + def data(self) -> ContactSensorData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_sensors(self) -> int: + """Number of bodies with contact sensors attached.""" + return self._num_sensors + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + prim_paths = self.body_physx_view.prim_paths[: self.num_sensors] + return [path.split("/")[-1] for path in prim_paths] + + @property + def body_physx_view(self) -> physx.RigidBodyView: + """View for the rigid bodies captured (PhysX). + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._body_physx_view + + @property + def contact_view(self) -> physx.RigidContactView: + """Contact reporter view for the bodies (PhysX). + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._contact_view + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + # resolve env_ids to warp array + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + # reset the timers and counters + super().reset(None, env_mask) + + wp.launch( + reset_contact_sensor_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[ + self._history_length, + self._num_filter_shapes, + env_mask, + self._data._net_forces_w, + self._data._net_forces_w_history, + self._data._force_matrix_w, + ], + outputs=[ + self._data._current_air_time, + self._data._last_air_time, + self._data._current_contact_time, + self._data._last_contact_time, + self._data._friction_forces_w, + self._data._contact_pos_w, + ], + device=self._device, + ) + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if bodies that have established contact within the last :attr:`dt` seconds. + + This function checks if the bodies have established contact within the last :attr:`dt` seconds + by comparing the current contact time with the given time period. If the contact time is less + than the given time period, then the bodies are considered to be in contact. + + .. note:: + The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other + words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true + if the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + .. caution:: + The tensor returned by this function is only valid when called. If compute_first_air is called after + compute_first_contact, the tensor returned by this method will be have changed values. To avoid this, + either consume the results of this call immediately or clone the output of this tensor. + + Args: + dt: The time period since the contact was established. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have established contact within the last + :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the + number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track contact time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + wp.launch( + compute_first_transition_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[float(dt + abs_tol), self._data._current_contact_time], + outputs=[self._data._first_transition], + device=self._device, + ) + return self._data._first_transition_ta + + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> ProxyArray: + """Checks if bodies that have broken contact within the last :attr:`dt` seconds. + + This function checks if the bodies have broken contact within the last :attr:`dt` seconds + by comparing the current air time with the given time period. If the air time is less + than the given time period, then the bodies are considered to not be in contact. + + .. note:: + It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, + :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if + the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + .. caution:: + The tensor returned by this function is only valid when called. If compute_first_contact is called after + compute_first_air, the tensor returned by this method will be have changed values. To avoid this, + either consume the results of this call immediately or clone the output of this tensor. + + Args: + dt: The time period since the contract is broken. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track air time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + + wp.launch( + compute_first_transition_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[float(dt + abs_tol), self._data._current_air_time], + outputs=[self._data._first_transition], + device=self._device, + ) + return self._data._first_transition_ta + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check that only rigid bodies are selected + leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] + template_prim_path = self._parent_prims[0].GetPath().pathString + body_names = list() + for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): + # check if prim has contact reporter API + if "PhysxContactReportAPI" in prim.GetAppliedSchemas(): + prim_path = prim.GetPath().pathString + body_names.append(prim_path.rsplit("/", 1)[-1]) + # check that there is at least one body with contact reporter API + if not body_names: + raise RuntimeError( + f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." + "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." + ) + + # construct regex expression for the body names + body_names_regex = r"(" + "|".join(body_names) + r")" + body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + # convert regex expressions to glob expressions for PhysX + body_names_glob = body_names_regex.replace(".*", "*") + filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] + + # create a rigid prim view for the sensor + self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) + self._contact_view = self._physics_sim_view.create_rigid_contact_view( + body_names_glob, + filter_patterns=filter_prim_paths_glob, + max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, + ) + # resolve the true count of bodies + self._num_sensors = self.body_physx_view.count // self._num_envs + # check that contact reporter succeeded + if self._num_sensors != len(body_names): + raise RuntimeError( + "Failed to initialize contact reporter for specified bodies." + f"\n\tInput prim path : {self.cfg.prim_path}" + f"\n\tResolved prim paths: {body_names_regex}" + ) + + # check if filter paths are valid + if self.cfg.track_contact_points or self.cfg.track_friction_forces: + if not self.cfg.filter_prim_paths_expr: + raise ValueError( + "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + if self.cfg.max_contact_data_count_per_prim < 1: + raise ValueError( + f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " + "Please set it to a value greater than 0 to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + + self._create_buffers() + + def _create_buffers(self) -> None: + # Store filter shapes count + self._num_filter_shapes = self.contact_view.filter_count if self.cfg.filter_prim_paths_expr else 0 + # Store effective history length (always >= 1 for consistent buffer shapes) + self._history_length = max(self.cfg.history_length, 1) + + # prepare data buffers + self._data.create_buffers( + num_envs=self._num_envs, + num_sensors=self._num_sensors, + num_filter_shapes=self._num_filter_shapes, + history_length=self.cfg.history_length, + track_pose=self.cfg.track_pose, + track_air_time=self.cfg.track_air_time, + track_contact_points=self.cfg.track_contact_points, + track_friction_forces=self.cfg.track_friction_forces, + device=self._device, + ) + + def _update_buffers_impl(self, env_mask: wp.array | None = None): + """Fills the buffers of the sensor data.""" + # Convert env_mask to warp array + env_mask = self._resolve_indices_and_mask(None, env_mask) + + # PhysX returns (N*B, 3) float32 -> (N*B,) vec3f + net_forces_flat = self.contact_view.get_net_contact_forces(dt=self._sim_physics_dt).view(wp.vec3f) + # PhysX returns (N*B, M, 3) float32 -> (N*B, M) vec3f + if self.cfg.filter_prim_paths_expr: + force_matrix_flat = self.contact_view.get_contact_force_matrix(dt=self._sim_physics_dt).view(wp.vec3f) + else: + force_matrix_flat = None + # + wp.launch( + update_net_forces_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[ + net_forces_flat, + force_matrix_flat, + env_mask, + self._num_sensors, + self._num_filter_shapes, + self._history_length, + self.cfg.force_threshold, + self._timestamp, + self._timestamp_last_update, + ], + outputs=[ + self._data._net_forces_w, + self._data._net_forces_w_history, + self._data._force_matrix_w, + self._data._force_matrix_w_history, + self._data._current_air_time, + self._data._current_contact_time, + self._data._last_air_time, + self._data._last_contact_time, + ], + device=self._device, + ) + + # -- Pose -- + if self.cfg.track_pose: + # PhysX returns (N*B, 7) float32 -> (N*B,) transformf + poses_flat = self.body_physx_view.get_transforms().view(wp.transformf) + wp.launch( + split_flat_pose_to_pos_quat, + dim=(self._num_envs, self._num_sensors), + inputs=[poses_flat, env_mask, self._num_sensors], + outputs=[self._data._pos_w, self._data._quat_w], + device=self.device, + ) + + # -- Contact points -- + if self.cfg.track_contact_points: + _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = self.contact_view.get_contact_data( + dt=self._sim_physics_dt + ) + # buffer_contact_points: (total_contacts, 3) float32 -> (total_contacts,) vec3f + pts_vec3 = buffer_contact_points.view(wp.vec3f) + wp.launch( + unpack_contact_buffer_data, + dim=(self._num_envs, self._num_sensors, self._num_filter_shapes), + inputs=[ + pts_vec3, + buffer_count, + buffer_start_indices, + env_mask, + self._num_sensors, + True, + float("nan"), + ], + outputs=[self._data._contact_pos_w], + device=self.device, + ) + + # -- Friction forces -- + if self.cfg.track_friction_forces: + friction_forces, _, buffer_count, buffer_start_indices = self.contact_view.get_friction_data( + dt=self._sim_physics_dt + ) + friction_vec3 = friction_forces.view(wp.vec3f) + wp.launch( + unpack_contact_buffer_data, + dim=(self._num_envs, self._num_sensors, self._num_filter_shapes), + inputs=[ + friction_vec3, + buffer_count, + buffer_start_indices, + env_mask, + self._num_sensors, + False, + 0.0, + ], + outputs=[self._data._friction_forces_w], + device=self.device, + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "contact_visualizer"): + self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.contact_visualizer.set_visibility(True) + else: + if hasattr(self, "contact_visualizer"): + self.contact_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self.body_physx_view is None: + return + # Convert warp data to torch at the boundary for visualization + net_forces_torch = self._data.net_forces_w.torch # (N, B, 3) + net_contact_force_w = torch.linalg.norm(net_forces_torch, dim=-1) + # marker indices: 0 = contact, 1 = no contact + marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) + # check if prim is visualized + if self.cfg.track_pose: + frame_origins = self._data.pos_w.torch # (N, B, 3) + else: + pose = self.body_physx_view.get_transforms() # (N*B, 7) float32 + pose_torch = wp.to_torch(pose) + frame_origins = pose_torch.view(-1, self._num_sensors, 7)[:, :, :3] + # visualize + self.contact_visualizer.visualize(frame_origins.reshape(-1, 3), marker_indices=marker_indices.reshape(-1)) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._body_physx_view = None + self._contact_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py new file mode 100644 index 000000000000..8950342a0f37 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from typing import TYPE_CHECKING + +from isaaclab.sensors.contact_sensor.contact_sensor_cfg import ContactSensorCfg as _BaseContactSensorCfg +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from .contact_sensor import ContactSensor + + +@configclass +class ContactSensorCfg(_BaseContactSensorCfg): + """PhysX contact sensor configuration.""" + + class_type: type["ContactSensor"] | str = "{DIR}.contact_sensor:ContactSensor" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py new file mode 100644 index 000000000000..5e50c42da7ef --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -0,0 +1,318 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import math + +import warp as wp + +from isaaclab.sensors.contact_sensor import BaseContactSensorData +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.sensors.kernels import concat_pos_and_quat_to_pose_kernel + +logger = logging.getLogger(__name__) + + +class ContactSensorData(BaseContactSensorData): + """Data container for the PhysX contact reporting sensor.""" + + @property + def pose_w(self) -> ProxyArray | None: + """Pose of the sensor origin in world frame. + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + if self._pose_w is None: + return None + wp.launch( + concat_pos_and_quat_to_pose_kernel, + dim=(self._num_envs, self._num_sensors), + inputs=[self._pos_w, self._quat_w], + outputs=[self._pose_w], + device=self._device, + ) + if self._pose_w_ta is None: + self._pose_w_ta = ProxyArray(self._pose_w) + return self._pose_w_ta + + @property + def pos_w(self) -> ProxyArray | None: + """Position of the sensor origin in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, 3). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + if self._pos_w is None: + return None + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray | None: + """Orientation of the sensor origin in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.quatf. In torch this resolves to + (num_instances, num_sensors, 4). The orientation is provided in (x, y, z, w) format. + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + if self._quat_w is None: + return None + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def net_forces_w(self) -> ProxyArray | None: + """The net normal contact forces in world frame. + + Shape is (num_instances, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, 3). + """ + if self._net_forces_w is None: + return None + if self._net_forces_w_ta is None: + self._net_forces_w_ta = ProxyArray(self._net_forces_w) + return self._net_forces_w_ta + + @property + def net_forces_w_history(self) -> ProxyArray | None: + """History of net normal contact forces. + + Shape is (num_instances, history_length, num_sensors), dtype = wp.vec3f. In torch this resolves to + (num_instances, history_length, num_sensors, 3). + """ + if self._net_forces_w_history is None: + return None + if self._net_forces_w_history_ta is None: + self._net_forces_w_history_ta = ProxyArray(self._net_forces_w_history) + return self._net_forces_w_history_ta + + @property + def force_matrix_w(self) -> ProxyArray | None: + """Normal contact forces filtered between sensor and filtered bodies. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + if self._force_matrix_w is None: + return None + if self._force_matrix_w_ta is None: + self._force_matrix_w_ta = ProxyArray(self._force_matrix_w) + return self._force_matrix_w_ta + + @property + def force_matrix_w_history(self) -> ProxyArray | None: + """History of filtered contact forces. + + Shape is (num_instances, history_length, num_sensors, num_filter_shapes), dtype = wp.vec3f. + In torch this resolves to (num_instances, history_length, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + if self._force_matrix_w_history is None: + return None + if self._force_matrix_w_history_ta is None: + self._force_matrix_w_history_ta = ProxyArray(self._force_matrix_w_history) + return self._force_matrix_w_history_ta + + @property + def contact_pos_w(self) -> ProxyArray | None: + """Average position of contact points. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.track_contact_points` is False. + """ + if self._contact_pos_w is None: + return None + if self._contact_pos_w_ta is None: + self._contact_pos_w_ta = ProxyArray(self._contact_pos_w) + return self._contact_pos_w_ta + + @property + def friction_forces_w(self) -> ProxyArray | None: + """Sum of friction forces. + + Shape is (num_instances, num_sensors, num_filter_shapes), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_sensors, num_filter_shapes, 3). + + None if :attr:`ContactSensorCfg.track_friction_forces` is False. + """ + if self._friction_forces_w is None: + return None + if self._friction_forces_w_ta is None: + self._friction_forces_w_ta = ProxyArray(self._friction_forces_w) + return self._friction_forces_w_ta + + @property + def last_air_time(self) -> ProxyArray | None: + """Time spent in air before last contact. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + if self._last_air_time is None: + return None + if self._last_air_time_ta is None: + self._last_air_time_ta = ProxyArray(self._last_air_time) + return self._last_air_time_ta + + @property + def current_air_time(self) -> ProxyArray | None: + """Time spent in air since last detach. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + if self._current_air_time is None: + return None + if self._current_air_time_ta is None: + self._current_air_time_ta = ProxyArray(self._current_air_time) + return self._current_air_time_ta + + @property + def last_contact_time(self) -> ProxyArray | None: + """Time spent in contact before last detach. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + if self._last_contact_time is None: + return None + if self._last_contact_time_ta is None: + self._last_contact_time_ta = ProxyArray(self._last_contact_time) + return self._last_contact_time_ta + + @property + def current_contact_time(self) -> ProxyArray | None: + """Time spent in contact since last contact. + + Shape is (num_instances, num_sensors), dtype = wp.float32. + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + if self._current_contact_time is None: + return None + if self._current_contact_time_ta is None: + self._current_contact_time_ta = ProxyArray(self._current_contact_time) + return self._current_contact_time_ta + + def create_buffers( + self, + num_envs: int, + num_sensors: int, + num_filter_shapes: int, + history_length: int, + track_pose: bool, + track_air_time: bool, + track_contact_points: bool, + track_friction_forces: bool, + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_sensors: Number of sensors per environment. + num_filter_shapes: Number of filtered shapes for force matrix. + history_length: Length of force history buffer. + track_pose: Whether to track sensor pose. + track_air_time: Whether to track air/contact time. + track_contact_points: Whether to track contact points. + track_friction_forces: Whether to track friction forces. + device: Device for tensor storage. + """ + self._num_envs = num_envs + self._num_sensors = num_sensors + self._device = device + # Ensure history_length >= 1 for consistent buffer shapes + effective_history = max(history_length, 1) + + # Net forces (always tracked) + self._net_forces_w = wp.zeros((num_envs, num_sensors), dtype=wp.vec3f, device=device) + self._net_forces_w_history = wp.zeros((num_envs, effective_history, num_sensors), dtype=wp.vec3f, device=device) + + # Track force matrix if requested - only with filter + if num_filter_shapes > 0: + self._force_matrix_w = wp.zeros((num_envs, num_sensors, num_filter_shapes), dtype=wp.vec3f, device=device) + self._force_matrix_w_history = wp.zeros( + (num_envs, effective_history, num_sensors, num_filter_shapes), dtype=wp.vec3f, device=device + ) + else: + self._force_matrix_w = None + self._force_matrix_w_history = None + + # Track pose if requested + if track_pose: + self._pos_w = wp.zeros((num_envs, num_sensors), dtype=wp.vec3f, device=device) + self._quat_w = wp.zeros((num_envs, num_sensors), dtype=wp.quatf, device=device) + self._pose_w = wp.zeros((num_envs, num_sensors), dtype=wp.transformf, device=device) + else: + self._pos_w = None + self._quat_w = None + self._pose_w = None + + # Track air time if requested + if track_air_time: + self._last_air_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._current_air_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._last_contact_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._current_contact_time = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._first_transition = wp.zeros((num_envs, num_sensors), dtype=wp.float32, device=device) + self._first_transition_ta = ProxyArray(self._first_transition) + else: + self._last_air_time = None + self._current_air_time = None + self._last_contact_time = None + self._current_contact_time = None + self._first_transition = None + self._first_transition_ta = None + + # Track contact points if requested - filled with NaN + if track_contact_points: + self._contact_pos_w = wp.full( + (num_envs, num_sensors, num_filter_shapes), + dtype=wp.vec3f, + device=device, + value=wp.vec3f(math.nan, math.nan, math.nan), + ) + else: + self._contact_pos_w = None + + # Track friction forces if requested + if track_friction_forces: + self._friction_forces_w = wp.zeros( + (num_envs, num_sensors, num_filter_shapes), dtype=wp.vec3f, device=device + ) + else: + self._friction_forces_w = None + + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + self._pose_w_ta: ProxyArray | None = None + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._net_forces_w_ta: ProxyArray | None = None + self._net_forces_w_history_ta: ProxyArray | None = None + self._force_matrix_w_ta: ProxyArray | None = None + self._force_matrix_w_history_ta: ProxyArray | None = None + self._contact_pos_w_ta: ProxyArray | None = None + self._friction_forces_w_ta: ProxyArray | None = None + self._last_air_time_ta: ProxyArray | None = None + self._current_air_time_ta: ProxyArray | None = None + self._last_contact_time_ta: ProxyArray | None = None + self._current_contact_time_ta: ProxyArray | None = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/kernels.py new file mode 100644 index 000000000000..4e65a7ca53be --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/kernels.py @@ -0,0 +1,292 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for the PhysX contact sensor.""" + +import warp as wp + +# ---- Copy kernels (flat PhysX view -> structured data buffers) ---- + + +@wp.kernel +def split_flat_pose_to_pos_quat( + src: wp.array(dtype=wp.transformf), + mask: wp.array(dtype=wp.bool), + num_bodies: wp.int32, + dst_pos: wp.array2d(dtype=wp.vec3f), + dst_quat: wp.array2d(dtype=wp.quatf), +): + """Split flat (N*B,) transformf into (N, B) vec3f pos and (N, B) quatf quat. + + Args: + src: Flat source array of transforms from PhysX view. Shape is (N*B,). + mask: Boolean mask for which environments to update. Shape is (N,). + num_bodies: Number of bodies per environment. + dst_pos: Destination position buffer. Shape is (N, B). + dst_quat: Destination quaternion buffer. Shape is (N, B). + """ + env, sensor = wp.tid() + if mask: + if not mask[env]: + return + + src_idx = env * num_bodies + sensor + dst_pos[env, sensor] = wp.transform_get_translation(src[src_idx]) + dst_quat[env, sensor] = wp.transform_get_rotation(src[src_idx]) + + +# ---- Unpack contact buffer data kernel ---- + + +@wp.kernel +def unpack_contact_buffer_data( + contact_data: wp.array(dtype=wp.vec3f), + buffer_count: wp.array2d(dtype=wp.uint32), + buffer_start_indices: wp.array2d(dtype=wp.uint32), + mask: wp.array(dtype=wp.bool), + num_bodies: wp.int32, + avg: bool, + default_val: wp.float32, + dst: wp.array3d(dtype=wp.vec3f), +): + """Unpack and aggregate contact buffer data for each (env, body, filter) group. + + Each thread handles one (body, filter) pair for one environment. It reads + `count` contact entries starting at `start_index` and either averages or + sums them. + + Args: + contact_data: Flat buffer of contact data. Shape is (total_contacts,) vec3f. + buffer_count: Count of contacts per (env*body, filter). Shape is (N*B, M) uint32. + buffer_start_indices: Start indices per (env*body, filter). Shape is (N*B, M) uint32. + mask: Boolean mask for which environments to update. Shape is (N,). + num_bodies: Number of bodies per environment. + avg: If True, average the data; if False, sum it. + default_val: Default value for groups with zero contacts (e.g. NaN or 0.0). + dst: Destination buffer. Shape is (N, B, M). + """ + env, sensor, contact = wp.tid() + if mask: + if not mask[env]: + return + + flat_idx = env * num_bodies + sensor + count = wp.int32(buffer_count[flat_idx, contact]) + start = wp.int32(buffer_start_indices[flat_idx, contact]) + + if count > 0: + accum = wp.vec3f(0.0, 0.0, 0.0) + for c in range(count): + accum = accum + contact_data[start + c] + if avg: + accum = accum / wp.float32(count) + dst[env, sensor, contact] = accum + else: + dst[env, sensor, contact] = wp.vec3f(default_val, default_val, default_val) + + +@wp.kernel +def reset_contact_sensor_kernel( + # in + history_length: int, + num_filter_objects: int, + env_mask: wp.array(dtype=wp.bool), + # in-out + net_forces_w: wp.array2d(dtype=wp.vec3f), + net_forces_w_history: wp.array3d(dtype=wp.vec3f), + force_matrix_w: wp.array3d(dtype=wp.vec3f), + # outputs + current_air_time: wp.array2d(dtype=wp.float32), + last_air_time: wp.array2d(dtype=wp.float32), + current_contact_time: wp.array2d(dtype=wp.float32), + last_contact_time: wp.array2d(dtype=wp.float32), + friction_forces_w: wp.array3d(dtype=wp.vec3f), + contact_pos_w: wp.array3d(dtype=wp.vec3f), +): + """Reset the contact sensor data for specified environments. + + Launch with dim=(num_envs, num_sensors). + + Args: + history_length: Length of history. + num_filter_objects: Number of filter objects. + env_mask: Mask array. Shape is (num_envs,). + net_forces_w: Net forces array. Shape is (num_envs, num_sensors). + net_forces_w_history: Net forces history array. Shape is (num_envs, history_length, num_sensors). + force_matrix_w: Force matrix array. Shape is (num_envs, num_sensors, num_filter_objects). + current_air_time: Current air time array. Shape is (num_envs, num_sensors). + last_air_time: Last air time array. Shape is (num_envs, num_sensors). + current_contact_time: Current contact time array. Shape is (num_envs, num_sensors). + last_contact_time: Last contact time array. Shape is (num_envs, num_sensors). + friction_forces_w: Friction forces array. Shape is (num_envs, num_sensors, num_filter_objects). + contact_pos_w: Contact pos array. Shape is (num_envs, num_sensors, num_filter_objects). + """ + env, sensor = wp.tid() + + if env_mask: + if not env_mask[env]: + return + + # Reset net forces + net_forces_w[env, sensor] = wp.vec3f(0.0) + + # Reset history + if net_forces_w_history: + for i in range(history_length): + net_forces_w_history[env, i, sensor] = wp.vec3f(0.0) + + # Reset force matrix (guard for None case) + if force_matrix_w: + for f in range(num_filter_objects): + force_matrix_w[env, sensor, f] = wp.vec3f(0.0) + + # Reset air/contact time tracking + if current_air_time: + current_air_time[env, sensor] = 0.0 + last_air_time[env, sensor] = 0.0 + current_contact_time[env, sensor] = 0.0 + last_contact_time[env, sensor] = 0.0 + + if friction_forces_w: + for f in range(num_filter_objects): + friction_forces_w[env, sensor, f] = wp.vec3f(0.0) + + if contact_pos_w: + for f in range(num_filter_objects): + contact_pos_w[env, sensor, f] = wp.vec3f(0.0) + + +@wp.kernel +def compute_first_transition_kernel( + # in + threshold: wp.float32, + time: wp.array2d(dtype=wp.float32), + # out + result: wp.array2d(dtype=wp.float32), +): + """Compute boolean mask (as float) for sensors whose time is in (0, threshold). + + Used by both compute_first_contact (with current_contact_time) and + compute_first_air (with current_air_time). + + Launch with dim=(num_envs, num_sensors). + + Args: + threshold: Threshold for the time. + time: Time array. Shape is (num_envs, num_sensors). + result: Result array. Shape is (num_envs, num_sensors). + """ + env, sensor = wp.tid() + t = time[env, sensor] + if t > 0.0 and t < threshold: + result[env, sensor] = 1.0 + else: + result[env, sensor] = 0.0 + + +@wp.kernel +def update_net_forces_kernel( + # in + net_forces_flat: wp.array(dtype=wp.vec3f), + net_forces_matrix_flat: wp.array2d(dtype=wp.vec3f), + mask: wp.array(dtype=wp.bool), + num_sensors: int, + num_filter_shapes: int, + history_length: int, + contact_force_threshold: wp.float32, + timestamp: wp.array(dtype=wp.float32), + timestamp_last_update: wp.array(dtype=wp.float32), + # out + net_forces_w: wp.array2d(dtype=wp.vec3f), + net_forces_w_history: wp.array3d(dtype=wp.vec3f), + force_matrix_w: wp.array3d(dtype=wp.vec3f), + force_matrix_w_history: wp.array4d(dtype=wp.vec3f), + current_air_time: wp.array2d(dtype=wp.float32), + current_contact_time: wp.array2d(dtype=wp.float32), + last_air_time: wp.array2d(dtype=wp.float32), + last_contact_time: wp.array2d(dtype=wp.float32), +): + """Update the net forces, force matrix and air/contact time for each (env, sensor) pair. + + Launch with dim=(num_envs, num_sensors). + + Args: + net_forces_flat: Flat net forces. Shape is (num_envs*num_sensors,). + net_forces_matrix_flat: Flat force matrix. Shape is (num_envs*num_sensors, num_filter_shapes). + mask: Mask array. Shape is (num_envs,). + num_sensors: Number of sensors per environment. + num_filter_shapes: Number of filter shapes. + history_length: Length of history. + contact_force_threshold: Threshold for the contact force. + timestamp: Timestamp array. Shape is (num_envs,). + timestamp_last_update: Timestamp last update array. Shape is (num_envs,). + net_forces_w: Net forces array. Shape is (num_envs, num_sensors). + net_forces_w_history: Net forces history array. Shape is (num_envs, history_length, num_sensors). + force_matrix_w: Force matrix array. Shape is (num_envs, num_sensors, num_filter_shapes). + force_matrix_w_history: Force matrix history array. Shape is + (num_envs, history_length, num_sensors, num_filter_shapes). + current_air_time: Current air time array. Shape is (num_envs, num_sensors). + current_contact_time: Current contact time array. Shape is (num_envs, num_sensors). + last_air_time: Last air time array. Shape is (num_envs, num_sensors). + last_contact_time: Last contact time array. Shape is (num_envs, num_sensors). + """ + env, sensor = wp.tid() + + if mask: + if not mask[env]: + return + + src_idx = env * num_sensors + sensor + + # Update net forces + net_forces_w[env, sensor] = net_forces_flat[src_idx] + # Update history + if net_forces_w_history: + for i in range(history_length - 1, 0, -1): + net_forces_w_history[env, i, sensor] = net_forces_w_history[env, i - 1, sensor] + net_forces_w_history[env, 0, sensor] = net_forces_w[env, sensor] + + # update force matrix + if net_forces_matrix_flat: + for f in range(num_filter_shapes): + force_matrix_w[env, sensor, f] = net_forces_matrix_flat[src_idx, f] + for i in range(history_length - 1, 0, -1): + force_matrix_w_history[env, i, sensor, f] = force_matrix_w_history[env, i - 1, sensor, f] + force_matrix_w_history[env, 0, sensor, f] = force_matrix_w[env, sensor, f] + + # Update air/contact time tracking + if current_air_time: + elapsed_time = timestamp[env] - timestamp_last_update[env] + in_contact = wp.length_sq(net_forces_w[env, sensor]) > contact_force_threshold * contact_force_threshold + + cat = current_air_time[env, sensor] + cct = current_contact_time[env, sensor] + is_first_contact = in_contact and (cat > 0.0) + is_first_detached = not in_contact and (cct > 0.0) + + if is_first_contact: + last_air_time[env, sensor] = cat + elapsed_time + elif is_first_detached: + last_contact_time[env, sensor] = cct + elapsed_time + + current_contact_time[env, sensor] = wp.where(in_contact, cct + elapsed_time, 0.0) + current_air_time[env, sensor] = wp.where(in_contact, 0.0, cat + elapsed_time) + + +@wp.kernel +def concat_pos_and_quat_to_pose_kernel( + pos: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + pose: wp.array2d(dtype=wp.transformf), +): + """Concatenate position and quaternion to pose. + + Args: + pos: Position array. Shape is (N, B). + quat: Quaternion array. Shape is (N, B). + pose: Pose array. Shape is (N, B). + """ + env, sensor = wp.tid() + pose[env, sensor] = wp.transform(pos[env, sensor], quat[env, sensor]) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py new file mode 100644 index 000000000000..6b89a19317fa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX frame transformer sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.pyi new file mode 100644 index 000000000000..1bd63d0390d3 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "FrameTransformer", + "FrameTransformerData", +] + +from .frame_transformer import FrameTransformer +from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 000000000000..b7215062ff3f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,571 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +import warnings +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.frame_transformer import BaseFrameTransformer +from isaaclab.utils.math import is_identity_pose, normalize, quat_from_angle_axis + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .frame_transformer_data import FrameTransformerData +from .kernels import frame_transformer_update_kernel + +if TYPE_CHECKING: + from isaaclab.sensors.frame_transformer import FrameTransformerCfg + +# import logger +logger = logging.getLogger(__name__) + + +class FrameTransformer(BaseFrameTransformer): + """A PhysX sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. + + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data: FrameTransformerData = FrameTransformerData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"FrameTransformer @ '{self.cfg.prim_path}': \n" + f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" + f"\tnumber of envs: {self._num_envs}\n" + f"\tsource body frame: {self._source_frame_body_name}\n" + f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> FrameTransformerData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. + + .. deprecated:: + Use ``len(data.target_frame_names)`` instead. This property will be removed in a future release. + """ + warnings.warn( + "The `num_bodies` property will be deprecated in a future release." + " Please use `len(data.target_frame_names)` instead.", + DeprecationWarning, + stacklevel=2, + ) + return len(self._target_frame_body_names) + + @property + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + .. deprecated:: + Use ``data.target_frame_names`` instead. This property will be removed in a future release. + """ + warnings.warn( + "The `body_names` property will be deprecated in a future release." + " Please use `data.target_frame_names` instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._target_frame_body_names + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + # resolve indices and mask + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + # reset the timers and counters + super().reset(None, env_mask) + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + + # resolve source frame offset + source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) + source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is + # not the identity quaternion for efficiency in _update_buffer_impl + self._apply_source_frame_offset = True + # Handle source frame offsets + if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): + logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") + self._apply_source_frame_offset = False + else: + logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") + # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) + self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) + self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) + + # Keep track of mapping from the rigid body name to the desired frames and prim path, + # as there may be multiple frames based upon the same body name and we don't want to + # create unnecessary views. + body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} + # The offsets associated with each target frame + target_offsets: dict[str, dict[str, torch.Tensor]] = {} + # The frames whose offsets are not identity (use set to avoid duplicates across envs) + non_identity_offset_frames: set[str] = set() + + # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the + # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl + self._apply_target_frame_offset = False + + # Need to keep track of whether the source frame is also a target frame + self._source_is_also_target_frame = False + + # Collect all target frames, their associated body prim paths and their offsets so that we can extract + # the prim, check that it has the appropriate rigid body API in a single loop. + # First element is None because user can't specify source frame name + frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] + frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] + # First element is None because source frame offset is handled separately + frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] + frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) + for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): + # Find correct prim + matching_prims = sim_utils.find_matching_prims(prim_path) + if len(matching_prims) == 0: + raise ValueError( + f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." + " No matching prims were found." + ) + for prim in matching_prims: + # Get the prim path of the matching prim + matching_prim_path = prim.GetPath().pathString + # Check if it is a rigid prim + if not prim.HasAPI(UsdPhysics.RigidBodyAPI): + raise ValueError( + f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" + " rigid body. The class only supports transformations between rigid bodies." + ) + + # Get the name of the body: use relative prim path for unique identification + body_name = self._get_relative_body_path(matching_prim_path) + # Use leaf name of prim path if frame name isn't specified by user + frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] + + # Keep track of which frames are associated with which bodies + if body_name in body_names_to_frames: + body_names_to_frames[body_name]["frames"].add(frame_name) + + # This is a corner case where the source frame is also a target frame + if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": + self._source_is_also_target_frame = True + + else: + # Store the first matching prim path and the type of frame + body_names_to_frames[body_name] = { + "frames": {frame_name}, + "prim_path": matching_prim_path, + "type": frame_type, + } + + if offset is not None: + offset_pos = torch.tensor(offset.pos, device=self.device) + offset_quat = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets (optimized code path in _update_buffer_impl) + if not is_identity_pose(offset_pos, offset_quat): + non_identity_offset_frames.add(frame_name) + self._apply_target_frame_offset = True + target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} + + if not self._apply_target_frame_offset: + logger.info( + f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" + f" are identity: {frames[1:]}" + ) + else: + logger.info( + f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" + f" {sorted(non_identity_offset_frames)}" + ) + + # The names of bodies that RigidPrim will be tracking to later extract transforms from + tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] + tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] + + body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # Create a prim view for all frames and initialize it + # order of transforms coming out of view will be source frame followed by target frame(s) + self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) + + # Determine the order in which regex evaluated body names so we can later index into frame transforms + # by frame name correctly + all_prim_paths = self._frame_physx_view.prim_paths + + if "env_" in all_prim_paths[0]: + + def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: + """Separates the environment number and prim_path from the item. + + Args: + item: The item to extract the environment number from. Assumes item is of the form + `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. + Returns: + The environment number and the prim_path. + """ + match = re.search(r"env_(\d+)(.*)", item) + return (int(match.group(1)), match.group(2)) + + # Find the indices that would reorganize output to be per environment. + # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base + # to come before env_1/Robot/foot so we need to use custom key function + self._per_env_indices = [ + index + for index, _ in sorted( + list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) + ) + ] + + # Only need 0th env as the names and their ordering are the same across environments + sorted_prim_paths = [ + all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] + ] + + else: + # If no environment is present, then the order of the body names is the same as the order of the + # prim paths sorted alphabetically + self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] + sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] + + # -- target frames: use relative prim path for unique identification + self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] + + # -- source frame: use relative prim path for unique identification + self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) + source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) + + # Only remove source frame from tracked bodies if it is not also a target frame + if not self._source_is_also_target_frame: + self._target_frame_body_names.remove(self._source_frame_body_name) + + # Determine indices into all tracked body frames for both source and target frames + all_ids = torch.arange(self._num_envs * len(tracked_body_names)) + self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index + + # If source frame is also a target frame, then the target frame body ids are the same as + # the source frame body ids + if self._source_is_also_target_frame: + self._target_frame_body_ids = all_ids + else: + self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] + + # The name of each of the target frame(s) - either user specified or defaulted to the body name + self._target_frame_names: list[str] = [] + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_quat = [] + # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed + # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations + # when updating sensor in _update_buffers_impl + duplicate_frame_indices = [] + + # Go through each body name and determine the number of duplicates we need for that frame + # and extract the offsets. This is all done to handle the case where multiple frames + # reference the same body, but have different names and/or offsets + for i, body_name in enumerate(self._target_frame_body_names): + for frame in body_names_to_frames[body_name]["frames"]: + # Only need to handle target frames here as source frame is handled separately + if frame in target_offsets: + target_frame_offset_pos.append(target_offsets[frame]["pos"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) + self._target_frame_names.append(frame) + duplicate_frame_indices.append(i) + + # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes + # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is a optimization to make _update_buffer_impl more efficient + duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) + if self._source_is_also_target_frame: + num_target_body_frames = len(tracked_body_names) + else: + num_target_body_frames = len(tracked_body_names) - 1 + + self._duplicate_frame_indices = torch.cat( + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] + ) + + # Target frame offsets are only applied if at least one of the offsets are non-identity + if self._apply_target_frame_offset: + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) + self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) + + # Store number of target frames for kernel launch + self._num_target_frames = len(self._target_frame_names) + + # --- Pre-compute warp index arrays for fused kernel --- + # Source raw indices: (N,) — direct index into raw_transforms per env + source_raw_list = [] + for e in range(self._num_envs): + source_raw_list.append(self._per_env_indices[self._source_frame_body_ids[e].item()]) + self._source_raw_indices = wp.from_torch( + torch.tensor(source_raw_list, dtype=torch.int32, device=self._device), dtype=wp.int32 + ) + + # Target raw indices: (N, M) — direct index into raw_transforms per (env, frame) + M = self._num_target_frames + target_raw = torch.zeros((self._num_envs, M), dtype=torch.int32, device=self._device) + for e in range(self._num_envs): + for f in range(M): + dup_idx = self._duplicate_frame_indices[e * M + f].item() + body_idx = self._target_frame_body_ids[dup_idx].item() + target_raw[e, f] = self._per_env_indices[body_idx] + self._target_raw_indices = wp.from_torch(target_raw.contiguous(), dtype=wp.int32) + + # --- Pre-compute warp offset arrays (always created; identity when not configured) --- + # Source offsets: (N,) + if self._apply_source_frame_offset: + self._source_offset_pos_wp = wp.from_torch(self._source_frame_offset_pos.contiguous(), dtype=wp.vec3f) + self._source_offset_quat_wp = wp.from_torch(self._source_frame_offset_quat.contiguous(), dtype=wp.quatf) + else: + self._source_offset_pos_wp = wp.zeros(self._num_envs, dtype=wp.vec3f, device=self._device) + self._source_offset_quat_wp = wp.zeros(self._num_envs, dtype=wp.quatf, device=self._device) + # Identity quaternion: (0, 0, 0, 1) + wp.to_torch(self._source_offset_quat_wp)[:, 3] = 1.0 + + # Target offsets: (M,) + if self._apply_target_frame_offset: + # Only need per-frame offsets (not per-env*frame), take first M entries + tgt_off_pos = torch.stack(target_frame_offset_pos) # (M, 3) + tgt_off_quat = torch.stack(target_frame_offset_quat) # (M, 4) + self._target_offset_pos_wp = wp.from_torch(tgt_off_pos.contiguous(), dtype=wp.vec3f) + self._target_offset_quat_wp = wp.from_torch(tgt_off_quat.contiguous(), dtype=wp.quatf) + else: + self._target_offset_pos_wp = wp.zeros(M, dtype=wp.vec3f, device=self._device) + self._target_offset_quat_wp = wp.zeros(M, dtype=wp.quatf, device=self._device) + # Identity quaternion: (0, 0, 0, 1) + wp.to_torch(self._target_offset_quat_wp)[:, 3] = 1.0 + + # Create data buffers + self._data.create_buffers( + num_envs=self._num_envs, + num_target_frames=self._num_target_frames, + target_frame_names=self._target_frame_names, + device=self._device, + ) + + def _update_buffers_impl(self, env_mask: wp.array | None = None): + """Fills the buffers of the sensor data.""" + # Resolve mask + env_mask = self._resolve_indices_and_mask(None, env_mask) + # Get raw transforms from PhysX view and reinterpret as transformf + raw_transforms = self._frame_physx_view.get_transforms().view(wp.transformf) + + wp.launch( + frame_transformer_update_kernel, + dim=(self._num_envs, self._num_target_frames), + inputs=[ + env_mask, + raw_transforms, + self._source_raw_indices, + self._target_raw_indices, + self._source_offset_pos_wp, + self._source_offset_quat_wp, + self._target_offset_pos_wp, + self._target_offset_quat_wp, + self._data._source_pos_w, + self._data._source_quat_w, + self._data._target_pos_w, + self._data._target_quat_w, + self._data._target_pos_source, + self._data._target_quat_source, + ], + device=self._device, + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + if not hasattr(self, "frame_visualizer"): + self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + + # set their visibility to true + self.frame_visualizer.set_visibility(True) + else: + if hasattr(self, "frame_visualizer"): + self.frame_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # Convert warp -> torch at the boundary for visualization + source_pos_w = wp.to_torch(self._data._source_pos_w) + source_quat_w = wp.to_torch(self._data._source_quat_w) + target_pos_w = wp.to_torch(self._data._target_pos_w) + target_quat_w = wp.to_torch(self._data._target_quat_w) + + # Get the all frames pose + frames_pos = torch.cat([source_pos_w, target_pos_w.view(-1, 3)], dim=0) + frames_quat = torch.cat([source_quat_w, target_quat_w.view(-1, 4)], dim=0) + + # Get the all connecting lines between frames pose + lines_pos, lines_quat, lines_length = self._get_connecting_lines( + start_pos=source_pos_w.repeat_interleave(target_pos_w.size(1), dim=0), + end_pos=target_pos_w.view(-1, 3), + ) + + # Initialize default (identity) scales and marker indices for all markers (frames + lines) + marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) + marker_indices = torch.zeros(marker_scales.size(0)) + + # Set the z-scale of line markers to represent their actual length + marker_scales[-lines_length.size(0) :, -1] = lines_length + + # Assign marker config index 1 to line markers + marker_indices[-lines_length.size(0) :] = 1 + + # Update the frame and the connecting line visualizer + self.frame_visualizer.visualize( + translations=torch.cat((frames_pos, lines_pos), dim=0), + orientations=torch.cat((frames_quat, lines_quat), dim=0), + scales=marker_scales, + marker_indices=marker_indices, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._frame_physx_view = None + + """ + Internal helpers. + """ + + def _get_connecting_lines( + self, start_pos: torch.Tensor, end_pos: torch.Tensor + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Draws connecting lines between frames. + + Given start and end points, this function computes the positions (mid-point), orientations, + and lengths of the connecting lines. + + Args: + start_pos: The start positions of the connecting lines. Shape is (N, 3). + end_pos: The end positions of the connecting lines. Shape is (N, 3). + + Returns: + A tuple containing: + - The positions of each connecting line. Shape is (N, 3). + - The orientations of each connecting line in quaternion. Shape is (N, 4). + - The lengths of each connecting line. Shape is (N,). + """ + direction = end_pos - start_pos + lengths = torch.linalg.norm(direction, dim=-1) + positions = (start_pos + end_pos) / 2 + + # Get default direction (along z-axis) + default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) + + # Normalize direction vector + direction_norm = normalize(direction) + + # Calculate rotation from default direction to target direction + rotation_axis = torch.linalg.cross(default_direction, direction_norm) + rotation_axis_norm = torch.linalg.norm(rotation_axis, dim=-1) + + # Handle case where vectors are parallel + mask = rotation_axis_norm > 1e-6 + rotation_axis = torch.where( + mask.unsqueeze(-1), + normalize(rotation_axis), + torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), + ) + + # Calculate rotation angle + cos_angle = torch.sum(default_direction * direction_norm, dim=-1) + cos_angle = torch.clamp(cos_angle, -1.0, 1.0) + angle = torch.acos(cos_angle) + orientations = quat_from_angle_axis(angle, rotation_axis) + + return positions, orientations, lengths + + @staticmethod + def _get_relative_body_path(prim_path: str) -> str: + """Extract a normalized body path from a prim path. + + Removes the environment instance segment `/envs/env_/` to normalize paths + across multiple environments, while preserving the `/envs/` prefix to + distinguish environment-scoped paths from non-environment paths. + + Examples: + - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' + - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' + - '/World/Robot' -> '/World/Robot' + - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' + + Args: + prim_path: The full prim path. + + Returns: + The prim path with `/envs/env_/` removed, preserving `/envs/`. + """ + pattern = re.compile(r"/envs/env_[^/]+/") + return pattern.sub("/envs/", prim_path) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 000000000000..7f7c0a837b2f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,187 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.frame_transformer import BaseFrameTransformerData +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.sensors.kernels import concat_pos_and_quat_to_pose_1d_kernel, concat_pos_and_quat_to_pose_kernel + + +class FrameTransformerData(BaseFrameTransformerData): + """Data container for the PhysX frame transformer sensor.""" + + @property + def target_frame_names(self) -> list[str]: + """Target frame names (order matches data ordering).""" + return self._target_frame_names + + @property + def target_pose_source(self) -> ProxyArray: + """Pose of target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.transformf. In torch this resolves to + (num_instances, num_target_frames, 7). The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + wp.launch( + concat_pos_and_quat_to_pose_kernel, + dim=(self._num_envs, self._num_target_frames), + inputs=[self._target_pos_source, self._target_quat_source], + outputs=[self._target_pose_source], + device=self._device, + ) + if self._target_pose_source_ta is None: + self._target_pose_source_ta = ProxyArray(self._target_pose_source) + return self._target_pose_source_ta + + @property + def target_pos_source(self) -> ProxyArray: + """Position of target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_target_frames, 3). + """ + if self._target_pos_source_ta is None: + self._target_pos_source_ta = ProxyArray(self._target_pos_source) + return self._target_pos_source_ta + + @property + def target_quat_source(self) -> ProxyArray: + """Orientation of target frame(s) relative to source frame. + + Shape is (num_instances, num_target_frames), dtype = wp.quatf. In torch this resolves to + (num_instances, num_target_frames, 4). The orientation is provided in (x, y, z, w) format. + """ + if self._target_quat_source_ta is None: + self._target_quat_source_ta = ProxyArray(self._target_quat_source) + return self._target_quat_source_ta + + @property + def target_pose_w(self) -> ProxyArray: + """Pose of target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.transformf. In torch this resolves to + (num_instances, num_target_frames, 7). The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + wp.launch( + concat_pos_and_quat_to_pose_kernel, + dim=(self._num_envs, self._num_target_frames), + inputs=[self._target_pos_w, self._target_quat_w], + outputs=[self._target_pose_w], + device=self._device, + ) + if self._target_pose_w_ta is None: + self._target_pose_w_ta = ProxyArray(self._target_pose_w) + return self._target_pose_w_ta + + @property + def target_pos_w(self) -> ProxyArray: + """Position of target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.vec3f. In torch this resolves to + (num_instances, num_target_frames, 3). + """ + if self._target_pos_w_ta is None: + self._target_pos_w_ta = ProxyArray(self._target_pos_w) + return self._target_pos_w_ta + + @property + def target_quat_w(self) -> ProxyArray: + """Orientation of target frame(s) after offset in world frame. + + Shape is (num_instances, num_target_frames), dtype = wp.quatf. In torch this resolves to + (num_instances, num_target_frames, 4). The orientation is provided in (x, y, z, w) format. + """ + if self._target_quat_w_ta is None: + self._target_quat_w_ta = ProxyArray(self._target_quat_w) + return self._target_quat_w_ta + + @property + def source_pose_w(self) -> ProxyArray: + """Pose of source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + wp.launch( + concat_pos_and_quat_to_pose_1d_kernel, + dim=self._num_envs, + inputs=[self._source_pos_w, self._source_quat_w], + outputs=[self._source_pose_w], + device=self._device, + ) + if self._source_pose_w_ta is None: + self._source_pose_w_ta = ProxyArray(self._source_pose_w) + return self._source_pose_w_ta + + @property + def source_pos_w(self) -> ProxyArray: + """Position of source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._source_pos_w_ta is None: + self._source_pos_w_ta = ProxyArray(self._source_pos_w) + return self._source_pos_w_ta + + @property + def source_quat_w(self) -> ProxyArray: + """Orientation of source frame after offset in world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + The orientation is provided in (x, y, z, w) format. + """ + if self._source_quat_w_ta is None: + self._source_quat_w_ta = ProxyArray(self._source_quat_w) + return self._source_quat_w_ta + + def create_buffers( + self, + num_envs: int, + num_target_frames: int, + target_frame_names: list[str], + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_target_frames: Number of target frames. + target_frame_names: Names of target frames. + device: Device for tensor storage. + """ + self._num_envs = num_envs + self._device = device + self._num_target_frames = num_target_frames + self._target_frame_names = target_frame_names + self._source_pose_w = wp.zeros(num_envs, dtype=wp.transformf, device=device) + self._source_pos_w = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._source_quat_w = wp.zeros(num_envs, dtype=wp.quatf, device=device) + self._target_pose_w = wp.zeros((num_envs, num_target_frames), dtype=wp.transformf, device=device) + self._target_pos_w = wp.zeros((num_envs, num_target_frames), dtype=wp.vec3f, device=device) + self._target_quat_w = wp.zeros((num_envs, num_target_frames), dtype=wp.quatf, device=device) + self._target_pose_source = wp.zeros((num_envs, num_target_frames), dtype=wp.transformf, device=device) + self._target_pos_source = wp.zeros((num_envs, num_target_frames), dtype=wp.vec3f, device=device) + self._target_quat_source = wp.zeros((num_envs, num_target_frames), dtype=wp.quatf, device=device) + + # Initialize quaternions to identity (w=1). wp.zeros gives (0,0,0,0) not (0,0,0,1). + + wp.to_torch(self._source_quat_w)[:, 3] = 1.0 + wp.to_torch(self._target_quat_w)[:, :, 3] = 1.0 + wp.to_torch(self._target_quat_source)[:, :, 3] = 1.0 + + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + self._target_pose_source_ta: ProxyArray | None = None + self._target_pos_source_ta: ProxyArray | None = None + self._target_quat_source_ta: ProxyArray | None = None + self._target_pose_w_ta: ProxyArray | None = None + self._target_pos_w_ta: ProxyArray | None = None + self._target_quat_w_ta: ProxyArray | None = None + self._source_pose_w_ta: ProxyArray | None = None + self._source_pos_w_ta: ProxyArray | None = None + self._source_quat_w_ta: ProxyArray | None = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/kernels.py new file mode 100644 index 000000000000..9b612c6df3ea --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/kernels.py @@ -0,0 +1,82 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for the PhysX frame transformer sensor.""" + +import warp as wp + +# ---- Frame transformer update kernel ---- + + +@wp.kernel +def frame_transformer_update_kernel( + env_mask: wp.array(dtype=wp.bool), + raw_transforms: wp.array(dtype=wp.transformf), + source_raw_indices: wp.array(dtype=wp.int32), + target_raw_indices: wp.array2d(dtype=wp.int32), + source_offset_pos: wp.array(dtype=wp.vec3f), + source_offset_quat: wp.array(dtype=wp.quatf), + target_offset_pos: wp.array(dtype=wp.vec3f), + target_offset_quat: wp.array(dtype=wp.quatf), + source_pos_w: wp.array(dtype=wp.vec3f), + source_quat_w: wp.array(dtype=wp.quatf), + target_pos_w: wp.array2d(dtype=wp.vec3f), + target_quat_w: wp.array2d(dtype=wp.quatf), + target_pos_source: wp.array2d(dtype=wp.vec3f), + target_quat_source: wp.array2d(dtype=wp.quatf), +): + """Update frame transformer sensor data from raw PhysX transforms. + + This kernel processes raw transforms from PhysX and computes: + 1. Source frame pose in world frame (with optional offset) + 2. Target frame poses in world frame (with optional offsets) + 3. Target frame poses relative to source frame + + Args: + raw_transforms: Raw transforms from PhysX view. Shape is (N*M,) where N is num_envs and M is num_bodies. + source_raw_indices: Indices into raw_transforms for source frame per environment. Shape is (N,). + target_raw_indices: Indices into raw_transforms for target frames per (env, frame). Shape is (N, M) where M is + num_target_frames. + source_offset_pos: Optional position offset for source frame. Shape is (N, 3). + source_offset_quat: Optional quaternion offset for source frame. Shape is (N, 4). + target_offset_pos: Optional position offsets for target frames. Shape is (M, 3). + target_offset_quat: Optional quaternion offsets for target frames. Shape is (M, 4). + source_pos_w: Output source position in world frame. Shape is (N, 3). + source_quat_w: Output source quaternion in world frame. Shape is (N, 4). + target_pos_w: Output target positions in world frame. Shape is (N, M, 3). + target_quat_w: Output target quaternions in world frame. Shape is (N, M, 4). + target_pos_source: Output target positions relative to source frame. Shape is (N, M, 3). + target_quat_source: Output target quaternions relative to source frame. Shape is (N, M, 4). + """ + env_id, frame_id = wp.tid() + + if not env_mask[env_id]: + return + + # Get source frame transform + source_idx = source_raw_indices[env_id] + source_tf = raw_transforms[source_idx] + + # Apply source frame offset + source_offset_tf = wp.transform(source_offset_pos[env_id], source_offset_quat[env_id]) + source_tf_offset = wp.transform_multiply(source_tf, source_offset_tf) + source_pos_w[env_id] = wp.transform_get_translation(source_tf_offset) + source_quat_w[env_id] = wp.transform_get_rotation(source_tf_offset) + + # Get target frame transform + target_idx = target_raw_indices[env_id, frame_id] + target_tf = raw_transforms[target_idx] + + # Apply target offset if needed + target_offset_tf = wp.transform(target_offset_pos[frame_id], target_offset_quat[frame_id]) + target_tf_offset = wp.transform_multiply(target_tf, target_offset_tf) + target_pos_w[env_id, frame_id] = wp.transform_get_translation(target_tf_offset) + target_quat_w[env_id, frame_id] = wp.transform_get_rotation(target_tf_offset) + + # Compute target frame relative to source frame + source_tf_inv = wp.transform_inverse(source_tf_offset) + target_relative_tf = wp.transform_multiply(source_tf_inv, target_tf_offset) + target_pos_source[env_id, frame_id] = wp.transform_get_translation(target_relative_tf) + target_quat_source[env_id, frame_id] = wp.transform_get_rotation(target_relative_tf) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py new file mode 100644 index 000000000000..68a02fd61f56 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX IMU sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.pyi new file mode 100644 index 000000000000..7513020a3ac8 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Imu", + "ImuData", +] + +from .imu import Imu +from .imu_data import ImuData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py new file mode 100644 index 000000000000..50c173103492 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py @@ -0,0 +1,208 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.sensors.imu import BaseImu + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .imu_data import ImuData +from .kernels import imu_reset_kernel, imu_update_kernel + +if TYPE_CHECKING: + from isaaclab.sensors.imu import ImuCfg + + +class Imu(BaseImu): + """The PhysX Inertial Measurement Unit (IMU) sensor. + + This sensor models a real IMU that measures angular velocity (gyroscope) and + linear acceleration (accelerometer) in the sensor's body frame. Unlike the PVA + sensor, it does not provide pose, linear velocity, angular acceleration, or + projected gravity. + + Like a real accelerometer, the linear acceleration readings always include the + contribution of gravity. The gravity vector is queried from the simulation at + initialization. + + The sensor can be attached to any prim path with a rigid ancestor in its tree. + If the provided path is not a rigid body, the closest rigid-body ancestor is used + for simulation queries. The fixed transform from that ancestor to the target prim + is computed once during initialization and composed with the configured sensor offset. + + .. note:: + + Linear acceleration is computed using numerical differentiation from velocities. + Consequently, the IMU sensor accuracy depends on the chosen physics timestep. + For sufficient accuracy, we recommend keeping the timestep at least 200 Hz. + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the IMU sensor. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + self._data = ImuData() + self._rigid_parent_expr: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Imu sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._view.count}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> ImuData: + self._update_outdated_buffers() + return self._data + + @property + def num_instances(self) -> int: + return self._view.count + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + super().reset(None, env_mask) + + wp.launch( + imu_reset_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._prev_lin_vel_w, + ], + device=self._device, + ) + + def update(self, dt: float, force_recompute: bool = False): + self._dt = dt + super().update(dt, force_recompute) + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + - If the target prim path is a rigid body, build the view directly on it. + - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor + to the target prim, and build the view on the ancestor expression. + """ + super()._initialize_impl() + self._physics_sim_view = SimulationManager.get_physics_sim_view() + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") + + ancestor_prim = sim_utils.get_first_matching_ancestor_prim( + prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) + ) + if ancestor_prim is None: + raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") + + if ancestor_prim == prim: + self._rigid_parent_expr = self.cfg.prim_path + fixed_pos_b, fixed_quat_b = None, None + else: + relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString + self._rigid_parent_expr = self.cfg.prim_path.replace("/" + relative_path, "") + fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) + + self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) + + # Query world gravity and compute accelerometer bias (real IMUs always measure gravity) + gravity = self._physics_sim_view.get_gravity() + gravity_bias = torch.tensor((-gravity[0], -gravity[1], -gravity[2]), device=self._device) + gravity_bias_torch = gravity_bias.repeat(self._view.count, 1) + self._gravity_bias_w = wp.from_torch(gravity_bias_torch.contiguous(), dtype=wp.vec3f) + + self._initialize_buffers_impl() + + # Compose the configured offset with the fixed ancestor->target transform (done once) + if fixed_pos_b is not None and fixed_quat_b is not None: + fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) + fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) + + cfg_p = wp.to_torch(self._offset_pos_b).clone() + cfg_q = wp.to_torch(self._offset_quat_b).clone() + + composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) + composed_q = math_utils.quat_mul(fixed_q, cfg_q) + + self._offset_pos_b = wp.from_torch(composed_p.contiguous(), dtype=wp.vec3f) + self._offset_quat_b = wp.from_torch(composed_q.contiguous(), dtype=wp.quatf) + + def _update_buffers_impl(self, env_mask: wp.array | None = None): + """Fills the buffers of the sensor data.""" + env_mask = self._resolve_indices_and_mask(None, env_mask) + + transforms = self._view.get_transforms().view(wp.transformf) + velocities = self._view.get_velocities().view(wp.spatial_vectorf) + wp.copy(self._coms_buffer, self._view.get_coms().view(wp.transformf)) + + wp.launch( + imu_update_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + transforms, + velocities, + self._coms_buffer, + self._offset_pos_b, + self._offset_quat_b, + self._gravity_bias_w, + self._prev_lin_vel_w, + 1.0 / self._dt, + self._data._ang_vel_b, + self._data._lin_acc_b, + ], + device=self._device, + ) + + def _initialize_buffers_impl(self): + """Create buffers for storing data.""" + self._data.create_buffers(num_envs=self._view.count, device=self._device) + + self._prev_lin_vel_w = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + + offset_pos_torch = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + offset_quat_torch = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) + self._offset_pos_b = wp.from_torch(offset_pos_torch.contiguous(), dtype=wp.vec3f) + self._offset_quat_b = wp.from_torch(offset_quat_torch.contiguous(), dtype=wp.quatf) + + self._coms_buffer = wp.zeros(self._view.count, dtype=wp.transformf, device=self._device) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py new file mode 100644 index 000000000000..7251111c3eb5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py @@ -0,0 +1,53 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.imu import BaseImuData +from isaaclab.utils.warp import ProxyArray + + +class ImuData(BaseImuData): + """Data container for the PhysX IMU sensor.""" + + @property + def ang_vel_b(self) -> ProxyArray: + """IMU frame angular velocity relative to the world expressed in IMU frame [rad/s]. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration (proper) in the IMU frame [m/s^2]. + + Zero in freefall, +g upward at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for tensor storage. + """ + self._num_envs = num_envs + self._device = device + self._ang_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._lin_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/kernels.py new file mode 100644 index 000000000000..b22912ec0faa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/kernels.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def imu_update_kernel( + # indexing + env_mask: wp.array(dtype=wp.bool), + # PhysX view data + transforms: wp.array(dtype=wp.transformf), + velocities: wp.array(dtype=wp.spatial_vectorf), + coms: wp.array(dtype=wp.transformf), + # sensor config (per-env) + offset_pos_b: wp.array(dtype=wp.vec3f), + offset_quat_b: wp.array(dtype=wp.quatf), + gravity_bias_w: wp.array(dtype=wp.vec3f), + # previous velocities (read + write) + prev_lin_vel_w: wp.array(dtype=wp.vec3f), + # scalar + inv_dt: wp.float32, + # outputs (written in-place) + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + # 1. Extract body orientation + body_quat = wp.transform_get_rotation(transforms[idx]) + + # 2. Apply sensor offset to get sensor orientation + sensor_quat = body_quat * offset_quat_b[idx] + + # 3. Extract lin/ang velocity + lin_vel_w = wp.spatial_top(velocities[idx]) + ang_vel_w = wp.spatial_bottom(velocities[idx]) + + # 4. COM correction: lin_vel += cross(ang_vel, quat_rotate(body_quat, offset_pos - com_pos)) + com_pos_b = wp.transform_get_translation(coms[idx]) + lever_arm = wp.quat_rotate(body_quat, offset_pos_b[idx] - com_pos_b) + lin_vel_w = lin_vel_w + wp.cross(ang_vel_w, lever_arm) + + # 5. Numerical differentiation for linear acceleration (world frame) + lin_acc_w = (lin_vel_w - prev_lin_vel_w[idx]) * inv_dt + gravity_bias_w[idx] + + # 6. Rotate world -> body using sensor orientation + ang_vel_b = wp.quat_rotate_inv(sensor_quat, ang_vel_w) + lin_acc_b = wp.quat_rotate_inv(sensor_quat, lin_acc_w) + + # 7. Store results + out_ang_vel_b[idx] = ang_vel_b + out_lin_acc_b[idx] = lin_acc_b + + # Update previous velocity + prev_lin_vel_w[idx] = lin_vel_w + + +@wp.kernel +def imu_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + prev_lin_vel_w: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + out_ang_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_lin_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + prev_lin_vel_w[idx] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.py new file mode 100644 index 000000000000..539f0250a568 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for the PhysX joint-wrench sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.pyi new file mode 100644 index 000000000000..b2bcd3582d44 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/__init__.pyi @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = ["JointWrenchSensor", "JointWrenchSensorData"] + +from .joint_wrench_sensor import JointWrenchSensor +from .joint_wrench_sensor_data import JointWrenchSensorData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor.py new file mode 100644 index 000000000000..9ae39c621240 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor.py @@ -0,0 +1,253 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import numpy as np +import warp as wp + +from pxr import Usd, UsdPhysics + +from isaaclab.sensors.joint_wrench import BaseJointWrenchSensor +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .joint_wrench_sensor_data import JointWrenchSensorData +from .kernels import joint_wrench_reset_kernel, joint_wrench_split_kernel + +if TYPE_CHECKING: + import omni.physics.tensors.api as physx + + from isaaclab.sensors.joint_wrench import JointWrenchSensorCfg + +logger = logging.getLogger(__name__) + + +class JointWrenchSensor(BaseJointWrenchSensor): + """PhysX joint reaction wrench sensor. + + The sensor reads PhysX's incoming joint wrench for every articulation link + and exposes the linear force [N] and angular torque [N·m] components in + the child-side joint frame, with torque referenced at the child-side joint + anchor. The root body's entry is included. + + :attr:`~isaaclab.sensors.SensorBaseCfg.prim_path` must point at either + the articulation root prim or a parent prim containing a single + articulation root in every environment. + """ + + cfg: JointWrenchSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the joint wrench sensor.""" + + def __init__(self, cfg: JointWrenchSensorCfg): + """Initialize the PhysX joint-wrench sensor. + + Args: + cfg: The configuration parameters. + """ + super().__init__(cfg) + + self._data = JointWrenchSensorData() + self._physics_sim_view = None + self._root_view: physx.ArticulationView | None = None + self._joint_pos_b: wp.array | None = None + self._joint_quat_b: wp.array | None = None + self._num_bodies: int = 0 + + def __str__(self) -> str: + """String representation of the sensor instance.""" + return ( + f"Joint wrench sensor @ '{self.cfg.prim_path}': \n" + f"\tbackend : physx\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of bodies : {self._num_bodies}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + def body_names(self) -> list[str]: + """Ordered names of the bodies whose incoming joint wrench is reported.""" + return self._data._body_names + + @property + def data(self) -> JointWrenchSensorData: + """The joint-wrench sensor data.""" + self._update_outdated_buffers() + return self._data + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None) -> None: + """Reset the sensor buffers for the given environments. + + Args: + env_ids: The environment ids to reset. + env_mask: The mask used to reset the environments. Shape is ``(num_envs,)``. + """ + if self._data._force is None or self._data._torque is None: + return + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + super().reset(None, env_mask) + wp.launch( + joint_wrench_reset_kernel, + dim=(self._num_envs, self._num_bodies), + inputs=[env_mask, self._data._force, self._data._torque], + device=self._device, + ) + + """ + Implementation + """ + + def _initialize_impl(self) -> None: + """PHYSICS_READY callback: builds the articulation view and allocates buffers.""" + super()._initialize_impl() + + self._physics_sim_view = SimulationManager.get_physics_sim_view() + root_prim_path_expr = self._resolve_articulation_root_prim_path() + self._root_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) + if self._root_view._backend is None: + raise RuntimeError(f"Failed to create articulation view at: {root_prim_path_expr}. Check PhysX logs.") + + self._num_bodies = self._root_view.shared_metatype.link_count + if self._num_bodies == 0: + raise RuntimeError(f"Joint wrench sensor matched zero bodies at '{self.cfg.prim_path}'.") + + self._data._body_names = list(self._root_view.shared_metatype.link_names) + self._create_joint_frame_buffers() + self._data.create_buffers(num_envs=self._num_envs, num_bodies=self._num_bodies, device=self._device) + + logger.info(f"Joint wrench sensor initialized: {self._num_envs} envs, {self._num_bodies} bodies") + + def _resolve_articulation_root_prim_path(self) -> str: + """Resolve the articulation root prim path expression from the configured asset prim path.""" + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + first_env_root_prims = get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI) + and prim.GetAttribute("physxArticulation:articulationEnabled").Get() is not False, + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + return self.cfg.prim_path + root_prim_path_relative_to_prim_path + + def _create_joint_frame_buffers(self) -> None: + """Create child-side joint frame transforms indexed by PhysX link order.""" + joint_pos_b = np.zeros((self._num_bodies, 3), dtype=np.float32) + joint_quat_b = np.zeros((self._num_bodies, 4), dtype=np.float32) + joint_quat_b[:, 3] = 1.0 + + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + link_name_to_index = {name: index for index, name in enumerate(self._data._body_names)} + + for prim in Usd.PrimRange(first_env_matching_prim): + joint = UsdPhysics.Joint(prim) + if not joint or joint.GetJointEnabledAttr().Get() is False: + continue + body1_targets = joint.GetBody1Rel().GetTargets() + if len(body1_targets) == 0: + continue + body_index = link_name_to_index.get(body1_targets[0].name) + if body_index is None: + continue + + local_pos1 = joint.GetLocalPos1Attr().Get() + if local_pos1 is not None: + joint_pos_b[body_index] = (float(local_pos1[0]), float(local_pos1[1]), float(local_pos1[2])) + + local_rot1 = joint.GetLocalRot1Attr().Get() + if local_rot1 is not None: + local_rot1_imag = local_rot1.GetImaginary() + joint_quat_b[body_index] = ( + float(local_rot1_imag[0]), + float(local_rot1_imag[1]), + float(local_rot1_imag[2]), + float(local_rot1.GetReal()), + ) + + self._joint_pos_b = wp.array(joint_pos_b, dtype=wp.vec3f, device=self._device) + self._joint_quat_b = wp.array(joint_quat_b, dtype=wp.quatf, device=self._device) + + def _update_buffers_impl(self, env_mask: wp.array) -> None: + """Read PhysX incoming joint wrenches and split them into force / torque buffers. + + Args: + env_mask: A mask containing which environments need to be updated. Shape is ``(num_envs,)``. + """ + if self._root_view is None: + raise RuntimeError( + f"Joint wrench sensor '{self.cfg.prim_path}': not initialized." + " Access sensor data only after sim.reset() has been called." + ) + if self._joint_pos_b is None or self._joint_quat_b is None: + raise RuntimeError(f"Joint wrench sensor '{self.cfg.prim_path}': joint frame buffers are not initialized.") + + incoming_joint_wrench = self._root_view.get_link_incoming_joint_force().view(wp.spatial_vectorf) + wp.launch( + joint_wrench_split_kernel, + dim=(self._num_envs, self._num_bodies), + inputs=[ + env_mask, + incoming_joint_wrench, + self._joint_pos_b, + self._joint_quat_b, + self._data._force, + self._data._torque, + ], + device=self._device, + ) + + def _invalidate_initialize_callback(self, event) -> None: + """Drop view, cached sizes, and buffers when physics stops. + + Args: + event: An invalidate event. + """ + super()._invalidate_initialize_callback(event) + self._physics_sim_view = None + self._root_view = None + self._joint_pos_b = None + self._joint_quat_b = None + self._num_bodies = 0 + self._data._force = None + self._data._torque = None + self._data._body_names = [] + self._data._force_ta = None + self._data._torque_ta = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor_data.py new file mode 100644 index 000000000000..80cea4c1a5ce --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/joint_wrench_sensor_data.py @@ -0,0 +1,67 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp + +from isaaclab.sensors.joint_wrench import BaseJointWrenchSensorData +from isaaclab.utils.warp import ProxyArray + + +class JointWrenchSensorData(BaseJointWrenchSensorData): + """Data container for the PhysX joint-wrench sensor.""" + + def __init__(self): + self._force: wp.array | None = None + self._torque: wp.array | None = None + self._body_names: list[str] = [] + self._force_ta: ProxyArray | None = None + self._torque_ta: ProxyArray | None = None + + @property + def force(self) -> ProxyArray | None: + """Linear component of the incoming joint wrench [N]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + if self._force is None: + return None + if self._force_ta is None: + self._force_ta = ProxyArray(self._force) + return self._force_ta + + @property + def torque(self) -> ProxyArray | None: + """Angular component of the incoming joint wrench [N·m]. + + Expressed in the frame selected by + :attr:`~isaaclab.sensors.JointWrenchSensorCfg.convention`. Shape is + ``(num_envs, num_bodies)``, dtype ``wp.vec3f``. In torch this resolves + to ``(num_envs, num_bodies, 3)``. ``None`` before the simulation is + initialized. + """ + if self._torque is None: + return None + if self._torque_ta is None: + self._torque_ta = ProxyArray(self._torque) + return self._torque_ta + + def create_buffers(self, num_envs: int, num_bodies: int, device: str) -> None: + """Allocate internal buffers. + + Args: + num_envs: Number of environments. + num_bodies: Number of bodies with incoming joint wrench reports. + device: Device for array storage. + """ + self._force = wp.zeros((num_envs, num_bodies), dtype=wp.vec3f, device=device) + self._torque = wp.zeros((num_envs, num_bodies), dtype=wp.vec3f, device=device) + self._force_ta = None + self._torque_ta = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/kernels.py new file mode 100644 index 000000000000..bb3d0c7cf67c --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/joint_wrench/kernels.py @@ -0,0 +1,47 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def joint_wrench_split_kernel( + env_mask: wp.array(dtype=wp.bool), + incoming_joint_wrench: wp.array(dtype=wp.spatial_vectorf, ndim=2), + joint_pos_b: wp.array(dtype=wp.vec3f), + joint_quat_b: wp.array(dtype=wp.quatf), + out_force: wp.array(dtype=wp.vec3f, ndim=2), + out_torque: wp.array(dtype=wp.vec3f, ndim=2), +): + """Convert PhysX incoming joint spatial wrenches into the child-side joint frame.""" + env, body = wp.tid() + if not env_mask[env]: + return + + wrench = incoming_joint_wrench[env, body] + force_b = wp.spatial_top(wrench) + torque_b = wp.spatial_bottom(wrench) + + # PhysX reports the wrench in body1's frame, referenced at body1's origin. + # Shift torque to the child-side joint anchor and rotate both components + # into the child-side joint frame. + torque_joint_anchor_b = torque_b - wp.cross(joint_pos_b[body], force_b) + out_force[env, body] = wp.quat_rotate_inv(joint_quat_b[body], force_b) + out_torque[env, body] = wp.quat_rotate_inv(joint_quat_b[body], torque_joint_anchor_b) + + +@wp.kernel +def joint_wrench_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_force: wp.array(dtype=wp.vec3f, ndim=2), + out_torque: wp.array(dtype=wp.vec3f, ndim=2), +): + """Zero force and torque entries for the environments selected by ``env_mask``.""" + env, body = wp.tid() + if not env_mask[env]: + return + + out_force[env, body] = wp.vec3f(0.0, 0.0, 0.0) + out_torque[env, body] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/kernels.py new file mode 100644 index 000000000000..c240500c1499 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/kernels.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared warp kernels for PhysX sensors.""" + +import warp as wp + + +@wp.kernel +def concat_pos_and_quat_to_pose_kernel( + pos: wp.array2d(dtype=wp.vec3f), + quat: wp.array2d(dtype=wp.quatf), + pose: wp.array2d(dtype=wp.transformf), +): + """Concatenate 2D position and quaternion arrays to pose. + + Args: + pos: Position array. Shape is (N, B). + quat: Quaternion array. Shape is (N, B). + pose: Pose array. Shape is (N, B). + """ + env, sensor = wp.tid() + pose[env, sensor] = wp.transform(pos[env, sensor], quat[env, sensor]) + + +@wp.kernel +def concat_pos_and_quat_to_pose_1d_kernel( + pos: wp.array(dtype=wp.vec3f), + quat: wp.array(dtype=wp.quatf), + pose: wp.array(dtype=wp.transformf), +): + """Concatenate 1D position and quaternion arrays to pose. + + Args: + pos: Position array. Shape is (N,). + quat: Quaternion array. Shape is (N,). + pose: Pose array. Shape is (N,). + """ + env = wp.tid() + pose[env] = wp.transform(pos[env], quat[env]) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.py new file mode 100644 index 000000000000..aba8c3b7af7f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX PVA sensor.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.pyi new file mode 100644 index 000000000000..3ad9f6fff741 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/pva/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "Pva", + "PvaData", +] + +from .pva import Pva +from .pva_data import PvaData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/pva/kernels.py b/source/isaaclab_physx/isaaclab_physx/sensors/pva/kernels.py new file mode 100644 index 000000000000..a76c6dcf5fda --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/pva/kernels.py @@ -0,0 +1,106 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import warp as wp + + +@wp.kernel +def pva_update_kernel( + # indexing + env_mask: wp.array(dtype=wp.bool), + # PhysX view data + transforms: wp.array(dtype=wp.transformf), + velocities: wp.array(dtype=wp.spatial_vectorf), + coms: wp.array(dtype=wp.transformf), + # sensor config (per-env) + offset_pos_b: wp.array(dtype=wp.vec3f), + offset_quat_b: wp.array(dtype=wp.quatf), + gravity_vec_w: wp.array(dtype=wp.vec3f), + # previous velocities (read + write) + prev_lin_vel_w: wp.array(dtype=wp.vec3f), + prev_ang_vel_w: wp.array(dtype=wp.vec3f), + # scalar + inv_dt: wp.float32, + # outputs (written in-place) + out_pos_w: wp.array(dtype=wp.vec3f), + out_quat_w: wp.array(dtype=wp.quatf), + out_lin_vel_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_acc_b: wp.array(dtype=wp.vec3f), + out_projected_gravity_b: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + # 1. Extract body pose + body_pos = wp.transform_get_translation(transforms[idx]) + body_quat = wp.transform_get_rotation(transforms[idx]) + + # 2. Apply sensor offset + sensor_pos = body_pos + wp.quat_rotate(body_quat, offset_pos_b[idx]) + sensor_quat = body_quat * offset_quat_b[idx] + + # 3. Extract lin/ang velocity + lin_vel_w = wp.spatial_top(velocities[idx]) + ang_vel_w = wp.spatial_bottom(velocities[idx]) + + # 4. COM correction: lin_vel += cross(ang_vel, quat_rotate(body_quat, offset_pos - com_pos)) + com_pos_b = wp.transform_get_translation(coms[idx]) + lever_arm = wp.quat_rotate(body_quat, offset_pos_b[idx] - com_pos_b) + lin_vel_w = lin_vel_w + wp.cross(ang_vel_w, lever_arm) + + # 5. Numerical differentiation (world frame) + lin_acc_w = (lin_vel_w - prev_lin_vel_w[idx]) * inv_dt + ang_acc_w = (ang_vel_w - prev_ang_vel_w[idx]) * inv_dt + + # 6. Rotate world -> body using sensor orientation + lin_vel_b = wp.quat_rotate_inv(sensor_quat, lin_vel_w) + ang_vel_b = wp.quat_rotate_inv(sensor_quat, ang_vel_w) + lin_acc_b = wp.quat_rotate_inv(sensor_quat, lin_acc_w) + ang_acc_b = wp.quat_rotate_inv(sensor_quat, ang_acc_w) + projected_gravity_b = wp.quat_rotate_inv(sensor_quat, gravity_vec_w[idx]) + + # 7. Store results + out_pos_w[idx] = sensor_pos + out_quat_w[idx] = sensor_quat + out_lin_vel_b[idx] = lin_vel_b + out_ang_vel_b[idx] = ang_vel_b + out_lin_acc_b[idx] = lin_acc_b + out_ang_acc_b[idx] = ang_acc_b + out_projected_gravity_b[idx] = projected_gravity_b + + # Update previous velocities + prev_lin_vel_w[idx] = lin_vel_w + prev_ang_vel_w[idx] = ang_vel_w + + +@wp.kernel +def pva_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + out_pos_w: wp.array(dtype=wp.vec3f), + out_quat_w: wp.array(dtype=wp.quatf), + out_lin_vel_b: wp.array(dtype=wp.vec3f), + out_ang_vel_b: wp.array(dtype=wp.vec3f), + out_lin_acc_b: wp.array(dtype=wp.vec3f), + out_ang_acc_b: wp.array(dtype=wp.vec3f), + out_projected_gravity_b: wp.array(dtype=wp.vec3f), + prev_lin_vel_w: wp.array(dtype=wp.vec3f), + prev_ang_vel_w: wp.array(dtype=wp.vec3f), +): + idx = wp.tid() + if not env_mask[idx]: + return + + out_pos_w[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_quat_w[idx] = wp.quatf(0.0, 0.0, 0.0, 1.0) + out_lin_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_ang_vel_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_lin_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_ang_acc_b[idx] = wp.vec3f(0.0, 0.0, 0.0) + out_projected_gravity_b[idx] = wp.vec3f(0.0, 0.0, -1.0) + prev_lin_vel_w[idx] = wp.vec3f(0.0, 0.0, 0.0) + prev_ang_vel_w[idx] = wp.vec3f(0.0, 0.0, 0.0) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva.py b/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva.py new file mode 100644 index 000000000000..32f4549e416d --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva.py @@ -0,0 +1,301 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +from pxr import UsdGeom, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.pva import BasePva +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.physics import PhysxManager as SimulationManager + +from .kernels import pva_reset_kernel, pva_update_kernel +from .pva_data import PvaData + +if TYPE_CHECKING: + from isaaclab.sensors.pva import PvaCfg + + +class Pva(BasePva): + """The PhysX Pose Velocity Acceleration (PVA) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame + linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular + accelerations/velocities. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + We are computing the accelerations using numerical differentiation from the velocities. Consequently, the + PVA sensor accuracy depends on the chosen physx timestep. For a sufficient accuracy, we recommend to keep the + timestep at least as 200Hz. + + .. note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation. The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. + + """ + + cfg: PvaCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the PVA sensor.""" + + def __init__(self, cfg: PvaCfg): + """Initializes the PVA sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data = PvaData() + + # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) + self._rigid_parent_expr: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"PVA sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._view.count}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> PvaData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_instances(self) -> int: + return self._view.count + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None, env_mask: wp.array | None = None): + # resolve indices and mask + env_mask = self._resolve_indices_and_mask(env_ids, env_mask) + # reset the timestamps + super().reset(None, env_mask) + + wp.launch( + pva_reset_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + self._data._pos_w, + self._data._quat_w, + self._data._lin_vel_b, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._data._ang_acc_b, + self._data._projected_gravity_b, + self._prev_lin_vel_w, + self._prev_ang_vel_w, + ], + device=self._device, + ) + + def update(self, dt: float, force_recompute: bool = False): + # save timestamp + self._dt = dt + # execute updating + super().update(dt, force_recompute) + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + - If the target prim path is a rigid body, build the view directly on it. + - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor + to the target prim, and build the view on the ancestor expression. + """ + # Initialize parent class + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check if the prim at path is a rigid prim + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") + + # Find the first matching ancestor prim that implements rigid body API + ancestor_prim = sim_utils.get_first_matching_ancestor_prim( + prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) + ) + if ancestor_prim is None: + raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") + # Convert ancestor prim path to expression + if ancestor_prim == prim: + self._rigid_parent_expr = self.cfg.prim_path + fixed_pos_b, fixed_quat_b = None, None + else: + # Convert ancestor prim path to expression by stripping the relative + # suffix (including its leading '/') so no trailing '/' remains. + relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString + self._rigid_parent_expr = self.cfg.prim_path.replace("/" + relative_path, "") + # Resolve the relative pose between the target prim and the ancestor prim + fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) + + # Create the rigid body view on the ancestor + self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) + + # Get world gravity + gravity = self._physics_sim_view.get_gravity() + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir_repeated = gravity_dir.repeat(self.num_instances, 1) + self.GRAVITY_VEC_W = ProxyArray(wp.from_torch(gravity_dir_repeated.contiguous(), dtype=wp.vec3f)) + + # Create internal buffers + self._initialize_buffers_impl() + + # Compose the configured offset with the fixed ancestor->target transform (done once) + # new_offset = fixed * cfg.offset + # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg + if fixed_pos_b is not None and fixed_quat_b is not None: + # Broadcast fixed transform across instances + fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) + fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) + + cfg_p = wp.to_torch(self._offset_pos_b).clone() + cfg_q = wp.to_torch(self._offset_quat_b).clone() + + composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) + composed_q = math_utils.quat_mul(fixed_q, cfg_q) + + self._offset_pos_b = wp.from_torch(composed_p.contiguous(), dtype=wp.vec3f) + self._offset_quat_b = wp.from_torch(composed_q.contiguous(), dtype=wp.quatf) + + def _update_buffers_impl(self, env_mask: wp.array | None = None): + """Fills the buffers of the sensor data.""" + env_mask = self._resolve_indices_and_mask(None, env_mask) + + # Fetch view data as warp typed arrays + transforms = self._view.get_transforms().view(wp.transformf) + velocities = self._view.get_velocities().view(wp.spatial_vectorf) + # get_coms() returns a CPU warp array; copy to pre-allocated GPU buffer + wp.copy(self._coms_buffer, self._view.get_coms().view(wp.transformf)) + + wp.launch( + pva_update_kernel, + dim=self._num_envs, + inputs=[ + env_mask, + transforms, + velocities, + self._coms_buffer, + self._offset_pos_b, + self._offset_quat_b, + self.GRAVITY_VEC_W, + self._prev_lin_vel_w, + self._prev_ang_vel_w, + 1.0 / self._dt, + self._data._pos_w, + self._data._quat_w, + self._data._lin_vel_b, + self._data._ang_vel_b, + self._data._lin_acc_b, + self._data._ang_acc_b, + self._data._projected_gravity_b, + ], + device=self._device, + ) + + def _initialize_buffers_impl(self): + """Create buffers for storing data.""" + # Create data buffers via data class + self._data.create_buffers(num_envs=self._view.count, device=self._device) + + # Sensor-internal buffers for velocity tracking (not exposed via data) + self._prev_lin_vel_w = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + self._prev_ang_vel_w = wp.zeros(self._view.count, dtype=wp.vec3f, device=self._device) + + # Store sensor offset (applied relative to rigid source). + # This may be composed later with a fixed ancestor->target transform. + offset_pos_torch = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + offset_quat_torch = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) + self._offset_pos_b = wp.from_torch(offset_pos_torch.contiguous(), dtype=wp.vec3f) + self._offset_quat_b = wp.from_torch(offset_quat_torch.contiguous(), dtype=wp.quatf) + + # Pre-allocate GPU buffer for COMs (get_coms() returns CPU array) + self._coms_buffer = wp.zeros(self._view.count, dtype=wp.transformf, device=self._device) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.acceleration_visualizer.set_visibility(True) + else: + if hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self._view is None: + return + # get marker location + # -- base state (convert warp -> torch for visualization) + base_pos_w = self._data.pos_w.torch.clone() + base_pos_w[:, 2] += 0.5 + # -- resolve the scales + default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale + arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.torch.shape[0], 1) + # get up axis of current stage + up_axis = UsdGeom.GetStageUpAxis(self.stage) + # arrow-direction + pos_w_torch = self._data.pos_w.torch + quat_w_torch = self._data.quat_w.torch + lin_acc_b_torch = self._data.lin_acc_b.torch + quat_opengl = math_utils.quat_from_matrix( + math_utils.create_rotation_matrix_from_view( + pos_w_torch, + pos_w_torch + math_utils.quat_apply(quat_w_torch, lin_acc_b_torch), + up_axis=up_axis, + device=self._device, + ) + ) + quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") + # display markers + self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva_data.py new file mode 100644 index 000000000000..3128551c3273 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/pva/pva_data.py @@ -0,0 +1,147 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging + +import warp as wp + +from isaaclab.sensors.pva import BasePvaData +from isaaclab.utils.warp import ProxyArray + +from isaaclab_physx.sensors.kernels import concat_pos_and_quat_to_pose_1d_kernel + +logger = logging.getLogger(__name__) + + +class PvaData(BasePvaData): + """Data container for the PhysX PVA sensor.""" + + @property + def pose_w(self) -> ProxyArray: + """Pose of the sensor origin in world frame. + + Shape is (num_instances,), dtype = wp.transformf. In torch this resolves to (num_instances, 7). + The pose is provided in (x, y, z, qx, qy, qz, qw) format. + """ + wp.launch( + concat_pos_and_quat_to_pose_1d_kernel, + dim=self._num_envs, + inputs=[self._pos_w, self._quat_w], + outputs=[self._pose_w], + device=self._device, + ) + if self._pose_w_ta is None: + self._pose_w_ta = ProxyArray(self._pose_w) + return self._pose_w_ta + + @property + def pos_w(self) -> ProxyArray: + """Position of the sensor origin in world frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._pos_w_ta is None: + self._pos_w_ta = ProxyArray(self._pos_w) + return self._pos_w_ta + + @property + def quat_w(self) -> ProxyArray: + """Orientation of the sensor origin in world frame. + + Shape is (num_instances,), dtype = wp.quatf. In torch this resolves to (num_instances, 4). + The orientation is provided in (x, y, z, w) format. + """ + if self._quat_w_ta is None: + self._quat_w_ta = ProxyArray(self._quat_w) + return self._quat_w_ta + + @property + def projected_gravity_b(self) -> ProxyArray: + """Gravity direction unit vector projected on the PVA frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._projected_gravity_b_ta is None: + self._projected_gravity_b_ta = ProxyArray(self._projected_gravity_b) + return self._projected_gravity_b_ta + + @property + def lin_vel_b(self) -> ProxyArray: + """PVA frame linear velocity relative to the world expressed in PVA frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._lin_vel_b_ta is None: + self._lin_vel_b_ta = ProxyArray(self._lin_vel_b) + return self._lin_vel_b_ta + + @property + def ang_vel_b(self) -> ProxyArray: + """PVA frame angular velocity relative to the world expressed in PVA frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._ang_vel_b_ta is None: + self._ang_vel_b_ta = ProxyArray(self._ang_vel_b) + return self._ang_vel_b_ta + + @property + def lin_acc_b(self) -> ProxyArray: + """Linear acceleration (coordinate) in the PVA frame [m/s^2]. + + Equal to -g in freefall, zero at rest. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._lin_acc_b_ta is None: + self._lin_acc_b_ta = ProxyArray(self._lin_acc_b) + return self._lin_acc_b_ta + + @property + def ang_acc_b(self) -> ProxyArray: + """PVA frame angular acceleration relative to the world expressed in PVA frame. + + Shape is (num_instances,), dtype = wp.vec3f. In torch this resolves to (num_instances, 3). + """ + if self._ang_acc_b_ta is None: + self._ang_acc_b_ta = ProxyArray(self._ang_acc_b) + return self._ang_acc_b_ta + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for tensor storage. + """ + self._num_envs = num_envs + self._device = device + self._pose_w = wp.zeros(num_envs, dtype=wp.transformf, device=device) + self._pos_w = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._quat_w = wp.zeros(num_envs, dtype=wp.quatf, device=device) + # Initialize quaternion to identity (w=1): warp quatf is (x,y,z,w) + # Use torch interop to set the w component + quat_torch = wp.to_torch(self._quat_w) + quat_torch[:, 3] = 1.0 + self._projected_gravity_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + # Initialize projected gravity to (0, 0, -1) + pg_torch = wp.to_torch(self._projected_gravity_b) + pg_torch[:, 2] = -1.0 + self._lin_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._ang_vel_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._lin_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + self._ang_acc_b = wp.zeros(num_envs, dtype=wp.vec3f, device=device) + + # -- Pinned ProxyArray cache (one per read property, lazily created on first access) + self._pose_w_ta: ProxyArray | None = None + self._pos_w_ta: ProxyArray | None = None + self._quat_w_ta: ProxyArray | None = None + self._projected_gravity_b_ta: ProxyArray | None = None + self._lin_vel_b_ta: ProxyArray | None = None + self._ang_vel_b_ta: ProxyArray | None = None + self._lin_acc_b_ta: ProxyArray | None = None + self._ang_acc_b_ta: ProxyArray | None = None diff --git a/source/isaaclab_physx/isaaclab_physx/sim/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/__init__.py new file mode 100644 index 000000000000..c315db7b9b3e --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing simulation-specific functionalities for PhysX backend.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sim/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sim/__init__.pyi new file mode 100644 index 000000000000..abc8d0087afd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/__init__.pyi @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "define_deformable_body_properties", + "modify_deformable_body_properties", + "DeformableBodyPropertiesCfg", + "DeformableObjectSpawnerCfg", + "spawn_deformable_body_material", + "DeformableBodyMaterialCfg", + "SurfaceDeformableBodyMaterialCfg", + "views", +] + +from .schemas import ( + define_deformable_body_properties, + modify_deformable_body_properties, + DeformableBodyPropertiesCfg +) +from .spawners import ( + DeformableObjectSpawnerCfg, + spawn_deformable_body_material, + DeformableBodyMaterialCfg, + SurfaceDeformableBodyMaterialCfg, +) +from . import views diff --git a/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.py new file mode 100644 index 000000000000..b0d1477483d6 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module containing utilities for schemas used in Omniverse for PhysX backend.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.pyi new file mode 100644 index 000000000000..bb0e51a19b4b --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/schemas/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "define_deformable_body_properties", + "modify_deformable_body_properties", + "DeformableBodyPropertiesCfg", +] + +from .schemas import ( + define_deformable_body_properties, + modify_deformable_body_properties, +) +from .schemas_cfg import DeformableBodyPropertiesCfg diff --git a/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas.py b/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas.py new file mode 100644 index 000000000000..5b57b93d7ff4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas.py @@ -0,0 +1,208 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# needed to import for allowing type-hinting: Usd.Stage | None +from __future__ import annotations + +import logging + +from omni.physx.scripts import deformableUtils +from pxr import Usd + +from isaaclab.sim.utils import ( + apply_nested, + get_all_matching_child_prims, + safe_set_attribute_on_usd_prim, +) +from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.utils.string import to_camel_case + +from isaaclab_physx.sim.schemas.schemas_cfg import DeformableBodyPropertiesCfg + +# import logger +logger = logging.getLogger(__name__) + + +""" +Deformable body properties. +""" + + +def define_deformable_body_properties( + prim_path: str, + cfg: DeformableBodyPropertiesCfg, + stage: Usd.Stage | None = None, + deformable_type: str = "volume", + sim_mesh_prim_path: str | None = None, +): + """Apply the deformable body schema on the input prim and set its properties. + + See :func:`modify_deformable_body_properties` for more details on how the properties are set. + + .. note:: + If the input prim is not a mesh, this function will traverse the prim and find the first mesh + under it. If no mesh or multiple meshes are found, an error is raised. This is because the deformable + body schema can only be applied to a single mesh. + + Args: + prim_path: The prim path where to apply the deformable body schema. + cfg: The configuration for the deformable body. + stage: The stage where to find the prim. Defaults to None, in which case the + current stage is used. + deformable_type: The type of the deformable body (surface or volume). + This is used to determine which PhysX API to use for the deformable body. Defaults to "volume". + sim_mesh_prim_path: Optional override for the simulation mesh prim path. + If None, it is set to ``{prim_path}/sim_mesh`` for surface deformables + and ``{prim_path}/sim_tetmesh`` for volume deformables. + + Raises: + ValueError: When the prim path is not valid. + ValueError: When the prim has no mesh or multiple meshes. + RuntimeError: When setting the deformable body properties fails. + """ + # get stage handle + if stage is None: + stage = get_current_stage() + + # get USD prim + prim = stage.GetPrimAtPath(prim_path) + # check if prim path is valid + if not prim.IsValid(): + raise ValueError(f"Prim path '{prim_path}' is not valid.") + + # traverse the prim and get the mesh. If none or multiple meshes are found, raise error. + matching_prims = get_all_matching_child_prims(prim_path, lambda p: p.GetTypeName() == "Mesh") + # check if the volume deformable mesh is valid + if len(matching_prims) == 0: + raise ValueError(f"Could not find any mesh in '{prim_path}'. Please check asset.") + if len(matching_prims) > 1: + # get list of all meshes found + mesh_paths = [p.GetPrimPath() for p in matching_prims] + raise ValueError( + f"Found multiple meshes in '{prim_path}': {mesh_paths}." + " Deformable body schema can only be applied to one mesh." + ) + mesh_prim = matching_prims[0] + mesh_prim_path = mesh_prim.GetPrimPath() + + # check if the prim is valid + if not mesh_prim.IsValid(): + raise ValueError(f"Mesh prim path '{mesh_prim_path}' is not valid.") + + # set root prim properties based on the type of the deformable mesh (surface vs volume) + if deformable_type == "surface": + sim_mesh_prim_path = prim_path + "/sim_mesh" if sim_mesh_prim_path is None else sim_mesh_prim_path + success = deformableUtils.create_auto_surface_deformable_hierarchy( + stage=stage, + root_prim_path=prim_path, + simulation_mesh_path=sim_mesh_prim_path, + cooking_src_mesh_path=mesh_prim_path, + cooking_src_simplification_enabled=False, + set_visibility_with_guide_purpose=True, + ) + elif deformable_type == "volume": + sim_mesh_prim_path = prim_path + "/sim_tetmesh" if sim_mesh_prim_path is None else sim_mesh_prim_path + success = deformableUtils.create_auto_volume_deformable_hierarchy( + stage=stage, + root_prim_path=prim_path, + simulation_tetmesh_path=sim_mesh_prim_path, + collision_tetmesh_path=sim_mesh_prim_path, + cooking_src_mesh_path=mesh_prim_path, + simulation_hex_mesh_enabled=False, + cooking_src_simplification_enabled=False, + set_visibility_with_guide_purpose=True, + ) + else: + raise ValueError( + f"""Unsupported deformable type: '{deformable_type}'. + Only surface and volume deformables are supported.""" + ) + + # api failure + if not success: + raise RuntimeError(f"Failed to set deformable body properties on prim '{mesh_prim_path}'.") + + # set deformable body properties + modify_deformable_body_properties(prim_path, cfg, stage) + + +@apply_nested +def modify_deformable_body_properties(prim_path: str, cfg: DeformableBodyPropertiesCfg, stage: Usd.Stage | None = None): + """Modify PhysX parameters for a deformable body prim. + + A `deformable body`_ is a single body (either surface or volume deformable) that can be simulated by PhysX. + Unlike rigid bodies, deformable bodies support relative motion of the nodes in the mesh. + Consequently, they can be used to simulate deformations under applied forces. + + PhysX deformable body simulation employs Finite Element Analysis (FEA) to simulate the deformations of the mesh. + It uses two meshes to represent the deformable body: + + 1. **Simulation mesh**: This mesh is used for the simulation and is the one that is deformed by the solver. + 2. **Collision mesh**: This mesh only needs to match the surface of the simulation mesh and is used for + collision detection. + + For most applications, we assume that the above two meshes are computed from the "render mesh" of the deformable + body. The render mesh is the mesh that is visible in the scene and is used for rendering purposes. It is composed + of triangles, while the simulation mesh is composed of tetrahedrons for volume deformables, + and triangles for surface deformables. + + .. caution:: + The deformable body schema is still under development by the Omniverse team. The current implementation + works with the PhysX schemas shipped with Isaac Sim 6.0.0 onwards. It may change in future releases. + + .. note:: + This function is decorated with :func:`apply_nested` that sets the properties to all the prims + (that have the schema applied on them) under the input prim path. + + .. _deformable body: https://nvidia-omniverse.github.io/PhysX/physx/5.6.1/docs/DeformableVolume.html + .. _PhysxDeformableBodyAPI: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/latest/physxschema/annotated.html + + Args: + prim_path: The prim path to the deformable body. + cfg: The configuration for the deformable body. + stage: The stage where to find the prim. Defaults to None, in which case the + current stage is used. + + Returns: + True if the properties were successfully set, False otherwise. + """ + # get stage handle + if stage is None: + stage = get_current_stage() + + # get deformable-body USD prim + deformable_body_prim = stage.GetPrimAtPath(prim_path) + # check if the prim is valid + if not deformable_body_prim.IsValid(): + return False + # check if deformable body API is applied + if "OmniPhysicsDeformableBodyAPI" not in deformable_body_prim.GetAppliedSchemas(): + return False + + # apply customization to deformable API + if "PhysxBaseDeformableBodyAPI" not in deformable_body_prim.GetAppliedSchemas(): + deformable_body_prim.AddAppliedSchema("PhysxBaseDeformableBodyAPI") + + # ensure PhysX collision API is applied on the collision mesh + if "PhysxCollisionAPI" not in deformable_body_prim.GetAppliedSchemas(): + deformable_body_prim.AddAppliedSchema("PhysxCollisionAPI") + + # convert to dict + cfg = cfg.to_dict() + # set into PhysX API + if cfg["kinematic_enabled"]: + logger.warning( + "Kinematic deformable bodies are not fully supported in the current version of Omni Physics. " + "Setting kinematic_enabled to True may lead to unexpected behavior." + ) + # prefixes for each attribute (collision attributes: physxCollision:*, and physxDeformable:* for rest) + property_prefixes = cfg["_property_prefix"] + for prefix, attr_list in property_prefixes.items(): + for attr_name in attr_list: + safe_set_attribute_on_usd_prim( + deformable_body_prim, f"{prefix}:{to_camel_case(attr_name, 'cC')}", cfg[attr_name], camel_case=False + ) + # success + return True diff --git a/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas_cfg.py b/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas_cfg.py new file mode 100644 index 000000000000..6579fee6356a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/schemas/schemas_cfg.py @@ -0,0 +1,163 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import dataclasses + +from isaaclab.utils import configclass + + +@configclass +class OmniPhysicsPropertiesCfg: + """OmniPhysics properties for a deformable body. + + These properties are set with the prefix ``omniphysics:``. For example, to set the mass of the + deformable body, you would set the property ``omniphysics:mass``. + + See the OmniPhysics documentation for more information on the available properties. + """ + + deformable_body_enabled: bool | None = None + """Enables deformable body.""" + + kinematic_enabled: bool = False + """Enables kinematic body. Defaults to False, which means that the body is not kinematic.""" + + mass: float | None = None + """The material mass in [kg]. Defaults to None, in which case the material density is used to compute the mass.""" + + +@configclass +class PhysXDeformableBodyPropertiesCfg: + """PhysX-specific properties for a deformable body. + + These properties are set with the prefix ``physxDeformableBody:`` + + For more information on the available properties, please refer to the `documentation `_. + """ + + solver_position_iteration_count: int = 16 + """Number of the solver positional iterations per step. Range is [1,255], default to 16.""" + + linear_damping: float | None = None + """Linear damping coefficient, in units of [1/s] and constrained to the range [0, inf).""" + + max_linear_velocity: float | None = None + """Maximum allowable linear velocity for the deformable body, in units of distance/second and constrained to the + range [0, inf). A negative value allows the simulation to choose suitable a per vertex value dynamically, + currently only supported for surface deformables. This can help prevent surface-surface intersections.""" + + settling_damping: float | None = None + """Additional damping applied when a vertex's velocity falls below :attr:`settling_threshold`. + Specified in units of [1/s] and constrained to the range [0, inf).""" + + settling_threshold: float | None = None + """Velocity threshold below which :attr:`settling_damping` is applied in addition to standard damping. + Specified in units of distance/second and constrained to the range [0, inf).""" + + sleep_threshold: float | None = None + """Velocity threshold below which a vertex becomes a candidate for sleeping. + Specified in units of distance/seconds and constrained to the range [0, inf).""" + + max_depenetration_velocity: float | None = None + """Maximum velocity that the solver may apply to resolve intersections. + Specified in units of distance/seconds and constrained to the range [0, inf).""" + + self_collision: bool | None = None + """Enables self-collisions for the deformable body, preventing self-intersections.""" + + self_collision_filter_distance: float | None = None + r"""Distance below which self-collision is disabled [m]. + + The default value of -inf indicates that the simulation selects a suitable value. + Constrained to range [:attr:`rest_offset` \* 2, inf]. + """ + + enable_speculative_c_c_d: bool | None = None + """Enables dynamic adjustment of contact offset based on velocity (speculative continuous collision detection).""" + + disable_gravity: bool | None = None + """Disables gravity for the deformable body.""" + + # specific to surface deformables + collision_pair_update_frequency: int | None = None + """Determines how often surface-to-surface collision pairs are updated during each time step. + Increasing this value results in more frequent updates to the contact pairs, which provides better contact points. + + For example, a value of 2 means collision pairs are updated twice per time step: + once at the beginning and once in the middle of the time step (i.e., during the middle solver iteration). + If set to 0, the solver adaptively determines when to update the surface-to-surface contact pairs, + instead of using a fixed frequency. + + Valid range: [1, :attr:`solver_position_iteration_count`]. + """ + + collision_iteration_multiplier: float | None = None + """Determines how many collision subiterations are used in each solver iteration. + By default, collision constraints are applied once per solver iteration. + Increasing this value applies collision constraints more frequently within each solver iteration. + + For example, a value of 2 means collision constraints are applied twice per solver iteration + (i.e., collision constraints are applied 2 x :attr:`solver_position_iteration_count` times per time step). + Increasing this value does not update collision pairs more frequently; + refer to :attr:`collision_pair_update_frequency` for that. + + Valid range: [1, :attr:`solver_position_iteration_count` / 2]. + """ + + +@configclass +class PhysXCollisionPropertiesCfg: + """PhysX-specific collision properties for a deformable body. + + These properties are set with the prefix ``physxCollision:``. + + See the PhysX documentation for more information on the available properties. + """ + + contact_offset: float | None = None + """Contact offset for the collision shape [m]. + + The collision detector generates contact points as soon as two shapes get closer than the sum of their + contact offsets. This quantity should be non-negative which means that contact generation can potentially start + before the shapes actually penetrate. + """ + + rest_offset: float | None = None + """Rest offset for the collision shape [m]. + + The rest offset quantifies how close a shape gets to others at rest, At rest, the distance between two + vertically stacked objects is the sum of their rest offsets. If a pair of shapes have a positive rest + offset, the shapes will be separated at rest by an air gap. + """ + + +@configclass +class DeformableBodyPropertiesCfg( + OmniPhysicsPropertiesCfg, PhysXDeformableBodyPropertiesCfg, PhysXCollisionPropertiesCfg +): + """Properties to apply to a deformable body. + + A deformable body is a body that can deform under forces, both surface and volume deformables. + The configuration allows users to specify the properties of the deformable body, + such as the solver iteration counts, damping, and self-collision. + + An FEM-based deformable body is created by providing a collision mesh and simulation mesh. The collision mesh + is used for collision detection and the simulation mesh is used for simulation. + + See :meth:`modify_deformable_body_properties` for more information. + + .. note:: + If the values are :obj:`None`, they are not modified. This is useful when you want to set only a subset of + the properties and leave the rest as-is. + """ + + _property_prefix: dict[str, list[str]] = { + "omniphysics": [field.name for field in dataclasses.fields(OmniPhysicsPropertiesCfg)], + "physxDeformableBody": [field.name for field in dataclasses.fields(PhysXDeformableBodyPropertiesCfg)], + "physxCollision": [field.name for field in dataclasses.fields(PhysXCollisionPropertiesCfg)], + } + """Mapping between the property prefixes and the properties that fall under each prefix.""" diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.py new file mode 100644 index 000000000000..82b931452f4f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module containing utilities for creating prims in Omniverse for the PhysX backend.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.pyi new file mode 100644 index 000000000000..805a9f79fd52 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "DeformableObjectSpawnerCfg", + "spawn_deformable_body_material", + "DeformableBodyMaterialCfg", + "SurfaceDeformableBodyMaterialCfg", +] + +from .spawner_cfg import DeformableObjectSpawnerCfg +from .materials import ( + spawn_deformable_body_material, + DeformableBodyMaterialCfg, + SurfaceDeformableBodyMaterialCfg, +) diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.py new file mode 100644 index 000000000000..4877bb0c1470 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for spawners that spawn PhysX-based materials.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.pyi new file mode 100644 index 000000000000..6e8a48b2f118 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "spawn_deformable_body_material", + "DeformableBodyMaterialCfg", + "SurfaceDeformableBodyMaterialCfg", +] + +from .physics_materials import spawn_deformable_body_material +from .physics_materials_cfg import ( + DeformableBodyMaterialCfg, + SurfaceDeformableBodyMaterialCfg, +) diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials.py b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials.py new file mode 100644 index 000000000000..78920cca9f8d --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials.py @@ -0,0 +1,80 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from pxr import Usd, UsdShade + +from isaaclab.sim.utils import clone, safe_set_attribute_on_usd_prim +from isaaclab.sim.utils.stage import get_current_stage +from isaaclab.utils.string import to_camel_case + +from . import physics_materials_cfg + + +@clone +def spawn_deformable_body_material(prim_path: str, cfg: physics_materials_cfg.DeformableBodyMaterialCfg) -> Usd.Prim: + """Create material with deformable-body physics properties. + + Deformable body materials are used to define the physical properties to meshes of a deformable body. These + include the friction and deformable body properties. For more information on deformable body material, + please refer to the documentation on `PxFEMSoftBodyMaterial`_. + + .. note:: + This function is decorated with :func:`clone` that resolves prim path into list of paths + if the input prim path is a regex pattern. This is done to support spawning multiple assets + from a single and cloning the USD prim at the given path expression. + + Args: + prim_path: The prim path or pattern to spawn the asset at. If the prim path is a regex pattern, + then the asset is spawned at all the matching prim paths. + cfg: The configuration for the physics material. + + Returns: + The spawned deformable body material prim. + + Raises: + ValueError: When a prim already exists at the specified prim path and is not a material. + + .. _PxFEMSoftBodyMaterial: https://nvidia-omniverse.github.io/PhysX/physx/5.4.1/_api_build/structPxFEMSoftBodyMaterialModel.html + """ + # get stage handle + stage = get_current_stage() + + # create material prim if no prim exists + if not stage.GetPrimAtPath(prim_path).IsValid(): + _ = UsdShade.Material.Define(stage, prim_path) + + # obtain prim + prim = stage.GetPrimAtPath(prim_path) + # check if prim is a material + if not prim.IsA(UsdShade.Material): + raise ValueError(f"A prim already exists at path: '{prim_path}' but is not a material.") + # ensure PhysX deformable body material API is applied + applied = prim.GetAppliedSchemas() + if "OmniPhysicsDeformableMaterialAPI" not in applied: + prim.AddAppliedSchema("OmniPhysicsDeformableMaterialAPI") + if "PhysxDeformableMaterialAPI" not in applied: + prim.AddAppliedSchema("PhysxDeformableMaterialAPI") + # surface deformable material API + is_surface_deformable = isinstance(cfg, physics_materials_cfg.SurfaceDeformableBodyMaterialCfg) + if is_surface_deformable: + if "OmniPhysicsSurfaceDeformableMaterialAPI" not in applied: + prim.AddAppliedSchema("OmniPhysicsSurfaceDeformableMaterialAPI") + if "PhysxSurfaceDeformableMaterialAPI" not in applied: + prim.AddAppliedSchema("PhysxSurfaceDeformableMaterialAPI") + + # convert to dict + cfg = cfg.to_dict() + del cfg["func"] + # set into PhysX API, gather prefixes for each attribute + property_prefixes = cfg["_property_prefix"] + for prefix, attr_list in property_prefixes.items(): + for attr_name in attr_list: + safe_set_attribute_on_usd_prim( + prim, f"{prefix}:{to_camel_case(attr_name, 'cC')}", cfg[attr_name], camel_case=False + ) + # return the prim + return prim diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials_cfg.py b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials_cfg.py new file mode 100644 index 000000000000..35b066fa8736 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/materials/physics_materials_cfg.py @@ -0,0 +1,122 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import dataclasses +from collections.abc import Callable + +from isaaclab.sim.spawners.materials import PhysicsMaterialCfg +from isaaclab.utils import configclass + + +@configclass +class OmniPhysicsDeformableMaterialCfg: + """OmniPhysics material properties for a deformable body. + + These properties are set with the prefix ``omniphysics:``. For example, to set the density of the + deformable body, you would set the property ``omniphysics:density``. + + See the OmniPhysics documentation for more information on the available properties. + """ + + density: float | None = None + """The material density in [kg/m^3]. Defaults to None, in which case the simulation decides the default density.""" + + static_friction: float = 0.25 + """The static friction. Defaults to 0.25.""" + + dynamic_friction: float = 0.25 + """The dynamic friction. Defaults to 0.25.""" + + youngs_modulus: float = 1000000.0 + """The Young's modulus, which defines the body's stiffness. Defaults to 1[MPa]. + + The Young's modulus is a measure of the material's ability to deform under stress. It is measured in Pascals ([Pa]). + """ + + poissons_ratio: float = 0.45 + """The Poisson's ratio which defines the body's volume preservation. Defaults to 0.45. + + The Poisson's ratio is a measure of the material's ability to expand in the lateral direction when compressed + in the axial direction. It is a dimensionless number between 0 and 0.5. Using a value of 0.5 will make the + material incompressible. + """ + + +@configclass +class OmniPhysicsSurfaceDeformableMaterialCfg(OmniPhysicsDeformableMaterialCfg): + """OmniPhysics material properties for a surface deformable body, + extending on :class:`OmniPhysicsDeformableMaterialCfg` with additional parameters for surface deformable bodies. + + These properties are set with the prefix ``omniphysics:``. + For example, to set the surface thickness of the surface deformable body, + you would set the property ``omniphysics:surfaceThickness``. + + See the OmniPhysics documentation for more information on the available properties. + """ + + surface_thickness: float = 0.01 + """The thickness of the deformable body's surface. Defaults to 0.01 meters ([m]).""" + + surface_stretch_stiffness: float = 0.0 + """The stretch stiffness of the deformable body's surface. Defaults to 0.0.""" + + surface_shear_stiffness: float = 0.0 + """The shear stiffness of the deformable body's surface. Defaults to 0.0.""" + + surface_bend_stiffness: float = 0.0 + """The bend stiffness of the deformable body's surface. Defaults to 0.0.""" + + bend_damping: float = 0.0 + """The bend damping for the deformable body's surface. Defaults to 0.0.""" + + +@configclass +class PhysXDeformableMaterialCfg: + """PhysX-specific material properties for a deformable body. + + These properties are set with the prefix ``physxDeformableBody:``. + For example, to set the elasticity damping of the deformable body, + you would set the property ``physxDeformableBody:elasticityDamping``. + + See the PhysX documentation for more information on the available properties. + """ + + elasticity_damping: float = 0.005 + """The elasticity damping for the deformable material. Defaults to 0.005.""" + + +@configclass +class DeformableBodyMaterialCfg(PhysicsMaterialCfg, OmniPhysicsDeformableMaterialCfg, PhysXDeformableMaterialCfg): + """Physics material parameters for deformable bodies. + + See :meth:`spawn_deformable_body_material` for more information. + """ + + func: Callable | str = "{DIR}.physics_materials:spawn_deformable_body_material" + + _property_prefix: dict[str, list[str]] = { + "omniphysics": [field.name for field in dataclasses.fields(OmniPhysicsDeformableMaterialCfg)], + "physxDeformableBody": [field.name for field in dataclasses.fields(PhysXDeformableMaterialCfg)], + } + """Mapping between the property prefixes and the properties that fall under each prefix.""" + + +@configclass +class SurfaceDeformableBodyMaterialCfg(DeformableBodyMaterialCfg, OmniPhysicsSurfaceDeformableMaterialCfg): + """Physics material parameters for surface deformable bodies, + extending on :class:`DeformableBodyMaterialCfg` with additional parameters for surface deformable bodies. + + See :meth:`spawn_deformable_body_material` for more information. + """ + + func: Callable | str = "{DIR}.physics_materials:spawn_deformable_body_material" + + _property_prefix: dict[str, list[str]] = { + "omniphysics": [field.name for field in dataclasses.fields(OmniPhysicsSurfaceDeformableMaterialCfg)], + "physxDeformableBody": [field.name for field in dataclasses.fields(PhysXDeformableMaterialCfg)], + } + """Extend DeformableBodyMaterialCfg properties under each prefix.""" diff --git a/source/isaaclab_physx/isaaclab_physx/sim/spawners/spawner_cfg.py b/source/isaaclab_physx/isaaclab_physx/sim/spawners/spawner_cfg.py new file mode 100644 index 000000000000..dda028508762 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/spawners/spawner_cfg.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.sim.spawners.spawner_cfg import SpawnerCfg +from isaaclab.utils import configclass + +if TYPE_CHECKING: + from isaaclab.sim import schemas + + # deformables only supported on PhysX backend + from isaaclab_physx.sim.schemas.schemas_cfg import DeformableBodyPropertiesCfg + + +@configclass +class DeformableObjectSpawnerCfg(SpawnerCfg): + """Configuration parameters for spawning a deformable asset. + + Unlike rigid objects, deformable objects are affected by forces and can deform when subjected to + external forces. This class is used to configure the properties of the deformable object. + + Deformable bodies don't have a separate collision mesh. The collision mesh is the same as the visual mesh. + The collision properties such as rest and collision offsets are specified in the :attr:`deformable_props`. + + Note: + By default, all properties are set to None. This means that no properties will be added or modified + to the prim outside of the properties available by default when spawning the prim. + """ + + mass_props: schemas.MassPropertiesCfg | None = None + """Mass properties.""" + + deformable_props: DeformableBodyPropertiesCfg | None = None + """Deformable body properties. Only supported on PhysX backend for now.""" diff --git a/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py new file mode 100644 index 000000000000..85c69b44a24f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX simulation views.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.pyi new file mode 100644 index 000000000000..789d62af9d14 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "FabricFrameView", +] + +from .fabric_frame_view import FabricFrameView diff --git a/source/isaaclab_physx/isaaclab_physx/sim/views/fabric_frame_view.py b/source/isaaclab_physx/isaaclab_physx/sim/views/fabric_frame_view.py new file mode 100644 index 000000000000..de65e8501793 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sim/views/fabric_frame_view.py @@ -0,0 +1,410 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX FrameView with Fabric GPU acceleration.""" + +from __future__ import annotations + +import logging + +import torch +import warp as wp + +from pxr import Usd + +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import SettingsManager +from isaaclab.sim.views.base_frame_view import BaseFrameView +from isaaclab.sim.views.usd_frame_view import UsdFrameView +from isaaclab.utils.warp import ProxyArray +from isaaclab.utils.warp import fabric as fabric_utils + +logger = logging.getLogger(__name__) + + +def _to_float32_2d(a: wp.array | torch.Tensor) -> wp.array | torch.Tensor: + """Ensure array is compatible with Fabric kernels (2-D float32). + + For ``wp.array`` with vec dtypes (``vec3f``, ``vec4f``), uses + :meth:`wp.array.view` for zero-copy reinterpretation. + ``torch.Tensor`` and already-correct 2-D float32 arrays pass through. + """ + if not isinstance(a, wp.array): + return a + if a.shape[0] == 0: + return a + if a.ndim == 2 and a.dtype == wp.float32: + return a + return a.view(dtype=wp.float32) + + +class FabricFrameView(BaseFrameView): + """FrameView with Fabric GPU acceleration for the PhysX backend. + + Uses composition: holds a :class:`UsdFrameView` internally for USD + fallback and non-accelerated operations (local poses, visibility, scales + when Fabric is disabled). + + When Fabric is enabled, world-pose and scale operations use GPU-accelerated + Warp kernels operating on ``omni:fabric:worldMatrix``. All other operations + delegate to the internal USD view. + + Pose getters return :class:`~isaaclab.utils.warp.ProxyArray`. Setters accept ``wp.array``. + """ + + def __init__( + self, + prim_path: str, + device: str = "cpu", + validate_xform_ops: bool = True, + sync_usd_on_fabric_write: bool = False, + stage: Usd.Stage | None = None, + ): + self._usd_view = UsdFrameView(prim_path, device=device, validate_xform_ops=validate_xform_ops, stage=stage) + self._device = device + self._sync_usd_on_fabric_write = sync_usd_on_fabric_write + + settings = SettingsManager.instance() + self._use_fabric = bool(settings.get("/physics/fabricEnabled", False)) + + if self._use_fabric and self._device == "cpu": + logger.warning( + "Fabric mode with Warp fabric-array operations is not supported on CPU devices. " + "Falling back to standard USD operations on the CPU. This may impact performance." + ) + self._use_fabric = False + + if self._use_fabric and self._device not in ("cuda", "cuda:0"): + logger.warning( + f"Fabric mode is not supported on device '{self._device}'. " + "USDRT SelectPrims and Warp fabric arrays only support cuda:0. " + "Falling back to standard USD operations. This may impact performance." + ) + self._use_fabric = False + + self._fabric_initialized = False + self._fabric_usd_sync_done = False + self._fabric_selection = None + self._fabric_to_view: wp.array | None = None + self._view_to_fabric: wp.array | None = None + self._default_view_indices: wp.array | None = None + self._fabric_hierarchy = None + self._view_index_attr = f"isaaclab:view_index:{abs(hash(self))}" + + # ------------------------------------------------------------------ + # Delegated properties + # ------------------------------------------------------------------ + + @property + def count(self) -> int: + return self._usd_view.count + + @property + def device(self) -> str: + """Device where arrays are allocated (cpu or cuda).""" + return self._device + + @property + def prims(self) -> list: + return self._usd_view.prims + + @property + def prim_paths(self) -> list[str]: + return self._usd_view.prim_paths + + # ------------------------------------------------------------------ + # Delegated operations (USD-only) + # ------------------------------------------------------------------ + + def get_visibility(self, indices=None): + return self._usd_view.get_visibility(indices) + + def set_visibility(self, visibility, indices=None): + self._usd_view.set_visibility(visibility, indices) + + # ------------------------------------------------------------------ + # World poses — Fabric-accelerated or USD fallback + # ------------------------------------------------------------------ + + def set_world_poses(self, positions=None, orientations=None, indices=None): + if not self._use_fabric: + self._usd_view.set_world_poses(positions, orientations, indices) + return + + if not self._fabric_initialized: + self._initialize_fabric() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + dummy = wp.zeros((0, 3), dtype=wp.float32, device=self._device) + positions_wp = _to_float32_2d(positions) if positions is not None else dummy + orientations_wp = ( + _to_float32_2d(orientations) + if orientations is not None + else wp.zeros((0, 4), dtype=wp.float32, device=self._device) + ) + + wp.launch( + kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, + positions_wp, + orientations_wp, + dummy, + False, + False, + False, + indices_wp, + self._view_to_fabric, + ], + device=self._fabric_device, + ) + wp.synchronize() + + self._fabric_hierarchy.update_world_xforms() + self._fabric_usd_sync_done = True + if self._sync_usd_on_fabric_write: + self._usd_view.set_world_poses(positions, orientations, indices) + + def get_world_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + if not self._use_fabric: + return self._usd_view.get_world_poses(indices) + + if not self._fabric_initialized: + self._initialize_fabric() + if not self._fabric_usd_sync_done: + self._sync_fabric_from_usd_once() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + use_cached = indices is None or indices == slice(None) + if use_cached: + positions_wp = self._fabric_positions_buf + orientations_wp = self._fabric_orientations_buf + else: + positions_wp = wp.zeros((count, 3), dtype=wp.float32, device=self._device) + orientations_wp = wp.zeros((count, 4), dtype=wp.float32, device=self._device) + + wp.launch( + kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, + positions_wp, + orientations_wp, + self._fabric_dummy_buffer, + indices_wp, + self._view_to_fabric, + ], + device=self._fabric_device, + ) + + if use_cached: + wp.synchronize() + return self._fabric_positions_ta, self._fabric_orientations_ta + return ProxyArray(positions_wp), ProxyArray(orientations_wp) + + # ------------------------------------------------------------------ + # Local poses — USD fallback (Fabric only accelerates world poses) + # ------------------------------------------------------------------ + + def set_local_poses(self, translations=None, orientations=None, indices=None): + self._usd_view.set_local_poses(translations, orientations, indices) + + def get_local_poses(self, indices: wp.array | None = None) -> tuple[ProxyArray, ProxyArray]: + return self._usd_view.get_local_poses(indices) + + # ------------------------------------------------------------------ + # Scales — Fabric-accelerated or USD fallback + # ------------------------------------------------------------------ + + def set_scales(self, scales, indices=None): + if not self._use_fabric: + self._usd_view.set_scales(scales, indices) + return + + if not self._fabric_initialized: + self._initialize_fabric() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + dummy3 = wp.zeros((0, 3), dtype=wp.float32, device=self._device) + dummy4 = wp.zeros((0, 4), dtype=wp.float32, device=self._device) + scales_wp = _to_float32_2d(scales) + + wp.launch( + kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, + dummy3, + dummy4, + scales_wp, + False, + False, + False, + indices_wp, + self._view_to_fabric, + ], + device=self._fabric_device, + ) + wp.synchronize() + + self._fabric_hierarchy.update_world_xforms() + self._fabric_usd_sync_done = True + if self._sync_usd_on_fabric_write: + self._usd_view.set_scales(scales, indices) + + def get_scales(self, indices=None): + if not self._use_fabric: + return self._usd_view.get_scales(indices) + + if not self._fabric_initialized: + self._initialize_fabric() + if not self._fabric_usd_sync_done: + self._sync_fabric_from_usd_once() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + use_cached = indices is None or indices == slice(None) + if use_cached: + scales_wp = self._fabric_scales_buf + else: + scales_wp = wp.zeros((count, 3), dtype=wp.float32, device=self._device) + + wp.launch( + kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, + self._fabric_dummy_buffer, + self._fabric_dummy_buffer, + scales_wp, + indices_wp, + self._view_to_fabric, + ], + device=self._fabric_device, + ) + + if use_cached: + wp.synchronize() + return scales_wp + + # ------------------------------------------------------------------ + # Internal — Fabric initialization + # ------------------------------------------------------------------ + + def _initialize_fabric(self) -> None: + """Initialize Fabric batch infrastructure for GPU-accelerated pose queries.""" + import usdrt # noqa: PLC0415 + from usdrt import Rt # noqa: PLC0415 + + stage_id = sim_utils.get_current_stage_id() + fabric_stage = usdrt.Usd.Stage.Attach(stage_id) + + for i in range(self.count): + rt_prim = fabric_stage.GetPrimAtPath(self.prim_paths[i]) + rt_xformable = Rt.Xformable(rt_prim) + + has_attr = ( + rt_xformable.HasFabricHierarchyWorldMatrixAttr() + if hasattr(rt_xformable, "HasFabricHierarchyWorldMatrixAttr") + else False + ) + if not has_attr: + rt_xformable.CreateFabricHierarchyWorldMatrixAttr() + + rt_xformable.SetWorldXformFromUsd() + + rt_prim.CreateAttribute(self._view_index_attr, usdrt.Sdf.ValueTypeNames.UInt, custom=True) + rt_prim.GetAttribute(self._view_index_attr).Set(i) + + self._fabric_hierarchy = usdrt.hierarchy.IFabricHierarchy().get_fabric_hierarchy( + fabric_stage.GetFabricId(), fabric_stage.GetStageIdAsStageId() + ) + self._fabric_hierarchy.update_world_xforms() + + self._default_view_indices = wp.zeros((self.count,), dtype=wp.uint32, device=self._device) + wp.launch( + kernel=fabric_utils.arange_k, dim=self.count, inputs=[self._default_view_indices], device=self._device + ) + wp.synchronize() + + fabric_device = self._device + if self._device == "cuda": + logger.warning("Fabric device is not specified, defaulting to 'cuda:0'.") + fabric_device = "cuda:0" + elif self._device.startswith("cuda:"): + if self._device != "cuda:0": + logger.debug( + f"SelectPrims only supports cuda:0. Using cuda:0 for SelectPrims " + f"even though simulation device is {self._device}." + ) + fabric_device = "cuda:0" + + self._fabric_selection = fabric_stage.SelectPrims( + require_attrs=[ + (usdrt.Sdf.ValueTypeNames.UInt, self._view_index_attr, usdrt.Usd.Access.Read), + (usdrt.Sdf.ValueTypeNames.Matrix4d, "omni:fabric:worldMatrix", usdrt.Usd.Access.ReadWrite), + ], + device=fabric_device, + ) + + self._view_to_fabric = wp.zeros((self.count,), dtype=wp.uint32, device=fabric_device) + self._fabric_to_view = wp.fabricarray(self._fabric_selection, self._view_index_attr) + + wp.launch( + kernel=fabric_utils.set_view_to_fabric_array, + dim=self._fabric_to_view.shape[0], + inputs=[self._fabric_to_view, self._view_to_fabric], + device=fabric_device, + ) + wp.synchronize() + + self._fabric_positions_buf = wp.zeros((self.count, 3), dtype=wp.float32, device=self._device) + self._fabric_orientations_buf = wp.zeros((self.count, 4), dtype=wp.float32, device=self._device) + self._fabric_positions_ta = ProxyArray(self._fabric_positions_buf) + self._fabric_orientations_ta = ProxyArray(self._fabric_orientations_buf) + self._fabric_scales_buf = wp.zeros((self.count, 3), dtype=wp.float32, device=self._device) + self._fabric_dummy_buffer = wp.zeros((0, 3), dtype=wp.float32, device=self._device) + self._fabric_world_matrices = wp.fabricarray(self._fabric_selection, "omni:fabric:worldMatrix") + self._fabric_stage = fabric_stage + self._fabric_device = fabric_device + + self._fabric_initialized = True + self._fabric_usd_sync_done = False + + def _sync_fabric_from_usd_once(self) -> None: + """Sync Fabric world matrices from USD once, on the first read.""" + if not self._fabric_initialized: + self._initialize_fabric() + + positions_usd_ta, orientations_usd_ta = self._usd_view.get_world_poses() + positions_usd = positions_usd_ta.warp + orientations_usd = orientations_usd_ta.warp + scales_usd = self._usd_view.get_scales() + + prev_sync = self._sync_usd_on_fabric_write + self._sync_usd_on_fabric_write = False + self.set_world_poses(positions_usd, orientations_usd) + self.set_scales(scales_usd) + self._sync_usd_on_fabric_write = prev_sync + + self._fabric_usd_sync_done = True + + def _resolve_indices_wp(self, indices: wp.array | None) -> wp.array: + """Resolve view indices as a Warp uint32 array.""" + if indices is None or indices == slice(None): + if self._default_view_indices is None: + raise RuntimeError("Fabric indices are not initialized.") + return self._default_view_indices + if indices.dtype != wp.uint32: + return wp.array(indices.numpy().astype("uint32"), dtype=wp.uint32, device=self._device) + return indices diff --git a/source/isaaclab_physx/isaaclab_physx/test/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/__init__.py new file mode 100644 index 000000000000..7d7ed09bafbe --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test utilities for isaaclab_physx.""" diff --git a/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.py new file mode 100644 index 000000000000..64ecc582fff1 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX-specific benchmark utilities. + +This package provides helper functions for creating benchmark inputs +specific to PhysX-based assets (Articulation, RigidObject, etc.). +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.pyi new file mode 100644 index 000000000000..174054b401b9 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/benchmark/__init__.pyi @@ -0,0 +1,22 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "make_tensor_body_ids", + "make_tensor_env_ids", + "make_tensor_joint_ids", + "make_warp_body_mask", + "make_warp_env_mask", + "make_warp_joint_mask", +] + +from .benchmark_utils import ( + make_tensor_body_ids, + make_tensor_env_ids, + make_tensor_joint_ids, + make_warp_body_mask, + make_warp_env_mask, + make_warp_joint_mask, +) diff --git a/source/isaaclab_physx/isaaclab_physx/test/benchmark/benchmark_utils.py b/source/isaaclab_physx/isaaclab_physx/test/benchmark/benchmark_utils.py new file mode 100644 index 000000000000..c17265490985 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/benchmark/benchmark_utils.py @@ -0,0 +1,93 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX-specific benchmark utilities. + +This module provides helper functions for creating tensor indices and warp masks +used in PhysX benchmark input generators. +""" + +from __future__ import annotations + +import torch +import warp as wp + + +def make_tensor_env_ids(num_instances: int, device: str) -> torch.Tensor: + """Create a tensor of environment IDs. + + Args: + num_instances: Number of environment instances. + device: Device to create the tensor on. + + Returns: + Tensor of environment IDs [0, 1, ..., num_instances-1]. + """ + return torch.arange(num_instances, dtype=torch.int32, device=device) + + +def make_tensor_joint_ids(num_joints: int, device: str) -> torch.Tensor: + """Create a tensor of joint IDs. + + Args: + num_joints: Number of joints. + device: Device to create the tensor on. + + Returns: + Tensor of joint IDs [0, 1, ..., num_joints-1]. + """ + return torch.arange(num_joints, dtype=torch.int32, device=device) + + +def make_tensor_body_ids(num_bodies: int, device: str) -> torch.Tensor: + """Create a tensor of body IDs. + + Args: + num_bodies: Number of bodies. + device: Device to create the tensor on. + + Returns: + Tensor of body IDs [0, 1, ..., num_bodies-1]. + """ + return torch.arange(num_bodies, dtype=torch.int32, device=device) + + +def make_warp_env_mask(num_instances: int, device: str) -> wp.array: + """Create an all-true environment mask. + + Args: + num_instances: Number of environment instances. + device: Device to create the mask on. + + Returns: + Warp array of booleans, all set to True. + """ + return wp.ones((num_instances,), dtype=wp.bool, device=device) + + +def make_warp_joint_mask(num_joints: int, device: str) -> wp.array: + """Create an all-true joint mask. + + Args: + num_joints: Number of joints. + device: Device to create the mask on. + + Returns: + Warp array of booleans, all set to True. + """ + return wp.ones((num_joints,), dtype=wp.bool, device=device) + + +def make_warp_body_mask(num_bodies: int, device: str) -> wp.array: + """Create an all-true body mask. + + Args: + num_bodies: Number of bodies. + device: Device to create the mask on. + + Returns: + Warp array of booleans, all set to True. + """ + return wp.ones((num_bodies,), dtype=wp.bool, device=device) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py new file mode 100644 index 000000000000..2718cb450f76 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock interfaces for PhysX TensorAPI views. + +This module provides mock implementations of PhysX TensorAPI views for unit testing +without requiring Isaac Sim or GPU simulation. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.pyi new file mode 100644 index 000000000000..0cfe80e0b39f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.pyi @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "create_mock_articulation_view", + "create_mock_humanoid_view", + "create_mock_quadruped_view", + "create_mock_rigid_body_view", + "create_mock_rigid_contact_view", + "MockArticulationView", + "MockArticulationViewWarp", + "MockRigidBodyView", + "MockRigidBodyViewWarp", + "MockRigidContactView", + "MockRigidContactViewWarp", +] + +from .factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, + create_mock_rigid_body_view, + create_mock_rigid_contact_view, +) +from .views import ( + MockArticulationView, + MockArticulationViewWarp, + MockRigidBodyView, + MockRigidBodyViewWarp, + MockRigidContactView, + MockRigidContactViewWarp, +) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py new file mode 100644 index 000000000000..3f405fc2e5fe --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py @@ -0,0 +1,302 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating mock PhysX views.""" + +from __future__ import annotations + +from typing import Literal + +from .views import ( + MockArticulationView, + MockArticulationViewWarp, + MockRigidBodyView, + MockRigidBodyViewWarp, + MockRigidContactView, + MockRigidContactViewWarp, +) + +Backend = Literal["torch", "warp"] + + +def create_mock_rigid_body_view( + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", + backend: Backend = "torch", +) -> MockRigidBodyView | MockRigidBodyViewWarp: + """Create a mock rigid body view. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + backend: Backend to use ("torch" or "warp"). + + Returns: + A MockRigidBodyView or MockRigidBodyViewWarp instance. + """ + if backend == "warp": + return MockRigidBodyViewWarp(count=count, prim_paths=prim_paths, device=device) + return MockRigidBodyView(count=count, prim_paths=prim_paths, device=device) + + +def create_mock_articulation_view( + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", + backend: Backend = "torch", +) -> MockArticulationView | MockArticulationViewWarp: + """Create a mock articulation view. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + backend: Backend to use ("torch" or "warp"). + + Returns: + A MockArticulationView or MockArticulationViewWarp instance. + """ + if backend == "warp": + return MockArticulationViewWarp( + count=count, + num_dofs=num_dofs, + num_links=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + prim_paths=prim_paths, + device=device, + ) + return MockArticulationView( + count=count, + num_dofs=num_dofs, + num_links=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + prim_paths=prim_paths, + device=device, + ) + + +def create_mock_rigid_contact_view( + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", + backend: Backend = "torch", +) -> MockRigidContactView | MockRigidContactViewWarp: + """Create a mock rigid contact view. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + backend: Backend to use ("torch" or "warp"). + + Returns: + A MockRigidContactView or MockRigidContactViewWarp instance. + """ + if backend == "warp": + return MockRigidContactViewWarp( + count=count, + num_bodies=num_bodies, + filter_count=filter_count, + max_contact_data_count=max_contact_data_count, + device=device, + ) + return MockRigidContactView( + count=count, + num_bodies=num_bodies, + filter_count=filter_count, + max_contact_data_count=max_contact_data_count, + device=device, + ) + + +# -- Pre-configured factories -- + + +def create_mock_quadruped_view( + count: int = 1, + device: str = "cpu", + backend: Backend = "torch", +) -> MockArticulationView | MockArticulationViewWarp: + """Create a mock articulation view configured for a quadruped robot. + + Configuration: + - 12 DOFs (3 per leg x 4 legs: hip, thigh, calf) + - 13 links (base + 3 per leg) + - Floating base + + Args: + count: Number of articulation instances. + device: Device for tensor allocation. + backend: Backend to use ("torch" or "warp"). + + Returns: + A MockArticulationView or MockArticulationViewWarp configured for quadruped. + """ + dof_names = [ + "FL_hip_joint", + "FL_thigh_joint", + "FL_calf_joint", + "FR_hip_joint", + "FR_thigh_joint", + "FR_calf_joint", + "RL_hip_joint", + "RL_thigh_joint", + "RL_calf_joint", + "RR_hip_joint", + "RR_thigh_joint", + "RR_calf_joint", + ] + link_names = [ + "base", + "FL_hip", + "FL_thigh", + "FL_calf", + "FR_hip", + "FR_thigh", + "FR_calf", + "RL_hip", + "RL_thigh", + "RL_calf", + "RR_hip", + "RR_thigh", + "RR_calf", + ] + if backend == "warp": + return MockArticulationViewWarp( + count=count, + num_dofs=12, + num_links=13, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) + return MockArticulationView( + count=count, + num_dofs=12, + num_links=13, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) + + +def create_mock_humanoid_view( + count: int = 1, + device: str = "cpu", + backend: Backend = "torch", +) -> MockArticulationView | MockArticulationViewWarp: + """Create a mock articulation view configured for a humanoid robot. + + Configuration: + - 21 DOFs (typical humanoid configuration) + - 22 links + - Floating base + + Args: + count: Number of articulation instances. + device: Device for tensor allocation. + backend: Backend to use ("torch" or "warp"). + + Returns: + A MockArticulationView or MockArticulationViewWarp configured for humanoid. + """ + dof_names = [ + # Torso + "torso_joint", + # Left arm + "left_shoulder_pitch", + "left_shoulder_roll", + "left_shoulder_yaw", + "left_elbow", + # Right arm + "right_shoulder_pitch", + "right_shoulder_roll", + "right_shoulder_yaw", + "right_elbow", + # Left leg + "left_hip_yaw", + "left_hip_roll", + "left_hip_pitch", + "left_knee", + "left_ankle_pitch", + "left_ankle_roll", + # Right leg + "right_hip_yaw", + "right_hip_roll", + "right_hip_pitch", + "right_knee", + "right_ankle_pitch", + "right_ankle_roll", + ] + link_names = [ + "pelvis", + "torso", + # Left arm + "left_shoulder", + "left_upper_arm", + "left_lower_arm", + "left_hand", + # Right arm + "right_shoulder", + "right_upper_arm", + "right_lower_arm", + "right_hand", + # Left leg + "left_hip", + "left_upper_leg", + "left_lower_leg", + "left_ankle", + "left_foot", + # Right leg + "right_hip", + "right_upper_leg", + "right_lower_leg", + "right_ankle", + "right_foot", + # Head + "neck", + "head", + ] + if backend == "warp": + return MockArticulationViewWarp( + count=count, + num_dofs=21, + num_links=22, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) + return MockArticulationView( + count=count, + num_dofs=21, + num_links=22, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py new file mode 100644 index 000000000000..540a5d60b303 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for mock PhysX interfaces.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.pyi new file mode 100644 index 000000000000..27fd2f2ac588 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockSharedMetatype", + "mock_articulation_view", + "mock_rigid_body_view", + "mock_rigid_contact_view", + "patch_articulation_view", + "patch_rigid_body_view", + "patch_rigid_contact_view", +] + +from .mock_shared_metatype import MockSharedMetatype +from .patching import ( + mock_articulation_view, + mock_rigid_body_view, + mock_rigid_contact_view, + patch_articulation_view, + patch_rigid_body_view, + patch_rigid_contact_view, +) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/kernels_mock.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/kernels_mock.py new file mode 100644 index 000000000000..f372ddacf438 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/kernels_mock.py @@ -0,0 +1,99 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for mock PhysX views.""" + +from __future__ import annotations + +import warp as wp + + +@wp.kernel +def init_identity_transforms_1d_flat(out: wp.array2d(dtype=wp.float32)): + """Initialize (N, 7) float32 array: pos=0, quat=(0,0,0,1).""" + i = wp.tid() + out[i, 0] = 0.0 + out[i, 1] = 0.0 + out[i, 2] = 0.0 + out[i, 3] = 0.0 + out[i, 4] = 0.0 + out[i, 5] = 0.0 + out[i, 6] = 1.0 + + +@wp.kernel +def init_identity_transforms_2d_flat(out: wp.array3d(dtype=wp.float32)): + """Initialize (N, L, 7) float32 array: pos=0, quat=(0,0,0,1).""" + i, j = wp.tid() + out[i, j, 0] = 0.0 + out[i, j, 1] = 0.0 + out[i, j, 2] = 0.0 + out[i, j, 3] = 0.0 + out[i, j, 4] = 0.0 + out[i, j, 5] = 0.0 + out[i, j, 6] = 1.0 + + +@wp.kernel +def scatter_floats_2d( + src: wp.array2d(dtype=wp.float32), + indices: wp.array(dtype=wp.int32), + dst: wp.array2d(dtype=wp.float32), +): + """Scatter 2D float arrays from src to dst rows at specified indices. + + Args: + src: Source array, shape (M, J). + indices: Target row indices in dst, shape (M,). + dst: Destination array, shape (N, J) where N >= max(indices). + """ + i, j = wp.tid() + dst[indices[i], j] = src[i, j] + + +@wp.kernel +def init_identity_inertias_1d(out: wp.array2d(dtype=wp.float32)): + """Initialize 1D array of inertia tensors to identity (9 values per body). + + Sets diagonal elements to 1.0, off-diagonal to 0.0. + Shape: (N, 9) where 9 represents flattened 3x3 matrix. + + Args: + out: Output array of shape (N, 9) to initialize. + """ + i = wp.tid() + # Flattened row-major 3x3 identity: [1,0,0,0,1,0,0,0,1] + out[i, 0] = 1.0 # [0,0] + out[i, 1] = 0.0 + out[i, 2] = 0.0 + out[i, 3] = 0.0 + out[i, 4] = 1.0 # [1,1] + out[i, 5] = 0.0 + out[i, 6] = 0.0 + out[i, 7] = 0.0 + out[i, 8] = 1.0 # [2,2] + + +@wp.kernel +def init_identity_inertias_2d(out: wp.array3d(dtype=wp.float32)): + """Initialize 2D array of inertia tensors to identity (9 values per body per link). + + Sets diagonal elements to 1.0, off-diagonal to 0.0. + Shape: (N, L, 9) where 9 represents flattened 3x3 matrix. + + Args: + out: Output array of shape (N, L, 9) to initialize. + """ + i, j = wp.tid() + # Flattened row-major 3x3 identity: [1,0,0,0,1,0,0,0,1] + out[i, j, 0] = 1.0 # [0,0] + out[i, j, 1] = 0.0 + out[i, j, 2] = 0.0 + out[i, j, 3] = 0.0 + out[i, j, 4] = 1.0 # [1,1] + out[i, j, 5] = 0.0 + out[i, j, 6] = 0.0 + out[i, j, 7] = 0.0 + out[i, j, 8] = 1.0 # [2,2] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py new file mode 100644 index 000000000000..c7ceb4d93f7f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX shared metatype.""" + +from __future__ import annotations + + +class MockSharedMetatype: + """Mock implementation of the shared metatype for articulation views. + + The shared metatype contains metadata about the articulation structure that is + shared across all instances, such as DOF count, link count, and names. + """ + + def __init__( + self, + dof_count: int = 1, + link_count: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + ): + """Initialize the mock shared metatype. + + Args: + dof_count: Number of degrees of freedom (joints). + link_count: Number of links (bodies). + dof_names: Names of the DOFs. Defaults to ["dof_0", "dof_1", ...]. + link_names: Names of the links. Defaults to ["link_0", "link_1", ...]. + fixed_base: Whether the articulation has a fixed base. + """ + self._dof_count = dof_count + self._link_count = link_count + self._dof_names = dof_names or [f"dof_{i}" for i in range(dof_count)] + self._link_names = link_names or [f"link_{i}" for i in range(link_count)] + self._fixed_base = fixed_base + + @property + def dof_count(self) -> int: + """Number of degrees of freedom.""" + return self._dof_count + + @property + def link_count(self) -> int: + """Number of links.""" + return self._link_count + + @property + def dof_names(self) -> list[str]: + """Names of the degrees of freedom.""" + return self._dof_names + + @property + def link_names(self) -> list[str]: + """Names of the links.""" + return self._link_names + + @property + def fixed_base(self) -> bool: + """Whether the articulation has a fixed base.""" + return self._fixed_base diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py new file mode 100644 index 000000000000..288eb28bf5fe --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py @@ -0,0 +1,285 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for patching PhysX views with mock implementations.""" + +from __future__ import annotations + +import functools +from collections.abc import Callable, Generator +from contextlib import contextmanager +from typing import Any, TypeVar +from unittest.mock import patch + +F = TypeVar("F", bound=Callable[..., Any]) + + +@contextmanager +def patch_rigid_body_view( + target: str, + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.RigidBodyView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.RigidBodyView"). + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. + + Example: + >>> with patch_rigid_body_view("my_module.view", count=4) as mock: + ... view = my_function() + ... view.set_mock_transforms(torch.randn(4, 7)) + """ + from ..views import MockRigidBodyView + + def create_mock(*args, **kwargs): + return MockRigidBodyView( + count=kwargs.get("count", count), + prim_paths=kwargs.get("prim_paths", prim_paths), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_articulation_view( + target: str, + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.ArticulationView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.ArticulationView"). + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. + + Example: + >>> with patch_articulation_view("my_module.view", num_dofs=12) as mock: + ... view = my_function() + ... positions = view.get_dof_positions() + """ + from ..views import MockArticulationView + + def create_mock(*args, **kwargs): + return MockArticulationView( + count=kwargs.get("count", count), + num_dofs=kwargs.get("num_dofs", num_dofs), + num_links=kwargs.get("num_links", num_links), + dof_names=kwargs.get("dof_names", dof_names), + link_names=kwargs.get("link_names", link_names), + fixed_base=kwargs.get("fixed_base", fixed_base), + prim_paths=kwargs.get("prim_paths", prim_paths), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_rigid_contact_view( + target: str, + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.RigidContactView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.RigidContactView"). + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. + + Example: + >>> with patch_rigid_contact_view("my_module.view", num_bodies=4) as mock: + ... view = my_function() + ... forces = view.get_net_contact_forces(0.01) + """ + from ..views import MockRigidContactView + + def create_mock(*args, **kwargs): + return MockRigidContactView( + count=kwargs.get("count", count), + num_bodies=kwargs.get("num_bodies", num_bodies), + filter_count=kwargs.get("filter_count", filter_count), + max_contact_data_count=kwargs.get("max_contact_data_count", max_contact_data_count), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +# -- Decorators -- + + +def mock_rigid_body_view( + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockRigidBodyView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_rigid_body_view(count=4) + ... def test_my_function(mock_view): + ... transforms = mock_view.get_transforms() + ... assert transforms.shape == (4, 7) + """ + from ..views import MockRigidBodyView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockRigidBodyView(count=count, prim_paths=prim_paths, device=device) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator + + +def mock_articulation_view( + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockArticulationView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_articulation_view(count=4, num_dofs=12, num_links=13) + ... def test_my_function(mock_view): + ... positions = mock_view.get_dof_positions() + ... assert positions.shape == (4, 12) + """ + from ..views import MockArticulationView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockArticulationView( + count=count, + num_dofs=num_dofs, + num_links=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + prim_paths=prim_paths, + device=device, + ) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator + + +def mock_rigid_contact_view( + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockRigidContactView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_rigid_contact_view(count=4, num_bodies=4, filter_count=2) + ... def test_my_function(mock_view): + ... forces = mock_view.get_net_contact_forces(0.01) + ... assert forces.shape == (16, 3) + """ + from ..views import MockRigidContactView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockRigidContactView( + count=count, + num_bodies=num_bodies, + filter_count=filter_count, + max_contact_data_count=max_contact_data_count, + device=device, + ) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py new file mode 100644 index 000000000000..196e03203ef1 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock PhysX TensorAPI views.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.pyi b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.pyi new file mode 100644 index 000000000000..ecbb3985cb53 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.pyi @@ -0,0 +1,20 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MockArticulationView", + "MockArticulationViewWarp", + "MockRigidBodyView", + "MockRigidBodyViewWarp", + "MockRigidContactView", + "MockRigidContactViewWarp", +] + +from .mock_articulation_view import MockArticulationView +from .mock_articulation_view_warp import MockArticulationViewWarp +from .mock_rigid_body_view import MockRigidBodyView +from .mock_rigid_body_view_warp import MockRigidBodyViewWarp +from .mock_rigid_contact_view import MockRigidContactView +from .mock_rigid_contact_view_warp import MockRigidContactViewWarp diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py new file mode 100644 index 000000000000..3c0fc04af412 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py @@ -0,0 +1,1066 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX ArticulationView.""" + +from __future__ import annotations + +import torch + +from ..utils.mock_shared_metatype import MockSharedMetatype + + +class MockArticulationView: + """Mock implementation of physx.ArticulationView for unit testing. + + This class mimics the interface of the PhysX TensorAPI ArticulationView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - root_transforms: (N, 7) - [pos(3), quat_xyzw(4)] + - root_velocities: (N, 6) - [lin_vel(3), ang_vel(3)] + - link_transforms: (N, L, 7) - per-link poses + - link_velocities: (N, L, 6) - per-link velocities + - dof_positions: (N, J) - joint positions + - dof_velocities: (N, J) - joint velocities + - dof_limits: (N, J, 2) - [lower, upper] limits + - dof_stiffnesses: (N, J) - joint stiffnesses + - dof_dampings: (N, J) - joint dampings + - dof_max_forces: (N, J) - maximum joint forces + - dof_max_velocities: (N, J) - maximum joint velocities + - masses: (N, L) - per-link masses + - coms: (N, L, 7) - per-link centers of mass + - inertias: (N, L, 3, 3) - per-link inertia tensors + + Where N = count, L = num_links, J = num_dofs + """ + + def __init__( + self, + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", + ): + """Initialize the mock articulation view. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. Defaults to auto-generated names. + link_names: Names of the links. Defaults to auto-generated names. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation ("cpu" or "cuda"). + """ + self._count = count + self._num_dofs = num_dofs + self._num_links = num_links + self._device = device + self._prim_paths = prim_paths or [f"/World/Articulation_{i}" for i in range(count)] + self._noop_setters = False + + # Create shared metatype + self._shared_metatype = MockSharedMetatype( + dof_count=num_dofs, + link_count=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + ) + + # Tendon properties (fixed values for mock) + self._max_fixed_tendons = 0 + self._max_spatial_tendons = 0 + + # Internal state (lazily initialized) + self._root_transforms: torch.Tensor | None = None + self._root_velocities: torch.Tensor | None = None + self._link_transforms: torch.Tensor | None = None + self._link_velocities: torch.Tensor | None = None + self._link_accelerations: torch.Tensor | None = None + self._link_incoming_joint_force: torch.Tensor | None = None + self._dof_positions: torch.Tensor | None = None + self._dof_velocities: torch.Tensor | None = None + self._dof_projected_joint_forces: torch.Tensor | None = None + self._dof_limits: torch.Tensor | None = None + self._dof_stiffnesses: torch.Tensor | None = None + self._dof_dampings: torch.Tensor | None = None + self._dof_max_forces: torch.Tensor | None = None + self._dof_max_velocities: torch.Tensor | None = None + self._dof_armatures: torch.Tensor | None = None + self._dof_friction_coefficients: torch.Tensor | None = None + self._dof_friction_properties: torch.Tensor | None = None + self._masses: torch.Tensor | None = None + self._coms: torch.Tensor | None = None + self._inertias: torch.Tensor | None = None + + # -- Helper Methods -- + + def _check_cpu_tensor(self, tensor: torch.Tensor, name: str) -> None: + """Check that tensor is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where joint/body properties must be on CPU. + """ + if tensor.is_cuda: + raise RuntimeError( + f"Expected CPU tensor for {name}, but got tensor on {tensor.device}. " + "Joint and body properties must be set with CPU tensors." + ) + + # -- Properties -- + + @property + def count(self) -> int: + """Number of articulation instances.""" + return self._count + + @property + def shared_metatype(self) -> MockSharedMetatype: + """Shared metatype containing articulation structure metadata.""" + return self._shared_metatype + + @property + def max_fixed_tendons(self) -> int: + """Maximum number of fixed tendons.""" + return self._max_fixed_tendons + + @property + def max_spatial_tendons(self) -> int: + """Maximum number of spatial tendons.""" + return self._max_spatial_tendons + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + @property + def dof_paths(self) -> list[list[str]]: + """DOF paths for each instance.""" + dof_names = self._shared_metatype.dof_names or [f"joint_{i}" for i in range(self._num_dofs)] + single_instance_paths = [f"{self._prim_paths[0]}/{name}" for name in dof_names] + return [single_instance_paths] * self._count + + # -- Root Getters -- + + def get_root_transforms(self) -> torch.Tensor: + """Get world transforms of root links. + + Returns: + Tensor of shape (N, 7) with [pos(3), quat_xyzw(4)]. + """ + if self._root_transforms is None: + self._root_transforms = torch.zeros(self._count, 7, device=self._device) + self._root_transforms[:, 6] = 1.0 # w=1 for identity quaternion + return self._root_transforms.clone() + + def get_root_velocities(self) -> torch.Tensor: + """Get velocities of root links. + + Returns: + Tensor of shape (N, 6) with [lin_vel(3), ang_vel(3)]. + """ + if self._root_velocities is None: + self._root_velocities = torch.zeros(self._count, 6, device=self._device) + return self._root_velocities.clone() + + # -- Link Getters -- + + def get_link_transforms(self) -> torch.Tensor: + """Get world transforms of all links. + + Returns: + Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. + """ + if self._link_transforms is None: + self._link_transforms = torch.zeros(self._count, self._num_links, 7, device=self._device) + self._link_transforms[:, :, 6] = 1.0 # w=1 for identity quaternion + return self._link_transforms.clone() + + def get_link_velocities(self) -> torch.Tensor: + """Get velocities of all links. + + Returns: + Tensor of shape (N, L, 6) with [lin_vel(3), ang_vel(3)] per link. + """ + if self._link_velocities is None: + self._link_velocities = torch.zeros(self._count, self._num_links, 6, device=self._device) + return self._link_velocities.clone() + + # -- DOF Getters -- + + def get_dof_positions(self) -> torch.Tensor: + """Get positions of all DOFs. + + Returns: + Tensor of shape (N, J) with joint positions. + """ + if self._dof_positions is None: + self._dof_positions = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_positions.clone() + + def get_dof_velocities(self) -> torch.Tensor: + """Get velocities of all DOFs. + + Returns: + Tensor of shape (N, J) with joint velocities. + """ + if self._dof_velocities is None: + self._dof_velocities = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_velocities.clone() + + def get_dof_projected_joint_forces(self) -> torch.Tensor: + """Get projected joint forces of all DOFs. + + Returns: + Tensor of shape (N, J) with projected joint forces. + """ + if self._dof_projected_joint_forces is None: + self._dof_projected_joint_forces = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_projected_joint_forces.clone() + + def get_dof_limits(self) -> torch.Tensor: + """Get position limits of all DOFs. + + Returns: + Tensor of shape (N, J, 2) with [lower, upper] limits. Always on CPU. + """ + if self._dof_limits is None: + # Default: no limits (infinite) - stored on CPU + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device="cpu") + self._dof_limits[:, :, 0] = float("-inf") # lower limit + self._dof_limits[:, :, 1] = float("inf") # upper limit + return self._dof_limits.clone() + + def get_dof_stiffnesses(self) -> torch.Tensor: + """Get stiffnesses of all DOFs. + + Returns: + Tensor of shape (N, J) with joint stiffnesses. Always on CPU. + """ + if self._dof_stiffnesses is None: + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device="cpu") + return self._dof_stiffnesses.clone() + + def get_dof_dampings(self) -> torch.Tensor: + """Get dampings of all DOFs. + + Returns: + Tensor of shape (N, J) with joint dampings. Always on CPU. + """ + if self._dof_dampings is None: + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device="cpu") + return self._dof_dampings.clone() + + def get_dof_max_forces(self) -> torch.Tensor: + """Get maximum forces of all DOFs. + + Returns: + Tensor of shape (N, J) with maximum joint forces. Always on CPU. + """ + if self._dof_max_forces is None: + # Default: infinite max force - stored on CPU + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") + return self._dof_max_forces.clone() + + def get_dof_max_velocities(self) -> torch.Tensor: + """Get maximum velocities of all DOFs. + + Returns: + Tensor of shape (N, J) with maximum joint velocities. Always on CPU. + """ + if self._dof_max_velocities is None: + # Default: infinite max velocity - stored on CPU + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") + return self._dof_max_velocities.clone() + + def get_dof_armatures(self) -> torch.Tensor: + """Get armatures of all DOFs. + + Returns: + Tensor of shape (N, J) with joint armatures. Always on CPU. + """ + if self._dof_armatures is None: + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device="cpu") + return self._dof_armatures.clone() + + def get_dof_friction_coefficients(self) -> torch.Tensor: + """Get friction coefficients of all DOFs. + + Returns: + Tensor of shape (N, J) with joint friction coefficients. Always on CPU. + """ + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device="cpu") + return self._dof_friction_coefficients.clone() + + # -- Mass Property Getters -- + + def get_masses(self) -> torch.Tensor: + """Get masses of all links. + + Returns: + Tensor of shape (N, L) with link masses. Always on CPU. + """ + if self._masses is None: + self._masses = torch.ones(self._count, self._num_links, device="cpu") + return self._masses.clone() + + def get_coms(self) -> torch.Tensor: + """Get centers of mass of all links. + + Returns: + Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. Always on CPU. + """ + if self._coms is None: + self._coms = torch.zeros(self._count, self._num_links, 7, device="cpu") + self._coms[:, :, 6] = 1.0 # w=1 for identity quaternion + return self._coms.clone() + + def get_inertias(self) -> torch.Tensor: + """Get inertia tensors of all links. + + Returns: + Tensor of shape (N, L, 9) with flattened 3x3 inertia matrices per link (row-major). Always on CPU. + """ + if self._inertias is None: + # Default: identity inertia - flattened [1,0,0,0,1,0,0,0,1] - stored on CPU + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = 1.0 # [0,0] + self._inertias[:, :, 4] = 1.0 # [1,1] + self._inertias[:, :, 8] = 1.0 # [2,2] + return self._inertias.clone() + + # -- Root Setters -- + + def set_root_transforms( + self, + transforms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set world transforms of root links. + + Args: + transforms: Tensor of shape (N, 7) or (len(indices), 7). + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + transforms = transforms.to(self._device) + if self._root_transforms is None: + self._root_transforms = torch.zeros(self._count, 7, device=self._device) + self._root_transforms[:, 6] = 1.0 + if indices is not None: + self._root_transforms[indices] = transforms + else: + self._root_transforms = transforms + + def set_root_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of root links. + + Args: + velocities: Tensor of shape (N, 6) or (len(indices), 6). + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + velocities = velocities.to(self._device) + if self._root_velocities is None: + self._root_velocities = torch.zeros(self._count, 6, device=self._device) + if indices is not None: + self._root_velocities[indices] = velocities + else: + self._root_velocities = velocities + + # -- DOF Setters -- + + def set_dof_positions( + self, + positions: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set positions of all DOFs. + + Args: + positions: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + positions = positions.to(self._device) + if self._dof_positions is None: + self._dof_positions = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_positions[indices] = positions + else: + self._dof_positions = positions + + def set_dof_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of all DOFs. + + Args: + velocities: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + velocities = velocities.to(self._device) + if self._dof_velocities is None: + self._dof_velocities = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_velocities[indices] = velocities + else: + self._dof_velocities = velocities + + def set_dof_position_targets( + self, + targets: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set position targets for all DOFs (no-op in mock). + + Args: + targets: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_velocity_targets( + self, + targets: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocity targets for all DOFs (no-op in mock). + + Args: + targets: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_actuation_forces( + self, + forces: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set actuation forces for all DOFs (no-op in mock). + + Args: + forces: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_limits( + self, + limits: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set position limits of all DOFs. + + Args: + limits: Tensor of shape (N, J, 2) with [lower, upper] limits. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If limits tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(limits, "dof_limits") + if self._dof_limits is None: + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device="cpu") + self._dof_limits[:, :, 0] = float("-inf") + self._dof_limits[:, :, 1] = float("inf") + if indices is not None: + self._dof_limits[indices] = limits + else: + self._dof_limits = limits + + def set_dof_stiffnesses( + self, + stiffnesses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set stiffnesses of all DOFs. + + Args: + stiffnesses: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If stiffnesses tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(stiffnesses, "dof_stiffnesses") + if self._dof_stiffnesses is None: + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device="cpu") + if indices is not None: + self._dof_stiffnesses[indices] = stiffnesses + else: + self._dof_stiffnesses = stiffnesses + + def set_dof_dampings( + self, + dampings: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set dampings of all DOFs. + + Args: + dampings: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If dampings tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(dampings, "dof_dampings") + if self._dof_dampings is None: + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device="cpu") + if indices is not None: + self._dof_dampings[indices] = dampings + else: + self._dof_dampings = dampings + + def set_dof_max_forces( + self, + max_forces: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set maximum forces of all DOFs. + + Args: + max_forces: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_forces tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(max_forces, "dof_max_forces") + if self._dof_max_forces is None: + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") + if indices is not None: + self._dof_max_forces[indices] = max_forces + else: + self._dof_max_forces = max_forces + + def set_dof_max_velocities( + self, + max_velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set maximum velocities of all DOFs. + + Args: + max_velocities: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_velocities tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(max_velocities, "dof_max_velocities") + if self._dof_max_velocities is None: + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") + if indices is not None: + self._dof_max_velocities[indices] = max_velocities + else: + self._dof_max_velocities = max_velocities + + def set_dof_armatures( + self, + armatures: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set armatures of all DOFs. + + Args: + armatures: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If armatures tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(armatures, "dof_armatures") + if self._dof_armatures is None: + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device="cpu") + if indices is not None: + self._dof_armatures[indices] = armatures + else: + self._dof_armatures = armatures + + def set_dof_friction_coefficients( + self, + friction_coefficients: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set friction coefficients of all DOFs. + + Args: + friction_coefficients: Tensor of shape (N, J). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If friction_coefficients tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(friction_coefficients, "dof_friction_coefficients") + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device="cpu") + if indices is not None: + self._dof_friction_coefficients[indices] = friction_coefficients + else: + self._dof_friction_coefficients = friction_coefficients + + def set_dof_friction_properties( + self, + friction_properties: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set friction properties of all DOFs. + + Args: + friction_properties: Tensor of shape (N, J, 3) with [static, dynamic, viscous]. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If friction_properties tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(friction_properties, "dof_friction_properties") + if self._dof_friction_properties is None: + self._dof_friction_properties = torch.zeros(self._count, self._num_dofs, 3, device="cpu") + if indices is not None: + self._dof_friction_properties[indices] = friction_properties + else: + self._dof_friction_properties = friction_properties + + # -- Mass Property Setters -- + + def set_masses( + self, + masses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set masses of all links. + + Args: + masses: Tensor of shape (N, L). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If masses tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(masses, "masses") + if self._masses is None: + self._masses = torch.ones(self._count, self._num_links, device="cpu") + if indices is not None: + self._masses[indices] = masses + else: + self._masses = masses + + def set_coms( + self, + coms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set centers of mass of all links. + + Args: + coms: Tensor of shape (N, L, 7). Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If coms tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(coms, "coms") + if self._coms is None: + self._coms = torch.zeros(self._count, self._num_links, 7, device="cpu") + self._coms[:, :, 6] = 1.0 + if indices is not None: + self._coms[indices] = coms + else: + self._coms = coms + + def set_inertias( + self, + inertias: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set inertia tensors of all links. + + Args: + inertias: Tensor of shape (N, L, 9) - flattened 3x3 matrices. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If inertias tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(inertias, "inertias") + if self._inertias is None: + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = 1.0 + self._inertias[:, :, 4] = 1.0 + self._inertias[:, :, 8] = 1.0 + if indices is not None: + self._inertias[indices] = inertias + else: + self._inertias = inertias + + # -- Mock setters (direct test data injection) -- + + def set_mock_root_transforms(self, transforms: torch.Tensor) -> None: + """Set mock root transform data directly for testing. + + Args: + transforms: Tensor of shape (N, 7). + """ + self._root_transforms = transforms.to(self._device) + + def set_mock_root_velocities(self, velocities: torch.Tensor) -> None: + """Set mock root velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, 6). + """ + self._root_velocities = velocities.to(self._device) + + def set_mock_link_transforms(self, transforms: torch.Tensor) -> None: + """Set mock link transform data directly for testing. + + Args: + transforms: Tensor of shape (N, L, 7). + """ + self._link_transforms = transforms.to(self._device) + + def set_mock_link_velocities(self, velocities: torch.Tensor) -> None: + """Set mock link velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, L, 6). + """ + self._link_velocities = velocities.to(self._device) + + def set_mock_dof_positions(self, positions: torch.Tensor) -> None: + """Set mock DOF position data directly for testing. + + Args: + positions: Tensor of shape (N, J). + """ + self._dof_positions = positions.to(self._device) + + def set_mock_dof_velocities(self, velocities: torch.Tensor) -> None: + """Set mock DOF velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, J). + """ + self._dof_velocities = velocities.to(self._device) + + def set_mock_dof_projected_joint_forces(self, forces: torch.Tensor) -> None: + """Set mock projected joint force data directly for testing. + + Args: + forces: Tensor of shape (N, J). + """ + self._dof_projected_joint_forces = forces.to(self._device) + + def set_mock_dof_limits(self, limits: torch.Tensor) -> None: + """Set mock DOF limit data directly for testing. + + Args: + limits: Tensor of shape (N, J, 2). + """ + self._dof_limits = limits.to(self._device) + + def set_mock_dof_stiffnesses(self, stiffnesses: torch.Tensor) -> None: + """Set mock DOF stiffness data directly for testing. + + Args: + stiffnesses: Tensor of shape (N, J). + """ + self._dof_stiffnesses = stiffnesses.to(self._device) + + def set_mock_dof_dampings(self, dampings: torch.Tensor) -> None: + """Set mock DOF damping data directly for testing. + + Args: + dampings: Tensor of shape (N, J). + """ + self._dof_dampings = dampings.to(self._device) + + def set_mock_dof_max_forces(self, max_forces: torch.Tensor) -> None: + """Set mock DOF max force data directly for testing. + + Args: + max_forces: Tensor of shape (N, J). + """ + self._dof_max_forces = max_forces.to(self._device) + + def set_mock_dof_max_velocities(self, max_velocities: torch.Tensor) -> None: + """Set mock DOF max velocity data directly for testing. + + Args: + max_velocities: Tensor of shape (N, J). + """ + self._dof_max_velocities = max_velocities.to(self._device) + + def set_mock_dof_armatures(self, armatures: torch.Tensor) -> None: + """Set mock DOF armature data directly for testing. + + Args: + armatures: Tensor of shape (N, J). + """ + self._dof_armatures = armatures.to(self._device) + + def set_mock_dof_friction_coefficients(self, friction_coefficients: torch.Tensor) -> None: + """Set mock DOF friction coefficient data directly for testing. + + Args: + friction_coefficients: Tensor of shape (N, J). + """ + self._dof_friction_coefficients = friction_coefficients.to(self._device) + + def set_mock_masses(self, masses: torch.Tensor) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Tensor of shape (N, L). + """ + self._masses = masses.to(self._device) + + def set_mock_coms(self, coms: torch.Tensor) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Tensor of shape (N, L, 7). + """ + self._coms = coms.to(self._device) + + def set_mock_inertias(self, inertias: torch.Tensor) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Tensor of shape (N, L, 3, 3). + """ + self._inertias = inertias.to(self._device) + + # -- Additional mock state for extended properties -- + + def get_dof_friction_properties(self) -> torch.Tensor: + """Get friction properties of all DOFs. + + Returns: + Tensor of shape (N, J, 3) with [static_friction, dynamic_friction, viscous_friction]. Always on CPU. + """ + if self._dof_friction_properties is None: + self._dof_friction_properties = torch.zeros(self._count, self._num_dofs, 3, device="cpu") + return self._dof_friction_properties.clone() + + def get_link_accelerations(self) -> torch.Tensor: + """Get accelerations of all links. + + Returns: + Tensor of shape (N, L, 6) with [lin_acc(3), ang_acc(3)] per link. + """ + if self._link_accelerations is None: + self._link_accelerations = torch.zeros(self._count, self._num_links, 6, device=self._device) + return self._link_accelerations.clone() + + def get_link_incoming_joint_force(self) -> torch.Tensor: + """Get incoming joint forces for all links. + + Returns: + Tensor of shape (N, L, 6) with [force(3), torque(3)] per link. + """ + if self._link_incoming_joint_force is None: + self._link_incoming_joint_force = torch.zeros(self._count, self._num_links, 6, device=self._device) + return self._link_incoming_joint_force.clone() + + def set_mock_dof_friction_properties(self, friction_properties: torch.Tensor) -> None: + """Set mock DOF friction properties data directly for testing. + + Args: + friction_properties: Tensor of shape (N, J, 3). + """ + self._dof_friction_properties = friction_properties.to(self._device) + + def set_mock_link_accelerations(self, accelerations: torch.Tensor) -> None: + """Set mock link acceleration data directly for testing. + + Args: + accelerations: Tensor of shape (N, L, 6). + """ + self._link_accelerations = accelerations.to(self._device) + + def set_mock_link_incoming_joint_force(self, forces: torch.Tensor) -> None: + """Set mock link incoming joint force data directly for testing. + + Args: + forces: Tensor of shape (N, L, 6). + """ + self._link_incoming_joint_force = forces.to(self._device) + + # -- Actions (no-op for testing) -- + + def apply_forces_and_torques_at_position( + self, + force_data: torch.Tensor | None = None, + torque_data: torch.Tensor | None = None, + position_data: torch.Tensor | None = None, + indices: torch.Tensor | None = None, + is_global: bool = True, + ) -> None: + """Apply forces and torques at positions (no-op in mock). + + Args: + force_data: Forces to apply, shape (N, 3) or (len(indices), 3). + torque_data: Torques to apply, shape (N, 3) or (len(indices), 3). + position_data: Positions to apply forces at, shape (N, 3) or (len(indices), 3). + indices: Optional indices of articulations to apply to. + is_global: Whether forces/torques are in global frame. + """ + pass # No-op for mock + + # -- Tendon Getters (stubs) -- + + def get_fixed_tendon_stiffnesses(self) -> torch.Tensor: + """Get fixed tendon stiffnesses. Returns zeros of shape (N, max_fixed_tendons).""" + return torch.zeros(self._count, self._max_fixed_tendons, device="cpu") + + def get_fixed_tendon_dampings(self) -> torch.Tensor: + """Get fixed tendon dampings. Returns zeros of shape (N, max_fixed_tendons).""" + return torch.zeros(self._count, self._max_fixed_tendons, device="cpu") + + def get_fixed_tendon_limit_stiffnesses(self) -> torch.Tensor: + """Get fixed tendon limit stiffnesses. Returns zeros of shape (N, max_fixed_tendons).""" + return torch.zeros(self._count, self._max_fixed_tendons, device="cpu") + + def get_fixed_tendon_rest_lengths(self) -> torch.Tensor: + """Get fixed tendon rest lengths. Returns zeros of shape (N, max_fixed_tendons).""" + return torch.zeros(self._count, self._max_fixed_tendons, device="cpu") + + def get_fixed_tendon_offsets(self) -> torch.Tensor: + """Get fixed tendon offsets. Returns zeros of shape (N, max_fixed_tendons).""" + return torch.zeros(self._count, self._max_fixed_tendons, device="cpu") + + def get_fixed_tendon_limits(self) -> torch.Tensor: + """Get fixed tendon limits. Returns zeros of shape (N, max_fixed_tendons, 2).""" + return torch.zeros(self._count, self._max_fixed_tendons, 2, device="cpu") + + def get_spatial_tendon_stiffnesses(self) -> torch.Tensor: + """Get spatial tendon stiffnesses. Returns zeros of shape (N, max_spatial_tendons).""" + return torch.zeros(self._count, self._max_spatial_tendons, device="cpu") + + def get_spatial_tendon_dampings(self) -> torch.Tensor: + """Get spatial tendon dampings. Returns zeros of shape (N, max_spatial_tendons).""" + return torch.zeros(self._count, self._max_spatial_tendons, device="cpu") + + def get_spatial_tendon_limit_stiffnesses(self) -> torch.Tensor: + """Get spatial tendon limit stiffnesses. Returns zeros of shape (N, max_spatial_tendons).""" + return torch.zeros(self._count, self._max_spatial_tendons, device="cpu") + + def get_spatial_tendon_offsets(self) -> torch.Tensor: + """Get spatial tendon offsets. Returns zeros of shape (N, max_spatial_tendons).""" + return torch.zeros(self._count, self._max_spatial_tendons, device="cpu") + + # -- Tendon Setters (no-op stubs) -- + + def set_fixed_tendon_properties( + self, + stiffness: torch.Tensor | None = None, + damping: torch.Tensor | None = None, + limit_stiffness: torch.Tensor | None = None, + pos_limits: torch.Tensor | None = None, + rest_length: torch.Tensor | None = None, + offset: torch.Tensor | None = None, + indices: torch.Tensor | None = None, + ) -> None: + """Set fixed tendon properties (no-op in mock).""" + pass # No-op for mock + + def set_spatial_tendon_properties( + self, + stiffness: torch.Tensor | None = None, + damping: torch.Tensor | None = None, + limit_stiffness: torch.Tensor | None = None, + offset: torch.Tensor | None = None, + indices: torch.Tensor | None = None, + ) -> None: + """Set spatial tendon properties (no-op in mock).""" + pass # No-op for mock + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random values, + useful for benchmarking where the actual values don't matter. + """ + # Root state + self._root_transforms = torch.randn(self._count, 7, device=self._device) + self._root_transforms[:, 3:7] = torch.nn.functional.normalize(self._root_transforms[:, 3:7], dim=-1) + self._root_velocities = torch.randn(self._count, 6, device=self._device) + + # Link state + self._link_transforms = torch.randn(self._count, self._num_links, 7, device=self._device) + self._link_transforms[:, :, 3:7] = torch.nn.functional.normalize(self._link_transforms[:, :, 3:7], dim=-1) + self._link_velocities = torch.randn(self._count, self._num_links, 6, device=self._device) + self._link_accelerations = torch.randn(self._count, self._num_links, 6, device=self._device) + self._link_incoming_joint_force = torch.randn(self._count, self._num_links, 6, device=self._device) + + # DOF state + self._dof_positions = torch.randn(self._count, self._num_dofs, device=self._device) + self._dof_velocities = torch.randn(self._count, self._num_dofs, device=self._device) + self._dof_projected_joint_forces = torch.randn(self._count, self._num_dofs, device=self._device) + + # DOF properties - stored on CPU (PhysX requirement) + self._dof_limits = torch.randn(self._count, self._num_dofs, 2, device="cpu") + self._dof_stiffnesses = torch.rand(self._count, self._num_dofs, device="cpu") * 100 + self._dof_dampings = torch.rand(self._count, self._num_dofs, device="cpu") * 10 + self._dof_max_forces = torch.rand(self._count, self._num_dofs, device="cpu") * 100 + self._dof_max_velocities = torch.rand(self._count, self._num_dofs, device="cpu") * 10 + self._dof_armatures = torch.rand(self._count, self._num_dofs, device="cpu") * 0.1 + self._dof_friction_coefficients = torch.rand(self._count, self._num_dofs, device="cpu") + self._dof_friction_properties = torch.rand(self._count, self._num_dofs, 3, device="cpu") + + # Mass properties - stored on CPU (PhysX requirement) + self._masses = torch.rand(self._count, self._num_links, device="cpu") * 10 + self._coms = torch.randn(self._count, self._num_links, 7, device="cpu") + self._coms[:, :, 3:7] = torch.nn.functional.normalize(self._coms[:, :, 3:7], dim=-1) + # Inertias: (N, L, 9) flattened format + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = torch.rand(self._count, self._num_links) # [0,0] + self._inertias[:, :, 4] = torch.rand(self._count, self._num_links) # [1,1] + self._inertias[:, :, 8] = torch.rand(self._count, self._num_links) # [2,2] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view_warp.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view_warp.py new file mode 100644 index 000000000000..bc011a2aa060 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view_warp.py @@ -0,0 +1,1195 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX ArticulationView using Warp arrays.""" + +from __future__ import annotations + +import warp as wp + +from ..utils.kernels_mock import ( + init_identity_inertias_2d, + init_identity_transforms_1d_flat, + init_identity_transforms_2d_flat, + scatter_floats_2d, +) +from ..utils.mock_shared_metatype import MockSharedMetatype + + +class MockArticulationViewWarp: + """Mock implementation of physx.ArticulationView using Warp arrays for unit testing. + + This class mimics the interface of the PhysX TensorAPI ArticulationView using + flat float32 arrays (matching real PhysX behavior), allowing tests to run + without Isaac Sim or GPU simulation. + + Data Shapes (flat float32, matching real PhysX views): + - root_transforms: (N, 7) dtype=wp.float32 - [pos(3), quat_xyzw(4)] + - root_velocities: (N, 6) dtype=wp.float32 - [ang_vel(3), lin_vel(3)] + - link_transforms: (N, L, 7) dtype=wp.float32 - per-link poses + - link_velocities: (N, L, 6) dtype=wp.float32 - per-link velocities + - link_accelerations: (N, L, 6) dtype=wp.float32 - per-link accelerations + - link_incoming_joint_force: (N, L, 6) dtype=wp.float32 - per-link forces + - dof_positions: (N, J) dtype=wp.float32 - joint positions + - dof_velocities: (N, J) dtype=wp.float32 - joint velocities + - dof_limits: (N, J, 2) dtype=wp.float32 - [lower, upper] limits + - dof_stiffnesses: (N, J) dtype=wp.float32 - joint stiffnesses + - dof_dampings: (N, J) dtype=wp.float32 - joint dampings + - masses: (N, L) dtype=wp.float32 - per-link masses + - coms: (N, L, 7) dtype=wp.float32 - per-link centers of mass + - inertias: (N, L, 9) dtype=wp.float32 - per-link inertia tensors (flattened) + + Where N = count, L = num_links, J = num_dofs + """ + + def __init__( + self, + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", + max_fixed_tendons: int = 0, + max_spatial_tendons: int = 0, + ): + """Initialize the mock articulation view. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. Defaults to auto-generated names. + link_names: Names of the links. Defaults to auto-generated names. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for array allocation ("cpu" or "cuda:N"). + max_fixed_tendons: Maximum number of fixed tendons. + max_spatial_tendons: Maximum number of spatial tendons. + """ + self._count = count + self._num_dofs = num_dofs + self._num_links = num_links + self._device = device + self._prim_paths = prim_paths or [f"/World/Articulation_{i}" for i in range(count)] + self._backend = "warp" + self._noop_setters = False + + # Create shared metatype + self._shared_metatype = MockSharedMetatype( + dof_count=num_dofs, + link_count=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + ) + + # Tendon properties + self._max_fixed_tendons = max_fixed_tendons + self._max_spatial_tendons = max_spatial_tendons + + # Internal state (lazily initialized) + self._root_transforms: wp.array | None = None + self._root_velocities: wp.array | None = None + self._link_transforms: wp.array | None = None + self._link_velocities: wp.array | None = None + self._link_accelerations: wp.array | None = None + self._link_incoming_joint_force: wp.array | None = None + self._dof_positions: wp.array | None = None + self._dof_velocities: wp.array | None = None + self._dof_projected_joint_forces: wp.array | None = None + self._dof_limits: wp.array | None = None + self._dof_stiffnesses: wp.array | None = None + self._dof_dampings: wp.array | None = None + self._dof_max_forces: wp.array | None = None + self._dof_max_velocities: wp.array | None = None + self._dof_armatures: wp.array | None = None + self._dof_friction_coefficients: wp.array | None = None + self._dof_friction_properties: wp.array | None = None + self._masses: wp.array | None = None + self._coms: wp.array | None = None + self._inertias: wp.array | None = None + + # -- Helper Methods -- + + def _check_cpu_array(self, arr: wp.array, name: str) -> None: + """Check that array is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where joint/body properties must be on CPU. + """ + if arr.device.is_cuda: + raise RuntimeError( + f"Expected CPU array for {name}, but got array on {arr.device}. " + "Joint and body properties must be set with CPU arrays." + ) + + def _create_identity_transforms_1d(self, count: int, device: str | None = None) -> wp.array: + """Create 1D array of identity transforms as (count, 7) float32.""" + dev = device or self._device + arr = wp.zeros((count, 7), dtype=wp.float32, device=dev) + wp.launch(init_identity_transforms_1d_flat, dim=count, inputs=[arr], device=dev) + return arr + + def _create_identity_transforms_2d(self, count: int, num_links: int, device: str | None = None) -> wp.array: + """Create 2D array of identity transforms as (count, num_links, 7) float32.""" + dev = device or self._device + arr = wp.zeros((count, num_links, 7), dtype=wp.float32, device=dev) + wp.launch(init_identity_transforms_2d_flat, dim=(count, num_links), inputs=[arr], device=dev) + return arr + + @staticmethod + def _as_structured(flat: wp.array, dtype, shape: tuple) -> wp.array: + """Zero-copy reinterpretation of flat float32 array as structured type array. + + This is needed because real PhysX views return structured types (e.g. transformf) + and the data classes call .view() on the results (which requires same byte-size dtype). + """ + return wp.array( + ptr=flat.ptr, + dtype=dtype, + shape=shape, + device=flat.device, + copy=False, + ) + + # -- Properties -- + + @property + def count(self) -> int: + """Number of articulation instances.""" + return self._count + + @property + def shared_metatype(self) -> MockSharedMetatype: + """Shared metatype containing articulation structure metadata.""" + return self._shared_metatype + + @property + def max_fixed_tendons(self) -> int: + """Maximum number of fixed tendons.""" + return self._max_fixed_tendons + + @property + def max_spatial_tendons(self) -> int: + """Maximum number of spatial tendons.""" + return self._max_spatial_tendons + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + @property + def dof_paths(self) -> list[list[str]]: + """DOF paths for each instance.""" + dof_names = self._shared_metatype.dof_names or [f"joint_{i}" for i in range(self._num_dofs)] + single_instance_paths = [f"{self._prim_paths[0]}/{name}" for name in dof_names] + return [single_instance_paths] * self._count + + # -- Root Getters -- + + def get_root_transforms(self) -> wp.array: + """Get world transforms of root links. + + Returns: + Warp array of shape (N,) with dtype=wp.transformf (matching real PhysX). + """ + if self._root_transforms is None: + self._root_transforms = self._create_identity_transforms_1d(self._count) + return wp.clone(self._as_structured(self._root_transforms, wp.transformf, (self._count,))) + + def get_root_velocities(self) -> wp.array: + """Get velocities of root links. + + Returns: + Warp array of shape (N,) with dtype=wp.spatial_vectorf (matching real PhysX). + """ + if self._root_velocities is None: + self._root_velocities = wp.zeros((self._count, 6), dtype=wp.float32, device=self._device) + return wp.clone(self._as_structured(self._root_velocities, wp.spatial_vectorf, (self._count,))) + + # -- Link Getters -- + + def get_link_transforms(self) -> wp.array: + """Get world transforms of all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.transformf (matching real PhysX). + """ + if self._link_transforms is None: + self._link_transforms = self._create_identity_transforms_2d(self._count, self._num_links) + return wp.clone(self._as_structured(self._link_transforms, wp.transformf, (self._count, self._num_links))) + + def get_link_velocities(self) -> wp.array: + """Get velocities of all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.spatial_vectorf (matching real PhysX). + """ + if self._link_velocities is None: + self._link_velocities = wp.zeros((self._count, self._num_links, 6), dtype=wp.float32, device=self._device) + return wp.clone(self._as_structured(self._link_velocities, wp.spatial_vectorf, (self._count, self._num_links))) + + def get_link_accelerations(self) -> wp.array: + """Get accelerations of all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.spatial_vectorf (matching real PhysX). + """ + if self._link_accelerations is None: + self._link_accelerations = wp.zeros( + (self._count, self._num_links, 6), dtype=wp.float32, device=self._device + ) + return wp.clone( + self._as_structured(self._link_accelerations, wp.spatial_vectorf, (self._count, self._num_links)) + ) + + def get_link_incoming_joint_force(self) -> wp.array: + """Get incoming joint forces for all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.spatial_vectorf (matching real PhysX). + """ + if self._link_incoming_joint_force is None: + self._link_incoming_joint_force = wp.zeros( + (self._count, self._num_links, 6), dtype=wp.float32, device=self._device + ) + return wp.clone( + self._as_structured(self._link_incoming_joint_force, wp.spatial_vectorf, (self._count, self._num_links)) + ) + + # -- DOF Getters -- + + def get_dof_positions(self) -> wp.array: + """Get positions of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. + """ + if self._dof_positions is None: + self._dof_positions = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device=self._device) + return wp.clone(self._dof_positions) + + def get_dof_velocities(self) -> wp.array: + """Get velocities of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. + """ + if self._dof_velocities is None: + self._dof_velocities = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device=self._device) + return wp.clone(self._dof_velocities) + + def get_dof_projected_joint_forces(self) -> wp.array: + """Get projected joint forces of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. + """ + if self._dof_projected_joint_forces is None: + self._dof_projected_joint_forces = wp.zeros( + (self._count, self._num_dofs), dtype=wp.float32, device=self._device + ) + return wp.clone(self._dof_projected_joint_forces) + + def get_dof_limits(self) -> wp.array: + """Get position limits of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.vec2f (matching real PhysX). Always on CPU. + """ + if self._dof_limits is None: + import numpy as np + + limits = np.zeros((self._count, self._num_dofs, 2), dtype=np.float32) + limits[:, :, 0] = float("-inf") # lower limit + limits[:, :, 1] = float("inf") # upper limit + self._dof_limits = wp.array(limits, dtype=wp.float32, device="cpu") + return wp.clone(self._as_structured(self._dof_limits, wp.vec2f, (self._count, self._num_dofs))) + + def get_dof_stiffnesses(self) -> wp.array: + """Get stiffnesses of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_stiffnesses is None: + self._dof_stiffnesses = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + return wp.clone(self._dof_stiffnesses) + + def get_dof_dampings(self) -> wp.array: + """Get dampings of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_dampings is None: + self._dof_dampings = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + return wp.clone(self._dof_dampings) + + def get_dof_max_forces(self) -> wp.array: + """Get maximum forces of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_max_forces is None: + import numpy as np + + arr = np.full((self._count, self._num_dofs), float("inf"), dtype=np.float32) + self._dof_max_forces = wp.array(arr, dtype=wp.float32, device="cpu") + return wp.clone(self._dof_max_forces) + + def get_dof_max_velocities(self) -> wp.array: + """Get maximum velocities of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_max_velocities is None: + import numpy as np + + arr = np.full((self._count, self._num_dofs), float("inf"), dtype=np.float32) + self._dof_max_velocities = wp.array(arr, dtype=wp.float32, device="cpu") + return wp.clone(self._dof_max_velocities) + + def get_dof_armatures(self) -> wp.array: + """Get armatures of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_armatures is None: + self._dof_armatures = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + return wp.clone(self._dof_armatures) + + def get_dof_friction_coefficients(self) -> wp.array: + """Get friction coefficients of all DOFs. + + Returns: + Warp array of shape (N, J) with dtype=wp.float32. Always on CPU. + """ + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + return wp.clone(self._dof_friction_coefficients) + + def get_dof_friction_properties(self) -> wp.array: + """Get friction properties of all DOFs. + + Returns: + Warp array of shape (N, J, 3) with dtype=wp.float32. Always on CPU. + """ + if self._dof_friction_properties is None: + self._dof_friction_properties = wp.zeros((self._count, self._num_dofs, 3), dtype=wp.float32, device="cpu") + return wp.clone(self._dof_friction_properties) + + # -- Mass Property Getters -- + + def get_masses(self) -> wp.array: + """Get masses of all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.float32. Always on CPU. + """ + if self._masses is None: + self._masses = wp.ones((self._count, self._num_links), dtype=wp.float32, device="cpu") + return wp.clone(self._masses) + + def get_coms(self) -> wp.array: + """Get centers of mass of all links. + + Returns: + Warp array of shape (N, L) with dtype=wp.transformf (matching real PhysX). Always on CPU. + """ + if self._coms is None: + self._coms = self._create_identity_transforms_2d(self._count, self._num_links, device="cpu") + return wp.clone(self._as_structured(self._coms, wp.transformf, (self._count, self._num_links))) + + def get_inertias(self) -> wp.array: + """Get inertia tensors of all links. + + Returns: + Warp array of shape (N, L, 9) with dtype=wp.float32 - flattened 3x3 matrices. Always on CPU. + """ + if self._inertias is None: + self._inertias = wp.zeros((self._count, self._num_links, 9), dtype=wp.float32, device="cpu") + wp.launch( + init_identity_inertias_2d, + dim=(self._count, self._num_links), + inputs=[self._inertias], + device="cpu", + ) + return wp.clone(self._inertias) + + # -- Root Setters -- + + def set_root_transforms( + self, + transforms: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set world transforms of root links. + + Args: + transforms: Warp array of shape (N, 7) or (len(indices), 7) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + if self._root_transforms is None: + self._root_transforms = self._create_identity_transforms_1d(self._count) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], 7), + inputs=[transforms, indices, self._root_transforms], + device=self._device, + ) + else: + wp.copy(self._root_transforms, transforms) + + def set_root_velocities( + self, + velocities: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set velocities of root links. + + Args: + velocities: Warp array of shape (N, 6) or (len(indices), 6) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + if self._root_velocities is None: + self._root_velocities = wp.zeros((self._count, 6), dtype=wp.float32, device=self._device) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], 6), + inputs=[velocities, indices, self._root_velocities], + device=self._device, + ) + else: + wp.copy(self._root_velocities, velocities) + + # -- DOF Setters -- + + def set_dof_positions( + self, + positions: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set positions of all DOFs. + + Args: + positions: Warp array of shape (N, J) or (len(indices), J) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + if self._dof_positions is None: + self._dof_positions = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device=self._device) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], self._num_dofs), + inputs=[positions, indices, self._dof_positions], + device=self._device, + ) + else: + wp.copy(self._dof_positions, positions) + + def set_dof_velocities( + self, + velocities: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set velocities of all DOFs. + + Args: + velocities: Warp array of shape (N, J) or (len(indices), J) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + if self._noop_setters: + return + if self._dof_velocities is None: + self._dof_velocities = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device=self._device) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], self._num_dofs), + inputs=[velocities, indices, self._dof_velocities], + device=self._device, + ) + else: + wp.copy(self._dof_velocities, velocities) + + def set_dof_position_targets( + self, + targets: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set position targets for all DOFs (no-op in mock). + + Args: + targets: Warp array of shape (N, J) or (len(indices), J) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_velocity_targets( + self, + targets: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set velocity targets for all DOFs (no-op in mock). + + Args: + targets: Warp array of shape (N, J) or (len(indices), J) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_actuation_forces( + self, + forces: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set actuation forces for all DOFs (no-op in mock). + + Args: + forces: Warp array of shape (N, J) or (len(indices), J) with dtype=wp.float32. + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_limits( + self, + limits: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set position limits of all DOFs. + + Args: + limits: Warp array of shape (N, J, 2) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If limits array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(limits, "dof_limits") + if self._dof_limits is None: + import numpy as np + + arr = np.zeros((self._count, self._num_dofs, 2), dtype=np.float32) + arr[:, :, 0] = float("-inf") + arr[:, :, 1] = float("inf") + self._dof_limits = wp.array(arr, dtype=wp.float32, device="cpu") + if indices is not None: + # numpy() on CPU warp arrays is zero-copy; in-place write modifies the warp array directly + self._dof_limits.numpy()[indices.numpy()] = limits.numpy() + else: + wp.copy(self._dof_limits, limits) + + def set_dof_stiffnesses( + self, + stiffnesses: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set stiffnesses of all DOFs. + + Args: + stiffnesses: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If stiffnesses array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(stiffnesses, "dof_stiffnesses") + if self._dof_stiffnesses is None: + self._dof_stiffnesses = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_stiffnesses.numpy()[indices.numpy()] = stiffnesses.numpy() + else: + wp.copy(self._dof_stiffnesses, stiffnesses) + + def set_dof_dampings( + self, + dampings: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set dampings of all DOFs. + + Args: + dampings: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If dampings array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(dampings, "dof_dampings") + if self._dof_dampings is None: + self._dof_dampings = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_dampings.numpy()[indices.numpy()] = dampings.numpy() + else: + wp.copy(self._dof_dampings, dampings) + + def set_dof_max_forces( + self, + max_forces: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set maximum forces of all DOFs. + + Args: + max_forces: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_forces array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(max_forces, "dof_max_forces") + if self._dof_max_forces is None: + import numpy as np + + arr = np.full((self._count, self._num_dofs), float("inf"), dtype=np.float32) + self._dof_max_forces = wp.array(arr, dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_max_forces.numpy()[indices.numpy()] = max_forces.numpy() + else: + wp.copy(self._dof_max_forces, max_forces) + + def set_dof_max_velocities( + self, + max_velocities: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set maximum velocities of all DOFs. + + Args: + max_velocities: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_velocities array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(max_velocities, "dof_max_velocities") + if self._dof_max_velocities is None: + import numpy as np + + arr = np.full((self._count, self._num_dofs), float("inf"), dtype=np.float32) + self._dof_max_velocities = wp.array(arr, dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_max_velocities.numpy()[indices.numpy()] = max_velocities.numpy() + else: + wp.copy(self._dof_max_velocities, max_velocities) + + def set_dof_armatures( + self, + armatures: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set armatures of all DOFs. + + Args: + armatures: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If armatures array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(armatures, "dof_armatures") + if self._dof_armatures is None: + self._dof_armatures = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_armatures.numpy()[indices.numpy()] = armatures.numpy() + else: + wp.copy(self._dof_armatures, armatures) + + def set_dof_friction_coefficients( + self, + friction_coefficients: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set friction coefficients of all DOFs. + + Args: + friction_coefficients: Warp array of shape (N, J) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If friction_coefficients array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(friction_coefficients, "dof_friction_coefficients") + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = wp.zeros((self._count, self._num_dofs), dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_friction_coefficients.numpy()[indices.numpy()] = friction_coefficients.numpy() + else: + wp.copy(self._dof_friction_coefficients, friction_coefficients) + + def set_dof_friction_properties( + self, + friction_properties: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set friction properties of all DOFs. + + Args: + friction_properties: Warp array of shape (N, J, 3) with [static, dynamic, viscous]. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If friction_properties array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(friction_properties, "dof_friction_properties") + if self._dof_friction_properties is None: + self._dof_friction_properties = wp.zeros((self._count, self._num_dofs, 3), dtype=wp.float32, device="cpu") + if indices is not None: + self._dof_friction_properties.numpy()[indices.numpy()] = friction_properties.numpy() + else: + wp.copy(self._dof_friction_properties, friction_properties) + + # -- Mass Property Setters -- + + def set_masses( + self, + masses: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set masses of all links. + + Args: + masses: Warp array of shape (N, L) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If masses array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(masses, "masses") + if self._masses is None: + self._masses = wp.ones((self._count, self._num_links), dtype=wp.float32, device="cpu") + if indices is not None: + self._masses.numpy()[indices.numpy()] = masses.numpy() + else: + wp.copy(self._masses, masses) + + def set_coms( + self, + coms: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set centers of mass of all links. + + Args: + coms: Warp array of shape (N, L, 7) with dtype=wp.float32. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If coms array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(coms, "coms") + if self._coms is None: + self._coms = self._create_identity_transforms_2d(self._count, self._num_links, device="cpu") + if indices is not None: + self._coms.numpy()[indices.numpy()] = coms.numpy() + else: + wp.copy(self._coms, coms) + + def set_inertias( + self, + inertias: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set inertia tensors of all links. + + Args: + inertias: Warp array of shape (N, L, 9) with dtype=wp.float32 - flattened 3x3 matrices. Must be on CPU. + indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If inertias array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(inertias, "inertias") + if self._inertias is None: + self._inertias = wp.zeros((self._count, self._num_links, 9), dtype=wp.float32, device="cpu") + wp.launch( + init_identity_inertias_2d, + dim=(self._count, self._num_links), + inputs=[self._inertias], + device="cpu", + ) + if indices is not None: + self._inertias.numpy()[indices.numpy()] = inertias.numpy() + else: + wp.copy(self._inertias, inertias) + + # -- Mock setters (direct test data injection) -- + + def set_mock_root_transforms(self, transforms: wp.array) -> None: + """Set mock root transform data directly for testing. + + Args: + transforms: Warp array of shape (N, 7) with dtype=wp.float32. + """ + self._root_transforms = wp.clone(transforms) + if self._root_transforms.device.alias != self._device: + self._root_transforms = self._root_transforms.to(self._device) + + def set_mock_root_velocities(self, velocities: wp.array) -> None: + """Set mock root velocity data directly for testing. + + Args: + velocities: Warp array of shape (N, 6) with dtype=wp.float32. + """ + self._root_velocities = wp.clone(velocities) + if self._root_velocities.device.alias != self._device: + self._root_velocities = self._root_velocities.to(self._device) + + def set_mock_link_transforms(self, transforms: wp.array) -> None: + """Set mock link transform data directly for testing. + + Args: + transforms: Warp array of shape (N, L, 7) with dtype=wp.float32. + """ + self._link_transforms = wp.clone(transforms) + if self._link_transforms.device.alias != self._device: + self._link_transforms = self._link_transforms.to(self._device) + + def set_mock_link_velocities(self, velocities: wp.array) -> None: + """Set mock link velocity data directly for testing. + + Args: + velocities: Warp array of shape (N, L, 6) with dtype=wp.float32. + """ + self._link_velocities = wp.clone(velocities) + if self._link_velocities.device.alias != self._device: + self._link_velocities = self._link_velocities.to(self._device) + + def set_mock_link_accelerations(self, accelerations: wp.array) -> None: + """Set mock link acceleration data directly for testing. + + Args: + accelerations: Warp array of shape (N, L, 6) with dtype=wp.float32. + """ + self._link_accelerations = wp.clone(accelerations) + if self._link_accelerations.device.alias != self._device: + self._link_accelerations = self._link_accelerations.to(self._device) + + def set_mock_link_incoming_joint_force(self, forces: wp.array) -> None: + """Set mock link incoming joint force data directly for testing. + + Args: + forces: Warp array of shape (N, L, 6) with dtype=wp.float32. + """ + self._link_incoming_joint_force = wp.clone(forces) + if self._link_incoming_joint_force.device.alias != self._device: + self._link_incoming_joint_force = self._link_incoming_joint_force.to(self._device) + + def set_mock_dof_positions(self, positions: wp.array) -> None: + """Set mock DOF position data directly for testing. + + Args: + positions: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_positions = wp.clone(positions) + if self._dof_positions.device.alias != self._device: + self._dof_positions = self._dof_positions.to(self._device) + + def set_mock_dof_velocities(self, velocities: wp.array) -> None: + """Set mock DOF velocity data directly for testing. + + Args: + velocities: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_velocities = wp.clone(velocities) + if self._dof_velocities.device.alias != self._device: + self._dof_velocities = self._dof_velocities.to(self._device) + + def set_mock_dof_projected_joint_forces(self, forces: wp.array) -> None: + """Set mock projected joint force data directly for testing. + + Args: + forces: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_projected_joint_forces = wp.clone(forces) + if self._dof_projected_joint_forces.device.alias != self._device: + self._dof_projected_joint_forces = self._dof_projected_joint_forces.to(self._device) + + def set_mock_dof_limits(self, limits: wp.array) -> None: + """Set mock DOF limit data directly for testing. + + Args: + limits: Warp array of shape (N, J, 2) with dtype=wp.float32. + """ + self._dof_limits = wp.clone(limits) + + def set_mock_dof_stiffnesses(self, stiffnesses: wp.array) -> None: + """Set mock DOF stiffness data directly for testing. + + Args: + stiffnesses: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_stiffnesses = wp.clone(stiffnesses) + + def set_mock_dof_dampings(self, dampings: wp.array) -> None: + """Set mock DOF damping data directly for testing. + + Args: + dampings: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_dampings = wp.clone(dampings) + + def set_mock_dof_max_forces(self, max_forces: wp.array) -> None: + """Set mock DOF max force data directly for testing. + + Args: + max_forces: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_max_forces = wp.clone(max_forces) + + def set_mock_dof_max_velocities(self, max_velocities: wp.array) -> None: + """Set mock DOF max velocity data directly for testing. + + Args: + max_velocities: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_max_velocities = wp.clone(max_velocities) + + def set_mock_dof_armatures(self, armatures: wp.array) -> None: + """Set mock DOF armature data directly for testing. + + Args: + armatures: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_armatures = wp.clone(armatures) + + def set_mock_dof_friction_coefficients(self, friction_coefficients: wp.array) -> None: + """Set mock DOF friction coefficient data directly for testing. + + Args: + friction_coefficients: Warp array of shape (N, J) with dtype=wp.float32. + """ + self._dof_friction_coefficients = wp.clone(friction_coefficients) + + def set_mock_dof_friction_properties(self, friction_properties: wp.array) -> None: + """Set mock DOF friction properties data directly for testing. + + Args: + friction_properties: Warp array of shape (N, J, 3) with dtype=wp.float32. + """ + self._dof_friction_properties = wp.clone(friction_properties) + + def set_mock_masses(self, masses: wp.array) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Warp array of shape (N, L) with dtype=wp.float32. + """ + self._masses = wp.clone(masses) + + def set_mock_coms(self, coms: wp.array) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Warp array of shape (N, L, 7) with dtype=wp.float32. + """ + self._coms = wp.clone(coms) + + def set_mock_inertias(self, inertias: wp.array) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Warp array of shape (N, L, 9) with dtype=wp.float32. + """ + self._inertias = wp.clone(inertias) + + # -- Actions (no-op for testing) -- + + def apply_forces_and_torques_at_position( + self, + force_data: wp.array | None = None, + torque_data: wp.array | None = None, + position_data: wp.array | None = None, + indices: wp.array | None = None, + is_global: bool = True, + ) -> None: + """Apply forces and torques at positions (no-op in mock). + + Args: + force_data: Forces to apply, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + torque_data: Torques to apply, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + position_data: Positions to apply forces at, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + indices: Optional indices of articulations to apply to. + is_global: Whether forces/torques are in global frame. + """ + pass # No-op for mock + + # -- Tendon Getters (stubs) -- + + def get_fixed_tendon_stiffnesses(self) -> wp.array: + """Get fixed tendon stiffnesses. Returns zeros of shape (N, max_fixed_tendons).""" + return wp.zeros((self._count, self._max_fixed_tendons), dtype=wp.float32, device="cpu") + + def get_fixed_tendon_dampings(self) -> wp.array: + """Get fixed tendon dampings. Returns zeros of shape (N, max_fixed_tendons).""" + return wp.zeros((self._count, self._max_fixed_tendons), dtype=wp.float32, device="cpu") + + def get_fixed_tendon_limit_stiffnesses(self) -> wp.array: + """Get fixed tendon limit stiffnesses. Returns zeros of shape (N, max_fixed_tendons).""" + return wp.zeros((self._count, self._max_fixed_tendons), dtype=wp.float32, device="cpu") + + def get_fixed_tendon_rest_lengths(self) -> wp.array: + """Get fixed tendon rest lengths. Returns zeros of shape (N, max_fixed_tendons).""" + return wp.zeros((self._count, self._max_fixed_tendons), dtype=wp.float32, device="cpu") + + def get_fixed_tendon_offsets(self) -> wp.array: + """Get fixed tendon offsets. Returns zeros of shape (N, max_fixed_tendons).""" + return wp.zeros((self._count, self._max_fixed_tendons), dtype=wp.float32, device="cpu") + + def get_fixed_tendon_limits(self) -> wp.array: + """Get fixed tendon limits. Returns zeros of shape (N, max_fixed_tendons, 2).""" + return wp.zeros((self._count, self._max_fixed_tendons, 2), dtype=wp.float32, device="cpu") + + def get_spatial_tendon_stiffnesses(self) -> wp.array: + """Get spatial tendon stiffnesses. Returns zeros of shape (N, max_spatial_tendons).""" + return wp.zeros((self._count, self._max_spatial_tendons), dtype=wp.float32, device="cpu") + + def get_spatial_tendon_dampings(self) -> wp.array: + """Get spatial tendon dampings. Returns zeros of shape (N, max_spatial_tendons).""" + return wp.zeros((self._count, self._max_spatial_tendons), dtype=wp.float32, device="cpu") + + def get_spatial_tendon_limit_stiffnesses(self) -> wp.array: + """Get spatial tendon limit stiffnesses. Returns zeros of shape (N, max_spatial_tendons).""" + return wp.zeros((self._count, self._max_spatial_tendons), dtype=wp.float32, device="cpu") + + def get_spatial_tendon_offsets(self) -> wp.array: + """Get spatial tendon offsets. Returns zeros of shape (N, max_spatial_tendons).""" + return wp.zeros((self._count, self._max_spatial_tendons), dtype=wp.float32, device="cpu") + + # -- Tendon Setters (no-op stubs) -- + + def set_fixed_tendon_properties( + self, + stiffness: wp.array | None = None, + damping: wp.array | None = None, + limit_stiffness: wp.array | None = None, + pos_limits: wp.array | None = None, + rest_length: wp.array | None = None, + offset: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set fixed tendon properties (no-op in mock).""" + pass # No-op for mock + + def set_spatial_tendon_properties( + self, + stiffness: wp.array | None = None, + damping: wp.array | None = None, + limit_stiffness: wp.array | None = None, + offset: wp.array | None = None, + indices: wp.array | None = None, + ) -> None: + """Set spatial tendon properties (no-op in mock).""" + pass # No-op for mock + + # -- Benchmark Utilities -- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random warp arrays, + useful for benchmarking where the actual values don't matter. + """ + import numpy as np + + N = self._count + L = self._num_links + J = self._num_dofs + + # Root state - on device + root_tf = np.random.randn(N, 7).astype(np.float32) + root_tf[:, 3:7] /= np.linalg.norm(root_tf[:, 3:7], axis=-1, keepdims=True) + self._root_transforms = wp.array(root_tf, dtype=wp.float32, device=self._device) + self._root_velocities = wp.array( + np.random.randn(N, 6).astype(np.float32), dtype=wp.float32, device=self._device + ) + + # Link state - on device + link_tf = np.random.randn(N, L, 7).astype(np.float32) + link_tf[:, :, 3:7] /= np.linalg.norm(link_tf[:, :, 3:7], axis=-1, keepdims=True) + self._link_transforms = wp.array(link_tf, dtype=wp.float32, device=self._device) + self._link_velocities = wp.array( + np.random.randn(N, L, 6).astype(np.float32), dtype=wp.float32, device=self._device + ) + self._link_accelerations = wp.array( + np.random.randn(N, L, 6).astype(np.float32), dtype=wp.float32, device=self._device + ) + self._link_incoming_joint_force = wp.array( + np.random.randn(N, L, 6).astype(np.float32), dtype=wp.float32, device=self._device + ) + + # DOF state - on device + self._dof_positions = wp.array(np.random.randn(N, J).astype(np.float32), dtype=wp.float32, device=self._device) + self._dof_velocities = wp.array(np.random.randn(N, J).astype(np.float32), dtype=wp.float32, device=self._device) + self._dof_projected_joint_forces = wp.array( + np.random.randn(N, J).astype(np.float32), dtype=wp.float32, device=self._device + ) + + # DOF properties - on CPU (PhysX requirement) + self._dof_limits = wp.array(np.random.randn(N, J, 2).astype(np.float32), dtype=wp.float32, device="cpu") + self._dof_stiffnesses = wp.array( + (np.random.rand(N, J) * 100).astype(np.float32), dtype=wp.float32, device="cpu" + ) + self._dof_dampings = wp.array((np.random.rand(N, J) * 10).astype(np.float32), dtype=wp.float32, device="cpu") + self._dof_max_forces = wp.array((np.random.rand(N, J) * 100).astype(np.float32), dtype=wp.float32, device="cpu") + self._dof_max_velocities = wp.array( + (np.random.rand(N, J) * 10).astype(np.float32), dtype=wp.float32, device="cpu" + ) + self._dof_armatures = wp.array((np.random.rand(N, J) * 0.1).astype(np.float32), dtype=wp.float32, device="cpu") + self._dof_friction_coefficients = wp.array( + np.random.rand(N, J).astype(np.float32), dtype=wp.float32, device="cpu" + ) + self._dof_friction_properties = wp.array( + np.random.rand(N, J, 3).astype(np.float32), dtype=wp.float32, device="cpu" + ) + + # Mass properties - on CPU (PhysX requirement) + self._masses = wp.array((np.random.rand(N, L) * 10).astype(np.float32), dtype=wp.float32, device="cpu") + coms = np.random.randn(N, L, 7).astype(np.float32) + coms[:, :, 3:7] /= np.linalg.norm(coms[:, :, 3:7], axis=-1, keepdims=True) + self._coms = wp.array(coms, dtype=wp.float32, device="cpu") + inertias = np.zeros((N, L, 9), dtype=np.float32) + diag = np.random.rand(N, L, 3) + 0.1 + inertias[:, :, 0] = diag[:, :, 0] + inertias[:, :, 4] = diag[:, :, 1] + inertias[:, :, 8] = diag[:, :, 2] + self._inertias = wp.array(inertias, dtype=wp.float32, device="cpu") diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py new file mode 100644 index 000000000000..8c1ca5c92ea4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py @@ -0,0 +1,371 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidBodyView.""" + +from __future__ import annotations + +import torch + + +class MockRigidBodyView: + """Mock implementation of physx.RigidBodyView for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidBodyView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - transforms: (N, 7) - [pos(3), quat_xyzw(4)] + - velocities: (N, 6) - [lin_vel(3), ang_vel(3)] + - accelerations: (N, 6) - [lin_acc(3), ang_acc(3)] + - masses: (N, 1) + - coms: (N, 7) - center of mass [pos(3), quat_xyzw(4)] + - inertias: (N, 9) - flattened 3x3 inertia matrix (row-major) + """ + + def __init__( + self, + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", + ): + """Initialize the mock rigid body view. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation ("cpu" or "cuda"). + """ + self._count = count + self._prim_paths = prim_paths or [f"/World/RigidBody_{i}" for i in range(count)] + self._device = device + self._backend = "torch" + self._noop_setters = False + + # Internal state (lazily initialized) + self._transforms: torch.Tensor | None = None + self._velocities: torch.Tensor | None = None + self._accelerations: torch.Tensor | None = None + self._masses: torch.Tensor | None = None + self._coms: torch.Tensor | None = None + self._inertias: torch.Tensor | None = None + + # -- Helper Methods -- + + def _check_cpu_tensor(self, tensor: torch.Tensor, name: str) -> None: + """Check that tensor is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where body properties must be on CPU. + """ + if tensor.is_cuda: + raise RuntimeError( + f"Expected CPU tensor for {name}, but got tensor on {tensor.device}. " + "Body properties must be set with CPU tensors." + ) + + # -- Properties -- + + @property + def count(self) -> int: + """Number of rigid body instances.""" + return self._count + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + # -- Getters -- + + def get_transforms(self) -> torch.Tensor: + """Get world transforms of all rigid bodies. + + Returns: + Tensor of shape (N, 7) with [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w]. + """ + if self._transforms is None: + # Default: origin position with identity quaternion (xyzw format) + self._transforms = torch.zeros(self._count, 7, device=self._device) + self._transforms[:, 6] = 1.0 # w=1 for identity quaternion + return self._transforms.clone() + + def get_velocities(self) -> torch.Tensor: + """Get velocities of all rigid bodies. + + Returns: + Tensor of shape (N, 6) with [lin_vel(3), ang_vel(3)]. + """ + if self._velocities is None: + self._velocities = torch.zeros(self._count, 6, device=self._device) + return self._velocities.clone() + + def get_accelerations(self) -> torch.Tensor: + """Get accelerations of all rigid bodies. + + Returns: + Tensor of shape (N, 6) with [lin_acc(3), ang_acc(3)]. + """ + if self._accelerations is None: + self._accelerations = torch.zeros(self._count, 6, device=self._device) + return self._accelerations.clone() + + def get_masses(self) -> torch.Tensor: + """Get masses of all rigid bodies. + + Returns: + Tensor of shape (N, 1) with mass values. Always on CPU. + """ + if self._masses is None: + self._masses = torch.ones(self._count, 1, device="cpu") + return self._masses.clone() + + def get_coms(self) -> torch.Tensor: + """Get centers of mass of all rigid bodies. + + Returns: + Tensor of shape (N, 7) with [pos(3), quat_xyzw(4)]. Always on CPU. + """ + if self._coms is None: + # Default: local origin with identity quaternion - stored on CPU + self._coms = torch.zeros(self._count, 7, device="cpu") + self._coms[:, 6] = 1.0 # w=1 for identity quaternion + return self._coms.clone() + + def get_inertias(self) -> torch.Tensor: + """Get inertia tensors of all rigid bodies. + + Returns: + Tensor of shape (N, 9) with flattened 3x3 inertia matrices (row-major). Always on CPU. + """ + if self._inertias is None: + # Default: identity inertia (unit sphere) - flattened [1,0,0,0,1,0,0,0,1] - stored on CPU + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = 1.0 # [0,0] + self._inertias[:, 4] = 1.0 # [1,1] + self._inertias[:, 8] = 1.0 # [2,2] + return self._inertias.clone() + + # -- Setters (simulation interface) -- + + def set_transforms( + self, + transforms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set world transforms of rigid bodies. + + Args: + transforms: Tensor of shape (N, 7) or (len(indices), 7). + indices: Optional indices of bodies to update. + """ + if self._noop_setters: + return + transforms = transforms.to(self._device) + if self._transforms is None: + self._transforms = torch.zeros(self._count, 7, device=self._device) + self._transforms[:, 6] = 1.0 + if indices is not None: + self._transforms[indices] = transforms + else: + self._transforms = transforms + + def set_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of rigid bodies. + + Args: + velocities: Tensor of shape (N, 6) or (len(indices), 6). + indices: Optional indices of bodies to update. + """ + if self._noop_setters: + return + velocities = velocities.to(self._device) + if self._velocities is None: + self._velocities = torch.zeros(self._count, 6, device=self._device) + if indices is not None: + self._velocities[indices] = velocities + else: + self._velocities = velocities + + def set_masses( + self, + masses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set masses of rigid bodies. + + Args: + masses: Tensor of shape (N, 1) or (len(indices), 1). Must be on CPU. + indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If masses tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(masses, "masses") + if self._masses is None: + self._masses = torch.ones(self._count, 1, device="cpu") + if indices is not None: + self._masses[indices] = masses + else: + self._masses = masses + + def set_coms( + self, + coms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set centers of mass of rigid bodies. + + Args: + coms: Tensor of shape (N, 7) or (len(indices), 7). Must be on CPU. + indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If coms tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(coms, "coms") + if self._coms is None: + self._coms = torch.zeros(self._count, 7, device="cpu") + self._coms[:, 6] = 1.0 + if indices is not None: + self._coms[indices] = coms + else: + self._coms = coms + + def set_inertias( + self, + inertias: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set inertia tensors of rigid bodies. + + Args: + inertias: Tensor of shape (N, 9) or (len(indices), 9) - flattened 3x3 matrices. Must be on CPU. + indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If inertias tensor is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_tensor(inertias, "inertias") + if self._inertias is None: + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = 1.0 + self._inertias[:, 4] = 1.0 + self._inertias[:, 8] = 1.0 + if indices is not None: + self._inertias[indices] = inertias + else: + self._inertias = inertias + + # -- Mock setters (direct test data injection) -- + + def set_mock_transforms(self, transforms: torch.Tensor) -> None: + """Set mock transform data directly for testing. + + Args: + transforms: Tensor of shape (N, 7). + """ + self._transforms = transforms.to(self._device) + + def set_mock_velocities(self, velocities: torch.Tensor) -> None: + """Set mock velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, 6). + """ + self._velocities = velocities.to(self._device) + + def set_mock_accelerations(self, accelerations: torch.Tensor) -> None: + """Set mock acceleration data directly for testing. + + Args: + accelerations: Tensor of shape (N, 6). + """ + self._accelerations = accelerations.to(self._device) + + def set_mock_masses(self, masses: torch.Tensor) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Tensor of shape (N, 1). + """ + self._masses = masses.to(self._device) + + def set_mock_coms(self, coms: torch.Tensor) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Tensor of shape (N, 7). + """ + self._coms = coms.to(self._device) + + def set_mock_inertias(self, inertias: torch.Tensor) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Tensor of shape (N, 9) - flattened 3x3 matrices. + """ + self._inertias = inertias.to(self._device) + + # -- Actions (no-op for testing) -- + + def apply_forces_and_torques_at_position( + self, + force_data: torch.Tensor | None = None, + torque_data: torch.Tensor | None = None, + position_data: torch.Tensor | None = None, + indices: torch.Tensor | None = None, + is_global: bool = True, + ) -> None: + """Apply forces and torques at positions (no-op in mock). + + Args: + force_data: Forces to apply, shape (N, 3) or (len(indices), 3). + torque_data: Torques to apply, shape (N, 3) or (len(indices), 3). + position_data: Positions to apply forces at, shape (N, 3) or (len(indices), 3). + indices: Optional indices of bodies to apply to. + is_global: Whether forces/torques are in global frame. + """ + pass # No-op for mock + + # -- Convenience method for benchmarking -- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random values, + useful for benchmarking where the actual values don't matter. + """ + # Transforms with normalized quaternions - on device + self._transforms = torch.randn(self._count, 7, device=self._device) + self._transforms[:, 3:7] = torch.nn.functional.normalize(self._transforms[:, 3:7], dim=-1) + + # Velocities and accelerations - on device + self._velocities = torch.randn(self._count, 6, device=self._device) + self._accelerations = torch.randn(self._count, 6, device=self._device) + + # Mass properties - stored on CPU (PhysX requirement) + self._masses = torch.rand(self._count, 1, device="cpu") * 10 + + # Center of mass with normalized quaternions - stored on CPU (PhysX requirement) + self._coms = torch.randn(self._count, 7, device="cpu") + self._coms[:, 3:7] = torch.nn.functional.normalize(self._coms[:, 3:7], dim=-1) + + # Inertia tensors (positive definite diagonal) - flattened (N, 9) - stored on CPU (PhysX requirement) + # Create diagonal inertia matrices and flatten + diag_values = torch.rand(self._count, 3, device="cpu") + 0.1 # Ensure positive + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = diag_values[:, 0] # [0,0] + self._inertias[:, 4] = diag_values[:, 1] # [1,1] + self._inertias[:, 8] = diag_values[:, 2] # [2,2] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view_warp.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view_warp.py new file mode 100644 index 000000000000..a3ef67fefdd7 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view_warp.py @@ -0,0 +1,399 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidBodyView using Warp arrays.""" + +from __future__ import annotations + +import warp as wp + +from ..utils.kernels_mock import ( + init_identity_inertias_1d, + init_identity_transforms_1d_flat, + scatter_floats_2d, +) + + +class MockRigidBodyViewWarp: + """Mock implementation of physx.RigidBodyView using Warp arrays for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidBodyView using + flat float32 arrays (matching real PhysX behavior), allowing tests to run + without Isaac Sim or GPU simulation. + + Data Shapes (flat float32, matching real PhysX views): + - transforms: (N, 7) dtype=wp.float32 - [pos(3), quat_xyzw(4)] + - velocities: (N, 6) dtype=wp.float32 - [ang_vel(3), lin_vel(3)] + - accelerations: (N, 6) dtype=wp.float32 - [ang_acc(3), lin_acc(3)] + - masses: (N, 1) dtype=wp.float32 + - coms: (N, 7) dtype=wp.float32 - center of mass [pos(3), quat_xyzw(4)] + - inertias: (N, 9) dtype=wp.float32 - flattened 3x3 inertia matrix (row-major) + """ + + def __init__( + self, + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", + ): + """Initialize the mock rigid body view. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for array allocation ("cpu" or "cuda:N"). + """ + self._count = count + self._prim_paths = prim_paths or [f"/World/RigidBody_{i}" for i in range(count)] + self._device = device + self._backend = "warp" + self._noop_setters = False + + # Internal state (lazily initialized) + self._transforms: wp.array | None = None + self._velocities: wp.array | None = None + self._accelerations: wp.array | None = None + self._masses: wp.array | None = None + self._coms: wp.array | None = None + self._inertias: wp.array | None = None + + # -- Helper Methods -- + + def _check_cpu_array(self, arr: wp.array, name: str) -> None: + """Check that array is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where body properties must be on CPU. + """ + if arr.device.is_cuda: + raise RuntimeError( + f"Expected CPU array for {name}, but got array on {arr.device}. " + "Body properties must be set with CPU arrays." + ) + + def _create_identity_transforms(self, count: int, device: str | None = None) -> wp.array: + """Create array of identity transforms as (count, 7) float32.""" + dev = device or self._device + arr = wp.zeros((count, 7), dtype=wp.float32, device=dev) + wp.launch(init_identity_transforms_1d_flat, dim=count, inputs=[arr], device=dev) + return arr + + # -- Properties -- + + @property + def count(self) -> int: + """Number of rigid body instances.""" + return self._count + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + # -- Getters -- + + def get_transforms(self) -> wp.array: + """Get world transforms of all rigid bodies. + + Returns: + Warp array of shape (N, 7) with dtype=wp.float32. + Each row contains [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w]. + """ + if self._transforms is None: + self._transforms = self._create_identity_transforms(self._count) + return wp.clone(self._transforms) + + def get_velocities(self) -> wp.array: + """Get velocities of all rigid bodies. + + Returns: + Warp array of shape (N, 6) with dtype=wp.float32. + Each row contains [ang_vel(3), lin_vel(3)]. + """ + if self._velocities is None: + self._velocities = wp.zeros((self._count, 6), dtype=wp.float32, device=self._device) + return wp.clone(self._velocities) + + def get_accelerations(self) -> wp.array: + """Get accelerations of all rigid bodies. + + Returns: + Warp array of shape (N, 6) with dtype=wp.float32. + Each row contains [ang_acc(3), lin_acc(3)]. + """ + if self._accelerations is None: + self._accelerations = wp.zeros((self._count, 6), dtype=wp.float32, device=self._device) + return wp.clone(self._accelerations) + + def get_masses(self) -> wp.array: + """Get masses of all rigid bodies. + + Returns: + Warp array of shape (N, 1) with dtype=wp.float32. Always on CPU. + """ + if self._masses is None: + self._masses = wp.ones((self._count, 1), dtype=wp.float32, device="cpu") + return wp.clone(self._masses) + + def get_coms(self) -> wp.array: + """Get centers of mass of all rigid bodies. + + Returns: + Warp array of shape (N, 7) with dtype=wp.float32. Always on CPU. + Each row contains [pos(3), quat_xyzw(4)]. + """ + if self._coms is None: + self._coms = self._create_identity_transforms(self._count, device="cpu") + return wp.clone(self._coms) + + def get_inertias(self) -> wp.array: + """Get inertia tensors of all rigid bodies. + + Returns: + Warp array of shape (N, 9) with dtype=wp.float32 - flattened 3x3 matrices (row-major). + Always on CPU. + """ + if self._inertias is None: + self._inertias = wp.zeros((self._count, 9), dtype=wp.float32, device="cpu") + wp.launch(init_identity_inertias_1d, dim=self._count, inputs=[self._inertias], device="cpu") + return wp.clone(self._inertias) + + # -- Setters (simulation interface) -- + + def set_transforms( + self, + transforms: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set world transforms of rigid bodies. + + Args: + transforms: Warp array of shape (N, 7) or (len(indices), 7) with dtype=wp.float32. + indices: Optional Warp array of indices (dtype=wp.int32) of bodies to update. + """ + if self._noop_setters: + return + if self._transforms is None: + self._transforms = self._create_identity_transforms(self._count) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], 7), + inputs=[transforms, indices, self._transforms], + device=self._device, + ) + else: + wp.copy(self._transforms, transforms) + + def set_velocities( + self, + velocities: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set velocities of rigid bodies. + + Args: + velocities: Warp array of shape (N, 6) or (len(indices), 6) with dtype=wp.float32. + indices: Optional Warp array of indices (dtype=wp.int32) of bodies to update. + """ + if self._noop_setters: + return + if self._velocities is None: + self._velocities = wp.zeros((self._count, 6), dtype=wp.float32, device=self._device) + if indices is not None: + wp.launch( + scatter_floats_2d, + dim=(indices.shape[0], 6), + inputs=[velocities, indices, self._velocities], + device=self._device, + ) + else: + wp.copy(self._velocities, velocities) + + def set_masses( + self, + masses: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set masses of rigid bodies. + + Args: + masses: Warp array of shape (N, 1) or (len(indices), 1) with dtype=wp.float32. Must be on CPU. + indices: Optional Warp array of indices (dtype=wp.int32) of bodies to update. + + Raises: + RuntimeError: If masses array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(masses, "masses") + if self._masses is None: + self._masses = wp.ones((self._count, 1), dtype=wp.float32, device="cpu") + if indices is not None: + self._masses.numpy()[indices.numpy()] = masses.numpy() + else: + wp.copy(self._masses, masses) + + def set_coms( + self, + coms: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set centers of mass of rigid bodies. + + Args: + coms: Warp array of shape (N, 7) or (len(indices), 7) with dtype=wp.float32. Must be on CPU. + indices: Optional Warp array of indices (dtype=wp.int32) of bodies to update. + + Raises: + RuntimeError: If coms array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(coms, "coms") + if self._coms is None: + self._coms = self._create_identity_transforms(self._count, device="cpu") + if indices is not None: + self._coms.numpy()[indices.numpy()] = coms.numpy() + else: + wp.copy(self._coms, coms) + + def set_inertias( + self, + inertias: wp.array, + indices: wp.array | None = None, + ) -> None: + """Set inertia tensors of rigid bodies. + + Args: + inertias: Warp array of shape (N, 9) or (len(indices), 9) - flattened 3x3 matrices. Must be on CPU. + indices: Optional Warp array of indices (dtype=wp.int32) of bodies to update. + + Raises: + RuntimeError: If inertias array is on GPU. + """ + if self._noop_setters: + return + self._check_cpu_array(inertias, "inertias") + if self._inertias is None: + self._inertias = wp.zeros((self._count, 9), dtype=wp.float32, device="cpu") + wp.launch(init_identity_inertias_1d, dim=self._count, inputs=[self._inertias], device="cpu") + if indices is not None: + self._inertias.numpy()[indices.numpy()] = inertias.numpy() + else: + wp.copy(self._inertias, inertias) + + # -- Mock setters (direct test data injection) -- + + def set_mock_transforms(self, transforms: wp.array) -> None: + """Set mock transform data directly for testing. + + Args: + transforms: Warp array of shape (N, 7) with dtype=wp.float32. + """ + self._transforms = wp.clone(transforms) + if self._transforms.device.alias != self._device: + self._transforms = self._transforms.to(self._device) + + def set_mock_velocities(self, velocities: wp.array) -> None: + """Set mock velocity data directly for testing. + + Args: + velocities: Warp array of shape (N, 6) with dtype=wp.float32. + """ + self._velocities = wp.clone(velocities) + if self._velocities.device.alias != self._device: + self._velocities = self._velocities.to(self._device) + + def set_mock_accelerations(self, accelerations: wp.array) -> None: + """Set mock acceleration data directly for testing. + + Args: + accelerations: Warp array of shape (N, 6) with dtype=wp.float32. + """ + self._accelerations = wp.clone(accelerations) + if self._accelerations.device.alias != self._device: + self._accelerations = self._accelerations.to(self._device) + + def set_mock_masses(self, masses: wp.array) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Warp array of shape (N, 1) with dtype=wp.float32. + """ + self._masses = wp.clone(masses) + + def set_mock_coms(self, coms: wp.array) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Warp array of shape (N, 7) with dtype=wp.float32. + """ + self._coms = wp.clone(coms) + + def set_mock_inertias(self, inertias: wp.array) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Warp array of shape (N, 9) with dtype=wp.float32 - flattened 3x3 matrices. + """ + self._inertias = wp.clone(inertias) + + # -- Convenience method for benchmarking -- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random warp arrays, + useful for benchmarking where the actual values don't matter. + """ + import numpy as np + + N = self._count + + # Transforms with normalized quaternions - on device + tf = np.random.randn(N, 7).astype(np.float32) + tf[:, 3:7] /= np.linalg.norm(tf[:, 3:7], axis=-1, keepdims=True) + self._transforms = wp.array(tf, dtype=wp.float32, device=self._device) + + # Velocities and accelerations - on device + self._velocities = wp.array(np.random.randn(N, 6).astype(np.float32), dtype=wp.float32, device=self._device) + self._accelerations = wp.array(np.random.randn(N, 6).astype(np.float32), dtype=wp.float32, device=self._device) + + # Mass properties - stored on CPU (PhysX requirement) + self._masses = wp.array((np.random.rand(N, 1) * 10).astype(np.float32), dtype=wp.float32, device="cpu") + + # Center of mass with normalized quaternions - stored on CPU (PhysX requirement) + c = np.random.randn(N, 7).astype(np.float32) + c[:, 3:7] /= np.linalg.norm(c[:, 3:7], axis=-1, keepdims=True) + self._coms = wp.array(c, dtype=wp.float32, device="cpu") + + # Inertia tensors (positive definite diagonal) - flattened (N, 9) - stored on CPU (PhysX requirement) + diag_values = np.random.rand(N, 3).astype(np.float32) + 0.1 + inertias = np.zeros((N, 9), dtype=np.float32) + inertias[:, 0] = diag_values[:, 0] + inertias[:, 4] = diag_values[:, 1] + inertias[:, 8] = diag_values[:, 2] + self._inertias = wp.array(inertias, dtype=wp.float32, device="cpu") + + # -- Actions (no-op for testing) -- + + def apply_forces_and_torques_at_position( + self, + force_data: wp.array | None = None, + torque_data: wp.array | None = None, + position_data: wp.array | None = None, + indices: wp.array | None = None, + is_global: bool = True, + ) -> None: + """Apply forces and torques at positions (no-op in mock). + + Args: + force_data: Forces to apply, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + torque_data: Torques to apply, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + position_data: Positions to apply forces at, shape (N, 3) or (len(indices), 3) with dtype=wp.float32. + indices: Optional indices of bodies to apply to. + is_global: Whether forces/torques are in global frame. + """ + pass # No-op for mock diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py new file mode 100644 index 000000000000..9bb2821a6ba3 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py @@ -0,0 +1,264 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidContactView.""" + +from __future__ import annotations + +import torch + + +class MockRigidContactView: + """Mock implementation of physx.RigidContactView for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidContactView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - net_contact_forces: (N*B, 3) - flattened net forces + - contact_force_matrix: (N*B, F, 3) - per-filter forces + - contact_data: tuple of 6 tensors (positions, normals, impulses, separations, num_found, patch_id) + - friction_data: tuple of 4 tensors (friction_forces, friction_impulses, friction_points, patch_id) + + Where: + - N = count (number of instances) + - B = num_bodies (bodies per instance) + - F = filter_count (number of filter bodies) + """ + + def __init__( + self, + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", + ): + """Initialize the mock rigid contact view. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation ("cpu" or "cuda"). + """ + self._count = count + self._num_bodies = num_bodies + self._filter_count = filter_count + self._max_contact_data_count = max_contact_data_count + self._device = device + + # Total number of bodies (flattened) + self._total_bodies = count * num_bodies + + # Internal state (lazily initialized) + self._net_contact_forces: torch.Tensor | None = None + self._contact_force_matrix: torch.Tensor | None = None + self._contact_positions: torch.Tensor | None = None + self._contact_normals: torch.Tensor | None = None + self._contact_impulses: torch.Tensor | None = None + self._contact_separations: torch.Tensor | None = None + self._contact_num_found: torch.Tensor | None = None + self._contact_patch_id: torch.Tensor | None = None + self._friction_forces: torch.Tensor | None = None + self._friction_impulses: torch.Tensor | None = None + self._friction_points: torch.Tensor | None = None + self._friction_patch_id: torch.Tensor | None = None + + # -- Properties -- + + @property + def filter_count(self) -> int: + """Number of filter bodies for contact filtering.""" + return self._filter_count + + @property + def count(self) -> int: + """Number of instances.""" + return self._count + + @property + def num_bodies(self) -> int: + """Number of bodies per instance.""" + return self._num_bodies + + # -- Getters -- + + def get_net_contact_forces(self, dt: float) -> torch.Tensor: + """Get net contact forces on all bodies. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tensor of shape (N*B, 3) with net forces per body. + """ + if self._net_contact_forces is None: + self._net_contact_forces = torch.zeros(self._total_bodies, 3, device=self._device) + return self._net_contact_forces.clone() + + def get_contact_force_matrix(self, dt: float) -> torch.Tensor: + """Get contact force matrix (per-filter forces). + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tensor of shape (N*B, F, 3) with forces per body per filter. + """ + if self._contact_force_matrix is None: + self._contact_force_matrix = torch.zeros(self._total_bodies, self._filter_count, 3, device=self._device) + return self._contact_force_matrix.clone() + + def get_contact_data( + self, dt: float + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Get detailed contact data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tuple of 6 tensors: + - contact_positions: (N*B, max_contacts, 3) - contact point positions + - contact_normals: (N*B, max_contacts, 3) - contact normals + - contact_impulses: (N*B, max_contacts, 3) - contact impulses + - contact_separations: (N*B, max_contacts) - contact separations + - contact_num_found: (N*B,) - number of contacts found + - contact_patch_id: (N*B, max_contacts) - contact patch IDs + """ + max_contacts = self._max_contact_data_count + + if self._contact_positions is None: + self._contact_positions = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._contact_normals is None: + self._contact_normals = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._contact_impulses is None: + self._contact_impulses = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._contact_separations is None: + self._contact_separations = torch.zeros(self._total_bodies, max_contacts, device=self._device) + if self._contact_num_found is None: + self._contact_num_found = torch.zeros(self._total_bodies, dtype=torch.int32, device=self._device) + if self._contact_patch_id is None: + self._contact_patch_id = torch.zeros( + self._total_bodies, max_contacts, dtype=torch.int32, device=self._device + ) + + return ( + self._contact_positions.clone(), + self._contact_normals.clone(), + self._contact_impulses.clone(), + self._contact_separations.clone(), + self._contact_num_found.clone(), + self._contact_patch_id.clone(), + ) + + def get_friction_data(self, dt: float) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Get friction data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tuple of 4 tensors: + - friction_forces: (N*B, max_contacts, 3) - friction forces + - friction_impulses: (N*B, max_contacts, 3) - friction impulses + - friction_points: (N*B, max_contacts, 3) - friction application points + - friction_patch_id: (N*B, max_contacts) - friction patch IDs + """ + max_contacts = self._max_contact_data_count + + if self._friction_forces is None: + self._friction_forces = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._friction_impulses is None: + self._friction_impulses = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._friction_points is None: + self._friction_points = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) + if self._friction_patch_id is None: + self._friction_patch_id = torch.zeros( + self._total_bodies, max_contacts, dtype=torch.int32, device=self._device + ) + + return ( + self._friction_forces.clone(), + self._friction_impulses.clone(), + self._friction_points.clone(), + self._friction_patch_id.clone(), + ) + + # -- Mock setters (direct test data injection) -- + + def set_mock_net_contact_forces(self, forces: torch.Tensor) -> None: + """Set mock net contact force data directly for testing. + + Args: + forces: Tensor of shape (N*B, 3). + """ + self._net_contact_forces = forces.to(self._device) + + def set_mock_contact_force_matrix(self, matrix: torch.Tensor) -> None: + """Set mock contact force matrix data directly for testing. + + Args: + matrix: Tensor of shape (N*B, F, 3). + """ + self._contact_force_matrix = matrix.to(self._device) + + def set_mock_contact_data( + self, + positions: torch.Tensor | None = None, + normals: torch.Tensor | None = None, + impulses: torch.Tensor | None = None, + separations: torch.Tensor | None = None, + num_found: torch.Tensor | None = None, + patch_id: torch.Tensor | None = None, + ) -> None: + """Set mock contact data directly for testing. + + Args: + positions: Contact positions, shape (N*B, max_contacts, 3). + normals: Contact normals, shape (N*B, max_contacts, 3). + impulses: Contact impulses, shape (N*B, max_contacts, 3). + separations: Contact separations, shape (N*B, max_contacts). + num_found: Number of contacts found, shape (N*B,). + patch_id: Contact patch IDs, shape (N*B, max_contacts). + """ + if positions is not None: + self._contact_positions = positions.to(self._device) + if normals is not None: + self._contact_normals = normals.to(self._device) + if impulses is not None: + self._contact_impulses = impulses.to(self._device) + if separations is not None: + self._contact_separations = separations.to(self._device) + if num_found is not None: + self._contact_num_found = num_found.to(self._device) + if patch_id is not None: + self._contact_patch_id = patch_id.to(self._device) + + def set_mock_friction_data( + self, + forces: torch.Tensor | None = None, + impulses: torch.Tensor | None = None, + points: torch.Tensor | None = None, + patch_id: torch.Tensor | None = None, + ) -> None: + """Set mock friction data directly for testing. + + Args: + forces: Friction forces, shape (N*B, max_contacts, 3). + impulses: Friction impulses, shape (N*B, max_contacts, 3). + points: Friction application points, shape (N*B, max_contacts, 3). + patch_id: Friction patch IDs, shape (N*B, max_contacts). + """ + if forces is not None: + self._friction_forces = forces.to(self._device) + if impulses is not None: + self._friction_impulses = impulses.to(self._device) + if points is not None: + self._friction_points = points.to(self._device) + if patch_id is not None: + self._friction_patch_id = patch_id.to(self._device) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view_warp.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view_warp.py new file mode 100644 index 000000000000..1ed26879330a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view_warp.py @@ -0,0 +1,300 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidContactView using Warp arrays.""" + +from __future__ import annotations + +import warp as wp + + +class MockRigidContactViewWarp: + """Mock implementation of physx.RigidContactView using Warp arrays for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidContactView using + flat float32 arrays (matching real PhysX behavior), allowing tests to run + without Isaac Sim or GPU simulation. + + Data Shapes (flat float32, matching real PhysX views): + - net_contact_forces: (N*B, 3) dtype=wp.float32 - flattened net forces + - contact_force_matrix: (N*B, F, 3) dtype=wp.float32 - per-filter forces + - contact_data: tuple of arrays with float32 for positions/normals/impulses + - friction_data: tuple of arrays with float32 for forces/impulses/points + + Where: + - N = count (number of instances) + - B = num_bodies (bodies per instance) + - F = filter_count (number of filter bodies) + """ + + def __init__( + self, + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", + ): + """Initialize the mock rigid contact view. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for array allocation ("cpu" or "cuda:N"). + """ + self._count = count + self._num_bodies = num_bodies + self._filter_count = filter_count + self._max_contact_data_count = max_contact_data_count + self._device = device + self._backend = "warp" + + # Total number of bodies (flattened) + self._total_bodies = count * num_bodies + + # Internal state (lazily initialized) + self._net_contact_forces: wp.array | None = None + self._contact_force_matrix: wp.array | None = None + self._contact_positions: wp.array | None = None + self._contact_normals: wp.array | None = None + self._contact_impulses: wp.array | None = None + self._contact_separations: wp.array | None = None + self._contact_num_found: wp.array | None = None + self._contact_patch_id: wp.array | None = None + self._friction_forces: wp.array | None = None + self._friction_impulses: wp.array | None = None + self._friction_points: wp.array | None = None + self._friction_patch_id: wp.array | None = None + + # -- Properties -- + + @property + def filter_count(self) -> int: + """Number of filter bodies for contact filtering.""" + return self._filter_count + + @property + def count(self) -> int: + """Number of instances.""" + return self._count + + @property + def num_bodies(self) -> int: + """Number of bodies per instance.""" + return self._num_bodies + + # -- Getters -- + + def get_net_contact_forces(self, dt: float) -> wp.array: + """Get net contact forces on all bodies. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Warp array of shape (N*B, 3) with dtype=wp.float32. + """ + if self._net_contact_forces is None: + self._net_contact_forces = wp.zeros((self._total_bodies, 3), dtype=wp.float32, device=self._device) + return wp.clone(self._net_contact_forces) + + def get_contact_force_matrix(self, dt: float) -> wp.array: + """Get contact force matrix (per-filter forces). + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Warp array of shape (N*B, F, 3) with dtype=wp.float32. + """ + if self._contact_force_matrix is None: + self._contact_force_matrix = wp.zeros( + (self._total_bodies, self._filter_count, 3), dtype=wp.float32, device=self._device + ) + return wp.clone(self._contact_force_matrix) + + def get_contact_data(self, dt: float) -> tuple[wp.array, wp.array, wp.array, wp.array, wp.array, wp.array]: + """Get detailed contact data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tuple of 6 arrays: + - contact_positions: (N*B, max_contacts, 3) dtype=wp.float32 + - contact_normals: (N*B, max_contacts, 3) dtype=wp.float32 + - contact_impulses: (N*B, max_contacts, 3) dtype=wp.float32 + - contact_separations: (N*B, max_contacts) dtype=wp.float32 + - contact_num_found: (N*B,) dtype=wp.int32 + - contact_patch_id: (N*B, max_contacts) dtype=wp.int32 + """ + max_contacts = self._max_contact_data_count + + if self._contact_positions is None: + self._contact_positions = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._contact_normals is None: + self._contact_normals = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._contact_impulses is None: + self._contact_impulses = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._contact_separations is None: + self._contact_separations = wp.zeros( + (self._total_bodies, max_contacts), dtype=wp.float32, device=self._device + ) + if self._contact_num_found is None: + self._contact_num_found = wp.zeros(self._total_bodies, dtype=wp.int32, device=self._device) + if self._contact_patch_id is None: + self._contact_patch_id = wp.zeros((self._total_bodies, max_contacts), dtype=wp.int32, device=self._device) + + return ( + wp.clone(self._contact_positions), + wp.clone(self._contact_normals), + wp.clone(self._contact_impulses), + wp.clone(self._contact_separations), + wp.clone(self._contact_num_found), + wp.clone(self._contact_patch_id), + ) + + def get_friction_data(self, dt: float) -> tuple[wp.array, wp.array, wp.array, wp.array]: + """Get friction data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tuple of 4 arrays: + - friction_forces: (N*B, max_contacts, 3) dtype=wp.float32 + - friction_impulses: (N*B, max_contacts, 3) dtype=wp.float32 + - friction_points: (N*B, max_contacts, 3) dtype=wp.float32 + - friction_patch_id: (N*B, max_contacts) dtype=wp.int32 + """ + max_contacts = self._max_contact_data_count + + if self._friction_forces is None: + self._friction_forces = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._friction_impulses is None: + self._friction_impulses = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._friction_points is None: + self._friction_points = wp.zeros( + (self._total_bodies, max_contacts, 3), dtype=wp.float32, device=self._device + ) + if self._friction_patch_id is None: + self._friction_patch_id = wp.zeros((self._total_bodies, max_contacts), dtype=wp.int32, device=self._device) + + return ( + wp.clone(self._friction_forces), + wp.clone(self._friction_impulses), + wp.clone(self._friction_points), + wp.clone(self._friction_patch_id), + ) + + # -- Mock setters (direct test data injection) -- + + def set_mock_net_contact_forces(self, forces: wp.array) -> None: + """Set mock net contact force data directly for testing. + + Args: + forces: Warp array of shape (N*B, 3) with dtype=wp.float32. + """ + self._net_contact_forces = wp.clone(forces) + if self._net_contact_forces.device.alias != self._device: + self._net_contact_forces = self._net_contact_forces.to(self._device) + + def set_mock_contact_force_matrix(self, matrix: wp.array) -> None: + """Set mock contact force matrix data directly for testing. + + Args: + matrix: Warp array of shape (N*B, F, 3) with dtype=wp.float32. + """ + self._contact_force_matrix = wp.clone(matrix) + if self._contact_force_matrix.device.alias != self._device: + self._contact_force_matrix = self._contact_force_matrix.to(self._device) + + def set_mock_contact_data( + self, + positions: wp.array | None = None, + normals: wp.array | None = None, + impulses: wp.array | None = None, + separations: wp.array | None = None, + num_found: wp.array | None = None, + patch_id: wp.array | None = None, + ) -> None: + """Set mock contact data directly for testing. + + Args: + positions: Contact positions, shape (N*B, max_contacts, 3) dtype=wp.float32. + normals: Contact normals, shape (N*B, max_contacts, 3) dtype=wp.float32. + impulses: Contact impulses, shape (N*B, max_contacts, 3) dtype=wp.float32. + separations: Contact separations, shape (N*B, max_contacts) dtype=wp.float32. + num_found: Number of contacts found, shape (N*B,) dtype=wp.int32. + patch_id: Contact patch IDs, shape (N*B, max_contacts) dtype=wp.int32. + """ + if positions is not None: + self._contact_positions = wp.clone(positions) + if self._contact_positions.device.alias != self._device: + self._contact_positions = self._contact_positions.to(self._device) + if normals is not None: + self._contact_normals = wp.clone(normals) + if self._contact_normals.device.alias != self._device: + self._contact_normals = self._contact_normals.to(self._device) + if impulses is not None: + self._contact_impulses = wp.clone(impulses) + if self._contact_impulses.device.alias != self._device: + self._contact_impulses = self._contact_impulses.to(self._device) + if separations is not None: + self._contact_separations = wp.clone(separations) + if self._contact_separations.device.alias != self._device: + self._contact_separations = self._contact_separations.to(self._device) + if num_found is not None: + self._contact_num_found = wp.clone(num_found) + if self._contact_num_found.device.alias != self._device: + self._contact_num_found = self._contact_num_found.to(self._device) + if patch_id is not None: + self._contact_patch_id = wp.clone(patch_id) + if self._contact_patch_id.device.alias != self._device: + self._contact_patch_id = self._contact_patch_id.to(self._device) + + def set_mock_friction_data( + self, + forces: wp.array | None = None, + impulses: wp.array | None = None, + points: wp.array | None = None, + patch_id: wp.array | None = None, + ) -> None: + """Set mock friction data directly for testing. + + Args: + forces: Friction forces, shape (N*B, max_contacts, 3) dtype=wp.float32. + impulses: Friction impulses, shape (N*B, max_contacts, 3) dtype=wp.float32. + points: Friction application points, shape (N*B, max_contacts, 3) dtype=wp.float32. + patch_id: Friction patch IDs, shape (N*B, max_contacts) dtype=wp.int32. + """ + if forces is not None: + self._friction_forces = wp.clone(forces) + if self._friction_forces.device.alias != self._device: + self._friction_forces = self._friction_forces.to(self._device) + if impulses is not None: + self._friction_impulses = wp.clone(impulses) + if self._friction_impulses.device.alias != self._device: + self._friction_impulses = self._friction_impulses.to(self._device) + if points is not None: + self._friction_points = wp.clone(points) + if self._friction_points.device.alias != self._device: + self._friction_points = self._friction_points.to(self._device) + if patch_id is not None: + self._friction_patch_id = wp.clone(patch_id) + if self._friction_patch_id.device.alias != self._device: + self._friction_patch_id = self._friction_patch_id.to(self._device) diff --git a/source/isaaclab_physx/isaaclab_physx/video_recording/__init__.py b/source/isaaclab_physx/isaaclab_physx/video_recording/__init__.py new file mode 100644 index 000000000000..8deb00ddc466 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/video_recording/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Isaac Sim Kit perspective video recording.""" diff --git a/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video.py b/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video.py new file mode 100644 index 000000000000..4796340f2724 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Isaac Sim Kit perspective RGB capture via ``/OmniverseKit_Persp`` and omni.replicator.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from .isaacsim_kit_perspective_video_cfg import IsaacsimKitPerspectiveVideoCfg + + +class IsaacsimKitPerspectiveVideo: + """Stateful capture of one RGB frame per call from the Kit perspective camera.""" + + def __init__(self, cfg: IsaacsimKitPerspectiveVideoCfg): + self.cfg = cfg + self._rgb_annotator = None + self._render_product = None + + def render_rgb_array(self) -> np.ndarray: + """Return one RGB frame. Blank frame during warmup; raises on other failures.""" + import omni.kit.app + import omni.replicator.core as rep + + omni.kit.app.get_app().update() + + h, w = self.cfg.window_height, self.cfg.window_width + if self._rgb_annotator is None: + from isaacsim.core.rendering_manager import ViewportManager + + ViewportManager.set_camera_view( + self.cfg.camera_prim_path, + eye=list(self.cfg.eye), + target=list(self.cfg.lookat), + ) + self._render_product = rep.create.render_product(self.cfg.camera_prim_path, (w, h)) + self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu") + self._rgb_annotator.attach([self._render_product]) + + rgb_data = self._rgb_annotator.get_data() + if isinstance(rgb_data, dict): + rgb_data = rgb_data.get("data", np.array([], dtype=np.uint8)) + rgb_data = np.asarray(rgb_data, dtype=np.uint8) + if rgb_data.size == 0: + return np.zeros((h, w, 3), dtype=np.uint8) + if rgb_data.ndim == 1: + rgb_data = rgb_data.reshape(h, w, -1) + return rgb_data[:, :, :3] + + +def create_isaacsim_kit_perspective_video(cfg: IsaacsimKitPerspectiveVideoCfg) -> IsaacsimKitPerspectiveVideo: + """Instantiate the perspective video capture implementation from ``cfg.class_type``.""" + ct = cfg.class_type + if isinstance(ct, type): + return ct(cfg) + from isaaclab.utils.string import string_to_callable + + cls = string_to_callable(str(ct)) + return cls(cfg) diff --git a/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video_cfg.py b/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video_cfg.py new file mode 100644 index 000000000000..c3f9280976aa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/video_recording/isaacsim_kit_perspective_video_cfg.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Isaac Sim Kit perspective RGB capture (OmniverseKit_Persp + Replicator).""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from isaaclab.utils import configclass + +if TYPE_CHECKING: + pass + + +@configclass +class IsaacsimKitPerspectiveVideoCfg: + """Settings for capturing a perspective RGB frame from the Kit viewport camera.""" + + class_type: type[Any] | str = ( + "isaaclab_physx.video_recording.isaacsim_kit_perspective_video:IsaacsimKitPerspectiveVideo" + ) + """Implementation class; default is + :class:`~isaaclab_physx.video_recording.isaacsim_kit_perspective_video.IsaacsimKitPerspectiveVideo`.""" + + camera_prim_path: str = "/OmniverseKit_Persp" + """Viewport camera prim used for the render product.""" + + eye: tuple[float, float, float] = (7.5, 7.5, 7.5) + """Camera position in world space (metres).""" + + lookat: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Camera look-at target in world space (metres).""" + + window_width: int = 1280 + """Output width in pixels.""" + + window_height: int = 720 + """Output height in pixels.""" diff --git a/source/isaaclab_physx/pyproject.toml b/source/isaaclab_physx/pyproject.toml new file mode 100644 index 000000000000..31dce8d230ec --- /dev/null +++ b/source/isaaclab_physx/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools<82.0.0", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_physx/setup.py b/source/isaaclab_physx/setup.py new file mode 100644 index 000000000000..1e917e938c2b --- /dev/null +++ b/source/isaaclab_physx/setup.py @@ -0,0 +1,74 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_physx' python package.""" + +import os + +import toml +from setuptools import setup + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +# Minimum dependencies required prior to installation +INSTALL_REQUIRES = [] + +EXTRAS_REQUIRE = { + "newton": [ + "newton @ git+https://github.com/newton-physics/newton.git@2684d75bfa4bb8b058a93b81c458a74b7701c997", + ], +} + +# Installation operation +setup( + name="isaaclab_physx", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + license="BSD-3-Clause", + include_package_data=True, + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", + install_requires=INSTALL_REQUIRES, + extras_require=EXTRAS_REQUIRE, + packages=[ + "isaaclab_physx", + "isaaclab_physx.assets", + "isaaclab_physx.assets.articulation", + "isaaclab_physx.assets.deformable_object", + "isaaclab_physx.assets.rigid_object", + "isaaclab_physx.assets.rigid_object_collection", + "isaaclab_physx.assets.surface_gripper", + "isaaclab_physx.cloner", + "isaaclab_physx.physics", + "isaaclab_physx.renderers", + "isaaclab_physx.scene_data_providers", + "isaaclab_physx.sensors", + "isaaclab_physx.sensors.contact_sensor", + "isaaclab_physx.sensors.frame_transformer", + "isaaclab_physx.sensors.imu", + "isaaclab_physx.sim", + "isaaclab_physx.sim.schemas", + "isaaclab_physx.sim.spawners", + "isaaclab_physx.sim.spawners.materials", + "isaaclab_physx.test", + "isaaclab_physx.test.benchmark", + "isaaclab_physx.test.mock_interfaces", + "isaaclab_physx.test.mock_interfaces.utils", + "isaaclab_physx.test.mock_interfaces.views", + ], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py similarity index 73% rename from source/isaaclab/test/assets/test_articulation.py rename to source/isaaclab_physx/test/assets/test_articulation.py index 9a983ab34c1d..3687dae5961d 100644 --- a/source/isaaclab/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -17,21 +17,23 @@ """Rest everything follows.""" -import ctypes +import sys import pytest import torch +import warp as wp +from isaaclab_physx.assets import Articulation import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils from isaaclab.actuators import ActuatorBase, IdealPDActuatorCfg, ImplicitActuatorCfg -from isaaclab.assets import Articulation, ArticulationCfg +from isaaclab.assets import ArticulationCfg from isaaclab.envs.mdp.terminations import joint_effort_out_of_limit from isaaclab.managers import SceneEntityCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.version import get_isaac_sim_version, has_kit ## # Pre-defined configs @@ -106,7 +108,7 @@ def generate_articulation_cfg( init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.0), joint_pos=({"RevoluteJoint": 1.5708}), - rot=(0.7071055, 0.7071081, 0, 0), + rot=(0.7071081, 0, 0, 0.7071055), ), ) elif articulation_type == "single_joint_explicit": @@ -220,8 +222,8 @@ def test_initialization_floating_base_non_root(sim, num_articulations, device, a articulation_cfg = generate_articulation_cfg(articulation_type="humanoid", stiffness=0.0, damping=0.0) articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -231,17 +233,17 @@ def test_initialization_floating_base_non_root(sim, num_articulations, device, a # Check that is fixed base assert not articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 21) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 21) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -277,8 +279,8 @@ def test_initialization_floating_base(sim, num_articulations, device, add_ground articulation_cfg = generate_articulation_cfg(articulation_type="anymal", stiffness=0.0, damping=0.0) articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -287,19 +289,19 @@ def test_initialization_floating_base(sim, num_articulations, device, add_ground # Check that floating base assert not articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 12) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 12) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -334,8 +336,8 @@ def test_initialization_fixed_base(sim, num_articulations, device): articulation_cfg = generate_articulation_cfg(articulation_type="panda") articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -344,19 +346,19 @@ def test_initialization_fixed_base(sim, num_articulations, device): # Check that fixed base assert articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 9) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 9) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -371,10 +373,12 @@ def test_initialization_fixed_base(sim, num_articulations, device): articulation.update(sim.cfg.dt) # check that the root is at the correct state - its default state as it is fixed base - default_root_state = articulation.data.default_root_state.clone() - default_root_state[:, :3] = default_root_state[:, :3] + translations + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations - torch.testing.assert_close(articulation.data.root_state_w, default_root_state) + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -398,8 +402,8 @@ def test_initialization_fixed_base_single_joint(sim, num_articulations, device, articulation_cfg = generate_articulation_cfg(articulation_type="single_joint_implicit") articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -408,19 +412,19 @@ def test_initialization_fixed_base_single_joint(sim, num_articulations, device, # Check that fixed base assert articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 1) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 1) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -435,10 +439,12 @@ def test_initialization_fixed_base_single_joint(sim, num_articulations, device, articulation.update(sim.cfg.dt) # check that the root is at the correct state - its default state as it is fixed base - default_root_state = articulation.data.default_root_state.clone() - default_root_state[:, :3] = default_root_state[:, :3] + translations + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations - torch.testing.assert_close(articulation.data.root_state_w, default_root_state) + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -461,8 +467,8 @@ def test_initialization_hand_with_tendons(sim, num_articulations, device): articulation_cfg = generate_articulation_cfg(articulation_type="shadow_hand") articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -471,17 +477,17 @@ def test_initialization_hand_with_tendons(sim, num_articulations, device): # Check that fixed base assert articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 24) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 24) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- actuator type for actuator_name, actuator in articulation.actuators.items(): is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) @@ -517,8 +523,8 @@ def test_initialization_floating_base_made_fixed_base(sim, num_articulations, de articulation_cfg.spawn.articulation_props.fix_root_link = True articulation, translations = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -527,17 +533,17 @@ def test_initialization_floating_base_made_fixed_base(sim, num_articulations, de # Check that is fixed base assert articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 12) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 12) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # Simulate physics @@ -548,10 +554,12 @@ def test_initialization_floating_base_made_fixed_base(sim, num_articulations, de articulation.update(sim.cfg.dt) # check that the root is at the correct state - its default state as it is fixed base - default_root_state = articulation.data.default_root_state.clone() - default_root_state[:, :3] = default_root_state[:, :3] + translations + default_root_pose = articulation.data.default_root_pose.torch.clone() + default_root_vel = articulation.data.default_root_vel.torch.clone() + default_root_pose[:, :3] = default_root_pose[:, :3] + translations - torch.testing.assert_close(articulation.data.root_state_w, default_root_state) + torch.testing.assert_close(articulation.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(articulation.data.root_com_vel_w.torch, default_root_vel) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -576,8 +584,8 @@ def test_initialization_fixed_base_made_floating_base(sim, num_articulations, de articulation_cfg.spawn.articulation_props.fix_root_link = False articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=sim.device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -586,17 +594,17 @@ def test_initialization_fixed_base_made_floating_base(sim, num_articulations, de # Check that is floating base assert not articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 9) # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # Simulate physics @@ -631,8 +639,8 @@ def test_out_of_range_default_joint_pos(sim, num_articulations, device, add_grou articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim with pytest.raises(ValueError): @@ -655,8 +663,8 @@ def test_out_of_range_default_joint_vel(sim, device): } articulation = Articulation(articulation_cfg) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim with pytest.raises(ValueError): @@ -690,51 +698,51 @@ def test_joint_pos_limits(sim, num_articulations, device, add_ground_plane): assert articulation.is_initialized # Get current default joint pos - default_joint_pos = articulation._data.default_joint_pos.clone() + default_joint_pos = articulation._data.default_joint_pos.torch.clone() # Set new joint limits limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) limits[..., 0] = (torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0) * -1.0 limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0 - articulation.write_joint_position_limit_to_sim(limits) + articulation.write_joint_position_limit_to_sim_index(limits=limits) # Check new limits are in place - torch.testing.assert_close(articulation._data.joint_pos_limits, limits) - torch.testing.assert_close(articulation._data.default_joint_pos, default_joint_pos) + torch.testing.assert_close(articulation._data.joint_pos_limits.torch, limits) + torch.testing.assert_close(articulation._data.default_joint_pos.torch, default_joint_pos) # Set new joint limits with indexing - env_ids = torch.arange(1, device=device) - joint_ids = torch.arange(2, device=device) + env_ids = torch.arange(1, device=device, dtype=torch.int32) + joint_ids = torch.arange(2, device=device, dtype=torch.int32) limits = torch.zeros(env_ids.shape[0], joint_ids.shape[0], 2, device=device) limits[..., 0] = (torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) + 5.0) * -1.0 limits[..., 1] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) + 5.0 - articulation.write_joint_position_limit_to_sim(limits, env_ids=env_ids, joint_ids=joint_ids) + articulation.write_joint_position_limit_to_sim_index(limits=limits, env_ids=env_ids, joint_ids=joint_ids) # Check new limits are in place - torch.testing.assert_close(articulation._data.joint_pos_limits[env_ids][:, joint_ids], limits) - torch.testing.assert_close(articulation._data.default_joint_pos, default_joint_pos) + torch.testing.assert_close(articulation._data.joint_pos_limits.torch[env_ids][:, joint_ids], limits) + torch.testing.assert_close(articulation._data.default_joint_pos.torch, default_joint_pos) # Set new joint limits that invalidate default joint pos limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) limits[..., 0] = torch.rand(num_articulations, articulation.num_joints, device=device) * -0.1 limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) * 0.1 - articulation.write_joint_position_limit_to_sim(limits) + articulation.write_joint_position_limit_to_sim_index(limits=limits) # Check if all values are within the bounds - within_bounds = (articulation._data.default_joint_pos >= limits[..., 0]) & ( - articulation._data.default_joint_pos <= limits[..., 1] - ) + default_joint_pos_torch = articulation._data.default_joint_pos.torch + within_bounds = (default_joint_pos_torch >= limits[..., 0]) & (default_joint_pos_torch <= limits[..., 1]) assert torch.all(within_bounds) # Set new joint limits that invalidate default joint pos with indexing limits = torch.zeros(env_ids.shape[0], joint_ids.shape[0], 2, device=device) limits[..., 0] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) * -0.1 limits[..., 1] = torch.rand(env_ids.shape[0], joint_ids.shape[0], device=device) * 0.1 - articulation.write_joint_position_limit_to_sim(limits, env_ids=env_ids, joint_ids=joint_ids) + articulation.write_joint_position_limit_to_sim_index(limits=limits, env_ids=env_ids, joint_ids=joint_ids) # Check if all values are within the bounds - within_bounds = (articulation._data.default_joint_pos[env_ids][:, joint_ids] >= limits[..., 0]) & ( - articulation._data.default_joint_pos[env_ids][:, joint_ids] <= limits[..., 1] + default_joint_pos_torch = articulation._data.default_joint_pos.torch + within_bounds = (default_joint_pos_torch[env_ids][:, joint_ids] >= limits[..., 0]) & ( + default_joint_pos_torch[env_ids][:, joint_ids] <= limits[..., 1] ) assert torch.all(within_bounds) @@ -760,14 +768,14 @@ def __init__(self, art): assert articulation.is_initialized # Case A: no clipping → should NOT terminate - articulation._data.computed_torque.zero_() - articulation._data.applied_torque.zero_() + articulation._data.computed_torque.torch.zero_() + articulation._data.applied_torque.torch.zero_() out = joint_effort_out_of_limit(env, robot_all) # [N] assert torch.all(~out) # Case B: simulate clipping → should terminate - articulation._data.computed_torque.fill_(100.0) # pretend controller commanded 100 - articulation._data.applied_torque.fill_(50.0) # pretend actuator clipped to 50 + articulation._data.computed_torque.torch.fill_(100.0) # pretend controller commanded 100 + articulation._data.applied_torque.torch.fill_(50.0) # pretend actuator clipped to 50 out = joint_effort_out_of_limit(env, robot_all) # [N] assert torch.all(out) @@ -797,15 +805,16 @@ def test_external_force_buffer(sim, num_articulations, device): body_ids, _ = articulation.find_bodies("base") # reset root state - root_state = articulation.data.default_root_state.clone() - articulation.write_root_state_to_sim(root_state) + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) # reset dof state joint_pos, joint_vel = ( - articulation.data.default_joint_pos, - articulation.data.default_joint_vel, + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, ) - articulation.write_joint_state_to_sim(joint_pos, joint_vel) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) # reset articulation articulation.reset() @@ -827,27 +836,26 @@ def test_external_force_buffer(sim, num_articulations, device): external_wrench_b[:, :, 3] = force # apply force - # TODO: Replace with wrench composer once the deprecation is complete - articulation.set_external_force_and_torque( - external_wrench_b[..., :3], - external_wrench_b[..., 3:], + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], body_ids=body_ids, ) # check if the articulation's force and torque buffers are correctly updated for i in range(num_articulations): - assert articulation.permanent_wrench_composer.composed_force_as_torch[i, 0, 0].item() == force - assert articulation.permanent_wrench_composer.composed_torque_as_torch[i, 0, 0].item() == force + assert articulation.permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert articulation.permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force # Check if the instantaneous wrench is correctly added to the permanent wrench - articulation.instantaneous_wrench_composer.add_forces_and_torques( + articulation.instantaneous_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids, ) # apply action to the articulation - articulation.set_joint_position_target(articulation.data.default_joint_pos.clone()) + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) articulation.write_data_to_sim() # perform step @@ -886,27 +894,25 @@ def test_external_force_on_single_body(sim, num_articulations, device): # Now we are ready! for _ in range(5): # reset root state - root_state = articulation.data.default_root_state.clone() - - articulation.write_root_pose_to_sim(root_state[:, :7]) - articulation.write_root_velocity_to_sim(root_state[:, 7:]) + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) # reset dof state joint_pos, joint_vel = ( - articulation.data.default_joint_pos, - articulation.data.default_joint_vel, + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, ) - articulation.write_joint_state_to_sim(joint_pos, joint_vel) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) # reset articulation articulation.reset() # apply force - # TODO: Replace with wrench composer once the deprecation is complete - articulation.set_external_force_and_torque( - external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids ) # perform simulation for _ in range(100): # apply action to the articulation - articulation.set_joint_position_target(articulation.data.default_joint_pos.clone()) + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) articulation.write_data_to_sim() # perform step sim.step() @@ -914,7 +920,7 @@ def test_external_force_on_single_body(sim, num_articulations, device): articulation.update(sim.cfg.dt) # check condition that the articulations have fallen down for i in range(num_articulations): - assert articulation.data.root_pos_w[i, 2].item() < 0.2 + assert articulation.data.root_pos_w.torch[i, 2].item() < 0.2 @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -955,24 +961,25 @@ def test_external_force_on_single_body_at_position(sim, num_articulations, devic # Now we are ready! for i in range(5): # reset root state - root_state = articulation.data.default_root_state.clone() - root_state[0, 0] = 2.5 # space them apart by 2.5m + root_pose = articulation.data.default_root_pose.torch.clone() + root_pose[0, 0] = 2.5 # space them apart by 2.5m - articulation.write_root_pose_to_sim(root_state[:, :7]) - articulation.write_root_velocity_to_sim(root_state[:, 7:]) + articulation.write_root_pose_to_sim_index(root_pose=root_pose) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) # reset dof state joint_pos, joint_vel = ( - articulation.data.default_joint_pos, - articulation.data.default_joint_vel, + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, ) - articulation.write_joint_state_to_sim(joint_pos, joint_vel) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) # reset articulation articulation.reset() # apply force is_global = False if i % 2 == 0: - body_com_pos_w = articulation.data.body_com_pos_w[:, body_ids, :3] + body_com_pos_w = articulation.data.body_com_pos_w.torch[:, body_ids, :3] # is_global = True external_wrench_positions_b[..., 0] = 0.0 external_wrench_positions_b[..., 1] = 1.0 @@ -983,14 +990,14 @@ def test_external_force_on_single_body_at_position(sim, num_articulations, devic external_wrench_positions_b[..., 1] = 1.0 external_wrench_positions_b[..., 2] = 0.0 - articulation.permanent_wrench_composer.set_forces_and_torques( + articulation.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, body_ids=body_ids, is_global=is_global, ) - articulation.permanent_wrench_composer.add_forces_and_torques( + articulation.permanent_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, @@ -1000,7 +1007,7 @@ def test_external_force_on_single_body_at_position(sim, num_articulations, devic # perform simulation for _ in range(100): # apply action to the articulation - articulation.set_joint_position_target(articulation.data.default_joint_pos.clone()) + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) articulation.write_data_to_sim() # perform step sim.step() @@ -1008,7 +1015,7 @@ def test_external_force_on_single_body_at_position(sim, num_articulations, devic articulation.update(sim.cfg.dt) # check condition that the articulations have fallen down for i in range(num_articulations): - assert articulation.data.root_pos_w[i, 2].item() < 0.2 + assert articulation.data.root_pos_w.torch[i, 2].item() < 0.2 @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1041,25 +1048,25 @@ def test_external_force_on_multiple_bodies(sim, num_articulations, device): # Now we are ready! for _ in range(5): # reset root state - articulation.write_root_pose_to_sim(articulation.data.default_root_state.clone()[:, :7]) - articulation.write_root_velocity_to_sim(articulation.data.default_root_state.clone()[:, 7:]) + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) # reset dof state joint_pos, joint_vel = ( - articulation.data.default_joint_pos, - articulation.data.default_joint_vel, + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, ) - articulation.write_joint_state_to_sim(joint_pos, joint_vel) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) # reset articulation articulation.reset() # apply force - # TODO: Replace with wrench composer once the deprecation is complete - articulation.set_external_force_and_torque( - external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids + articulation.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids ) # perform simulation for _ in range(100): # apply action to the articulation - articulation.set_joint_position_target(articulation.data.default_joint_pos.clone()) + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) articulation.write_data_to_sim() # perform step sim.step() @@ -1068,7 +1075,7 @@ def test_external_force_on_multiple_bodies(sim, num_articulations, device): # check condition for i in range(num_articulations): # since there is a moment applied on the articulation, the articulation should rotate - assert articulation.data.root_ang_vel_w[i, 2].item() > 0.1 + assert articulation.data.root_ang_vel_w.torch[i, 2].item() > 0.1 @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1110,20 +1117,21 @@ def test_external_force_on_multiple_bodies_at_position(sim, num_articulations, d # Now we are ready! for i in range(5): # reset root state - articulation.write_root_pose_to_sim(articulation.data.default_root_state.clone()[:, :7]) - articulation.write_root_velocity_to_sim(articulation.data.default_root_state.clone()[:, 7:]) + articulation.write_root_pose_to_sim_index(root_pose=articulation.data.default_root_pose.torch.clone()) + articulation.write_root_velocity_to_sim_index(root_velocity=articulation.data.default_root_vel.torch.clone()) # reset dof state joint_pos, joint_vel = ( - articulation.data.default_joint_pos, - articulation.data.default_joint_vel, + articulation.data.default_joint_pos.torch, + articulation.data.default_joint_vel.torch, ) - articulation.write_joint_state_to_sim(joint_pos, joint_vel) + articulation.write_joint_position_to_sim_index(position=joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=joint_vel) # reset articulation articulation.reset() is_global = False if i % 2 == 0: - body_com_pos_w = articulation.data.body_com_pos_w[:, body_ids, :3] + body_com_pos_w = articulation.data.body_com_pos_w.torch[:, body_ids, :3] is_global = True external_wrench_positions_b[..., 0] = 0.0 external_wrench_positions_b[..., 1] = 1.0 @@ -1135,14 +1143,14 @@ def test_external_force_on_multiple_bodies_at_position(sim, num_articulations, d external_wrench_positions_b[..., 2] = 0.0 # apply force - articulation.permanent_wrench_composer.set_forces_and_torques( + articulation.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, body_ids=body_ids, is_global=is_global, ) - articulation.permanent_wrench_composer.add_forces_and_torques( + articulation.permanent_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, @@ -1152,7 +1160,7 @@ def test_external_force_on_multiple_bodies_at_position(sim, num_articulations, d # perform simulation for _ in range(100): # apply action to the articulation - articulation.set_joint_position_target(articulation.data.default_joint_pos.clone()) + articulation.set_joint_position_target_index(target=articulation.data.default_joint_pos.torch.clone()) articulation.write_data_to_sim() # perform step sim.step() @@ -1161,7 +1169,7 @@ def test_external_force_on_multiple_bodies_at_position(sim, num_articulations, d # check condition for i in range(num_articulations): # since there is a moment applied on the articulation, the articulation should rotate - assert torch.abs(articulation.data.root_ang_vel_w[i, 2]).item() > 0.1 + assert torch.abs(articulation.data.root_ang_vel_w.torch[i, 2]).item() > 0.1 @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1334,9 +1342,9 @@ def test_setting_velocity_limit_implicit(sim, num_articulations, device, vel_lim sim.reset() # read the values set into the simulation - physx_vel_limit = articulation.root_physx_view.get_dof_max_velocities().to(device) + physx_vel_limit = wp.to_torch(articulation.root_view.get_dof_max_velocities()).to(device) # check data buffer - torch.testing.assert_close(articulation.data.joint_velocity_limits, physx_vel_limit) + torch.testing.assert_close(articulation.data.joint_velocity_limits.torch, physx_vel_limit) # check actuator has simulation velocity limit torch.testing.assert_close(articulation.actuators["joint"].velocity_limit_sim, physx_vel_limit) # check that both values match for velocity limit @@ -1383,12 +1391,12 @@ def test_setting_velocity_limit_explicit(sim, num_articulations, device, vel_lim sim.reset() # collect limit init values - physx_vel_limit = articulation.root_physx_view.get_dof_max_velocities().to(device) + physx_vel_limit = wp.to_torch(articulation.root_view.get_dof_max_velocities()).to(device) actuator_vel_limit = articulation.actuators["joint"].velocity_limit actuator_vel_limit_sim = articulation.actuators["joint"].velocity_limit_sim # check data buffer for joint_velocity_limits_sim - torch.testing.assert_close(articulation.data.joint_velocity_limits, physx_vel_limit) + torch.testing.assert_close(articulation.data.joint_velocity_limits.torch, physx_vel_limit) # check actuator velocity_limit_sim is set to physx torch.testing.assert_close(actuator_vel_limit_sim, physx_vel_limit) @@ -1447,7 +1455,7 @@ def test_setting_effort_limit_implicit(sim, num_articulations, device, effort_li sim.reset() # obtain the physx effort limits - physx_effort_limit = articulation.root_physx_view.get_dof_max_forces().to(device=device) + physx_effort_limit = wp.to_torch(articulation.root_view.get_dof_max_forces()).to(device=device) # check that the two are equivalent torch.testing.assert_close( @@ -1501,7 +1509,7 @@ def test_setting_effort_limit_explicit(sim, num_articulations, device, effort_li usd_default_effort_limit = 80.0 # collect limit init values - physx_effort_limit = articulation.root_physx_view.get_dof_max_forces().to(device) + physx_effort_limit = wp.to_torch(articulation.root_view.get_dof_max_forces()).to(device) actuator_effort_limit = articulation.actuators["joint"].effort_limit actuator_effort_limit_sim = articulation.actuators["joint"].effort_limit_sim @@ -1551,33 +1559,28 @@ def test_reset(sim, num_articulations, device): # Reset should zero external forces and torques assert not articulation._instantaneous_wrench_composer.active assert not articulation._permanent_wrench_composer.active - assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque_as_torch) == 0 - assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque_as_torch) == 0 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque.torch) == 0 if num_articulations > 1: num_bodies = articulation.num_bodies - # TODO: Replace with wrench composer once the deprecation is complete - articulation.set_external_force_and_torque( + articulation.permanent_wrench_composer.set_forces_and_torques_index( forces=torch.ones((num_articulations, num_bodies, 3), device=device), torques=torch.ones((num_articulations, num_bodies, 3), device=device), ) - articulation.instantaneous_wrench_composer.add_forces_and_torques( + articulation.instantaneous_wrench_composer.add_forces_and_torques_index( forces=torch.ones((num_articulations, num_bodies, 3), device=device), torques=torch.ones((num_articulations, num_bodies, 3), device=device), ) articulation.reset(env_ids=torch.tensor([0], device=device)) assert articulation._instantaneous_wrench_composer.active assert articulation._permanent_wrench_composer.active - assert ( - torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force_as_torch) == num_bodies * 3 - ) - assert ( - torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque_as_torch) == num_bodies * 3 - ) - assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force_as_torch) == num_bodies * 3 - assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque_as_torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_force.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._instantaneous_wrench_composer.composed_torque.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_force.torch) == num_bodies * 3 + assert torch.count_nonzero(articulation._permanent_wrench_composer.composed_torque.torch) == num_bodies * 3 @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1601,11 +1604,11 @@ def test_apply_joint_command(sim, num_articulations, device, add_ground_plane): articulation.update(sim.cfg.dt) # reset dof state - joint_pos = articulation.data.default_joint_pos + joint_pos = articulation.data.default_joint_pos.torch.clone() joint_pos[:, 3] = 0.0 # apply action to the articulation - articulation.set_joint_position_target(joint_pos) + articulation.set_joint_position_target_index(target=joint_pos) articulation.write_data_to_sim() for _ in range(100): @@ -1617,7 +1620,7 @@ def test_apply_joint_command(sim, num_articulations, device, add_ground_plane): # Check that current joint position is not the same as default joint position, meaning # the articulation moved. We can't check that it reached its desired joint position as the gains # are not properly tuned - assert not torch.allclose(articulation.data.joint_pos, joint_pos) + assert not torch.allclose(articulation.data.joint_pos.torch, joint_pos) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1641,9 +1644,9 @@ def test_body_root_state(sim, num_articulations, device, with_offset): sim._app_control_on_stop_handle = None articulation_cfg = generate_articulation_cfg(articulation_type="single_joint_implicit") articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) - env_idx = torch.tensor([x for x in range(num_articulations)], device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1, "Boundedness of articulation is incorrect" + env_idx = torch.tensor([x for x in range(num_articulations)], device=device, dtype=torch.int32) + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10, "Possible reference leak for articulation" # Play sim sim.reset() # Check if articulation is initialized @@ -1651,22 +1654,28 @@ def test_body_root_state(sim, num_articulations, device, with_offset): # Check that fixed base assert articulation.is_fixed_base, "Articulation is not a fixed base" + # Resolve body indices by name (ordering may differ across physics backends) + root_idx = articulation.body_names.index("CenterPivot") + arm_idx = articulation.body_names.index("Arm") + # change center of mass offset from link frame if with_offset: offset = [0.5, 0.0, 0.0] else: offset = [0.0, 0.0, 0.0] - # create com offsets + # create com offsets — apply offset to the Arm body num_bodies = articulation.num_bodies - com = articulation.root_physx_view.get_coms() + com = wp.to_torch(articulation.root_view.get_coms()) link_offset = [1.0, 0.0, 0.0] # the offset from CenterPivot to Arm frames new_com = torch.tensor(offset, device=device).repeat(num_articulations, 1, 1) - com[:, 1, :3] = new_com.squeeze(-2) - articulation.root_physx_view.set_coms(com.cpu(), env_idx.cpu()) + com[:, arm_idx, :3] = new_com.squeeze(-2) + articulation.root_view.set_coms( + wp.from_torch(com.cpu(), dtype=wp.float32), wp.from_torch(env_idx.cpu(), dtype=wp.int32) + ) # check they are set - torch.testing.assert_close(articulation.root_physx_view.get_coms(), com.cpu()) + torch.testing.assert_close(wp.to_torch(articulation.root_view.get_coms()), com.cpu()) for i in range(50): # perform step @@ -1675,38 +1684,40 @@ def test_body_root_state(sim, num_articulations, device, with_offset): articulation.update(sim.cfg.dt) # get state properties - root_state_w = articulation.data.root_state_w - root_link_state_w = articulation.data.root_link_state_w - root_com_state_w = articulation.data.root_com_state_w - body_state_w = articulation.data.body_state_w - body_link_state_w = articulation.data.body_link_state_w - body_com_state_w = articulation.data.body_com_state_w + root_link_pose_w = articulation.data.root_link_pose_w.torch + root_link_vel_w = articulation.data.root_link_vel_w.torch + root_com_pose_w = articulation.data.root_com_pose_w.torch + root_com_vel_w = articulation.data.root_com_vel_w.torch + body_link_pose_w = articulation.data.body_link_pose_w.torch + body_link_vel_w = articulation.data.body_link_vel_w.torch + body_com_pose_w = articulation.data.body_com_pose_w.torch + body_com_vel_w = articulation.data.body_com_vel_w.torch if with_offset: # get joint state - joint_pos = articulation.data.joint_pos.unsqueeze(-1) - joint_vel = articulation.data.joint_vel.unsqueeze(-1) + joint_pos = articulation.data.joint_pos.torch.unsqueeze(-1) + joint_vel = articulation.data.joint_vel.torch.unsqueeze(-1) # LINK state - # pose - torch.testing.assert_close(root_state_w[..., :7], root_link_state_w[..., :7]) - torch.testing.assert_close(body_state_w[..., :7], body_link_state_w[..., :7]) + # angular velocity should be the same for both COM and link frames + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) # lin_vel arm lin_vel_gt = torch.zeros(num_articulations, num_bodies, 3, device=device) vx = -(link_offset[0]) * joint_vel * torch.sin(joint_pos) vy = torch.zeros(num_articulations, 1, 1, device=device) vz = (link_offset[0]) * joint_vel * torch.cos(joint_pos) - lin_vel_gt[:, 1, :] = torch.cat([vx, vy, vz], dim=-1).squeeze(-2) + lin_vel_gt[:, arm_idx, :] = torch.cat([vx, vy, vz], dim=-1).squeeze(-2) # linear velocity of root link should be zero - torch.testing.assert_close(lin_vel_gt[:, 0, :], root_link_state_w[..., 7:10], atol=1e-3, rtol=1e-1) + torch.testing.assert_close(lin_vel_gt[:, root_idx, :], root_link_vel_w[..., :3], atol=1e-3, rtol=1e-1) # linear velocity of pendulum link should be - torch.testing.assert_close(lin_vel_gt, body_link_state_w[..., 7:10], atol=1e-3, rtol=1e-1) + torch.testing.assert_close(lin_vel_gt, body_link_vel_w[..., :3], atol=1e-3, rtol=1e-1) # ang_vel - torch.testing.assert_close(root_state_w[..., 10:], root_link_state_w[..., 10:]) - torch.testing.assert_close(body_state_w[..., 10:], body_link_state_w[..., 10:]) + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) # COM state # position and orientation shouldn't match for the _state_com_w but everything else will @@ -1714,26 +1725,26 @@ def test_body_root_state(sim, num_articulations, device, with_offset): px = (link_offset[0] + offset[0]) * torch.cos(joint_pos) py = torch.zeros(num_articulations, 1, 1, device=device) pz = (link_offset[0] + offset[0]) * torch.sin(joint_pos) - pos_gt[:, 1, :] = torch.cat([px, py, pz], dim=-1).squeeze(-2) + pos_gt[:, arm_idx, :] = torch.cat([px, py, pz], dim=-1).squeeze(-2) pos_gt += env_pos.unsqueeze(-2).repeat(1, num_bodies, 1) - torch.testing.assert_close(pos_gt[:, 0, :], root_com_state_w[..., :3], atol=1e-3, rtol=1e-1) - torch.testing.assert_close(pos_gt, body_com_state_w[..., :3], atol=1e-3, rtol=1e-1) + torch.testing.assert_close(pos_gt[:, root_idx, :], root_com_pose_w[..., :3], atol=1e-3, rtol=1e-1) + torch.testing.assert_close(pos_gt, body_com_pose_w[..., :3], atol=1e-3, rtol=1e-1) # orientation - com_quat_b = articulation.data.body_com_quat_b - com_quat_w = math_utils.quat_mul(body_link_state_w[..., 3:7], com_quat_b) - torch.testing.assert_close(com_quat_w, body_com_state_w[..., 3:7]) - torch.testing.assert_close(com_quat_w[:, 0, :], root_com_state_w[..., 3:7]) - - # linear vel, and angular vel - torch.testing.assert_close(root_state_w[..., 7:], root_com_state_w[..., 7:]) - torch.testing.assert_close(body_state_w[..., 7:], body_com_state_w[..., 7:]) + com_quat_b = articulation.data.body_com_quat_b.torch + com_quat_w = math_utils.quat_mul(body_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, body_com_pose_w[..., 3:]) + torch.testing.assert_close(com_quat_w[:, root_idx, :], root_com_pose_w[..., 3:]) + + # angular velocity should be the same for both COM and link frames + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) else: # single joint center of masses are at link frames so they will be the same - torch.testing.assert_close(root_state_w, root_link_state_w) - torch.testing.assert_close(root_state_w, root_com_state_w) - torch.testing.assert_close(body_state_w, body_link_state_w) - torch.testing.assert_close(body_state_w, body_com_state_w) + torch.testing.assert_close(root_link_pose_w, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_com_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -1761,7 +1772,7 @@ def test_write_root_state(sim, num_articulations, device, with_offset, state_loc sim._app_control_on_stop_handle = None articulation_cfg = generate_articulation_cfg(articulation_type="anymal") articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) - env_idx = torch.tensor([x for x in range(num_articulations)]) + env_idx = torch.tensor([x for x in range(num_articulations)], device=device, dtype=torch.int32) # Play sim sim.reset() @@ -1773,16 +1784,18 @@ def test_write_root_state(sim, num_articulations, device, with_offset, state_loc offset = torch.tensor([0.0, 0.0, 0.0]).repeat(num_articulations, 1, 1) # create com offsets - com = articulation.root_physx_view.get_coms() + com = wp.to_torch(articulation.root_view.get_coms()) new_com = offset com[:, 0, :3] = new_com.squeeze(-2) - articulation.root_physx_view.set_coms(com, env_idx) + articulation.root_view.set_coms( + wp.from_torch(com.cpu(), dtype=wp.float32), wp.from_torch(env_idx.cpu(), dtype=wp.int32) + ) # check they are set - torch.testing.assert_close(articulation.root_physx_view.get_coms(), com) + torch.testing.assert_close(wp.to_torch(articulation.root_view.get_coms()), com) - rand_state = torch.zeros_like(articulation.data.root_state_w) - rand_state[..., :7] = articulation.data.default_root_state[..., :7] + rand_state = torch.zeros(num_articulations, 13, device=device) + rand_state[..., :7] = articulation.data.default_root_pose.torch rand_state[..., :3] += env_pos # make quaternion a unit vector rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) @@ -1796,102 +1809,25 @@ def test_write_root_state(sim, num_articulations, device, with_offset, state_loc if state_location == "com": if i % 2 == 0: - articulation.write_root_com_state_to_sim(rand_state) + articulation.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + articulation.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) else: - articulation.write_root_com_state_to_sim(rand_state, env_ids=env_idx) + articulation.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + articulation.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) elif state_location == "link": if i % 2 == 0: - articulation.write_root_link_state_to_sim(rand_state) + articulation.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + articulation.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) else: - articulation.write_root_link_state_to_sim(rand_state, env_ids=env_idx) + articulation.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + articulation.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) if state_location == "com": - torch.testing.assert_close(rand_state, articulation.data.root_com_state_w) + torch.testing.assert_close(rand_state[..., :7], articulation.data.root_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], articulation.data.root_com_vel_w.torch) elif state_location == "link": - torch.testing.assert_close(rand_state, articulation.data.root_link_state_w) - - -@pytest.mark.parametrize("num_articulations", [1, 2]) -@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) -@pytest.mark.isaacsim_ci -def test_body_incoming_joint_wrench_b_single_joint(sim, num_articulations, device): - """Test the data.body_incoming_joint_wrench_b buffer is populated correctly and statically correct for single joint. - - This test verifies that: - 1. The body incoming joint wrench buffer has correct shape - 2. The wrench values are statically correct for a single joint - 3. The wrench values match expected values from gravity and external forces - - Args: - sim: The simulation fixture - num_articulations: Number of articulations to test - device: The device to run the simulation on - """ - articulation_cfg = generate_articulation_cfg(articulation_type="single_joint_implicit") - articulation, _ = generate_articulation( - articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device - ) - - # Play the simulator - sim.reset() - # apply external force - external_force_vector_b = torch.zeros((num_articulations, articulation.num_bodies, 3), device=device) - external_force_vector_b[:, 1, 1] = 10.0 # 10 N in Y direction - external_torque_vector_b = torch.zeros((num_articulations, articulation.num_bodies, 3), device=device) - external_torque_vector_b[:, 1, 2] = 10.0 # 10 Nm in z direction - - # apply action to the articulation - joint_pos = torch.ones_like(articulation.data.joint_pos) * 1.5708 / 2.0 - articulation.write_joint_state_to_sim( - torch.ones_like(articulation.data.joint_pos), torch.zeros_like(articulation.data.joint_vel) - ) - articulation.set_joint_position_target(joint_pos) - articulation.write_data_to_sim() - for _ in range(50): - # TODO: Replace with wrench composer once the deprecation is complete - articulation.set_external_force_and_torque(forces=external_force_vector_b, torques=external_torque_vector_b) - articulation.write_data_to_sim() - # perform step - sim.step() - # update buffers - articulation.update(sim.cfg.dt) - - # check shape - assert articulation.data.body_incoming_joint_wrench_b.shape == (num_articulations, articulation.num_bodies, 6) - - # calculate expected static - mass = articulation.data.default_mass - pos_w = articulation.data.body_pos_w - quat_w = articulation.data.body_quat_w - - mass_link2 = mass[:, 1].view(num_articulations, -1) - gravity = torch.tensor(sim.cfg.gravity, device="cpu").repeat(num_articulations, 1).view((num_articulations, 3)) - - # NOTE: the com and link pose for single joint are colocated - weight_vector_w = mass_link2 * gravity - # expected wrench from link mass and external wrench - expected_wrench = torch.zeros((num_articulations, 6), device=device) - expected_wrench[:, :3] = math_utils.quat_apply( - math_utils.quat_conjugate(quat_w[:, 0, :]), - weight_vector_w.to(device) + math_utils.quat_apply(quat_w[:, 1, :], external_force_vector_b[:, 1, :]), - ) - expected_wrench[:, 3:] = math_utils.quat_apply( - math_utils.quat_conjugate(quat_w[:, 0, :]), - torch.cross( - pos_w[:, 1, :].to(device) - pos_w[:, 0, :].to(device), - weight_vector_w.to(device) + math_utils.quat_apply(quat_w[:, 1, :], external_force_vector_b[:, 1, :]), - dim=-1, - ) - + math_utils.quat_apply(quat_w[:, 1, :], external_torque_vector_b[:, 1, :]), - ) - - # check value of last joint wrench - torch.testing.assert_close( - expected_wrench, - articulation.data.body_incoming_joint_wrench_b[:, 1, :].squeeze(1), - atol=1e-2, - rtol=1e-3, - ) + torch.testing.assert_close(rand_state[..., :7], articulation.data.root_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], articulation.data.root_link_vel_w.torch) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @@ -1901,12 +1837,11 @@ def test_setting_articulation_root_prim_path(sim, device): sim._app_control_on_stop_handle = None # Create articulation articulation_cfg = generate_articulation_cfg(articulation_type="humanoid") - print(articulation_cfg.spawn.usd_path) articulation_cfg.articulation_root_prim_path = "/torso" articulation, _ = generate_articulation(articulation_cfg, 1, device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -1921,12 +1856,11 @@ def test_setting_invalid_articulation_root_prim_path(sim, device): sim._app_control_on_stop_handle = None # Create articulation articulation_cfg = generate_articulation_cfg(articulation_type="humanoid") - print(articulation_cfg.spawn.usd_path) articulation_cfg.articulation_root_prim_path = "/non_existing_prim_path" articulation, _ = generate_articulation(articulation_cfg, 1, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim with pytest.raises(RuntimeError): @@ -1959,61 +1893,84 @@ def test_write_joint_state_data_consistency(sim, num_articulations, device, grav limits = torch.zeros(num_articulations, articulation.num_joints, 2, device=device) limits[..., 0] = (torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0) * -1.0 limits[..., 1] = torch.rand(num_articulations, articulation.num_joints, device=device) + 5.0 - articulation.write_joint_position_limit_to_sim(limits) + articulation.write_joint_position_limit_to_sim_index(limits=limits) from torch.distributions import Uniform - pos_dist = Uniform(articulation.data.joint_pos_limits[..., 0], articulation.data.joint_pos_limits[..., 1]) - vel_dist = Uniform(-articulation.data.joint_vel_limits, articulation.data.joint_vel_limits) + joint_pos_limits = articulation.data.joint_pos_limits.torch + joint_vel_limits = articulation.data.joint_vel_limits.torch + pos_dist = Uniform(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) + vel_dist = Uniform(-joint_vel_limits, joint_vel_limits) - original_body_states = articulation.data.body_state_w.clone() + original_body_link_pose_w = articulation.data.body_link_pose_w.torch.clone() + original_body_com_vel_w = articulation.data.body_com_vel_w.torch.clone() rand_joint_pos = pos_dist.sample() rand_joint_vel = vel_dist.sample() - articulation.write_joint_state_to_sim(rand_joint_pos, rand_joint_vel) + articulation.write_joint_position_to_sim_index(position=rand_joint_pos) + articulation.write_joint_velocity_to_sim_index(velocity=rand_joint_vel) # make sure valued updated - assert torch.count_nonzero(original_body_states[:, 1:] != articulation.data.body_state_w[:, 1:]) > ( + body_link_pose_w = articulation.data.body_link_pose_w.torch + body_com_vel_w = articulation.data.body_com_vel_w.torch + original_body_states = torch.cat([original_body_link_pose_w, original_body_com_vel_w], dim=-1) + body_state_w = torch.cat([body_link_pose_w, body_com_vel_w], dim=-1) + assert torch.count_nonzero(original_body_states[:, 1:] != body_state_w[:, 1:]) > ( len(original_body_states[:, 1:]) / 2 ) # validate body - link consistency - torch.testing.assert_close(articulation.data.body_state_w[..., :7], articulation.data.body_link_state_w[..., :7]) - # skip 7:10 because they differs from link frame, this should be fine because we are only checking + body_link_vel_w = articulation.data.body_link_vel_w.torch + torch.testing.assert_close(body_link_pose_w, articulation.data.body_link_pose_w.torch) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking # if velocity update is triggered, which can be determined by comparing angular velocity - torch.testing.assert_close(articulation.data.body_state_w[..., 10:], articulation.data.body_link_state_w[..., 10:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) # validate link - com conistency + body_com_pos_b = articulation.data.body_com_pos_b.torch + body_com_quat_b = articulation.data.body_com_quat_b.torch expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( - articulation.data.body_link_state_w[..., :3].view(-1, 3), - articulation.data.body_link_state_w[..., 3:7].view(-1, 4), - articulation.data.body_com_pos_b.view(-1, 3), - articulation.data.body_com_quat_b.view(-1, 4), + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:].view(-1, 4), + body_com_pos_b.view(-1, 3), + body_com_quat_b.view(-1, 4), ) - torch.testing.assert_close(expected_com_pos.view(len(env_idx), -1, 3), articulation.data.body_com_pos_w) - torch.testing.assert_close(expected_com_quat.view(len(env_idx), -1, 4), articulation.data.body_com_quat_w) + body_com_pos_w = articulation.data.body_com_pos_w.torch + body_com_quat_w = articulation.data.body_com_quat_w.torch + torch.testing.assert_close(expected_com_pos.view(len(env_idx), -1, 3), body_com_pos_w) + torch.testing.assert_close(expected_com_quat.view(len(env_idx), -1, 4), body_com_quat_w) # validate body - com consistency - torch.testing.assert_close(articulation.data.body_state_w[..., 7:10], articulation.data.body_com_lin_vel_w) - torch.testing.assert_close(articulation.data.body_state_w[..., 10:], articulation.data.body_com_ang_vel_w) + body_com_lin_vel_w = articulation.data.body_com_lin_vel_w.torch + body_com_ang_vel_w = articulation.data.body_com_ang_vel_w.torch + torch.testing.assert_close(body_com_vel_w[..., :3], body_com_lin_vel_w) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_com_ang_vel_w) # validate pos_w, quat_w, pos_b, quat_b is consistent with pose_w and pose_b - expected_com_pose_w = torch.cat((articulation.data.body_com_pos_w, articulation.data.body_com_quat_w), dim=2) - expected_com_pose_b = torch.cat((articulation.data.body_com_pos_b, articulation.data.body_com_quat_b), dim=2) - expected_body_pose_w = torch.cat((articulation.data.body_pos_w, articulation.data.body_quat_w), dim=2) - expected_body_link_pose_w = torch.cat( - (articulation.data.body_link_pos_w, articulation.data.body_link_quat_w), dim=2 - ) - torch.testing.assert_close(articulation.data.body_com_pose_w, expected_com_pose_w) - torch.testing.assert_close(articulation.data.body_com_pose_b, expected_com_pose_b) - torch.testing.assert_close(articulation.data.body_pose_w, expected_body_pose_w) - torch.testing.assert_close(articulation.data.body_link_pose_w, expected_body_link_pose_w) - - # validate pose_w is consistent state[..., :7] - torch.testing.assert_close(articulation.data.body_pose_w, articulation.data.body_state_w[..., :7]) - torch.testing.assert_close(articulation.data.body_vel_w, articulation.data.body_state_w[..., 7:]) - torch.testing.assert_close(articulation.data.body_link_pose_w, articulation.data.body_link_state_w[..., :7]) - torch.testing.assert_close(articulation.data.body_com_pose_w, articulation.data.body_com_state_w[..., :7]) - torch.testing.assert_close(articulation.data.body_vel_w, articulation.data.body_state_w[..., 7:]) + expected_com_pose_w = torch.cat((body_com_pos_w, body_com_quat_w), dim=2) + expected_com_pose_b = torch.cat((body_com_pos_b, body_com_quat_b), dim=2) + body_pos_w = articulation.data.body_pos_w.torch + body_quat_w = articulation.data.body_quat_w.torch + expected_body_pose_w = torch.cat((body_pos_w, body_quat_w), dim=2) + body_link_pos_w = articulation.data.body_link_pos_w.torch + body_link_quat_w = articulation.data.body_link_quat_w.torch + expected_body_link_pose_w = torch.cat((body_link_pos_w, body_link_quat_w), dim=2) + body_com_pose_w = articulation.data.body_com_pose_w.torch + body_com_pose_b = articulation.data.body_com_pose_b.torch + body_pose_w = articulation.data.body_pose_w.torch + body_link_pose_w_fresh = articulation.data.body_link_pose_w.torch + torch.testing.assert_close(body_com_pose_w, expected_com_pose_w) + torch.testing.assert_close(body_com_pose_b, expected_com_pose_b) + torch.testing.assert_close(body_pose_w, expected_body_pose_w) + torch.testing.assert_close(body_link_pose_w_fresh, expected_body_link_pose_w) + + # validate pose_w is consistent with individual properties + body_vel_w = articulation.data.body_vel_w.torch + body_com_vel_w_fresh = articulation.data.body_com_vel_w.torch + torch.testing.assert_close(body_pose_w, body_link_pose_w) + torch.testing.assert_close(body_vel_w, body_com_vel_w) + torch.testing.assert_close(body_link_pose_w_fresh, body_link_pose_w) + torch.testing.assert_close(body_com_pose_w, articulation.data.body_com_pose_w.torch) + torch.testing.assert_close(body_vel_w, body_com_vel_w_fresh) @pytest.mark.parametrize("num_articulations", [1, 2]) @@ -2031,14 +1988,14 @@ def test_spatial_tendons(sim, num_articulations, device): device: The device to run the simulation on """ # skip test if Isaac Sim version is less than 5.0 - if get_isaac_sim_version().major < 5: + if has_kit() and get_isaac_sim_version().major < 5: pytest.skip("Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later.") return articulation_cfg = generate_articulation_cfg(articulation_type="spatial_tendon_test_asset") articulation, _ = generate_articulation(articulation_cfg, num_articulations, device=device) - # Check that boundedness of articulation is correct - assert ctypes.c_long.from_address(id(articulation)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(articulation) < 10 # Play sim sim.reset() @@ -2047,17 +2004,17 @@ def test_spatial_tendons(sim, num_articulations, device): # Check that fixed base assert articulation.is_fixed_base # Check buffers that exists and have correct shapes - assert articulation.data.root_pos_w.shape == (num_articulations, 3) - assert articulation.data.root_quat_w.shape == (num_articulations, 4) - assert articulation.data.joint_pos.shape == (num_articulations, 3) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.root_pos_w.torch.shape == (num_articulations, 3) + assert articulation.data.root_quat_w.torch.shape == (num_articulations, 4) + assert articulation.data.joint_pos.torch.shape == (num_articulations, 3) + assert articulation.data.body_mass.torch.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.torch.shape == (num_articulations, articulation.num_bodies, 9) assert articulation.num_spatial_tendons == 1 - articulation.set_spatial_tendon_stiffness(torch.tensor([10.0])) - articulation.set_spatial_tendon_limit_stiffness(torch.tensor([10.0])) - articulation.set_spatial_tendon_damping(torch.tensor([10.0])) - articulation.set_spatial_tendon_offset(torch.tensor([10.0])) + articulation.set_spatial_tendon_stiffness_index(stiffness=10.0) + articulation.set_spatial_tendon_limit_stiffness_index(limit_stiffness=10.0) + articulation.set_spatial_tendon_damping_index(damping=10.0) + articulation.set_spatial_tendon_offset_index(offset=10.0) # Simulate physics for _ in range(10): @@ -2096,10 +2053,11 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground # The static friction must be set first to be sure the dynamic friction is not greater than static # when both are set. - articulation.write_joint_friction_coefficient_to_sim(friction) - if get_isaac_sim_version().major >= 5: - articulation.write_joint_dynamic_friction_coefficient_to_sim(dynamic_friction) - articulation.write_joint_viscous_friction_coefficient_to_sim(viscous_friction) + articulation.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=friction, + joint_dynamic_friction_coeff=dynamic_friction, + joint_viscous_friction_coeff=viscous_friction, + ) articulation.write_data_to_sim() for _ in range(100): @@ -2108,21 +2066,17 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground # update buffers articulation.update(sim.cfg.dt) - if get_isaac_sim_version().major >= 5: - friction_props_from_sim = articulation.root_physx_view.get_dof_friction_properties() - joint_friction_coeff_sim = friction_props_from_sim[:, :, 0] - joint_dynamic_friction_coeff_sim = friction_props_from_sim[:, :, 1] - joint_viscous_friction_coeff_sim = friction_props_from_sim[:, :, 2] - assert torch.allclose(joint_dynamic_friction_coeff_sim, dynamic_friction.cpu()) - assert torch.allclose(joint_viscous_friction_coeff_sim, viscous_friction.cpu()) - else: - joint_friction_coeff_sim = articulation.root_physx_view.get_dof_friction_properties() - + friction_props_from_sim = wp.to_torch(articulation.root_view.get_dof_friction_properties()) + joint_friction_coeff_sim = friction_props_from_sim[:, :, 0] + joint_dynamic_friction_coeff_sim = friction_props_from_sim[:, :, 1] + joint_viscous_friction_coeff_sim = friction_props_from_sim[:, :, 2] + assert torch.allclose(joint_dynamic_friction_coeff_sim, dynamic_friction.cpu()) + assert torch.allclose(joint_viscous_friction_coeff_sim, viscous_friction.cpu()) assert torch.allclose(joint_friction_coeff_sim, friction.cpu()) # For Isaac Sim >= 5.0: also test the combined API that can set dynamic and viscous via # write_joint_friction_coefficient_to_sim; reset the sim to isolate this path. - if get_isaac_sim_version().major >= 5: + if has_kit() and get_isaac_sim_version().major >= 5: # Reset simulator to ensure a clean state for the alternative API path sim.reset() @@ -2140,7 +2094,7 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground dynamic_friction_2 = torch.min(dynamic_friction_2, friction_2) # Use the combined setter to write all three at once - articulation.write_joint_friction_coefficient_to_sim( + articulation.write_joint_friction_coefficient_to_sim_index( joint_friction_coeff=friction_2, joint_dynamic_friction_coeff=dynamic_friction_2, joint_viscous_friction_coeff=viscous_friction_2, @@ -2152,7 +2106,7 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground sim.step() articulation.update(sim.cfg.dt) - friction_props_from_sim_2 = articulation.root_physx_view.get_dof_friction_properties() + friction_props_from_sim_2 = wp.to_torch(articulation.root_view.get_dof_friction_properties()) joint_friction_coeff_sim_2 = friction_props_from_sim_2[:, :, 0] friction_dynamic_coef_sim_2 = friction_props_from_sim_2[:, :, 1] friction_viscous_coeff_sim_2 = friction_props_from_sim_2[:, :, 2] @@ -2163,5 +2117,45 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground assert torch.allclose(joint_friction_coeff_sim_2, friction_2.cpu()) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +@pytest.mark.isaacsim_ci +def test_set_material_properties(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test getting and setting material properties (friction/restitution) of articulation shapes.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + # Get number of shapes from the articulation + max_shapes = articulation.root_view.max_shapes + + # Generate random material properties: (static_friction, dynamic_friction, restitution) + materials = torch.empty(num_articulations, max_shapes, 3, device="cpu").uniform_(0.0, 1.0) + # Ensure dynamic friction <= static friction + materials[..., 1] = torch.min(materials[..., 0], materials[..., 1]) + + # Set material properties via the PhysX view-level API + env_ids = torch.arange(num_articulations, dtype=torch.int32) + articulation.root_view.set_material_properties( + wp.from_torch(materials, dtype=wp.float32), wp.from_torch(env_ids, dtype=wp.int32) + ) + + # Simulate physics + sim.step() + articulation.update(sim.cfg.dt) + + # Get material properties from simulation + materials_check = wp.to_torch(articulation.root_view.get_material_properties()) + + # Check if material properties are set correctly + torch.testing.assert_close(materials_check, materials) + + if __name__ == "__main__": pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab/test/assets/test_deformable_object.py b/source/isaaclab_physx/test/assets/test_deformable_object.py similarity index 63% rename from source/isaaclab/test/assets/test_deformable_object.py rename to source/isaaclab_physx/test/assets/test_deformable_object.py index 4726a274462c..31a199938c94 100644 --- a/source/isaaclab/test/assets/test_deformable_object.py +++ b/source/isaaclab_physx/test/assets/test_deformable_object.py @@ -16,27 +16,34 @@ """Rest everything follows.""" -import ctypes +import sys import pytest import torch +import warp as wp from flaky import flaky +from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg +from isaaclab_physx.sim import DeformableBodyMaterialCfg, DeformableBodyPropertiesCfg, SurfaceDeformableBodyMaterialCfg import carb import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils -from isaaclab.assets import DeformableObject, DeformableObjectCfg from isaaclab.sim import build_simulation_context +# Temporarily disabled: this suite intermittently aborts with SIGABRT on CI. +# Re-enable once the underlying crash is fixed. +pytestmark = pytest.mark.skip(reason="Temporarily disabled due to intermittent crash on CI.") + def generate_cubes_scene( num_cubes: int = 1, height: float = 1.0, - initial_rot: tuple[float, ...] = (1.0, 0.0, 0.0, 0.0), + initial_rot: tuple[float, ...] = (0.0, 0.0, 0.0, 1.0), has_api: bool = True, material_path: str | None = "material", kinematic_enabled: bool = False, + deformable_type: str = "volume", device: str = "cuda:0", ) -> DeformableObject: """Generate a scene with the provided number of cubes. @@ -44,11 +51,12 @@ def generate_cubes_scene( Args: num_cubes: Number of cubes to generate. height: Height of the cubes. Default is 1.0. - initial_rot: Initial rotation of the cubes. Default is (1.0, 0.0, 0.0, 0.0). + initial_rot: Initial rotation of the cubes (xyzw format). Default is (0.0, 0.0, 0.0, 1.0). has_api: Whether the cubes have a deformable body API on them. material_path: Path to the material file. If None, no material is added. Default is "material", which is path relative to the spawned object prim path. kinematic_enabled: Whether the cubes are kinematic. + deformable_type: The type of deformable body to spawn. Supported values are "volume" and "surface". device: Device to use for the simulation. Returns: @@ -64,11 +72,14 @@ def generate_cubes_scene( if has_api: spawn_cfg = sim_utils.MeshCuboidCfg( size=(0.2, 0.2, 0.2), - deformable_props=sim_utils.DeformableBodyPropertiesCfg(kinematic_enabled=kinematic_enabled), + deformable_props=DeformableBodyPropertiesCfg(kinematic_enabled=kinematic_enabled), ) # Add physics material if provided if material_path is not None: - spawn_cfg.physics_material = sim_utils.DeformableBodyMaterialCfg() + if deformable_type == "surface": + spawn_cfg.physics_material = SurfaceDeformableBodyMaterialCfg() + else: + spawn_cfg.physics_material = DeformableBodyMaterialCfg() spawn_cfg.physics_material_path = material_path else: spawn_cfg.physics_material = None @@ -103,8 +114,11 @@ def test_initialization(sim, num_cubes, material_path): """Test initialization for prim with deformable body API at the provided prim path.""" cube_object = generate_cubes_scene(num_cubes=num_cubes, material_path=material_path) - # Check that boundedness of deformable object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + # sys.getrefcount() adds 1 for its own argument. The baseline is 2 (local var + + # getrefcount arg) but Omniverse event-bus subscriptions and Python/torch runtime + # internals may legitimately add a few more. We use a threshold to catch real leaks. + assert sys.getrefcount(cube_object) < 10 # Play sim sim.reset() @@ -114,7 +128,7 @@ def test_initialization(sim, num_cubes, material_path): # Check correct number of cubes assert cube_object.num_instances == num_cubes - assert cube_object.root_physx_view.count == num_cubes + assert cube_object.root_view.count == num_cubes # Check correct number of materials in the view if material_path: @@ -126,98 +140,67 @@ def test_initialization(sim, num_cubes, material_path): assert cube_object.material_physx_view is None # Check buffers that exist and have correct shapes - assert cube_object.data.nodal_state_w.shape == (num_cubes, cube_object.max_sim_vertices_per_body, 6) - assert cube_object.data.nodal_kinematic_target.shape == (num_cubes, cube_object.max_sim_vertices_per_body, 4) - assert cube_object.data.root_pos_w.shape == (num_cubes, 3) - assert cube_object.data.root_vel_w.shape == (num_cubes, 3) - - # Simulate physics - for _ in range(2): - sim.step() - cube_object.update(sim.cfg.dt) - - # Check sim data - assert cube_object.data.sim_element_quat_w.shape == (num_cubes, cube_object.max_sim_elements_per_body, 4) - assert cube_object.data.sim_element_deform_gradient_w.shape == ( + # nodal_state_w is (N, V) vec6f -> wp.to_torch gives (N, V, 6) + assert cube_object.data.nodal_state_w.torch.shape == (num_cubes, cube_object.max_sim_vertices_per_body, 6) + # nodal_kinematic_target is (N, V) vec4f -> .torch gives (N, V, 4) + assert cube_object.data.nodal_kinematic_target.torch.shape == ( num_cubes, - cube_object.max_sim_elements_per_body, - 3, - 3, - ) - assert cube_object.data.sim_element_stress_w.shape == (num_cubes, cube_object.max_sim_elements_per_body, 3, 3) - assert cube_object.data.collision_element_quat_w.shape == ( - num_cubes, - cube_object.max_collision_elements_per_body, + cube_object.max_sim_vertices_per_body, 4, ) - assert cube_object.data.collision_element_deform_gradient_w.shape == ( - num_cubes, - cube_object.max_collision_elements_per_body, - 3, - 3, - ) - assert cube_object.data.collision_element_stress_w.shape == ( - num_cubes, - cube_object.max_collision_elements_per_body, - 3, - 3, - ) - - -@pytest.mark.isaacsim_ci -def test_initialization_on_device_cpu(): - """Test that initialization fails with deformable body API on the CPU.""" - with build_simulation_context(device="cpu", auto_add_lighting=True) as sim: - sim._app_control_on_stop_handle = None - cube_object = generate_cubes_scene(num_cubes=5, device="cpu") - - # Check that boundedness of deformable object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 - - # Play sim - with pytest.raises(RuntimeError): - sim.reset() + # root_pos_w is (N,) vec3f -> wp.to_torch gives (N, 3) + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_vel_w.torch.shape == (num_cubes, 3) @pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.isaacsim_ci -def test_initialization_with_kinematic_enabled(sim, num_cubes): - """Test that initialization for prim with kinematic flag enabled.""" - cube_object = generate_cubes_scene(num_cubes=num_cubes, kinematic_enabled=True) - - # Check that boundedness of deformable object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 +def test_initialization_surface_deformable(sim, num_cubes): + """Test initialization of a surface deformable body.""" + cube_object = generate_cubes_scene(num_cubes=num_cubes, deformable_type="surface") # Play sim sim.reset() # Check if object is initialized assert cube_object.is_initialized + assert cube_object._deformable_type == "surface" - # Check buffers that exist and have correct shapes - assert cube_object.data.root_pos_w.shape == (num_cubes, 3) - assert cube_object.data.root_vel_w.shape == (num_cubes, 3) + # Check correct number of instances + assert cube_object.num_instances == num_cubes + assert cube_object.root_view.count == num_cubes - # Simulate physics - for _ in range(2): - sim.step() - cube_object.update(sim.cfg.dt) - default_nodal_state_w = cube_object.data.default_nodal_state_w.clone() - torch.testing.assert_close(cube_object.data.nodal_state_w, default_nodal_state_w) + # Check material view is created + assert cube_object.material_physx_view is not None + assert cube_object.material_physx_view.count == num_cubes + + # Check nodal state buffers have correct shapes + assert cube_object.data.nodal_state_w.torch.shape == (num_cubes, cube_object.max_sim_vertices_per_body, 6) + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_vel_w.torch.shape == (num_cubes, 3) + + # Kinematic targets are not allocated for surface deformables + assert cube_object.data.nodal_kinematic_target is None + + # Writing kinematic targets should raise ValueError + dummy_targets = torch.zeros(num_cubes, cube_object.max_sim_vertices_per_body, 4, device=sim.device) + with pytest.raises(ValueError, match="Kinematic targets can only be set for volume deformable bodies"): + cube_object.write_nodal_kinematic_target_to_sim_index(dummy_targets) -@pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.isaacsim_ci -def test_initialization_with_no_deformable_body(sim, num_cubes): - """Test that initialization fails when no deformable body is found at the provided prim path.""" - cube_object = generate_cubes_scene(num_cubes=num_cubes, has_api=False) +def test_initialization_on_device_cpu(): + """Test that initialization fails with deformable body API on the CPU.""" + with build_simulation_context(device="cpu", auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + cube_object = generate_cubes_scene(num_cubes=5, device="cpu") - # Check that boundedness of deformable object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 - # Play sim - with pytest.raises(RuntimeError): - sim.reset() + # Play sim + with pytest.raises(RuntimeError): + sim.reset() @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -231,8 +214,8 @@ def test_set_nodal_state(sim, num_cubes): for state_type_to_randomize in ["nodal_pos_w", "nodal_vel_w"]: state_dict = { - "nodal_pos_w": torch.zeros_like(cube_object.data.nodal_pos_w), - "nodal_vel_w": torch.zeros_like(cube_object.data.nodal_vel_w), + "nodal_pos_w": torch.zeros_like(cube_object.data.nodal_pos_w.torch), + "nodal_vel_w": torch.zeros_like(cube_object.data.nodal_vel_w.torch), } for _ in range(5): @@ -250,9 +233,9 @@ def test_set_nodal_state(sim, num_cubes): ], dim=-1, ) - cube_object.write_nodal_state_to_sim(nodal_state) + cube_object.write_nodal_state_to_sim_index(nodal_state) - torch.testing.assert_close(cube_object.data.nodal_state_w, nodal_state, rtol=1e-5, atol=1e-5) + torch.testing.assert_close(cube_object.data.nodal_state_w.torch, nodal_state, rtol=1e-5, atol=1e-5) sim.step() cube_object.update(sim.cfg.dt) @@ -263,23 +246,23 @@ def test_set_nodal_state(sim, num_cubes): @pytest.mark.parametrize("randomize_rot", [True, False]) @flaky(max_runs=3, min_passes=1) @pytest.mark.isaacsim_ci -def test_set_nodal_state_with_applied_transform(sim, num_cubes, randomize_pos, randomize_rot): +def test_set_nodal_state_with_applied_transform(num_cubes, randomize_pos, randomize_rot): """Test setting the state of the deformable object with applied transform.""" carb_settings_iface = carb.settings.get_settings() carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) - # Create a new simulation context with gravity disabled + # Create simulation context with gravity disabled (no fixture needed) with build_simulation_context(auto_add_lighting=True, gravity_enabled=False) as sim: sim._app_control_on_stop_handle = None cube_object = generate_cubes_scene(num_cubes=num_cubes) sim.reset() for _ in range(5): - nodal_state = cube_object.data.default_nodal_state_w.clone() + nodal_state = cube_object.data.default_nodal_state_w.torch.clone() mean_nodal_pos_default = nodal_state[..., :3].mean(dim=1) if randomize_pos: - pos_w = torch.rand(cube_object.num_instances, 3, device=sim.device) + pos_w = 0.5 * torch.rand(cube_object.num_instances, 3, device=sim.device) pos_w[:, 2] += 0.5 else: pos_w = None @@ -296,14 +279,14 @@ def test_set_nodal_state_with_applied_transform(sim, num_cubes, randomize_pos, r else: torch.testing.assert_close(mean_nodal_pos_init, mean_nodal_pos_default + pos_w, rtol=1e-5, atol=1e-5) - cube_object.write_nodal_state_to_sim(nodal_state) + cube_object.write_nodal_state_to_sim_index(nodal_state) cube_object.reset() for _ in range(50): sim.step() cube_object.update(sim.cfg.dt) - torch.testing.assert_close(cube_object.data.root_pos_w, mean_nodal_pos_init, rtol=1e-5, atol=1e-5) + torch.testing.assert_close(cube_object.data.root_pos_w.torch, mean_nodal_pos_init, rtol=1e-4, atol=1e-4) @pytest.mark.parametrize("num_cubes", [2, 4]) @@ -314,20 +297,20 @@ def test_set_kinematic_targets(sim, num_cubes): sim.reset() - nodal_kinematic_targets = cube_object.root_physx_view.get_sim_kinematic_targets().clone() + nodal_kinematic_targets = wp.to_torch(cube_object.root_view.get_simulation_nodal_kinematic_targets()).clone() for _ in range(5): - cube_object.write_nodal_state_to_sim(cube_object.data.default_nodal_state_w) + cube_object.write_nodal_state_to_sim_index(cube_object.data.default_nodal_state_w.torch) - default_root_pos = cube_object.data.default_nodal_state_w.mean(dim=1) + default_root_pos = cube_object.data.default_nodal_state_w.torch.mean(dim=1) cube_object.reset() nodal_kinematic_targets[1:, :, 3] = 1.0 nodal_kinematic_targets[0, :, 3] = 0.0 - nodal_kinematic_targets[0, :, :3] = cube_object.data.default_nodal_state_w[0, :, :3] - cube_object.write_nodal_kinematic_target_to_sim( - nodal_kinematic_targets[0], env_ids=torch.tensor([0], device=sim.device) + nodal_kinematic_targets[0, :, :3] = cube_object.data.default_nodal_state_w.torch[0, :, :3] + cube_object.write_nodal_kinematic_target_to_sim_index( + nodal_kinematic_targets[0:1], env_ids=torch.tensor([0], device=sim.device) ) for _ in range(20): @@ -335,7 +318,10 @@ def test_set_kinematic_targets(sim, num_cubes): cube_object.update(sim.cfg.dt) torch.testing.assert_close( - cube_object.data.nodal_pos_w[0], nodal_kinematic_targets[0, :, :3], rtol=1e-5, atol=1e-5 + cube_object.data.nodal_pos_w.torch[0], + nodal_kinematic_targets[0, :, :3], + rtol=1e-5, + atol=1e-5, ) - root_pos_w = cube_object.data.root_pos_w + root_pos_w = cube_object.data.root_pos_w.torch assert torch.all(root_pos_w[1:, 2] < default_root_pos[1:, 2]) diff --git a/source/isaaclab/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py similarity index 66% rename from source/isaaclab/test/assets/test_rigid_object.py rename to source/isaaclab_physx/test/assets/test_rigid_object.py index 8de5361e29ff..6a2211787e19 100644 --- a/source/isaaclab/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -16,15 +16,17 @@ """Rest everything follows.""" -import ctypes +import sys from typing import Literal import pytest import torch +import warp as wp from flaky import flaky +from isaaclab_physx.assets import RigidObject import isaaclab.sim as sim_utils -from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.assets import RigidObjectCfg from isaaclab.sim import build_simulation_context from isaaclab.sim.spawners import materials from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR @@ -105,8 +107,8 @@ def test_initialization(num_cubes, device): # Generate cubes scene cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 # Play sim sim.reset() @@ -116,10 +118,10 @@ def test_initialization(num_cubes, device): assert len(cube_object.body_names) == 1 # Check buffers that exists and have correct shapes - assert cube_object.data.root_pos_w.shape == (num_cubes, 3) - assert cube_object.data.root_quat_w.shape == (num_cubes, 4) - assert cube_object.data.default_mass.shape == (num_cubes, 1) - assert cube_object.data.default_inertia.shape == (num_cubes, 9) + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_quat_w.torch.shape == (num_cubes, 4) + assert cube_object.data.body_mass.torch.shape == (num_cubes, 1) + assert cube_object.data.body_inertia.torch.shape == (num_cubes, 1, 9) # Simulate physics for _ in range(2): @@ -139,8 +141,8 @@ def test_initialization_with_kinematic_enabled(num_cubes, device): # Generate cubes scene cube_object, origins = generate_cubes_scene(num_cubes=num_cubes, kinematic_enabled=True, device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 # Play sim sim.reset() @@ -150,8 +152,8 @@ def test_initialization_with_kinematic_enabled(num_cubes, device): assert len(cube_object.body_names) == 1 # Check buffers that exists and have correct shapes - assert cube_object.data.root_pos_w.shape == (num_cubes, 3) - assert cube_object.data.root_quat_w.shape == (num_cubes, 4) + assert cube_object.data.root_pos_w.torch.shape == (num_cubes, 3) + assert cube_object.data.root_quat_w.torch.shape == (num_cubes, 4) # Simulate physics for _ in range(2): @@ -160,9 +162,11 @@ def test_initialization_with_kinematic_enabled(num_cubes, device): # update object cube_object.update(sim.cfg.dt) # check that the object is kinematic - default_root_state = cube_object.data.default_root_state.clone() - default_root_state[:, :3] += origins - torch.testing.assert_close(cube_object.data.root_state_w, default_root_state) + default_root_pose = cube_object.data.default_root_pose.torch.clone() + default_root_vel = cube_object.data.default_root_vel.torch.clone() + default_root_pose[:, :3] += origins + torch.testing.assert_close(cube_object.data.root_link_pose_w.torch, default_root_pose) + torch.testing.assert_close(cube_object.data.root_com_vel_w.torch, default_root_vel) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -175,8 +179,8 @@ def test_initialization_with_no_rigid_body(num_cubes, device): # Generate cubes scene cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, api="none", device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 # Play sim with pytest.raises(RuntimeError): @@ -193,8 +197,8 @@ def test_initialization_with_articulation_root(num_cubes, device): # Generate cubes scene cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, api="articulation_root", device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(cube_object)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(cube_object) < 10 # Play sim with pytest.raises(RuntimeError): @@ -241,7 +245,7 @@ def test_external_force_buffer(device): external_wrench_b[:, :, 3] = force # apply force - cube_object.permanent_wrench_composer.set_forces_and_torques( + cube_object.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids, @@ -249,11 +253,11 @@ def test_external_force_buffer(device): # check if the cube's force and torque buffers are correctly updated for i in range(cube_object.num_instances): - assert cube_object._permanent_wrench_composer.composed_force_as_torch[i, 0, 0].item() == force - assert cube_object._permanent_wrench_composer.composed_torque_as_torch[i, 0, 0].item() == force + assert cube_object._permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert cube_object._permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force # Check if the instantaneous wrench is correctly added to the permanent wrench - cube_object.permanent_wrench_composer.add_forces_and_torques( + cube_object.permanent_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=body_ids, @@ -295,17 +299,18 @@ def test_external_force_on_single_body(num_cubes, device): # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device) # Every 2nd cube should have a force applied to it - external_wrench_b[0::2, :, 2] = 9.81 * cube_object.root_physx_view.get_masses()[0] + external_wrench_b[0::2, :, 2] = 9.81 * wp.to_torch(cube_object.root_view.get_masses())[0] # Now we are ready! for i in range(5): # reset root state - root_state = cube_object.data.default_root_state.clone() + root_pose = cube_object.data.default_root_pose.torch.clone() + root_vel = cube_object.data.default_root_vel.torch.clone() # need to shift the position of the cubes otherwise they will be on top of each other - root_state[:, :3] = origins - cube_object.write_root_pose_to_sim(root_state[:, :7]) - cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + root_pose[:, :3] = origins + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) # reset object cube_object.reset() @@ -313,12 +318,12 @@ def test_external_force_on_single_body(num_cubes, device): is_global = False if i % 2 == 0: is_global = True - positions = cube_object.data.body_com_pos_w[:, body_ids, :3] + positions = cube_object.data.body_com_pos_w.torch[:, body_ids, :3] else: positions = None # apply force - cube_object.permanent_wrench_composer.set_forces_and_torques( + cube_object.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=positions, @@ -338,10 +343,10 @@ def test_external_force_on_single_body(num_cubes, device): # First object should still be at the same Z position (1.0) torch.testing.assert_close( - cube_object.data.root_pos_w[0::2, 2], torch.ones(num_cubes // 2, device=sim.device) + cube_object.data.root_pos_w.torch[0::2, 2], torch.ones(num_cubes // 2, device=sim.device) ) # Second object should have fallen, so it's Z height should be less than initial height of 1.0 - assert torch.all(cube_object.data.root_pos_w[1::2, 2] < 1.0) + assert torch.all(cube_object.data.root_pos_w.torch[1::2, 2] < 1.0) @pytest.mark.parametrize("num_cubes", [2, 4]) @@ -381,12 +386,13 @@ def test_external_force_on_single_body_at_position(num_cubes, device): # Now we are ready! for i in range(5): # reset root state - root_state = cube_object.data.default_root_state.clone() + root_pose = cube_object.data.default_root_pose.torch.clone() + root_vel = cube_object.data.default_root_vel.torch.clone() # need to shift the position of the cubes otherwise they will be on top of each other - root_state[:, :3] = origins - cube_object.write_root_pose_to_sim(root_state[:, :7]) - cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + root_pose[:, :3] = origins + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) # reset object cube_object.reset() @@ -394,7 +400,7 @@ def test_external_force_on_single_body_at_position(num_cubes, device): is_global = False if i % 2 == 0: is_global = True - body_com_pos_w = cube_object.data.body_com_pos_w[:, body_ids, :3] + body_com_pos_w = cube_object.data.body_com_pos_w.torch[:, body_ids, :3] external_wrench_positions_b[..., 0] = 0.0 external_wrench_positions_b[..., 1] = 1.0 external_wrench_positions_b[..., 2] = 0.0 @@ -405,14 +411,14 @@ def test_external_force_on_single_body_at_position(num_cubes, device): external_wrench_positions_b[..., 2] = 0.0 # apply force - cube_object.permanent_wrench_composer.set_forces_and_torques( + cube_object.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, body_ids=body_ids, is_global=is_global, ) - cube_object.permanent_wrench_composer.add_forces_and_torques( + cube_object.permanent_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, @@ -420,13 +426,13 @@ def test_external_force_on_single_body_at_position(num_cubes, device): is_global=is_global, ) torch.testing.assert_close( - cube_object._permanent_wrench_composer.composed_force_as_torch[:, 0, :], + cube_object._permanent_wrench_composer.composed_force.torch[:, 0, :], desired_force[:, 0, :], rtol=1e-6, atol=1e-7, ) torch.testing.assert_close( - cube_object._permanent_wrench_composer.composed_torque_as_torch[:, 0, :], + cube_object._permanent_wrench_composer.composed_torque.torch[:, 0, :], desired_torque[:, 0, :], rtol=1e-6, atol=1e-7, @@ -443,9 +449,9 @@ def test_external_force_on_single_body_at_position(num_cubes, device): cube_object.update(sim.cfg.dt) # The first object should be rotating around it's X axis - assert torch.all(torch.abs(cube_object.data.root_ang_vel_b[0::2, 0]) > 0.1) + assert torch.all(torch.abs(cube_object.data.root_ang_vel_b.torch[0::2, 0]) > 0.1) # Second object should have fallen, so it's Z height should be less than initial height of 1.0 - assert torch.all(cube_object.data.root_pos_w[1::2, 2] < 1.0) + assert torch.all(cube_object.data.root_pos_w.torch[1::2, 2] < 1.0) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -473,10 +479,10 @@ def test_set_rigid_object_state(num_cubes, device): # Set each state type individually as they are dependent on each other for state_type_to_randomize in state_types: state_dict = { - "root_pos_w": torch.zeros_like(cube_object.data.root_pos_w, device=sim.device), + "root_pos_w": torch.zeros_like(cube_object.data.root_pos_w.torch, device=sim.device), "root_quat_w": default_orientation(num=num_cubes, device=sim.device), - "root_lin_vel_w": torch.zeros_like(cube_object.data.root_lin_vel_w, device=sim.device), - "root_ang_vel_w": torch.zeros_like(cube_object.data.root_ang_vel_w, device=sim.device), + "root_lin_vel_w": torch.zeros_like(cube_object.data.root_lin_vel_w.torch, device=sim.device), + "root_ang_vel_w": torch.zeros_like(cube_object.data.root_ang_vel_w.torch, device=sim.device), } # Now we are ready! @@ -492,25 +498,24 @@ def test_set_rigid_object_state(num_cubes, device): # perform simulation for _ in range(5): - root_state = torch.cat( - [ - state_dict["root_pos_w"], - state_dict["root_quat_w"], - state_dict["root_lin_vel_w"], - state_dict["root_ang_vel_w"], - ], + root_pose = torch.cat( + [state_dict["root_pos_w"], state_dict["root_quat_w"]], + dim=-1, + ) + root_vel = torch.cat( + [state_dict["root_lin_vel_w"], state_dict["root_ang_vel_w"]], dim=-1, ) # reset root state - cube_object.write_root_pose_to_sim(root_state[:, :7]) - cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) sim.step() # assert that set root quantities are equal to the ones set in the state_dict for key, expected_value in state_dict.items(): - value = getattr(cube_object.data, key) - torch.testing.assert_close(value, expected_value, rtol=1e-5, atol=1e-5) + value = getattr(cube_object.data, key).torch + torch.testing.assert_close(value, expected_value, rtol=1e-3, atol=1e-3) cube_object.update(sim.cfg.dt) @@ -536,13 +541,14 @@ def test_reset_rigid_object(num_cubes, device): cube_object.update(sim.cfg.dt) # Move the object to a random position - root_state = cube_object.data.default_root_state.clone() - root_state[:, :3] = torch.randn(num_cubes, 3, device=sim.device) + root_pose = cube_object.data.default_root_pose.torch.clone() + root_pose[:, :3] = torch.randn(num_cubes, 3, device=sim.device) # Random orientation - root_state[:, 3:7] = random_orientation(num=num_cubes, device=sim.device) - cube_object.write_root_pose_to_sim(root_state[:, :7]) - cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + root_pose[:, 3:7] = random_orientation(num=num_cubes, device=sim.device) + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + root_vel = cube_object.data.default_root_vel.torch.clone() + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) if i % 2 == 0: # reset object @@ -551,10 +557,10 @@ def test_reset_rigid_object(num_cubes, device): # Reset should zero external forces and torques assert not cube_object._instantaneous_wrench_composer.active assert not cube_object._permanent_wrench_composer.active - assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_torque_as_torch) == 0 - assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_torque_as_torch) == 0 + assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(cube_object._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(cube_object._permanent_wrench_composer.composed_torque.torch) == 0 @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -579,9 +585,11 @@ def test_rigid_body_set_material_properties(num_cubes, device): materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) - indices = torch.tensor(range(num_cubes), dtype=torch.int) + indices = torch.tensor(range(num_cubes), dtype=torch.int32) # Add friction to cube - cube_object.root_physx_view.set_material_properties(materials, indices) + cube_object.root_view.set_material_properties( + wp.from_torch(materials, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) + ) # Simulate physics # perform rendering @@ -590,12 +598,52 @@ def test_rigid_body_set_material_properties(num_cubes, device): cube_object.update(sim.cfg.dt) # Get material properties - materials_to_check = cube_object.root_physx_view.get_material_properties() + materials_to_check = wp.to_torch(cube_object.root_view.get_material_properties()) # Check if material properties are set correctly torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.isaacsim_ci +def test_set_material_properties_via_view(num_cubes, device): + """Test setting material properties via the PhysX view-level API.""" + with build_simulation_context( + device=device, gravity_enabled=True, add_ground_plane=True, auto_add_lighting=True + ) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play sim + sim.reset() + + # Get number of shapes + max_shapes = cube_object.root_view.max_shapes + + # Generate random material properties: (static_friction, dynamic_friction, restitution) + materials = torch.empty(num_cubes, max_shapes, 3, device="cpu").uniform_(0.0, 1.0) + # Ensure dynamic friction <= static friction + materials[..., 1] = torch.min(materials[..., 0], materials[..., 1]) + + # Set material properties via the PhysX view-level API + env_ids = torch.arange(num_cubes, dtype=torch.int32) + cube_object.root_view.set_material_properties( + wp.from_torch(materials, dtype=wp.float32), wp.from_torch(env_ids, dtype=wp.int32) + ) + + # Simulate physics + sim.step() + cube_object.update(sim.cfg.dt) + + # Get material properties from simulation + materials_check = wp.to_torch(cube_object.root_view.get_material_properties()) + + # Check if material properties are set correctly + torch.testing.assert_close(materials_check, materials) + + @pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci @@ -625,16 +673,18 @@ def test_rigid_body_no_friction(num_cubes, device): restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2) cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) - indices = torch.tensor(range(num_cubes), dtype=torch.int) + indices = torch.tensor(range(num_cubes), dtype=torch.int32) - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties( + wp.from_torch(cube_object_materials, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) + ) # Set initial velocity # Initial velocity in X to get the block moving initial_velocity = torch.zeros((num_cubes, 6), device=sim.cfg.device) initial_velocity[:, 0] = 0.1 - cube_object.write_root_velocity_to_sim(initial_velocity) + cube_object.write_root_velocity_to_sim_index(root_velocity=initial_velocity) # Simulate physics for _ in range(5): @@ -650,7 +700,7 @@ def test_rigid_body_no_friction(num_cubes, device): tolerance = 1e-5 torch.testing.assert_close( - cube_object.data.root_lin_vel_w, initial_velocity[:, :3], rtol=1e-5, atol=tolerance + cube_object.data.root_lin_vel_w.torch, initial_velocity[:, :3], rtol=1e-5, atol=tolerance ) @@ -690,24 +740,26 @@ def test_rigid_body_with_static_friction(num_cubes, device): cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) - indices = torch.tensor(range(num_cubes), dtype=torch.int) + indices = torch.tensor(range(num_cubes), dtype=torch.int32) # Add friction to cube - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties( + wp.from_torch(cube_object_materials, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) + ) # let everything settle for _ in range(100): sim.step() cube_object.update(sim.cfg.dt) - cube_object.write_root_velocity_to_sim(torch.zeros((num_cubes, 6), device=sim.device)) - cube_mass = cube_object.root_physx_view.get_masses() + cube_object.write_root_velocity_to_sim_index(root_velocity=torch.zeros((num_cubes, 6), device=sim.device)) + cube_mass = wp.to_torch(cube_object.root_view.get_masses()) gravity_magnitude = abs(sim.cfg.gravity[2]) # 2 cases: force applied is below and above mu # below mu: block should not move as the force applied is <= mu # above mu: block should move as the force applied is > mu for force in "below_mu", "above_mu": # set initial velocity to zero - cube_object.write_root_velocity_to_sim(torch.zeros((num_cubes, 6), device=sim.device)) + cube_object.write_root_velocity_to_sim_index(root_velocity=torch.zeros((num_cubes, 6), device=sim.device)) external_wrench_b = torch.zeros((num_cubes, 1, 6), device=sim.device) if force == "below_mu": @@ -715,14 +767,13 @@ def test_rigid_body_with_static_friction(num_cubes, device): else: external_wrench_b[..., 0] = static_friction_coefficient * cube_mass * gravity_magnitude * 1.01 - # TODO: Replace with wrench composer once the deprecation is complete - cube_object.set_external_force_and_torque( - external_wrench_b[..., :3], - external_wrench_b[..., 3:], + cube_object.permanent_wrench_composer.set_forces_and_torques_index( + forces=external_wrench_b[..., :3], + torques=external_wrench_b[..., 3:], ) # Get root state - initial_root_pos = cube_object.data.root_pos_w.clone() + initial_root_pos = cube_object.data.root_pos_w.torch.clone() # Simulate physics for _ in range(200): # apply the wrench @@ -732,9 +783,11 @@ def test_rigid_body_with_static_friction(num_cubes, device): cube_object.update(sim.cfg.dt) if force == "below_mu": # Assert that the block has not moved - torch.testing.assert_close(cube_object.data.root_pos_w, initial_root_pos, rtol=2e-3, atol=2e-3) + torch.testing.assert_close( + cube_object.data.root_pos_w.torch, initial_root_pos, rtol=2e-3, atol=2e-3 + ) if force == "above_mu": - assert (cube_object.data.root_state_w[..., 0] - initial_root_pos[..., 0] > 0.02).all() + assert (cube_object.data.root_pos_w.torch[..., 0] - initial_root_pos[..., 0] > 0.02).all() @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -767,20 +820,21 @@ def test_rigid_body_with_restitution(num_cubes, device): ) cfg.func("/World/GroundPlane", cfg) - indices = torch.tensor(range(num_cubes), dtype=torch.int) + indices = torch.tensor(range(num_cubes), dtype=torch.int32) # Play sim sim.reset() - root_state = torch.zeros(num_cubes, 13, device=sim.device) - root_state[:, 3] = 1.0 # To make orientation a quaternion + root_pose = torch.zeros(num_cubes, 7, device=sim.device) + root_pose[:, 3] = 1.0 # To make orientation a quaternion for i in range(num_cubes): - root_state[i, 1] = 1.0 * i - root_state[:, 2] = 1.0 # Set an initial drop height - root_state[:, 9] = -1.0 # Set an initial downward velocity + root_pose[i, 1] = 1.0 * i + root_pose[:, 2] = 1.0 # Set an initial drop height + root_vel = torch.zeros(num_cubes, 6, device=sim.device) + root_vel[:, 2] = -1.0 # Set an initial downward velocity - cube_object.write_root_pose_to_sim(root_state[:, :7]) - cube_object.write_root_velocity_to_sim(root_state[:, 7:]) + cube_object.write_root_pose_to_sim_index(root_pose=root_pose) + cube_object.write_root_velocity_to_sim_index(root_velocity=root_vel) static_friction = torch.zeros(num_cubes, 1) dynamic_friction = torch.zeros(num_cubes, 1) @@ -789,16 +843,18 @@ def test_rigid_body_with_restitution(num_cubes, device): cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) # Add restitution to cube - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties( + wp.from_torch(cube_object_materials, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) + ) - curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2].clone() + curr_z_velocity = cube_object.data.root_lin_vel_w.torch[:, 2].clone() for _ in range(100): sim.step() # update object cube_object.update(sim.cfg.dt) - curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2].clone() + curr_z_velocity = cube_object.data.root_lin_vel_w.torch[:, 2].clone() if expected_collision_type == "inelastic": # assert that the block has not bounced by checking that the z velocity is less than or equal to 0 @@ -833,19 +889,21 @@ def test_rigid_body_set_mass(num_cubes, device): sim.reset() # Get masses before increasing - original_masses = cube_object.root_physx_view.get_masses() + original_masses = wp.to_torch(cube_object.root_view.get_masses()) assert original_masses.shape == (num_cubes, 1) # Randomize mass of the object masses = original_masses + torch.FloatTensor(num_cubes, 1).uniform_(4, 8) - indices = torch.tensor(range(num_cubes), dtype=torch.int) + indices = torch.tensor(range(num_cubes), dtype=torch.int32) # Add friction to cube - cube_object.root_physx_view.set_masses(masses, indices) + cube_object.root_view.set_masses( + wp.from_torch(masses, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) + ) - torch.testing.assert_close(cube_object.root_physx_view.get_masses(), masses) + torch.testing.assert_close(wp.to_torch(cube_object.root_view.get_masses()), masses) # Simulate physics # perform rendering @@ -853,7 +911,7 @@ def test_rigid_body_set_mass(num_cubes, device): # update object cube_object.update(sim.cfg.dt) - masses_to_check = cube_object.root_physx_view.get_masses() + masses_to_check = wp.to_torch(cube_object.root_view.get_masses()) # Check if mass is set correctly torch.testing.assert_close(masses, masses_to_check) @@ -880,9 +938,9 @@ def test_gravity_vec_w(num_cubes, device, gravity_enabled): sim.reset() # Check that gravity is set correctly - assert cube_object.data.GRAVITY_VEC_W[0, 0] == gravity_dir[0] - assert cube_object.data.GRAVITY_VEC_W[0, 1] == gravity_dir[1] - assert cube_object.data.GRAVITY_VEC_W[0, 2] == gravity_dir[2] + assert cube_object.data.GRAVITY_VEC_W.torch[0, 0] == gravity_dir[0] + assert cube_object.data.GRAVITY_VEC_W.torch[0, 1] == gravity_dir[1] + assert cube_object.data.GRAVITY_VEC_W.torch[0, 2] == gravity_dir[2] # Simulate physics for _ in range(2): @@ -896,7 +954,7 @@ def test_gravity_vec_w(num_cubes, device, gravity_enabled): if gravity_enabled: gravity[:, :, 2] = -9.81 # Check the body accelerations are correct - torch.testing.assert_close(cube_object.data.body_acc_w, gravity) + torch.testing.assert_close(cube_object.data.body_acc_w.torch, gravity) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -910,7 +968,7 @@ def test_body_root_state_properties(num_cubes, device, with_offset): sim._app_control_on_stop_handle = None # Create a scene with random cubes cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) - env_idx = torch.tensor([x for x in range(num_cubes)]) + env_idx = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) # Play sim sim.reset() @@ -924,12 +982,12 @@ def test_body_root_state_properties(num_cubes, device, with_offset): else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = wp.to_torch(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(wp.from_torch(com, dtype=wp.float32), wp.from_torch(env_idx, dtype=wp.int32)) # check ceter of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(wp.to_torch(cube_object.root_view.get_coms()), com) # random z spin velocity spin_twist = torch.zeros(6, device=device) @@ -938,70 +996,76 @@ def test_body_root_state_properties(num_cubes, device, with_offset): # Simulate physics for _ in range(100): # spin the object around Z axis (com) - cube_object.write_root_velocity_to_sim(spin_twist.repeat(num_cubes, 1)) + cube_object.write_root_velocity_to_sim_index(root_velocity=spin_twist.repeat(num_cubes, 1)) # perform rendering sim.step() # update object cube_object.update(sim.cfg.dt) # get state properties - root_state_w = cube_object.data.root_state_w - root_link_state_w = cube_object.data.root_link_state_w - root_com_state_w = cube_object.data.root_com_state_w - body_state_w = cube_object.data.body_state_w - body_link_state_w = cube_object.data.body_link_state_w - body_com_state_w = cube_object.data.body_com_state_w + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_link_vel_w = cube_object.data.body_link_vel_w.torch + body_com_pose_w = cube_object.data.body_com_pose_w.torch + body_com_vel_w = cube_object.data.body_com_vel_w.torch # if offset is [0,0,0] all root_state_%_w will match and all body_%_w will match if not with_offset: - torch.testing.assert_close(root_state_w, root_com_state_w) - torch.testing.assert_close(root_state_w, root_link_state_w) - torch.testing.assert_close(body_state_w, body_com_state_w) - torch.testing.assert_close(body_state_w, body_link_state_w) + torch.testing.assert_close(root_link_pose_w, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(root_link_pose_w, root_link_pose_w) + torch.testing.assert_close(root_com_vel_w, root_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_com_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) + torch.testing.assert_close(body_link_pose_w, body_link_pose_w) + torch.testing.assert_close(body_com_vel_w, body_link_vel_w) else: # cubes are spinning around center of mass # position will not match # center of mass position will be constant (i.e. spinning around com) - torch.testing.assert_close(env_pos + offset, root_com_state_w[..., :3]) - torch.testing.assert_close(env_pos + offset, body_com_state_w[..., :3].squeeze(-2)) + torch.testing.assert_close(env_pos + offset, root_com_pose_w[..., :3]) + torch.testing.assert_close(env_pos + offset, body_com_pose_w[..., :3].squeeze(-2)) # link position will be moving but should stay constant away from center of mass root_link_state_pos_rel_com = quat_apply_inverse( - root_link_state_w[..., 3:7], - root_link_state_w[..., :3] - root_com_state_w[..., :3], + root_link_pose_w[..., 3:], + root_link_pose_w[..., :3] - root_com_pose_w[..., :3], ) torch.testing.assert_close(-offset, root_link_state_pos_rel_com) body_link_state_pos_rel_com = quat_apply_inverse( - body_link_state_w[..., 3:7], - body_link_state_w[..., :3] - body_com_state_w[..., :3], + body_link_pose_w[..., 3:], + body_link_pose_w[..., :3] - body_com_pose_w[..., :3], ) torch.testing.assert_close(-offset, body_link_state_pos_rel_com.squeeze(-2)) # orientation of com will be a constant rotation from link orientation - com_quat_b = cube_object.data.body_com_quat_b - com_quat_w = quat_mul(body_link_state_w[..., 3:7], com_quat_b) - torch.testing.assert_close(com_quat_w, body_com_state_w[..., 3:7]) - torch.testing.assert_close(com_quat_w.squeeze(-2), root_com_state_w[..., 3:7]) + com_quat_b = cube_object.data.body_com_quat_b.torch + com_quat_w = quat_mul(body_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, body_com_pose_w[..., 3:]) + torch.testing.assert_close(com_quat_w.squeeze(-2), root_com_pose_w[..., 3:]) # orientation of link will match root state will always match - torch.testing.assert_close(root_state_w[..., 3:7], root_link_state_w[..., 3:7]) - torch.testing.assert_close(body_state_w[..., 3:7], body_link_state_w[..., 3:7]) + torch.testing.assert_close(root_link_pose_w[..., 3:], root_link_pose_w[..., 3:]) + torch.testing.assert_close(body_link_pose_w[..., 3:], body_link_pose_w[..., 3:]) # lin_vel will not match # center of mass vel will be constant (i.e. spinning around com) - torch.testing.assert_close(torch.zeros_like(root_com_state_w[..., 7:10]), root_com_state_w[..., 7:10]) - torch.testing.assert_close(torch.zeros_like(body_com_state_w[..., 7:10]), body_com_state_w[..., 7:10]) + torch.testing.assert_close(torch.zeros_like(root_com_vel_w[..., :3]), root_com_vel_w[..., :3]) + torch.testing.assert_close(torch.zeros_like(body_com_vel_w[..., :3]), body_com_vel_w[..., :3]) # link frame will be moving, and should be equal to input angular velocity cross offset - lin_vel_rel_root_gt = quat_apply_inverse(root_link_state_w[..., 3:7], root_link_state_w[..., 7:10]) - lin_vel_rel_body_gt = quat_apply_inverse(body_link_state_w[..., 3:7], body_link_state_w[..., 7:10]) + lin_vel_rel_root_gt = quat_apply_inverse(root_link_pose_w[..., 3:], root_link_vel_w[..., :3]) + lin_vel_rel_body_gt = quat_apply_inverse(body_link_pose_w[..., 3:], body_link_vel_w[..., :3]) lin_vel_rel_gt = torch.linalg.cross(spin_twist.repeat(num_cubes, 1)[..., 3:], -offset) torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_root_gt, atol=1e-4, rtol=1e-4) torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_body_gt.squeeze(-2), atol=1e-4, rtol=1e-4) # ang_vel will always match - torch.testing.assert_close(root_state_w[..., 10:], root_com_state_w[..., 10:]) - torch.testing.assert_close(root_state_w[..., 10:], root_link_state_w[..., 10:]) - torch.testing.assert_close(body_state_w[..., 10:], body_com_state_w[..., 10:]) - torch.testing.assert_close(body_state_w[..., 10:], body_link_state_w[..., 10:]) + torch.testing.assert_close(root_com_vel_w[..., 3:], root_com_vel_w[..., 3:]) + torch.testing.assert_close(root_com_vel_w[..., 3:], root_link_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_com_vel_w[..., 3:]) + torch.testing.assert_close(body_com_vel_w[..., 3:], body_link_vel_w[..., 3:]) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -1015,7 +1079,7 @@ def test_write_root_state(num_cubes, device, with_offset, state_location): sim._app_control_on_stop_handle = None # Create a scene with random cubes cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) - env_idx = torch.tensor([x for x in range(num_cubes)]) + env_idx = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) # Play sim sim.reset() @@ -1029,15 +1093,15 @@ def test_write_root_state(num_cubes, device, with_offset, state_location): else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = wp.to_torch(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(wp.from_torch(com, dtype=wp.float32), wp.from_torch(env_idx, dtype=wp.int32)) # check center of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(wp.to_torch(cube_object.root_view.get_coms()), com) - rand_state = torch.zeros_like(cube_object.data.root_state_w) - rand_state[..., :7] = cube_object.data.default_root_state[..., :7] + rand_state = torch.zeros(num_cubes, 13, device=device) + rand_state[..., :7] = cube_object.data.default_root_pose.torch rand_state[..., :3] += env_pos # make quaternion a unit vector rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) @@ -1051,19 +1115,27 @@ def test_write_root_state(num_cubes, device, with_offset, state_location): if state_location == "com": if i % 2 == 0: - cube_object.write_root_com_state_to_sim(rand_state) + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) else: - cube_object.write_root_com_state_to_sim(rand_state, env_ids=env_idx) + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:], env_ids=env_idx) elif state_location == "link": if i % 2 == 0: - cube_object.write_root_link_state_to_sim(rand_state) + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) else: - cube_object.write_root_link_state_to_sim(rand_state, env_ids=env_idx) + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7], env_ids=env_idx) + cube_object.write_root_link_velocity_to_sim_index( + root_velocity=rand_state[..., 7:], env_ids=env_idx + ) if state_location == "com": - torch.testing.assert_close(rand_state, cube_object.data.root_com_state_w) + torch.testing.assert_close(rand_state[..., :7], cube_object.data.root_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.root_com_vel_w.torch) elif state_location == "link": - torch.testing.assert_close(rand_state, cube_object.data.root_link_state_w) + torch.testing.assert_close(rand_state[..., :7], cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.root_link_vel_w.torch) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -1077,7 +1149,7 @@ def test_write_state_functions_data_consistency(num_cubes, device, with_offset, sim._app_control_on_stop_handle = None # Create a scene with random cubes cube_object, env_pos = generate_cubes_scene(num_cubes=num_cubes, height=0.0, device=device) - env_idx = torch.tensor([x for x in range(num_cubes)]) + env_idx = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) # Play sim sim.reset() @@ -1091,15 +1163,14 @@ def test_write_state_functions_data_consistency(num_cubes, device, with_offset, else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = wp.to_torch(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(wp.from_torch(com, dtype=wp.float32), wp.from_torch(env_idx, dtype=wp.int32)) # check ceter of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(wp.to_torch(cube_object.root_view.get_coms()), com) - rand_state = torch.rand_like(cube_object.data.root_state_w) - # rand_state[..., :7] = cube_object.data.default_root_state[..., :7] + rand_state = torch.rand(num_cubes, 13, device=device) rand_state[..., :3] += env_pos # make quaternion a unit vector rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) @@ -1112,62 +1183,118 @@ def test_write_state_functions_data_consistency(num_cubes, device, with_offset, cube_object.update(sim.cfg.dt) if state_location == "com": - cube_object.write_root_com_state_to_sim(rand_state) + cube_object.write_root_com_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_com_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) elif state_location == "link": - cube_object.write_root_link_state_to_sim(rand_state) + cube_object.write_root_link_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_link_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) elif state_location == "root": - cube_object.write_root_state_to_sim(rand_state) + cube_object.write_root_pose_to_sim_index(root_pose=rand_state[..., :7]) + cube_object.write_root_velocity_to_sim_index(root_velocity=rand_state[..., 7:]) if state_location == "com": + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch expected_root_link_pos, expected_root_link_quat = combine_frame_transforms( - cube_object.data.root_com_state_w[:, :3], - cube_object.data.root_com_state_w[:, 3:7], - quat_rotate( - quat_inv(cube_object.data.body_com_pose_b[:, 0, 3:7]), -cube_object.data.body_com_pose_b[:, 0, :3] - ), - quat_inv(cube_object.data.body_com_pose_b[:, 0, 3:7]), + root_com_pose_w[:, :3], + root_com_pose_w[:, 3:], + quat_rotate(quat_inv(body_com_pose_b[:, 0, 3:7]), -body_com_pose_b[:, 0, :3]), + quat_inv(body_com_pose_b[:, 0, 3:7]), ) expected_root_link_pose = torch.cat((expected_root_link_pos, expected_root_link_quat), dim=1) - # test both root_pose and root_link_state_w successfully updated when root_com_state_w updates - torch.testing.assert_close(expected_root_link_pose, cube_object.data.root_link_state_w[:, :7]) - # skip 7:10 because they differs from link frame, this should be fine because we are only checking + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + # test both root_pose and root_link successfully updated when root_com updates + torch.testing.assert_close(expected_root_link_pose, root_link_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking # if velocity update is triggered, which can be determined by comparing angular velocity - torch.testing.assert_close( - cube_object.data.root_com_state_w[:, 10:], cube_object.data.root_link_state_w[:, 10:] - ) - torch.testing.assert_close(expected_root_link_pose, cube_object.data.root_state_w[:, :7]) - torch.testing.assert_close(cube_object.data.root_com_state_w[:, 10:], cube_object.data.root_state_w[:, 10:]) + torch.testing.assert_close(root_com_vel_w[:, 3:], root_link_vel_w[:, 3:]) + torch.testing.assert_close(expected_root_link_pose, root_link_pose_w) + torch.testing.assert_close(root_com_vel_w[:, 3:], cube_object.data.root_com_vel_w.torch[:, 3:]) elif state_location == "link": + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch expected_com_pos, expected_com_quat = combine_frame_transforms( - cube_object.data.root_link_state_w[:, :3], - cube_object.data.root_link_state_w[:, 3:7], - cube_object.data.body_com_pose_b[:, 0, :3], - cube_object.data.body_com_pose_b[:, 0, 3:7], + root_link_pose_w[:, :3], + root_link_pose_w[:, 3:], + body_com_pose_b[:, 0, :3], + body_com_pose_b[:, 0, 3:7], ) expected_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1) - # test both root_pose and root_com_state_w successfully updated when root_link_state_w updates - torch.testing.assert_close(expected_com_pose, cube_object.data.root_com_state_w[:, :7]) - # skip 7:10 because they differs from link frame, this should be fine because we are only checking + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + # test both root_pose and root_com successfully updated when root_link updates + torch.testing.assert_close(expected_com_pose, root_com_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking # if velocity update is triggered, which can be determined by comparing angular velocity - torch.testing.assert_close( - cube_object.data.root_link_state_w[:, 10:], cube_object.data.root_com_state_w[:, 10:] - ) - torch.testing.assert_close(cube_object.data.root_link_state_w[:, :7], cube_object.data.root_state_w[:, :7]) - torch.testing.assert_close( - cube_object.data.root_link_state_w[:, 10:], cube_object.data.root_state_w[:, 10:] - ) + torch.testing.assert_close(root_link_vel_w[:, 3:], root_com_vel_w[:, 3:]) + torch.testing.assert_close(root_link_pose_w, cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(root_link_vel_w[:, 3:], cube_object.data.root_com_vel_w.torch[:, 3:]) elif state_location == "root": + root_link_pose_w = cube_object.data.root_link_pose_w.torch + root_com_vel_w = cube_object.data.root_com_vel_w.torch + body_com_pose_b = cube_object.data.body_com_pose_b.torch expected_com_pos, expected_com_quat = combine_frame_transforms( - cube_object.data.root_state_w[:, :3], - cube_object.data.root_state_w[:, 3:7], - cube_object.data.body_com_pose_b[:, 0, :3], - cube_object.data.body_com_pose_b[:, 0, 3:7], + root_link_pose_w[:, :3], + root_link_pose_w[:, 3:], + body_com_pose_b[:, 0, :3], + body_com_pose_b[:, 0, 3:7], ) expected_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1) - # test both root_com_state_w and root_link_state_w successfully updated when root_pose updates - torch.testing.assert_close(expected_com_pose, cube_object.data.root_com_state_w[:, :7]) - torch.testing.assert_close(cube_object.data.root_state_w[:, 7:], cube_object.data.root_com_state_w[:, 7:]) - torch.testing.assert_close(cube_object.data.root_state_w[:, :7], cube_object.data.root_link_state_w[:, :7]) - torch.testing.assert_close( - cube_object.data.root_state_w[:, 10:], cube_object.data.root_link_state_w[:, 10:] - ) + root_com_pose_w = cube_object.data.root_com_pose_w.torch + root_link_vel_w = cube_object.data.root_link_vel_w.torch + # test both root_com and root_link successfully updated when root_pose updates + torch.testing.assert_close(expected_com_pose, root_com_pose_w) + torch.testing.assert_close(root_com_vel_w, cube_object.data.root_com_vel_w.torch) + torch.testing.assert_close(root_link_pose_w, cube_object.data.root_link_pose_w.torch) + torch.testing.assert_close(root_com_vel_w[:, 3:], root_link_vel_w[:, 3:]) + + +@pytest.mark.isaacsim_ci +def test_warmup_attach_stage_not_called_for_cpu(): + """Regression test: attach_stage() must not be called for CPU in _warmup_and_create_views(). + + Bug (commit 0ba9c5cb3b): ``PhysxManager._warmup_and_create_views()`` called + ``_physx_sim.attach_stage()`` unconditionally before ``force_load_physics_from_usd()``. + These are two alternative initialization patterns; combining them causes + double-initialization that corrupts the CPU MBP broadphase, producing + non-deterministic collision failures (objects passing through surfaces). + + Fix: guard ``attach_stage()`` with ``if is_gpu:`` — it is only required by the + GPU pipeline, which needs explicit stage attachment before the physics load step. + The CPU pipeline attaches implicitly via ``force_load_physics_from_usd()``. + + This test verifies the guard is in place by monkeypatching ``attach_stage`` on + the PhysX simulation interface and asserting it is *not* called during CPU warmup. + The simulation test itself (1 cube falling onto a ground plane) is intentionally + omitted here because the MBP corruption is non-deterministic and depends on scene + complexity (multiple dynamic actors on a mesh collider), making it unreliable as a + unit test assertion. + """ + from unittest.mock import MagicMock + + from isaaclab_physx.physics import PhysxManager + + with build_simulation_context(device="cpu", add_ground_plane=True, dt=0.01, auto_add_lighting=True) as sim: + sim._app_control_on_stop_handle = None + generate_cubes_scene(num_cubes=1, height=1.0, device="cpu") + + # IPhysxSimulation is a C++ binding whose attributes are read-only, so we cannot + # assign to _physx_sim.attach_stage directly. Instead, replace the class-level + # reference with a MagicMock that wraps the real object so all other calls still + # work, then restore it in the finally block. + original_physx_sim = PhysxManager._physx_sim + spy = MagicMock(wraps=original_physx_sim) + PhysxManager._physx_sim = spy + try: + sim.reset() + finally: + PhysxManager._physx_sim = original_physx_sim + + assert spy.attach_stage.call_count == 0, ( + f"attach_stage() was called {spy.attach_stage.call_count} time(s) during CPU warmup. " + f"This indicates the CPU MBP broadphase double-initialization regression is present: " + f"attach_stage() + force_load_physics_from_usd() must not be combined for CPU." + ) diff --git a/source/isaaclab/test/assets/test_rigid_object_collection.py b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py similarity index 60% rename from source/isaaclab/test/assets/test_rigid_object_collection.py rename to source/isaaclab_physx/test/assets/test_rigid_object_collection.py index 8d919bf4d7ad..8401cc395a0c 100644 --- a/source/isaaclab/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py @@ -16,13 +16,15 @@ """Rest everything follows.""" -import ctypes +import sys import pytest import torch +import warp as wp +from isaaclab_physx.assets import RigidObjectCollection import isaaclab.sim as sim_utils -from isaaclab.assets import RigidObjectCfg, RigidObjectCollection, RigidObjectCollectionCfg +from isaaclab.assets import RigidObjectCfg, RigidObjectCollectionCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from isaaclab.utils.math import ( @@ -113,21 +115,21 @@ def test_initialization(sim, num_envs, num_cubes, device): """Test initialization for prim with rigid body API at the provided prim path.""" object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(object_collection)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 # Play sim sim.reset() # Check if object is initialized assert object_collection.is_initialized - assert len(object_collection.object_names) == num_cubes + assert len(object_collection.body_names) == num_cubes # Check buffers that exist and have correct shapes - assert object_collection.data.object_link_pos_w.shape == (num_envs, num_cubes, 3) - assert object_collection.data.object_link_quat_w.shape == (num_envs, num_cubes, 4) - assert object_collection.data.default_mass.shape == (num_envs, num_cubes, 1) - assert object_collection.data.default_inertia.shape == (num_envs, num_cubes, 9) + assert object_collection.data.body_link_pos_w.torch.shape == (num_envs, num_cubes, 3) + assert object_collection.data.body_link_quat_w.torch.shape == (num_envs, num_cubes, 4) + assert object_collection.data.body_mass.torch.shape == (num_envs, num_cubes) + assert object_collection.data.body_inertia.torch.shape == (num_envs, num_cubes, 9) # Simulate physics for _ in range(2): @@ -144,28 +146,31 @@ def test_id_conversion(sim, device): sim.reset() expected = [ - torch.tensor([4, 5], device=device, dtype=torch.long), - torch.tensor([4], device=device, dtype=torch.long), - torch.tensor([0, 2, 4], device=device, dtype=torch.long), - torch.tensor([1, 3, 5], device=device, dtype=torch.long), + torch.tensor([4, 5], device=device, dtype=torch.int32), + torch.tensor([4], device=device, dtype=torch.int32), + torch.tensor([0, 2, 4], device=device, dtype=torch.int32), + torch.tensor([1, 3, 5], device=device, dtype=torch.int32), ] - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES, object_collection._ALL_OBJ_INDICES[None, 2] + torch_all_env_indices = wp.to_torch(object_collection._ALL_ENV_INDICES) + torch_all_body_indices = wp.to_torch(object_collection._ALL_BODY_INDICES) + + view_ids = object_collection._env_body_ids_to_view_ids( + torch_all_env_indices, torch_all_body_indices[None, 2], device=device ) - assert (view_ids == expected[0]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_OBJ_INDICES[None, 2] + assert (wp.to_torch(view_ids) == expected[0]).all() + view_ids = object_collection._env_body_ids_to_view_ids( + torch_all_env_indices[None, 0], torch_all_body_indices[None, 2], device=device ) - assert (view_ids == expected[1]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_OBJ_INDICES + assert (wp.to_torch(view_ids) == expected[1]).all() + view_ids = object_collection._env_body_ids_to_view_ids( + torch_all_env_indices[None, 0], torch_all_body_indices, device=device ) - assert (view_ids == expected[2]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 1], object_collection._ALL_OBJ_INDICES + assert (wp.to_torch(view_ids) == expected[2]).all() + view_ids = object_collection._env_body_ids_to_view_ids( + torch_all_env_indices[None, 1], torch_all_body_indices, device=device ) - assert (view_ids == expected[3]).all() + assert (wp.to_torch(view_ids) == expected[3]).all() @pytest.mark.parametrize("num_envs", [1, 2]) @@ -177,28 +182,30 @@ def test_initialization_with_kinematic_enabled(sim, num_envs, num_cubes, device) num_envs=num_envs, num_cubes=num_cubes, kinematic_enabled=True, device=device ) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(object_collection)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 # Play sim sim.reset() # Check if object is initialized assert object_collection.is_initialized - assert len(object_collection.object_names) == num_cubes + assert len(object_collection.body_names) == num_cubes # Check buffers that exist and have correct shapes - assert object_collection.data.object_link_pos_w.shape == (num_envs, num_cubes, 3) - assert object_collection.data.object_link_quat_w.shape == (num_envs, num_cubes, 4) + assert object_collection.data.body_link_pos_w.torch.shape == (num_envs, num_cubes, 3) + assert object_collection.data.body_link_quat_w.torch.shape == (num_envs, num_cubes, 4) # Simulate physics for _ in range(2): sim.step() object_collection.update(sim.cfg.dt) # check that the object is kinematic - default_object_state = object_collection.data.default_object_state.clone() - default_object_state[..., :3] += origins.unsqueeze(1) - torch.testing.assert_close(object_collection.data.object_link_state_w, default_object_state) + default_body_pose = object_collection.data.default_body_pose.torch.clone() + default_body_vel = object_collection.data.default_body_vel.torch.clone() + default_body_pose[..., :3] += origins.unsqueeze(1) + torch.testing.assert_close(object_collection.data.body_link_pose_w.torch, default_body_pose) + torch.testing.assert_close(object_collection.data.body_link_vel_w.torch, default_body_vel) @pytest.mark.parametrize("num_cubes", [1, 2]) @@ -207,8 +214,8 @@ def test_initialization_with_no_rigid_body(sim, num_cubes, device): """Test that initialization fails when no rigid body is found at the provided prim path.""" object_collection, _ = generate_cubes_scene(num_cubes=num_cubes, has_api=False, device=device) - # Check that boundedness of rigid object is correct - assert ctypes.c_long.from_address(id(object_collection)).value == 1 + # Check that the framework doesn't hold excessive strong references. + assert sys.getrefcount(object_collection) < 10 # Play sim with pytest.raises(RuntimeError): @@ -224,7 +231,7 @@ def test_external_force_buffer(sim, device): sim.reset() # find objects to apply the force - object_ids, object_names = object_collection.find_objects(".*") + object_ids, object_names = object_collection.find_bodies(".*") # reset object object_collection.reset() @@ -243,7 +250,7 @@ def test_external_force_buffer(sim, device): external_wrench_b[:, :, 0] = force external_wrench_b[:, :, 3] = force - object_collection.permanent_wrench_composer.set_forces_and_torques( + object_collection.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], body_ids=object_ids, @@ -252,10 +259,10 @@ def test_external_force_buffer(sim, device): # check if the object collection's force and torque buffers are correctly updated for i in range(num_envs): - assert object_collection._permanent_wrench_composer.composed_force_as_torch[i, 0, 0].item() == force - assert object_collection._permanent_wrench_composer.composed_torque_as_torch[i, 0, 0].item() == force + assert object_collection._permanent_wrench_composer.composed_force.torch[i, 0, 0].item() == force + assert object_collection._permanent_wrench_composer.composed_torque.torch[i, 0, 0].item() == force - object_collection.instantaneous_wrench_composer.add_forces_and_torques( + object_collection.instantaneous_wrench_composer.add_forces_and_torques_index( body_ids=object_ids, forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], @@ -276,31 +283,33 @@ def test_external_force_on_single_body(sim, num_envs, num_cubes, device): sim.reset() # find objects to apply the force - object_ids, object_names = object_collection.find_objects(".*") + object_ids, object_names = object_collection.find_bodies(".*") # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) # Every 2nd cube should have a force applied to it - external_wrench_b[:, 0::2, 2] = 9.81 * object_collection.data.default_mass[:, 0::2, 0] + external_wrench_b[:, 0::2, 2] = 9.81 * object_collection.data.body_mass.torch[:, 0::2] for i in range(5): # reset object state - object_state = object_collection.data.default_object_state.clone() + body_pose = object_collection.data.default_body_pose.torch.clone() + body_vel = object_collection.data.default_body_vel.torch.clone() # need to shift the position of the cubes otherwise they will be on top of each other - object_state[..., :2] += origins.unsqueeze(1)[..., :2] - object_collection.write_object_state_to_sim(object_state) + body_pose[..., :2] += origins.unsqueeze(1)[..., :2] + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) # reset object object_collection.reset() is_global = False if i % 2 == 0: - positions = object_collection.data.object_link_pos_w[:, object_ids, :3] + positions = object_collection.data.body_link_pos_w.torch[:, object_ids, :3] is_global = True else: positions = None # apply force - object_collection.permanent_wrench_composer.set_forces_and_torques( + object_collection.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=positions, @@ -318,11 +327,11 @@ def test_external_force_on_single_body(sim, num_envs, num_cubes, device): # First object should still be at the same Z position (1.0) torch.testing.assert_close( - object_collection.data.object_link_pos_w[:, 0::2, 2], - torch.ones_like(object_collection.data.object_pos_w[:, 0::2, 2]), + object_collection.data.body_link_pos_w.torch[:, 0::2, 2], + torch.ones_like(object_collection.data.body_link_pos_w.torch[:, 0::2, 2]), ) # Second object should have fallen, so it's Z height should be less than initial height of 1.0 - assert torch.all(object_collection.data.object_link_pos_w[:, 1::2, 2] < 1.0) + assert torch.all(object_collection.data.body_link_pos_w.torch[:, 1::2, 2] < 1.0) @pytest.mark.parametrize("num_envs", [1, 2]) @@ -339,7 +348,7 @@ def test_external_force_on_single_body_at_position(sim, num_envs, num_cubes, dev sim.reset() # find objects to apply the force - object_ids, object_names = object_collection.find_objects(".*") + object_ids, object_names = object_collection.find_bodies(".*") # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) @@ -351,16 +360,18 @@ def test_external_force_on_single_body_at_position(sim, num_envs, num_cubes, dev # Desired force and torque for i in range(5): # reset object state - object_state = object_collection.data.default_object_state.clone() + body_pose = object_collection.data.default_body_pose.torch.clone() + body_vel = object_collection.data.default_body_vel.torch.clone() # need to shift the position of the cubes otherwise they will be on top of each other - object_state[..., :2] += origins.unsqueeze(1)[..., :2] - object_collection.write_object_state_to_sim(object_state) + body_pose[..., :2] += origins.unsqueeze(1)[..., :2] + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) # reset object object_collection.reset() is_global = False if i % 2 == 0: - body_com_pos_w = object_collection.data.object_link_pos_w[:, object_ids, :3] + body_com_pos_w = object_collection.data.body_link_pos_w.torch[:, object_ids, :3] external_wrench_positions_b[..., 0] = 0.0 external_wrench_positions_b[..., 1] = 1.0 external_wrench_positions_b[..., 2] = 0.0 @@ -372,7 +383,7 @@ def test_external_force_on_single_body_at_position(sim, num_envs, num_cubes, dev external_wrench_positions_b[..., 2] = 0.0 # apply force - object_collection.permanent_wrench_composer.set_forces_and_torques( + object_collection.permanent_wrench_composer.set_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, @@ -380,7 +391,7 @@ def test_external_force_on_single_body_at_position(sim, num_envs, num_cubes, dev env_ids=None, is_global=is_global, ) - object_collection.permanent_wrench_composer.add_forces_and_torques( + object_collection.permanent_wrench_composer.add_forces_and_torques_index( forces=external_wrench_b[..., :3], torques=external_wrench_b[..., 3:], positions=external_wrench_positions_b, @@ -397,9 +408,9 @@ def test_external_force_on_single_body_at_position(sim, num_envs, num_cubes, dev object_collection.update(sim.cfg.dt) # First object should be rotating around it's X axis - assert torch.all(object_collection.data.object_ang_vel_b[:, 0::2, 0] > 0.1) + assert torch.all(object_collection.data.body_com_ang_vel_b.torch[:, 0::2, 0] > 0.1) # Second object should have fallen, so it's Z height should be less than initial height of 1.0 - assert torch.all(object_collection.data.object_link_pos_w[:, 1::2, 2] < 1.0) + assert torch.all(object_collection.data.body_link_pos_w.torch[:, 1::2, 2] < 1.0) @pytest.mark.parametrize("num_envs", [1, 3]) @@ -416,17 +427,17 @@ def test_set_object_state(sim, num_envs, num_cubes, device, gravity_enabled): object_collection, origins = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) sim.reset() - state_types = ["object_pos_w", "object_quat_w", "object_lin_vel_w", "object_ang_vel_w"] + state_types = ["body_link_pos_w", "body_link_quat_w", "body_com_lin_vel_w", "body_com_ang_vel_w"] # Set each state type individually as they are dependent on each other for state_type_to_randomize in state_types: state_dict = { - "object_pos_w": torch.zeros_like(object_collection.data.object_pos_w, device=sim.device), - "object_quat_w": default_orientation(num=num_cubes * num_envs, device=sim.device).view( + "body_link_pos_w": torch.zeros_like(object_collection.data.body_link_pos_w.torch, device=sim.device), + "body_link_quat_w": default_orientation(num=num_cubes * num_envs, device=sim.device).view( num_envs, num_cubes, 4 ), - "object_lin_vel_w": torch.zeros_like(object_collection.data.object_lin_vel_w, device=sim.device), - "object_ang_vel_w": torch.zeros_like(object_collection.data.object_ang_vel_w, device=sim.device), + "body_com_lin_vel_w": torch.zeros_like(object_collection.data.body_com_lin_vel_w.torch, device=sim.device), + "body_com_ang_vel_w": torch.zeros_like(object_collection.data.body_com_ang_vel_w.torch, device=sim.device), } for _ in range(5): @@ -434,34 +445,34 @@ def test_set_object_state(sim, num_envs, num_cubes, device, gravity_enabled): object_collection.reset() # Set random state - if state_type_to_randomize == "object_quat_w": + if state_type_to_randomize == "body_link_quat_w": state_dict[state_type_to_randomize] = random_orientation( num=num_cubes * num_envs, device=sim.device ).view(num_envs, num_cubes, 4) else: state_dict[state_type_to_randomize] = torch.randn(num_envs, num_cubes, 3, device=sim.device) # make sure objects do not overlap - if state_type_to_randomize == "object_pos_w": + if state_type_to_randomize == "body_link_pos_w": state_dict[state_type_to_randomize][..., :2] += origins.unsqueeze(1)[..., :2] # perform simulation for _ in range(5): - object_state = torch.cat( - [ - state_dict["object_pos_w"], - state_dict["object_quat_w"], - state_dict["object_lin_vel_w"], - state_dict["object_ang_vel_w"], - ], + body_pose = torch.cat( + [state_dict["body_link_pos_w"], state_dict["body_link_quat_w"]], + dim=-1, + ) + body_vel = torch.cat( + [state_dict["body_com_lin_vel_w"], state_dict["body_com_ang_vel_w"]], dim=-1, ) # reset object state - object_collection.write_object_state_to_sim(object_state=object_state) + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) sim.step() # assert that set object quantities are equal to the ones set in the state_dict for key, expected_value in state_dict.items(): - value = getattr(object_collection.data, key) + value = getattr(object_collection.data, key).torch torch.testing.assert_close(value, expected_value, rtol=1e-5, atol=1e-5) object_collection.update(sim.cfg.dt) @@ -475,7 +486,7 @@ def test_set_object_state(sim, num_envs, num_cubes, device, gravity_enabled): def test_object_state_properties(sim, num_envs, num_cubes, device, with_offset, gravity_enabled): """Test the object_com_state_w and object_link_state_w properties.""" cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) - view_ids = torch.tensor([x for x in range(num_cubes * num_envs)]) + view_ids = torch.tensor([x for x in range(num_cubes * num_envs)], dtype=torch.int32) sim.reset() @@ -489,72 +500,78 @@ def test_object_state_properties(sim, num_envs, num_cubes, device, with_offset, else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) + cube_object.root_view.set_coms( + cube_object.reshape_data_to_view_2d(wp.from_torch(com.clone(), dtype=wp.transformf)).view(wp.float32), + wp.from_torch(view_ids, dtype=wp.int32), + ) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close( + wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))), com + ) # random z spin velocity spin_twist = torch.zeros(6, device=device) spin_twist[5] = torch.randn(1, device=device) # initial spawn point - init_com = cube_object.data.object_com_state_w[..., :3] + init_com = cube_object.data.body_com_pose_w.torch[..., :3] for i in range(10): # spin the object around Z axis (com) - cube_object.write_object_velocity_to_sim(spin_twist.repeat(num_envs, num_cubes, 1)) + cube_object.write_body_com_velocity_to_sim_index(body_velocities=spin_twist.repeat(num_envs, num_cubes, 1)) sim.step() cube_object.update(sim.cfg.dt) # get state properties - object_state_w = cube_object.data.object_state_w - object_link_state_w = cube_object.data.object_link_state_w - object_com_state_w = cube_object.data.object_com_state_w + object_link_pose_w = cube_object.data.body_link_pose_w.torch + object_link_vel_w = cube_object.data.body_link_vel_w.torch + object_com_pose_w = cube_object.data.body_com_pose_w.torch + object_com_vel_w = cube_object.data.body_com_vel_w.torch # if offset is [0,0,0] all object_state_%_w will match and all body_%_w will match if not with_offset: - torch.testing.assert_close(object_state_w, object_com_state_w) - torch.testing.assert_close(object_state_w, object_link_state_w) + torch.testing.assert_close(object_link_pose_w, object_com_pose_w) + torch.testing.assert_close(object_com_vel_w, object_link_vel_w) else: # cubes are spinning around center of mass # position will not match # center of mass position will be constant (i.e. spinning around com) - torch.testing.assert_close(init_com, object_com_state_w[..., :3]) + torch.testing.assert_close(init_com, object_com_pose_w[..., :3]) # link position will be moving but should stay constant away from center of mass object_link_state_pos_rel_com = quat_apply_inverse( - object_link_state_w[..., 3:7], - object_link_state_w[..., :3] - object_com_state_w[..., :3], + object_link_pose_w[..., 3:], + object_link_pose_w[..., :3] - object_com_pose_w[..., :3], ) torch.testing.assert_close(-offset, object_link_state_pos_rel_com) # orientation of com will be a constant rotation from link orientation - com_quat_b = cube_object.data.object_com_quat_b - com_quat_w = quat_mul(object_link_state_w[..., 3:7], com_quat_b) - torch.testing.assert_close(com_quat_w, object_com_state_w[..., 3:7]) + com_quat_b = cube_object.data.body_com_quat_b.torch + com_quat_w = quat_mul(object_link_pose_w[..., 3:], com_quat_b) + torch.testing.assert_close(com_quat_w, object_com_pose_w[..., 3:]) # orientation of link will match object state will always match - torch.testing.assert_close(object_state_w[..., 3:7], object_link_state_w[..., 3:7]) + torch.testing.assert_close(object_link_pose_w[..., 3:], object_link_pose_w[..., 3:]) # lin_vel will not match # center of mass vel will be constant (i.e. spinning around com) torch.testing.assert_close( - torch.zeros_like(object_com_state_w[..., 7:10]), - object_com_state_w[..., 7:10], + torch.zeros_like(object_com_vel_w[..., :3]), + object_com_vel_w[..., :3], ) # link frame will be moving, and should be equal to input angular velocity cross offset - lin_vel_rel_object_gt = quat_apply_inverse(object_link_state_w[..., 3:7], object_link_state_w[..., 7:10]) + lin_vel_rel_object_gt = quat_apply_inverse(object_link_pose_w[..., 3:], object_link_vel_w[..., :3]) lin_vel_rel_gt = torch.linalg.cross(spin_twist.repeat(num_envs, num_cubes, 1)[..., 3:], -offset) torch.testing.assert_close(lin_vel_rel_gt, lin_vel_rel_object_gt, atol=1e-4, rtol=1e-3) # ang_vel will always match - torch.testing.assert_close(object_state_w[..., 10:], object_com_state_w[..., 10:]) - torch.testing.assert_close(object_state_w[..., 10:], object_link_state_w[..., 10:]) + torch.testing.assert_close(object_com_vel_w[..., 3:], object_com_vel_w[..., 3:]) + torch.testing.assert_close(object_com_vel_w[..., 3:], object_link_vel_w[..., 3:]) @pytest.mark.parametrize("num_envs", [1, 3]) @@ -567,9 +584,9 @@ def test_write_object_state(sim, num_envs, num_cubes, device, with_offset, state """Test the setters for object_state using both the link frame and center of mass as reference frame.""" # Create a scene with random cubes cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) - view_ids = torch.tensor([x for x in range(num_cubes * num_cubes)]) - env_ids = torch.tensor([x for x in range(num_envs)]) - object_ids = torch.tensor([x for x in range(num_cubes)]) + view_ids = torch.tensor([x for x in range(num_cubes * num_envs)], dtype=torch.int32) + env_ids = torch.tensor([x for x in range(num_envs)], dtype=torch.int32) + object_ids = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) sim.reset() @@ -583,16 +600,20 @@ def test_write_object_state(sim, num_envs, num_cubes, device, with_offset, state else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) - + cube_object.root_view.set_coms( + cube_object.reshape_data_to_view_2d(wp.from_torch(com.clone(), dtype=wp.transformf)).view(wp.float32), + wp.from_torch(view_ids, dtype=wp.int32), + ) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close( + wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))), com + ) - rand_state = torch.zeros_like(cube_object.data.object_link_state_w) - rand_state[..., :7] = cube_object.data.default_object_state[..., :7] - rand_state[..., :3] += cube_object.data.object_link_pos_w + rand_state = torch.zeros(num_envs, num_cubes, 13, device=device) + rand_state[..., :7] = cube_object.data.default_body_pose.torch + rand_state[..., :3] += cube_object.data.body_link_pos_w.torch # make quaternion a unit vector rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) @@ -604,19 +625,33 @@ def test_write_object_state(sim, num_envs, num_cubes, device, with_offset, state if state_location == "com": if i % 2 == 0: - cube_object.write_object_com_state_to_sim(rand_state) + cube_object.write_body_com_pose_to_sim_index(body_poses=rand_state[..., :7]) + cube_object.write_body_com_velocity_to_sim_index(body_velocities=rand_state[..., 7:]) else: - cube_object.write_object_com_state_to_sim(rand_state, env_ids=env_ids, object_ids=object_ids) + cube_object.write_body_com_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) elif state_location == "link": if i % 2 == 0: - cube_object.write_object_link_state_to_sim(rand_state) + cube_object.write_body_link_pose_to_sim_index(body_poses=rand_state[..., :7]) + cube_object.write_body_link_velocity_to_sim_index(body_velocities=rand_state[..., 7:]) else: - cube_object.write_object_link_state_to_sim(rand_state, env_ids=env_ids, object_ids=object_ids) + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_link_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) if state_location == "com": - torch.testing.assert_close(rand_state, cube_object.data.object_com_state_w) + torch.testing.assert_close(rand_state[..., :7], cube_object.data.body_com_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.body_com_vel_w.torch) elif state_location == "link": - torch.testing.assert_close(rand_state, cube_object.data.object_link_state_w) + torch.testing.assert_close(rand_state[..., :7], cube_object.data.body_link_pose_w.torch) + torch.testing.assert_close(rand_state[..., 7:], cube_object.data.body_link_vel_w.torch) @pytest.mark.parametrize("num_envs", [1, 3]) @@ -632,11 +667,13 @@ def test_reset_object_collection(sim, num_envs, num_cubes, device): object_collection.update(sim.cfg.dt) # Move the object to a random position - object_state = object_collection.data.default_object_state.clone() - object_state[..., :3] = torch.randn(num_envs, num_cubes, 3, device=sim.device) + body_pose = object_collection.data.default_body_pose.torch.clone() + body_pose[..., :3] = torch.randn(num_envs, num_cubes, 3, device=sim.device) # Random orientation - object_state[..., 3:7] = random_orientation(num=num_cubes, device=sim.device) - object_collection.write_object_state_to_sim(object_state) + body_pose[..., 3:7] = random_orientation(num=num_cubes, device=sim.device) + object_collection.write_body_link_pose_to_sim_index(body_poses=body_pose) + body_vel = object_collection.data.default_body_vel.torch.clone() + object_collection.write_body_com_velocity_to_sim_index(body_velocities=body_vel) if i % 2 == 0: object_collection.reset() @@ -644,10 +681,10 @@ def test_reset_object_collection(sim, num_envs, num_cubes, device): # Reset should zero external forces and torques assert not object_collection._instantaneous_wrench_composer.active assert not object_collection._permanent_wrench_composer.active - assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_torque_as_torch) == 0 - assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_force_as_torch) == 0 - assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_torque_as_torch) == 0 + assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(object_collection._instantaneous_wrench_composer.composed_torque.torch) == 0 + assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_force.torch) == 0 + assert torch.count_nonzero(object_collection._permanent_wrench_composer.composed_torque.torch) == 0 @pytest.mark.parametrize("num_envs", [1, 3]) @@ -667,8 +704,9 @@ def test_set_material_properties(sim, num_envs, num_cubes, device): # Add friction to cube indices = torch.tensor(range(num_cubes * num_envs), dtype=torch.int) - object_collection.root_physx_view.set_material_properties( - object_collection.reshape_data_to_view(materials), indices + object_collection.root_view.set_material_properties( + object_collection.reshape_data_to_view_3d(wp.from_torch(materials, dtype=wp.float32), 3, device="cpu"), + wp.from_torch(indices, dtype=wp.int32), ) # Perform simulation @@ -676,10 +714,12 @@ def test_set_material_properties(sim, num_envs, num_cubes, device): object_collection.update(sim.cfg.dt) # Get material properties - materials_to_check = object_collection.root_physx_view.get_material_properties() + materials_to_check = object_collection.root_view.get_material_properties() # Check if material properties are set correctly - torch.testing.assert_close(object_collection.reshape_view_to_data(materials_to_check), materials) + torch.testing.assert_close( + wp.to_torch(object_collection.reshape_view_to_data_3d(materials_to_check, 3, device="cpu")), materials + ) @pytest.mark.parametrize("num_envs", [1, 3]) @@ -696,9 +736,10 @@ def test_gravity_vec_w(sim, num_envs, num_cubes, device, gravity_enabled): sim.reset() # Check if gravity vector is set correctly - assert object_collection.data.GRAVITY_VEC_W[0, 0, 0] == gravity_dir[0] - assert object_collection.data.GRAVITY_VEC_W[0, 0, 1] == gravity_dir[1] - assert object_collection.data.GRAVITY_VEC_W[0, 0, 2] == gravity_dir[2] + gravity_vec = object_collection.data.GRAVITY_VEC_W.torch + assert gravity_vec[0, 0, 0] == gravity_dir[0] + assert gravity_vec[0, 0, 1] == gravity_dir[1] + assert gravity_vec[0, 0, 2] == gravity_dir[2] # Perform simulation for _ in range(2): @@ -711,7 +752,7 @@ def test_gravity_vec_w(sim, num_envs, num_cubes, device, gravity_enabled): gravity[..., 2] = -9.81 # Check the body accelerations are correct - torch.testing.assert_close(object_collection.data.object_acc_w, gravity) + torch.testing.assert_close(object_collection.data.body_com_acc_w.torch, gravity) @pytest.mark.parametrize("num_envs", [1, 3]) @@ -726,9 +767,9 @@ def test_write_object_state_functions_data_consistency( """Test the setters for object_state using both the link frame and center of mass as reference frame.""" # Create a scene with random cubes cube_object, env_pos = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, height=0.0, device=device) - view_ids = torch.tensor([x for x in range(num_cubes * num_cubes)]) - env_ids = torch.tensor([x for x in range(num_envs)]) - object_ids = torch.tensor([x for x in range(num_cubes)]) + view_ids = torch.tensor([x for x in range(num_cubes * num_envs)], dtype=torch.int32) + env_ids = torch.tensor([x for x in range(num_envs)], dtype=torch.int32) + object_ids = torch.tensor([x for x in range(num_cubes)], dtype=torch.int32) sim.reset() @@ -742,15 +783,20 @@ def test_write_object_state_functions_data_consistency( else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) + cube_object.root_view.set_coms( + cube_object.reshape_data_to_view_2d(wp.from_torch(com.clone(), dtype=wp.transformf)).view(wp.float32), + wp.from_torch(view_ids, dtype=wp.int32), + ) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close( + wp.to_torch(cube_object.reshape_view_to_data_2d(cube_object.root_view.get_coms().view(wp.transformf))), com + ) - rand_state = torch.rand_like(cube_object.data.object_link_state_w) - rand_state[..., :3] += cube_object.data.object_link_pos_w + rand_state = torch.rand(num_envs, num_cubes, 13, device=device) + rand_state[..., :3] += cube_object.data.body_link_pos_w.torch # make quaternion a unit vector rand_state[..., 3:7] = torch.nn.functional.normalize(rand_state[..., 3:7], dim=-1) @@ -759,81 +805,95 @@ def test_write_object_state_functions_data_consistency( sim.step() cube_object.update(sim.cfg.dt) + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_com_pose_w = cube_object.data.body_com_pose_w.torch object_link_to_com_pos, object_link_to_com_quat = subtract_frame_transforms( - cube_object.data.object_link_state_w[..., :3].view(-1, 3), - cube_object.data.object_link_state_w[..., 3:7].view(-1, 4), - cube_object.data.object_com_state_w[..., :3].view(-1, 3), - cube_object.data.object_com_state_w[..., 3:7].view(-1, 4), + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:7].view(-1, 4), + body_com_pose_w[..., :3].view(-1, 3), + body_com_pose_w[..., 3:7].view(-1, 4), ) if state_location == "com": - cube_object.write_object_com_state_to_sim(rand_state, env_ids=env_ids, object_ids=object_ids) + cube_object.write_body_com_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) elif state_location == "link": - cube_object.write_object_link_state_to_sim(rand_state, env_ids=env_ids, object_ids=object_ids) + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_link_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) elif state_location == "root": - cube_object.write_object_state_to_sim(rand_state, env_ids=env_ids, object_ids=object_ids) + cube_object.write_body_link_pose_to_sim_index( + body_poses=rand_state[..., :7], env_ids=env_ids, body_ids=object_ids + ) + cube_object.write_body_com_velocity_to_sim_index( + body_velocities=rand_state[..., 7:], env_ids=env_ids, body_ids=object_ids + ) if state_location == "com": + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch expected_root_link_pos, expected_root_link_quat = combine_frame_transforms( - cube_object.data.object_com_state_w[..., :3].view(-1, 3), - cube_object.data.object_com_state_w[..., 3:7].view(-1, 4), + com_pose_w[..., :3].view(-1, 3), + com_pose_w[..., 3:].view(-1, 4), quat_rotate(quat_inv(object_link_to_com_quat), -object_link_to_com_pos), quat_inv(object_link_to_com_quat), ) - # torch.testing.assert_close(rand_state, cube_object.data.object_com_state_w) expected_object_link_pose = torch.cat((expected_root_link_pos, expected_root_link_quat), dim=1).view( num_envs, -1, 7 ) - # test both root_pose and root_link_state_w successfully updated when root_com_state_w updates - torch.testing.assert_close(expected_object_link_pose, cube_object.data.object_link_state_w[..., :7]) - # skip 7:10 because they differs from link frame, this should be fine because we are only checking + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch + # test both root_pose and root_link successfully updated when root_com updates + torch.testing.assert_close(expected_object_link_pose, link_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking # if velocity update is triggered, which can be determined by comparing angular velocity - torch.testing.assert_close( - cube_object.data.object_com_state_w[..., 10:], cube_object.data.object_link_state_w[..., 10:] - ) - torch.testing.assert_close(expected_object_link_pose, cube_object.data.object_state_w[..., :7]) - torch.testing.assert_close( - cube_object.data.object_com_state_w[..., 10:], cube_object.data.object_state_w[..., 10:] - ) + torch.testing.assert_close(com_vel_w[..., 3:], link_vel_w[..., 3:]) + torch.testing.assert_close(expected_object_link_pose, link_pose_w) + torch.testing.assert_close(com_vel_w[..., 3:], cube_object.data.body_com_vel_w.torch[..., 3:]) elif state_location == "link": + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch expected_com_pos, expected_com_quat = combine_frame_transforms( - cube_object.data.object_link_state_w[..., :3].view(-1, 3), - cube_object.data.object_link_state_w[..., 3:7].view(-1, 4), + link_pose_w[..., :3].view(-1, 3), + link_pose_w[..., 3:].view(-1, 4), object_link_to_com_pos, object_link_to_com_quat, ) expected_object_com_pose = torch.cat((expected_com_pos, expected_com_quat), dim=1).view(num_envs, -1, 7) - # test both root_pose and root_com_state_w successfully updated when root_link_state_w updates - torch.testing.assert_close(expected_object_com_pose, cube_object.data.object_com_state_w[..., :7]) - # skip 7:10 because they differs from link frame, this should be fine because we are only checking + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch + # test both root_pose and root_com successfully updated when root_link updates + torch.testing.assert_close(expected_object_com_pose, com_pose_w) + # skip lin_vel because it differs from link frame, this should be fine because we are only checking # if velocity update is triggered, which can be determined by comparing angular velocity - torch.testing.assert_close( - cube_object.data.object_link_state_w[..., 10:], cube_object.data.object_com_state_w[..., 10:] - ) - torch.testing.assert_close( - cube_object.data.object_link_state_w[..., :7], cube_object.data.object_state_w[..., :7] - ) - torch.testing.assert_close( - cube_object.data.object_link_state_w[..., 10:], cube_object.data.object_state_w[..., 10:] - ) + torch.testing.assert_close(link_vel_w[..., 3:], com_vel_w[..., 3:]) + torch.testing.assert_close(link_pose_w, cube_object.data.body_link_pose_w.torch) + torch.testing.assert_close(link_vel_w[..., 3:], cube_object.data.body_com_vel_w.torch[..., 3:]) elif state_location == "root": + body_link_pose_w = cube_object.data.body_link_pose_w.torch + body_com_vel_w = cube_object.data.body_com_vel_w.torch expected_object_com_pos, expected_object_com_quat = combine_frame_transforms( - cube_object.data.object_state_w[..., :3].view(-1, 3), - cube_object.data.object_state_w[..., 3:7].view(-1, 4), + body_link_pose_w[..., :3].view(-1, 3), + body_link_pose_w[..., 3:].view(-1, 4), object_link_to_com_pos, object_link_to_com_quat, ) expected_object_com_pose = torch.cat((expected_object_com_pos, expected_object_com_quat), dim=1).view( num_envs, -1, 7 ) - # test both root_com_state_w and root_link_state_w successfully updated when root_pose updates - torch.testing.assert_close(expected_object_com_pose, cube_object.data.object_com_state_w[..., :7]) - torch.testing.assert_close( - cube_object.data.object_state_w[..., 7:], cube_object.data.object_com_state_w[..., 7:] - ) - torch.testing.assert_close( - cube_object.data.object_state_w[..., :7], cube_object.data.object_link_state_w[..., :7] - ) - torch.testing.assert_close( - cube_object.data.object_state_w[..., 10:], cube_object.data.object_link_state_w[..., 10:] - ) + com_pose_w = cube_object.data.body_com_pose_w.torch + com_vel_w = cube_object.data.body_com_vel_w.torch + link_pose_w = cube_object.data.body_link_pose_w.torch + link_vel_w = cube_object.data.body_link_vel_w.torch + # test both root_com and root_link successfully updated when root_pose updates + torch.testing.assert_close(expected_object_com_pose, com_pose_w) + torch.testing.assert_close(body_com_vel_w, com_vel_w) + torch.testing.assert_close(body_link_pose_w, link_pose_w) + torch.testing.assert_close(body_com_vel_w[..., 3:], link_vel_w[..., 3:]) diff --git a/source/isaaclab/test/assets/test_surface_gripper.py b/source/isaaclab_physx/test/assets/test_surface_gripper.py similarity index 90% rename from source/isaaclab/test/assets/test_surface_gripper.py rename to source/isaaclab_physx/test/assets/test_surface_gripper.py index 86f112fdf984..c075821bb985 100644 --- a/source/isaaclab/test/assets/test_surface_gripper.py +++ b/source/isaaclab_physx/test/assets/test_surface_gripper.py @@ -9,6 +9,8 @@ """Launch Isaac Sim Simulator first.""" +import os + from isaaclab.app import AppLauncher # launch omniverse app @@ -18,6 +20,8 @@ import pytest import torch +import warp as wp +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg @@ -26,15 +30,17 @@ ArticulationCfg, RigidObject, RigidObjectCfg, - SurfaceGripper, - SurfaceGripperCfg, ) from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR -from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.version import get_isaac_sim_version, has_kit # from isaacsim.robot.surface_gripper import GripperView +_RUNNING_CI = ( + os.environ.get("CI") == "true" or os.environ.get("GITHUB_ACTIONS") == "true" or os.environ.get("GITLAB_CI") +) + def generate_surface_gripper_cfgs( kinematic_enabled: bool = False, @@ -63,7 +69,7 @@ def generate_surface_gripper_cfgs( ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.5), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), joint_pos={ ".*": 0.0, }, @@ -158,6 +164,10 @@ def sim(request): @pytest.mark.parametrize("device", ["cpu"]) @pytest.mark.parametrize("add_ground_plane", [True]) @pytest.mark.isaacsim_ci +@pytest.mark.skipif( + _RUNNING_CI, + reason="Isaac Sim SurfaceGripperView initialization can deadlock in CI; keep CUDA fail-fast coverage only.", +) def test_initialization(sim, num_articulations, device, add_ground_plane) -> None: """Test initialization for articulation with a surface gripper. @@ -171,7 +181,7 @@ def test_initialization(sim, num_articulations, device, add_ground_plane) -> Non device: The device to run the test on. add_ground_plane: Whether to add a ground plane to the simulation. """ - if get_isaac_sim_version().major < 5: + if has_kit() and get_isaac_sim_version().major < 5: return surface_gripper_cfg, articulation_cfg = generate_surface_gripper_cfgs(kinematic_enabled=False) surface_gripper, articulation, _ = generate_surface_gripper( @@ -188,8 +198,8 @@ def test_initialization(sim, num_articulations, device, add_ground_plane) -> Non assert surface_gripper.state.shape == (num_articulations,) # Check that the command and state are initialized to the correct values - assert surface_gripper.command == 0.0 # Idle command after a reset - assert surface_gripper.state == -1.0 # Open state after a reset + assert wp.to_torch(surface_gripper.command).item() == 0.0 # Idle command after a reset + assert wp.to_torch(surface_gripper.state).item() == -1.0 # Open state after a reset # Simulate physics for _ in range(10): @@ -205,7 +215,7 @@ def test_initialization(sim, num_articulations, device, add_ground_plane) -> Non @pytest.mark.isaacsim_ci def test_raise_error_if_not_cpu(sim, device, add_ground_plane) -> None: """Test that the SurfaceGripper raises an error if the device is not CPU.""" - if get_isaac_sim_version().major < 5: + if has_kit() and get_isaac_sim_version().major < 5: return num_articulations = 1 surface_gripper_cfg, articulation_cfg = generate_surface_gripper_cfgs(kinematic_enabled=False) diff --git a/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_utils.py b/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_utils.py new file mode 100644 index 000000000000..068b6df87b38 --- /dev/null +++ b/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_utils.py @@ -0,0 +1,333 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for RTX streaming wait helpers. + +Covers callback state updates, subscription behavior, and timeout-aware wait +logic in :mod:`isaaclab_physx.renderers.isaac_rtx_renderer_utils`. +""" + +from __future__ import annotations + +import sys +import time +import types +from unittest.mock import MagicMock, patch + +# Stub ``omni`` / ``omni.usd`` in ``sys.modules`` before importing the module +# under test so its top-level ``import omni.usd`` succeeds outside a running +# Kit runtime. Per-test fixtures below still patch these with fresh mocks, so +# each test remains isolated. +if "omni" not in sys.modules: + sys.modules["omni"] = types.ModuleType("omni") +if "omni.usd" not in sys.modules: + _omni_usd_stub = MagicMock() + sys.modules["omni.usd"] = _omni_usd_stub + setattr(sys.modules["omni"], "usd", _omni_usd_stub) + +import isaaclab_physx.renderers.isaac_rtx_renderer_utils as rtx_utils # noqa: E402 +import pytest # noqa: E402 + +pytestmark = pytest.mark.isaacsim_ci + +# test-specific timeout overrides for _STREAMING_WAIT_TIMEOUT_S +STREAMING_TIMEOUT_S = 0.1 +STREAMING_TIMEOUT_SHORT_S = 0.01 + +# simulated per-update sleep to advance wall-clock time inside the wait loop +MOCK_UPDATE_SLEEP_S = 0.02 + +# how many app.update() iterations before the mock becomes idle +MOCK_ITERATIONS_BEFORE_IDLE = 3 + + +@pytest.fixture(autouse=True) +def _reset_globals(monkeypatch): + """Restore module-level state so tests are isolated.""" + monkeypatch.setattr(rtx_utils, "_last_render_update_key", (0, -1, -1)) + + +@pytest.fixture() +def mock_omni_usd(): + """Make ``omni.usd`` importable outside the Isaac Sim runtime. + + Both ``sys.modules`` and the ``omni`` namespace attribute must be set, + because ``import omni.usd`` resolves the parent package first and then + looks up ``.usd`` as an attribute. + """ + import omni + + mock_module = MagicMock() + with ( + patch.dict(sys.modules, {"omni.usd": mock_module}), + patch.object(omni, "usd", mock_module, create=True), + ): + yield mock_module + + +@pytest.fixture() +def mock_omni_kit_app(): + """Make ``omni.kit.app`` importable outside the Isaac Sim runtime.""" + import omni + + mock_kit = MagicMock() + mock_module = MagicMock() + mock_kit.app = mock_module + with ( + patch.dict(sys.modules, {"omni.kit": mock_kit, "omni.kit.app": mock_module}), + patch.object(omni, "kit", mock_kit, create=True), + ): + yield mock_module + + +# --------------------------------------------------------------------------- +# _get_stage_streaming_busy +# --------------------------------------------------------------------------- + + +class TestGetStageStreamingBusy: + """Synchronous streaming status query delegates to UsdContext.""" + + def test_returns_true_when_busy(self, mock_omni_usd): + mock_ctx = MagicMock() + mock_ctx.get_stage_streaming_status.return_value = True + mock_omni_usd.get_context.return_value = mock_ctx + assert rtx_utils._get_stage_streaming_busy() is True + + def test_returns_false_when_idle(self, mock_omni_usd): + mock_ctx = MagicMock() + mock_ctx.get_stage_streaming_status.return_value = False + mock_omni_usd.get_context.return_value = mock_ctx + assert rtx_utils._get_stage_streaming_busy() is False + + def test_returns_false_when_no_context(self, mock_omni_usd): + mock_omni_usd.get_context.return_value = None + assert rtx_utils._get_stage_streaming_busy() is False + + +# --------------------------------------------------------------------------- +# _wait_for_streaming_complete +# --------------------------------------------------------------------------- + + +class TestWaitForStreamingComplete: + """Blocking wait pumps app.update() while busy and respects timeout. + + These tests patch ``_get_stage_streaming_busy`` at the module level so + they don't depend on ``omni.usd`` being importable. + """ + + def test_returns_immediately_when_not_busy(self, mock_omni_kit_app): + """Skips loop and issues only the final update when idle.""" + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=False): + rtx_utils._wait_for_streaming_complete() + + mock_app.update.assert_called_once() + + def test_pumps_updates_until_idle(self, mock_omni_kit_app): + """Pumps updates until streaming reports idle.""" + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + loop_calls = 0 + + def _streaming_status(): + return loop_calls < MOCK_ITERATIONS_BEFORE_IDLE + + def _count_update(): + nonlocal loop_calls + loop_calls += 1 + + mock_app.update.side_effect = _count_update + + with patch.object(rtx_utils, "_get_stage_streaming_busy", side_effect=_streaming_status): + rtx_utils._wait_for_streaming_complete() + + assert mock_app.update.call_count == MOCK_ITERATIONS_BEFORE_IDLE + 1 + + def test_respects_timeout(self, monkeypatch, mock_omni_kit_app): + """Exits wait loop on timeout if busy never clears.""" + monkeypatch.setattr(rtx_utils, "_STREAMING_WAIT_TIMEOUT_S", STREAMING_TIMEOUT_S) + mock_app = MagicMock() + mock_app.update.side_effect = lambda: time.sleep(MOCK_UPDATE_SLEEP_S) + mock_omni_kit_app.get_app.return_value = mock_app + + with patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=True): + rtx_utils._wait_for_streaming_complete() + + assert mock_app.update.call_count > 0 + + def test_timeout_logs_warning(self, monkeypatch, mock_omni_kit_app): + """Logs warning when timeout is reached while still busy.""" + monkeypatch.setattr(rtx_utils, "_STREAMING_WAIT_TIMEOUT_S", STREAMING_TIMEOUT_SHORT_S) + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + mock_logger = MagicMock() + + with ( + patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=True), + patch.object(rtx_utils, "logger", mock_logger), + ): + rtx_utils._wait_for_streaming_complete() + + mock_logger.warning.assert_called_once() + assert "RTX streaming did not complete within" in mock_logger.warning.call_args[0][0] + + def test_logs_info_on_non_trivial_completion(self, mock_omni_kit_app): + """Logs completion info when streaming finishes after delay.""" + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + mock_logger = MagicMock() + call_count = 0 + + def _streaming_status(): + return call_count < 1 + + def _become_idle_after_delay(): + nonlocal call_count + time.sleep(MOCK_UPDATE_SLEEP_S) + call_count += 1 + + mock_app.update.side_effect = _become_idle_after_delay + + with ( + patch.object(rtx_utils, "_get_stage_streaming_busy", side_effect=_streaming_status), + patch.object(rtx_utils, "logger", mock_logger), + ): + rtx_utils._wait_for_streaming_complete() + + mock_logger.info.assert_called_once() + assert "RTX streaming completed in" in mock_logger.info.call_args[0][0] + + +# --------------------------------------------------------------------------- +# ensure_isaac_rtx_render_update +# --------------------------------------------------------------------------- + + +class TestEnsureIsaacRtxRenderUpdate: + """Tests for :func:`ensure_isaac_rtx_render_update`. + + Covers dedup logic, visualizer-skip behaviour, and the first-call-for-sim + guard that prevents annotator buffers from never being populated. + """ + + @pytest.fixture() + def mock_sim(self): + """A minimal mock of :class:`SimulationContext`.""" + sim = MagicMock() + sim._physics_step_count = 0 + sim._render_generation = 0 + sim.render_generation = 0 + sim.is_rendering = True + sim.visualizers = [] + return sim + + @pytest.fixture() + def pumping_visualizer(self): + """A visualizer that claims to pump ``app.update()``.""" + viz = MagicMock() + viz.pumps_app_update.return_value = True + return viz + + def test_first_call_with_visualizer_still_pumps(self, mock_sim, pumping_visualizer, mock_omni_kit_app): + """Regression: first call for a new sim must pump even with a visualizer. + + Without the fix (commit 2e8ace7), a visualizer returning + ``pumps_app_update() == True`` caused the function to skip + ``app.update()`` on the very first call. The visualizer had not + pumped yet (``sim.render()`` was never called), so annotator + buffers were never populated and cameras hung waiting for data. + """ + mock_sim.visualizers = [pumping_visualizer] + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with ( + patch.object( + rtx_utils.sim_utils.SimulationContext, + "instance", + return_value=mock_sim, + ), + patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=False), + ): + rtx_utils.ensure_isaac_rtx_render_update() + + mock_app.update.assert_called_once() + + def test_second_call_with_visualizer_skips_pump(self, mock_sim, pumping_visualizer, mock_omni_kit_app): + """After the first call, a visualizer that pumps causes the skip.""" + mock_sim.visualizers = [pumping_visualizer] + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with ( + patch.object( + rtx_utils.sim_utils.SimulationContext, + "instance", + return_value=mock_sim, + ), + patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=False), + ): + rtx_utils.ensure_isaac_rtx_render_update() + mock_app.update.assert_called_once() + mock_app.update.reset_mock() + + mock_sim._physics_step_count = 1 + rtx_utils.ensure_isaac_rtx_render_update() + + mock_app.update.assert_not_called() + + def test_no_sim_is_noop(self, mock_omni_kit_app): + """No-op when SimulationContext.instance() returns None.""" + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with patch.object( + rtx_utils.sim_utils.SimulationContext, + "instance", + return_value=None, + ): + rtx_utils.ensure_isaac_rtx_render_update() + + mock_app.update.assert_not_called() + + def test_dedup_same_step(self, mock_sim, mock_omni_kit_app): + """Second call in the same physics step is a no-op (dedup).""" + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with ( + patch.object( + rtx_utils.sim_utils.SimulationContext, + "instance", + return_value=mock_sim, + ), + patch.object(rtx_utils, "_get_stage_streaming_busy", return_value=False), + ): + rtx_utils.ensure_isaac_rtx_render_update() + mock_app.update.assert_called_once() + mock_app.update.reset_mock() + + rtx_utils.ensure_isaac_rtx_render_update() + + mock_app.update.assert_not_called() + + def test_not_rendering_skips(self, mock_sim, mock_omni_kit_app): + """No ``app.update()`` when rendering is disabled.""" + mock_sim.is_rendering = False + mock_app = MagicMock() + mock_omni_kit_app.get_app.return_value = mock_app + + with patch.object( + rtx_utils.sim_utils.SimulationContext, + "instance", + return_value=mock_sim, + ): + rtx_utils.ensure_isaac_rtx_render_update() + + mock_app.update.assert_not_called() diff --git a/source/isaaclab/test/sensors/check_contact_sensor.py b/source/isaaclab_physx/test/sensors/check_contact_sensor.py similarity index 89% rename from source/isaaclab/test/sensors/check_contact_sensor.py rename to source/isaaclab_physx/test/sensors/check_contact_sensor.py index b4fe5f555dc7..e840f00f1d34 100644 --- a/source/isaaclab/test/sensors/check_contact_sensor.py +++ b/source/isaaclab_physx/test/sensors/check_contact_sensor.py @@ -36,9 +36,8 @@ import torch -from isaacsim.core.cloner import GridCloner - import isaaclab.sim as sim_utils +from isaaclab import cloner as lab_cloner from isaaclab.assets import Articulation from isaaclab.sensors.contact_sensor import ContactSensor, ContactSensorCfg from isaaclab.sim import SimulationCfg, SimulationContext @@ -82,16 +81,16 @@ def main(): # this is needed to visualize the scene when flatcache is enabled sim._settings.set_bool("/persistent/omnihydra/useSceneGraphInstancing", True) - # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") + # Create environment clones using Lab's cloner utilities + num_envs = args_cli.num_robots + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_envs, spacing=2.0, device=sim.device) # Everything under the namespace "/World/envs/env_0" will be cloned sim.stage.DefinePrim("/World/envs/env_0", "Xform") # Clone the scene - num_envs = args_cli.num_robots - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) - _ = cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] + lab_cloner.usd_replicate(sim.stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) # Design props design_scene() # Spawn things into the scene @@ -110,8 +109,8 @@ def main(): contact_sensor = ContactSensor(cfg=contact_sensor_cfg) # filter collisions within each environment instance physics_scene_path = sim.get_physics_context().prim_path - cloner.filter_collisions( - physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"] + lab_cloner.filter_collisions( + sim.stage, physics_scene_path, "/World/collisions", envs_prim_paths, global_paths=["/World/defaultGroundPlane"] ) # Play the simulator diff --git a/source/isaaclab/test/sensors/check_imu_sensor.py b/source/isaaclab_physx/test/sensors/check_pva_sensor.py similarity index 76% rename from source/isaaclab/test/sensors/check_imu_sensor.py rename to source/isaaclab_physx/test/sensors/check_pva_sensor.py index 8a8c048ed62d..41476babf542 100644 --- a/source/isaaclab/test/sensors/check_imu_sensor.py +++ b/source/isaaclab_physx/test/sensors/check_pva_sensor.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause """ -Visual test script for the imu sensor from the Orbit framework. +Visual test script for the pva sensor from the Orbit framework. """ from __future__ import annotations @@ -16,7 +16,7 @@ from isaacsim import SimulationApp # add argparse arguments -parser = argparse.ArgumentParser(description="Imu Test Script") +parser = argparse.ArgumentParser(description="Pva Test Script") parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.") parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to clone.") parser.add_argument( @@ -40,15 +40,13 @@ import torch -import omni -from isaacsim.core.cloner import GridCloner -from isaacsim.core.utils.viewports import set_camera_view -from pxr import PhysxSchema +from isaacsim.core.rendering_manager import ViewportManager import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen +from isaaclab import cloner as lab_cloner from isaaclab.assets import RigidObject, RigidObjectCfg -from isaaclab.sensors.imu import Imu, ImuCfg +from isaaclab.sensors.pva import Pva, PvaCfg from isaaclab.sim import SimulationCfg, SimulationContext from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG from isaaclab.terrains.terrain_importer import TerrainImporter @@ -72,15 +70,17 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048) -> RigidObject: ) _ = TerrainImporter(terrain_importer_cfg) # obtain the current stage - stage = omni.usd.get_context().get_stage() + stage = sim_utils.get_current_stage() # Create interface to clone the scene - cloner = GridCloner(spacing=2.0) - cloner.define_base_env("/World/envs") - envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs) + # Create environment clones using Lab's cloner utilities + env_fmt = "/World/envs/env_{}" + env_ids = torch.arange(num_envs, dtype=torch.long, device=sim.device) + env_origins, _ = lab_cloner.grid_transforms(num_envs, spacing=2.0, device=sim.device) + envs_prim_paths = [f"/World/envs/env_{i}" for i in range(num_envs)] # create source prim stage.DefinePrim(envs_prim_paths[0], "Xform") # clone the env xform - cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True) + lab_cloner.usd_replicate(stage, [env_fmt.format(0)], [env_fmt], env_ids, positions=env_origins) # Define the scene # -- Light cfg = sim_utils.DistantLightCfg(intensity=2000) @@ -102,12 +102,13 @@ def design_scene(sim: SimulationContext, num_envs: int = 2048) -> RigidObject: # obtain the current physics scene physics_scene_prim_path = None for prim in stage.Traverse(): - if prim.HasAPI(PhysxSchema.PhysxSceneAPI): + if "PhysxSceneAPI" in prim.GetAppliedSchemas(): physics_scene_prim_path = prim.GetPrimPath() logging.info(f"Physics scene prim path: {physics_scene_prim_path}") break # filter collisions within each environment instance - cloner.filter_collisions( + lab_cloner.filter_collisions( + stage, physics_scene_prim_path, "/World/collisions", envs_prim_paths, @@ -121,33 +122,33 @@ def main(): # Load kit helper sim = SimulationContext(SimulationCfg()) # Set main camera - set_camera_view([0.0, 30.0, 25.0], [0.0, 0.0, -2.5]) + ViewportManager.set_camera_view("/OmniverseKit_Persp", eye=[0.0, 30.0, 25.0], target=[0.0, 0.0, -2.5]) # Parameters num_envs = args_cli.num_envs # Design the scene balls = design_scene(sim=sim, num_envs=num_envs) - # Create a ray-caster sensor - imu_cfg = ImuCfg( + # Create a pva sensor + pva_cfg = PvaCfg( prim_path="/World/envs/env_.*/ball", debug_vis=not args_cli.headless, ) # increase scale of the arrows for better visualization - imu_cfg.visualizer_cfg.markers["arrow"].scale = (1.0, 0.2, 0.2) - imu = Imu(cfg=imu_cfg) + pva_cfg.visualizer_cfg.markers["arrow"].scale = (1.0, 0.2, 0.2) + pva = Pva(cfg=pva_cfg) - # Play simulator and init the Imu + # Play simulator and init the Pva sim.reset() # Print the sensor information - print(imu) + print(pva) # Get the ball initial positions sim.step(render=not args_cli.headless) balls.update(sim.get_physics_dt()) - ball_initial_positions = balls.data.root_pos_w.clone() - ball_initial_orientations = balls.data.root_quat_w.clone() + ball_initial_positions = balls.data.root_pos_w.torch.clone() + ball_initial_orientations = balls.data.root_quat_w.torch.clone() # Create a counter for resetting the scene step_count = 0 @@ -166,14 +167,14 @@ def main(): balls.write_root_pose_to_sim(torch.cat([ball_initial_positions, ball_initial_orientations], dim=-1)) balls.reset() # reset the sensor - imu.reset() + pva.reset() # reset the counter step_count = 0 # Step simulation sim.step() - # Update the imu sensor - with Timer(f"Imu sensor update with {num_envs}"): - imu.update(dt=sim.get_physics_dt(), force_recompute=True) + # Update the pva sensor + with Timer(f"Pva sensor update with {num_envs}"): + pva.update(dt=sim.get_physics_dt(), force_recompute=True) # Update counter step_count += 1 diff --git a/source/isaaclab/test/sensors/test_contact_sensor.py b/source/isaaclab_physx/test/sensors/test_contact_sensor.py similarity index 85% rename from source/isaaclab/test/sensors/test_contact_sensor.py rename to source/isaaclab_physx/test/sensors/test_contact_sensor.py index ed376f97f2d1..00772e6cb0d3 100644 --- a/source/isaaclab/test/sensors/test_contact_sensor.py +++ b/source/isaaclab_physx/test/sensors/test_contact_sensor.py @@ -19,12 +19,11 @@ import pytest import torch +import warp as wp from flaky import flaky -import carb -from pxr import PhysxSchema - import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.assets import RigidObject, RigidObjectCfg from isaaclab.scene import InteractiveScene, InteractiveSceneCfg from isaaclab.sensors import ContactSensor, ContactSensorCfg @@ -221,37 +220,37 @@ def setup_simulation(): durations = [sim_dt, sim_dt * 2, sim_dt * 32, sim_dt * 128] terrains = [FLAT_TERRAIN_CFG, COBBLESTONE_TERRAIN_CFG] devices = ["cuda:0", "cpu"] - carb_settings_iface = carb.settings.get_settings() - return sim_dt, durations, terrains, devices, carb_settings_iface + settings = get_settings_manager() + return sim_dt, durations, terrains, devices, settings @pytest.mark.parametrize("disable_contact_processing", [True, False]) -@flaky(max_runs=3, min_passes=1) +@flaky(max_runs=5, min_passes=1) def test_cube_contact_time(setup_simulation, disable_contact_processing): """Checks contact sensor values for contact time and air time for a cube collision primitive.""" # check for both contact processing enabled and disabled # internally, the contact sensor should enable contact processing so it should always work. - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation - carb_settings_iface.set_bool("/physics/disableContactProcessing", disable_contact_processing) - _run_contact_sensor_test(CUBE_CFG, sim_dt, devices, terrains, carb_settings_iface, durations) + sim_dt, durations, terrains, devices, settings = setup_simulation + settings.set_bool("/physics/disableContactProcessing", disable_contact_processing) + _run_contact_sensor_test(CUBE_CFG, sim_dt, devices, terrains, settings, durations) @pytest.mark.parametrize("disable_contact_processing", [True, False]) -@flaky(max_runs=3, min_passes=1) +@flaky(max_runs=5, min_passes=1) def test_sphere_contact_time(setup_simulation, disable_contact_processing): """Checks contact sensor values for contact time and air time for a sphere collision primitive.""" # check for both contact processing enabled and disabled # internally, the contact sensor should enable contact processing so it should always work. - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation - carb_settings_iface.set_bool("/physics/disableContactProcessing", disable_contact_processing) - _run_contact_sensor_test(SPHERE_CFG, sim_dt, devices, terrains, carb_settings_iface, durations) + sim_dt, durations, terrains, devices, settings = setup_simulation + settings.set_bool("/physics/disableContactProcessing", disable_contact_processing) + _run_contact_sensor_test(SPHERE_CFG, sim_dt, devices, terrains, settings, durations) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.parametrize("num_envs", [1, 6, 24]) def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): """Checks contact sensor reporting for filtering stacked cube prims.""" - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation + sim_dt, durations, terrains, devices, settings = setup_simulation with build_simulation_context(device=device, dt=sim_dt, add_lighting=True) as sim: sim._app_control_on_stop_handle = None # Instance new scene for the current terrain and contact prim. @@ -282,7 +281,7 @@ def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): scene = InteractiveScene(scene_cfg) # Check that contact processing is enabled - assert not carb_settings_iface.get("/physics/disableContactProcessing") + assert not settings.get("/physics/disableContactProcessing") # Set variables internally for reference sim.reset() @@ -291,8 +290,8 @@ def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): contact_sensor_2 = scene["contact_sensor_2"] # Check that contact processing is enabled - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Play the simulation scene.reset() @@ -300,14 +299,18 @@ def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): _perform_sim_step(sim, scene, sim_dt) # Check values for cube 2 --> cube 1 is the only collision for cube 2 - torch.testing.assert_close(contact_sensor_2.data.force_matrix_w[:, :, 0], contact_sensor_2.data.net_forces_w) + torch.testing.assert_close( + contact_sensor_2.data.force_matrix_w.torch[:, :, 0], + contact_sensor_2.data.net_forces_w.torch, + ) # Check that forces are opposite and equal torch.testing.assert_close( - contact_sensor_2.data.force_matrix_w[:, :, 0], -contact_sensor.data.force_matrix_w[:, :, 0] + contact_sensor_2.data.force_matrix_w.torch[:, :, 0], + -contact_sensor.data.force_matrix_w.torch[:, :, 0], ) # Check values are non-zero (contacts are happening and are getting reported) - assert contact_sensor_2.data.net_forces_w.sum().item() > 0.0 - assert contact_sensor.data.net_forces_w.sum().item() > 0.0 + assert contact_sensor_2.data.net_forces_w.torch.sum().item() > 0.0 + assert contact_sensor.data.net_forces_w.torch.sum().item() > 0.0 def test_no_contact_reporting(setup_simulation): @@ -316,11 +319,11 @@ def test_no_contact_reporting(setup_simulation): We borrow the test :func:`test_cube_stack_contact_filtering` to test this and force disable contact processing. """ # TODO: This test only works on CPU. For GPU, it seems the contact processing is not disabled. - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation + sim_dt, durations, terrains, devices, settings = setup_simulation with build_simulation_context(device="cpu", dt=sim_dt, add_lighting=True) as sim: sim._app_control_on_stop_handle = None # Instance new scene for the current terrain and contact prim. - scene_cfg = ContactSensorSceneCfg(num_envs=32, env_spacing=1.0, lazy_sensor_update=False) + scene_cfg = ContactSensorSceneCfg(num_envs=2, env_spacing=1.0, lazy_sensor_update=False) scene_cfg.terrain = FLAT_TERRAIN_CFG # -- cube 1 scene_cfg.shape = CUBE_CFG.replace(prim_path="{ENV_REGEX_NS}/Cube_1") @@ -347,7 +350,7 @@ def test_no_contact_reporting(setup_simulation): scene = InteractiveScene(scene_cfg) # Force disable contact processing - carb_settings_iface.set_bool("/physics/disableContactProcessing", True) + settings.set_bool("/physics/disableContactProcessing", True) # Set variables internally for reference sim.reset() @@ -357,8 +360,8 @@ def test_no_contact_reporting(setup_simulation): contact_sensor_2: ContactSensor = scene["contact_sensor_2"] # Check buffers have the right size - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Reset the contact sensors scene.reset() @@ -367,16 +370,16 @@ def test_no_contact_reporting(setup_simulation): _perform_sim_step(sim, scene, sim_dt) # check values are zero (contacts are happening but not reported) - assert contact_sensor.data.net_forces_w.sum().item() == 0.0 - assert contact_sensor.data.force_matrix_w.sum().item() == 0.0 - assert contact_sensor_2.data.net_forces_w.sum().item() == 0.0 - assert contact_sensor_2.data.force_matrix_w.sum().item() == 0.0 + assert contact_sensor.data.net_forces_w.torch.sum().item() == 0.0 + assert contact_sensor.data.force_matrix_w.torch.sum().item() == 0.0 + assert contact_sensor_2.data.net_forces_w.torch.sum().item() == 0.0 + assert contact_sensor_2.data.force_matrix_w.torch.sum().item() == 0.0 @pytest.mark.isaacsim_ci def test_sensor_print(setup_simulation): """Test sensor print is working correctly.""" - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation + sim_dt, durations, terrains, devices, settings = setup_simulation with build_simulation_context(device="cuda:0", dt=sim_dt, add_lighting=False) as sim: sim._app_control_on_stop_handle = None # Spawn things into stage @@ -401,7 +404,7 @@ def test_sensor_print(setup_simulation): @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) def test_contact_sensor_threshold(setup_simulation, device): """Test that the contact sensor USD threshold attribute is set to 0.0.""" - sim_dt, durations, terrains, devices, carb_settings_iface = setup_simulation + sim_dt, durations, terrains, devices, settings = setup_simulation with build_simulation_context(device=device, dt=sim_dt, add_lighting=False) as sim: sim._app_control_on_stop_handle = None # Spawn things into stage @@ -429,9 +432,8 @@ def test_contact_sensor_threshold(setup_simulation, device): assert contact_sensor is not None, "Contact sensor was not created" # Check if the prim has contact report API and verify threshold is close to 0.0 - if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - cr_api = PhysxSchema.PhysxContactReportAPI.Get(stage, prim.GetPrimPath()) - threshold_attr = cr_api.GetThresholdAttr() + if "PhysxContactReportAPI" in prim.GetAppliedSchemas(): + threshold_attr = prim.GetAttribute("physxContactReport:threshold") if threshold_attr.IsValid(): threshold_value = threshold_attr.Get() assert pytest.approx(threshold_value, abs=1e-6) == 0.0, ( @@ -449,8 +451,8 @@ def test_friction_reporting(setup_simulation, grav_dir): This test places a contact sensor enabled cube onto a ground plane under different gravity directions. It then compares the normalized friction force dir with the direction of gravity to ensure they are aligned. """ - sim_dt, _, _, _, carb_settings_iface = setup_simulation - carb_settings_iface.set_bool("/physics/disableContactProcessing", True) + sim_dt, _, _, _, settings = setup_simulation + settings.set_bool("/physics/disableContactProcessing", True) device = "cuda:0" sim_cfg = SimulationCfg(dt=sim_dt, device=device, gravity=grav_dir) with build_simulation_context(sim_cfg=sim_cfg, add_lighting=False) as sim: @@ -479,17 +481,18 @@ def test_friction_reporting(setup_simulation, grav_dir): scene["contact_sensor"].reset() scene["shape"].write_root_pose_to_sim( - root_pose=torch.tensor([0, 0.0, CUBE_CFG.spawn.size[2] / 2.0, 1, 0, 0, 0]) + root_pose=torch.tensor([0, 0.0, CUBE_CFG.spawn.size[2] / 2.0, 1, 0, 0, 0], device=device).unsqueeze(0) ) # step sim once to compute friction forces _perform_sim_step(sim, scene, sim_dt) # check that forces are being reported match expected friction forces - expected_friction, _, _, _ = scene["contact_sensor"].contact_physx_view.get_friction_data(dt=sim_dt) - reported_friction = scene["contact_sensor"].data.friction_forces_w[0, 0, :] + expected_friction, _, _, _ = scene["contact_sensor"].contact_view.get_friction_data(dt=sim_dt) + expected_friction_torch = wp.to_torch(expected_friction) + reported_friction = scene["contact_sensor"].data.friction_forces_w.torch[0, 0, :] - torch.testing.assert_close(expected_friction.sum(dim=0), reported_friction[0], atol=1e-6, rtol=1e-5) + torch.testing.assert_close(expected_friction_torch.sum(dim=0), reported_friction[0], atol=1e-6, rtol=1e-5) # check that friction force direction opposes gravity direction grav = torch.tensor(grav_dir, device=device) @@ -502,8 +505,8 @@ def test_friction_reporting(setup_simulation, grav_dir): @pytest.mark.isaacsim_ci def test_invalid_prim_paths_config(setup_simulation): - sim_dt, _, _, _, carb_settings_iface = setup_simulation - carb_settings_iface.set_bool("/physics/disableContactProcessing", True) + sim_dt, _, _, _, settings = setup_simulation + settings.set_bool("/physics/disableContactProcessing", True) device = "cuda:0" sim_cfg = SimulationCfg(dt=sim_dt, device=device) with build_simulation_context(sim_cfg=sim_cfg, add_lighting=False) as sim: @@ -536,8 +539,8 @@ def test_invalid_prim_paths_config(setup_simulation): @pytest.mark.isaacsim_ci def test_invalid_max_contact_points_config(setup_simulation): - sim_dt, _, _, _, carb_settings_iface = setup_simulation - carb_settings_iface.set_bool("/physics/disableContactProcessing", True) + sim_dt, _, _, _, settings = setup_simulation + settings.set_bool("/physics/disableContactProcessing", True) device = "cuda:0" sim_cfg = SimulationCfg(dt=sim_dt, device=device) with build_simulation_context(sim_cfg=sim_cfg, add_lighting=False) as sim: @@ -580,7 +583,7 @@ def _run_contact_sensor_test( sim_dt: float, devices: list[str], terrains: list[TerrainImporterCfg], - carb_settings_iface, + settings, durations: list[float], ): """ @@ -700,7 +703,7 @@ def _test_sensor_contact( duration = durations[idx] while current_test_time < duration: # set object states to contact the ground plane - shape.write_root_pose_to_sim(root_pose=test_pose) + shape.write_root_pose_to_sim(root_pose=torch.tensor(test_pose, device=shape.device).unsqueeze(0)) # perform simulation step _perform_sim_step(sim, scene, sim_dt) # increment contact time @@ -732,7 +735,7 @@ def _test_sensor_contact( _test_friction_forces(shape, sensor, mode) # switch the contact mode for 1 dt step before the next contact test begins. - shape.write_root_pose_to_sim(root_pose=reset_pose) + shape.write_root_pose_to_sim(root_pose=torch.tensor(reset_pose, device=shape.device).unsqueeze(0)) # perform simulation step _perform_sim_step(sim, scene, sim_dt) # set the last air time to 2 sim_dt steps, because last_air_time and last_contact_time @@ -746,26 +749,30 @@ def _test_friction_forces(shape: RigidObject, sensor: ContactSensor, mode: Conta assert sensor._data.friction_forces_w is None return - # check shape of the contact_pos_w tensor + # check shape of the friction_forces_w tensor (wp.to_torch expands vec3f -> float32 trailing dim) num_bodies = sensor.num_bodies - assert sensor._data.friction_forces_w.shape == (sensor.num_instances // num_bodies, num_bodies, 1, 3) + friction_torch = sensor._data.friction_forces_w.torch + assert friction_torch.shape == (sensor.num_instances // num_bodies, num_bodies, 1, 3) # compare friction forces if mode == ContactTestMode.IN_CONTACT: - assert torch.any(torch.abs(sensor._data.friction_forces_w) > 1e-5).item() - friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_physx_view.get_friction_data( + assert torch.any(torch.abs(friction_torch) > 1e-5).item() + friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_view.get_friction_data( dt=sensor._sim_physics_dt ) + friction_forces_t = wp.to_torch(friction_forces) + buffer_count_t = wp.to_torch(buffer_count).to(torch.int32) + buffer_start_t = wp.to_torch(buffer_start_indices).to(torch.int32) for i in range(sensor.num_instances * num_bodies): - for j in range(sensor.contact_physx_view.filter_count): - start_index_ij = buffer_start_indices[i, j] - count_ij = buffer_count[i, j] - force = torch.sum(friction_forces[start_index_ij : (start_index_ij + count_ij), :], dim=0) + for j in range(sensor.contact_view.filter_count): + start_index_ij = buffer_start_t[i, j] + count_ij = buffer_count_t[i, j] + force = torch.sum(friction_forces_t[start_index_ij : (start_index_ij + count_ij), :], dim=0) env_idx = i // num_bodies body_idx = i % num_bodies - assert torch.allclose(force, sensor._data.friction_forces_w[env_idx, body_idx, j, :], atol=1e-5) + assert torch.allclose(force, friction_torch[env_idx, body_idx, j, :], atol=1e-5) elif mode == ContactTestMode.NON_CONTACT: - assert torch.all(sensor._data.friction_forces_w == 0.0).item() + assert torch.all(friction_torch == 0.0).item() def _test_contact_position(shape: RigidObject, sensor: ContactSensor, mode: ContactTestMode) -> None: @@ -780,19 +787,19 @@ def _test_contact_position(shape: RigidObject, sensor: ContactSensor, mode: Cont assert sensor._data.contact_pos_w is None return - # check shape of the contact_pos_w tensor + # check shape of the contact_pos_w tensor (wp.to_torch expands vec3f -> float32 trailing dim) num_bodies = sensor.num_bodies - assert sensor._data.contact_pos_w.shape == (sensor.num_instances // num_bodies, num_bodies, 1, 3) + contact_pos_torch = sensor._data.contact_pos_w.torch + assert contact_pos_torch.shape == (sensor.num_instances // num_bodies, num_bodies, 1, 3) # check contact positions if mode == ContactTestMode.IN_CONTACT: - contact_position = sensor._data.pos_w + torch.tensor( - [[0.0, 0.0, -shape.cfg.spawn.radius]], device=sensor._data.pos_w.device - ) + pos_w_torch = sensor._data.pos_w.torch + contact_position = pos_w_torch + torch.tensor([[0.0, 0.0, -shape.cfg.spawn.radius]], device=pos_w_torch.device) assert torch.all( - torch.abs(torch.norm(sensor._data.contact_pos_w - contact_position.unsqueeze(1), p=2, dim=-1)) < 1e-2 + torch.abs(torch.linalg.norm(contact_pos_torch - contact_position.unsqueeze(1), ord=2, dim=-1)) < 1e-2 ).item() elif mode == ContactTestMode.NON_CONTACT: - assert torch.all(torch.isnan(sensor._data.contact_pos_w)).item() + assert torch.all(torch.isnan(contact_pos_torch)).item() def _check_prim_contact_state_times( @@ -821,10 +828,10 @@ def _check_prim_contact_state_times( in_air = True if expected_contact_time > 0.0: in_contact = True - measured_contact_time = sensor.data.current_contact_time - measured_air_time = sensor.data.current_air_time - measured_last_contact_time = sensor.data.last_contact_time - measured_last_air_time = sensor.data.last_air_time + measured_contact_time = sensor.data.current_contact_time.torch + measured_air_time = sensor.data.current_air_time.torch + measured_last_contact_time = sensor.data.last_contact_time.torch + measured_last_air_time = sensor.data.last_air_time.torch # check current contact state assert pytest.approx(measured_contact_time.item(), 0.01) == expected_contact_time assert pytest.approx(measured_air_time.item(), 0.01) == expected_air_time @@ -832,8 +839,8 @@ def _check_prim_contact_state_times( assert pytest.approx(measured_last_contact_time.item(), 0.01) == expected_last_contact_time assert pytest.approx(measured_last_air_time.item(), 0.01) == expected_last_air_time # check current contact mode - assert sensor.compute_first_contact(dt=dt).item() == in_contact - assert sensor.compute_first_air(dt=dt).item() == in_air + assert sensor.compute_first_contact(dt=dt).torch.item() == in_contact + assert sensor.compute_first_air(dt=dt).torch.item() == in_air def _perform_sim_step(sim, scene, sim_dt): diff --git a/source/isaaclab/test/sensors/test_frame_transformer.py b/source/isaaclab_physx/test/sensors/test_frame_transformer.py similarity index 91% rename from source/isaaclab/test/sensors/test_frame_transformer.py rename to source/isaaclab_physx/test/sensors/test_frame_transformer.py index 5e0ccf8e1f35..631e9ba118dd 100644 --- a/source/isaaclab/test/sensors/test_frame_transformer.py +++ b/source/isaaclab_physx/test/sensors/test_frame_transformer.py @@ -33,9 +33,9 @@ def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): - """Converts Euler XYZ to Quaternion (w, x, y, z).""" + """Converts Euler XYZ to Quaternion (x, y, z, w).""" quat = tf.Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() - return tuple(quat[[3, 0, 1, 2]].tolist()) + return tuple(quat.tolist()) # scipy already returns xyzw def euler_rpy_apply(rpy, xyz, degrees=False): @@ -74,16 +74,13 @@ class MySceneCfg(InteractiveSceneCfg): @pytest.fixture def sim(): """Create a simulation context.""" - # Create a new stage - sim_utils.create_new_stage() - # Load kit helper - sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005, device="cpu")) - # Set main camera - sim.set_camera_view(eye=(5.0, 5.0, 5.0), target=(0.0, 0.0, 0.0)) - yield sim - # Cleanup - sim.clear_all_callbacks() - sim.clear_instance() + sim_cfg = sim_utils.SimulationCfg(device="cpu", dt=0.005) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + # Set main camera + sim.set_camera_view(eye=(5.0, 5.0, 5.0), target=(0.0, 0.0, 0.0)) + yield sim + # Cleanup is handled by build_simulation_context def test_frame_transformer_feet_wrt_base(sim): @@ -92,7 +89,7 @@ def test_frame_transformer_feet_wrt_base(sim): In this test, the source frame is the robot base. """ # Spawn things into stage - scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) scene_cfg.frame_transformer = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/base", target_frames=[ @@ -148,7 +145,7 @@ def test_frame_transformer_feet_wrt_base(sim): feet_indices = [feet_indices[i] for i in reordering_indices] # default joint targets - default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() # Simulate physics @@ -156,10 +153,10 @@ def test_frame_transformer_feet_wrt_base(sim): # # reset if count % 25 == 0: # reset root state - root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot"].data.default_joint_pos - joint_vel = scene.articulations["robot"].data.default_joint_vel + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch # -- set root state # -- robot scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) @@ -180,14 +177,14 @@ def test_frame_transformer_feet_wrt_base(sim): # check absolute frame transforms in world frame # -- ground-truth - root_pose_w = scene.articulations["robot"].data.root_pose_w - feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] - feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch[:, feet_indices] # -- frame transformer - source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w - source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w - feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w - feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch # check if they are same torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) @@ -196,8 +193,8 @@ def test_frame_transformer_feet_wrt_base(sim): torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) # check if relative transforms are same - feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source - feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch for index in range(len(feet_indices)): # ground-truth foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( @@ -211,7 +208,7 @@ def test_frame_transformer_feet_wrt_base(sim): def test_frame_transformer_feet_wrt_thigh(sim): """Test feet transformation w.r.t. thigh source frame.""" # Spawn things into stage - scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) scene_cfg.frame_transformer = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/LF_THIGH", target_frames=[ @@ -246,7 +243,7 @@ def test_frame_transformer_feet_wrt_thigh(sim): assert scene.sensors["frame_transformer"].data.target_frame_names == user_feet_names # default joint targets - default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() # Simulate physics @@ -254,10 +251,10 @@ def test_frame_transformer_feet_wrt_thigh(sim): # # reset if count % 25 == 0: # reset root state - root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot"].data.default_joint_pos - joint_vel = scene.articulations["robot"].data.default_joint_vel + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch # -- set root state # -- robot scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) @@ -278,14 +275,14 @@ def test_frame_transformer_feet_wrt_thigh(sim): # check absolute frame transforms in world frame # -- ground-truth - source_pose_w_gt = scene.articulations["robot"].data.body_state_w[:, source_frame_index, :7] - feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] - feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] + source_pose_w_gt = scene.articulations["robot"].data.body_state_w.torch[:, source_frame_index, :7] + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch[:, feet_indices] # -- frame transformer - source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w - source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w - feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w - feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch # check if they are same torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf) torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf) @@ -293,8 +290,8 @@ def test_frame_transformer_feet_wrt_thigh(sim): torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) # check if relative transforms are same - feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source - feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch for index in range(len(feet_indices)): # ground-truth foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( @@ -324,7 +321,7 @@ def test_frame_transformer_robot_body_to_external_cube(sim): sim.reset() # default joint targets - default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() # Simulate physics @@ -332,10 +329,10 @@ def test_frame_transformer_robot_body_to_external_cube(sim): # # reset if count % 25 == 0: # reset root state - root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot"].data.default_joint_pos - joint_vel = scene.articulations["robot"].data.default_joint_vel + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch # -- set root state # -- robot scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) @@ -356,14 +353,14 @@ def test_frame_transformer_robot_body_to_external_cube(sim): # check absolute frame transforms in world frame # -- ground-truth - root_pose_w = scene.articulations["robot"].data.root_pose_w - cube_pos_w_gt = scene.rigid_objects["cube"].data.root_pos_w - cube_quat_w_gt = scene.rigid_objects["cube"].data.root_quat_w + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + cube_pos_w_gt = scene.rigid_objects["cube"].data.root_pos_w.torch + cube_quat_w_gt = scene.rigid_objects["cube"].data.root_quat_w.torch # -- frame transformer - source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w - source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w - cube_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.squeeze() - cube_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.squeeze() + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + cube_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch.squeeze() + cube_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch.squeeze() # check if they are same torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) @@ -372,8 +369,8 @@ def test_frame_transformer_robot_body_to_external_cube(sim): torch.testing.assert_close(cube_quat_w_gt, cube_quat_w_tf) # check if relative transforms are same - cube_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source - cube_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + cube_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + cube_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch # ground-truth cube_pos_b, cube_quat_b = math_utils.subtract_frame_transforms( root_pose_w[:, :3], root_pose_w[:, 3:], cube_pos_w_tf, cube_quat_w_tf @@ -403,7 +400,7 @@ def test_frame_transformer_offset_frames(sim): prim_path="{ENV_REGEX_NS}/cube", offset=OffsetCfg( pos=(0.0, 0.0, 0.1), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), ), FrameTransformerCfg.FrameCfg( @@ -411,7 +408,7 @@ def test_frame_transformer_offset_frames(sim): prim_path="{ENV_REGEX_NS}/cube", offset=OffsetCfg( pos=(0.0, 0.0, -0.1), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), ), ], @@ -428,7 +425,7 @@ def test_frame_transformer_offset_frames(sim): # # reset if count % 25 == 0: # reset root state - root_state = scene["cube"].data.default_root_state.clone() + root_state = scene["cube"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins # -- set root state # -- cube @@ -446,13 +443,13 @@ def test_frame_transformer_offset_frames(sim): # check absolute frame transforms in world frame # -- ground-truth - cube_pos_w_gt = scene["cube"].data.root_pos_w - cube_quat_w_gt = scene["cube"].data.root_quat_w + cube_pos_w_gt = scene["cube"].data.root_pos_w.torch + cube_quat_w_gt = scene["cube"].data.root_quat_w.torch # -- frame transformer - source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w - source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w - target_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.squeeze() - target_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.squeeze() + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + target_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch.squeeze() + target_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch.squeeze() target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names cube_center_idx = target_frame_names.index("CUBE_CENTER") @@ -508,7 +505,7 @@ def test_frame_transformer_all_bodies(sim): reordering_indices = [target_frame_names.index(name) for name in articulation_body_names] # default joint targets - default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + default_actions = scene.articulations["robot"].data.default_joint_pos.torch.clone() # Define simulation stepping sim_dt = sim.get_physics_dt() # Simulate physics @@ -516,10 +513,10 @@ def test_frame_transformer_all_bodies(sim): # # reset if count % 25 == 0: # reset root state - root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot"].data.default_joint_pos - joint_vel = scene.articulations["robot"].data.default_joint_vel + joint_pos = scene.articulations["robot"].data.default_joint_pos.torch + joint_vel = scene.articulations["robot"].data.default_joint_vel.torch # -- set root state # -- robot scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) @@ -540,15 +537,15 @@ def test_frame_transformer_all_bodies(sim): # check absolute frame transforms in world frame # -- ground-truth - root_pose_w = scene.articulations["robot"].data.root_pose_w - bodies_pos_w_gt = scene.articulations["robot"].data.body_pos_w - bodies_quat_w_gt = scene.articulations["robot"].data.body_quat_w + root_pose_w = scene.articulations["robot"].data.root_pose_w.torch + bodies_pos_w_gt = scene.articulations["robot"].data.body_pos_w.torch + bodies_quat_w_gt = scene.articulations["robot"].data.body_quat_w.torch # -- frame transformer - source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w - source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w - bodies_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w - bodies_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w.torch + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w.torch + bodies_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.torch + bodies_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.torch # check if they are same torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) @@ -556,8 +553,8 @@ def test_frame_transformer_all_bodies(sim): torch.testing.assert_close(bodies_pos_w_gt, bodies_pos_w_tf[:, reordering_indices]) torch.testing.assert_close(bodies_quat_w_gt, bodies_quat_w_tf[:, reordering_indices]) - bodies_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source - bodies_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + bodies_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source.torch + bodies_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source.torch # Go through each body and check if relative transforms are same for index in range(len(articulation_body_names)): @@ -711,22 +708,22 @@ class MultiRobotSceneCfg(InteractiveSceneCfg): # Reset periodically if count % 10 == 0: # Reset robot - root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state = scene.articulations["robot"].data.default_root_state.torch.clone() root_state[:, :3] += scene.env_origins scene.articulations["robot"].write_root_pose_to_sim(root_state[:, :7]) scene.articulations["robot"].write_root_velocity_to_sim(root_state[:, 7:]) scene.articulations["robot"].write_joint_state_to_sim( - scene.articulations["robot"].data.default_joint_pos, - scene.articulations["robot"].data.default_joint_vel, + scene.articulations["robot"].data.default_joint_pos.torch, + scene.articulations["robot"].data.default_joint_vel.torch, ) # Reset robot_1 - root_state_1 = scene.articulations["robot_1"].data.default_root_state.clone() + root_state_1 = scene.articulations["robot_1"].data.default_root_state.torch.clone() root_state_1[:, :3] += scene.env_origins scene.articulations["robot_1"].write_root_pose_to_sim(root_state_1[:, :7]) scene.articulations["robot_1"].write_root_velocity_to_sim(root_state_1[:, 7:]) scene.articulations["robot_1"].write_joint_state_to_sim( - scene.articulations["robot_1"].data.default_joint_pos, - scene.articulations["robot_1"].data.default_joint_vel, + scene.articulations["robot_1"].data.default_joint_pos.torch, + scene.articulations["robot_1"].data.default_joint_vel.torch, ) scene.reset() @@ -739,21 +736,21 @@ class MultiRobotSceneCfg(InteractiveSceneCfg): # Get frame transformer data frame_transformer_data = scene.sensors["frame_transformer"].data - source_pos_w = frame_transformer_data.source_pos_w - source_quat_w = frame_transformer_data.source_quat_w - target_pos_w = frame_transformer_data.target_pos_w + source_pos_w = frame_transformer_data.source_pos_w.torch + source_quat_w = frame_transformer_data.source_quat_w.torch + target_pos_w = frame_transformer_data.target_pos_w.torch # Get ground truth positions and orientations (after scene.update() so they're current) - robot_lf_pos_w = scene.articulations["robot"].data.body_pos_w[:, robot_lf_shank_body_idx] - robot_1_lf_pos_w = scene.articulations["robot_1"].data.body_pos_w[:, robot_1_lf_shank_body_idx] - robot_rf_pos_w = scene.articulations["robot"].data.body_pos_w[:, robot_rf_shank_body_idx] - robot_1_rf_pos_w = scene.articulations["robot_1"].data.body_pos_w[:, robot_1_rf_shank_body_idx] + robot_lf_pos_w = scene.articulations["robot"].data.body_pos_w.torch[:, robot_lf_shank_body_idx] + robot_1_lf_pos_w = scene.articulations["robot_1"].data.body_pos_w.torch[:, robot_1_lf_shank_body_idx] + robot_rf_pos_w = scene.articulations["robot"].data.body_pos_w.torch[:, robot_rf_shank_body_idx] + robot_1_rf_pos_w = scene.articulations["robot_1"].data.body_pos_w.torch[:, robot_1_rf_shank_body_idx] # Get expected source frame positions and orientations (after scene.update() so they're current) - expected_source_base_pos_w = scene.articulations[expected_source_robot].data.body_pos_w[ + expected_source_base_pos_w = scene.articulations[expected_source_robot].data.body_pos_w.torch[ :, expected_source_base_body_idx ] - expected_source_base_quat_w = scene.articulations[expected_source_robot].data.body_quat_w[ + expected_source_base_quat_w = scene.articulations[expected_source_robot].data.body_quat_w.torch[ :, expected_source_base_body_idx ] @@ -763,7 +760,7 @@ class MultiRobotSceneCfg(InteractiveSceneCfg): torch.testing.assert_close(source_quat_w, expected_source_base_quat_w, rtol=1e-5, atol=1e-5) # TEST 2: Explicit named frames (LF_SHANK) should have DIFFERENT world positions - lf_pos_difference = torch.norm(target_pos_w[:, robot_lf_idx] - target_pos_w[:, robot_1_lf_idx], dim=-1) + lf_pos_difference = torch.linalg.norm(target_pos_w[:, robot_lf_idx] - target_pos_w[:, robot_1_lf_idx], dim=-1) assert torch.all(lf_pos_difference > 1.0), ( f"Robot_LF_SHANK and Robot_1_LF_SHANK should have different positions (got diff={lf_pos_difference}). " "This indicates body name collision bug." @@ -775,7 +772,7 @@ class MultiRobotSceneCfg(InteractiveSceneCfg): # TEST 3: Implicit named frames (RF_SHANK) should also have DIFFERENT world positions # Even though they have the same frame name, internal body tracking uses full paths - rf_pos_difference = torch.norm( + rf_pos_difference = torch.linalg.norm( target_pos_w[:, rf_shank_indices[0]] - target_pos_w[:, rf_shank_indices[1]], dim=-1 ) assert torch.all(rf_pos_difference > 1.0), ( diff --git a/source/isaaclab_physx/test/sensors/test_imu.py b/source/isaaclab_physx/test/sensors/test_imu.py new file mode 100644 index 000000000000..641c72a8d4ba --- /dev/null +++ b/source/isaaclab_physx/test/sensors/test_imu.py @@ -0,0 +1,495 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import pathlib + +import pytest +import torch +import warp as wp +from isaaclab_physx.physics import PhysxCfg + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import ArticulationCfg, RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors.imu import Imu, ImuCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort: skip +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR # isort: skip + +# offset of imu_link from base_link on anymal_c +POS_OFFSET = (0.2488, 0.00835, 0.04628) +ROT_OFFSET = (0, 0, 0.7071068, 0.7071068) + +# offset of imu_link from link_1 on simple_2_link +PEND_POS_OFFSET = (0.4, 0.0, 0.1) +PEND_ROT_OFFSET = (0.5, 0.5, 0.5, 0.5) + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Example scene configuration.""" + + # terrain - flat terrain plane + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + max_init_terrain_level=None, + ) + + # rigid objects - balls + balls = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/ball", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 0.5)), + spawn=sim_utils.SphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)), + ), + ) + + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/cube", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -2.0, 0.5)), + spawn=sim_utils.CuboidCfg( + size=(0.25, 0.25, 0.25), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)), + ), + ) + + # articulations - robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/robot") + # pendulum1 - uses merge_fixed_joints=True (same as pendulum2) so that fixed-joint + # child links (base, imu_link) are merged into their parents during URDF XML + # pre-processing. This avoids fixed-joint constraint violations at velocity level + # (the solver uses velocity_iteration_count=0). A non-physics imu_link Xform is + # created programmatically in the test fixture (see setup_sim). + pendulum = ArticulationCfg( + prim_path="{ENV_REGEX_NS}/pendulum", + spawn=sim_utils.UrdfFileCfg( + fix_base=True, + merge_fixed_joints=True, + make_instanceable=False, + asset_path=f"{pathlib.Path(__file__).parent.resolve()}/urdfs/simple_2_link.urdf", + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + joint_drive=sim_utils.UrdfConverterCfg.JointDriveCfg( + gains=sim_utils.UrdfConverterCfg.JointDriveCfg.PDGainsCfg(stiffness=None, damping=None) + ), + ), + init_state=ArticulationCfg.InitialStateCfg(), + actuators={ + "joint_1_act": ImplicitActuatorCfg(joint_names_expr=["joint_.*"], stiffness=0.0, damping=0.3), + }, + ) + # pendulum2 - uses merge_fixed_joints=True so that the fixed-joint child links (base, imu_link) + # are merged into their parents during URDF XML pre-processing. A non-physics imu_link Xform + # is created programmatically in the test fixture to test indirect IMU attachment (see setup_sim). + pendulum2 = ArticulationCfg( + prim_path="{ENV_REGEX_NS}/pendulum2", + spawn=sim_utils.UrdfFileCfg( + fix_base=True, + merge_fixed_joints=True, + make_instanceable=False, + asset_path=f"{pathlib.Path(__file__).parent.resolve()}/urdfs/simple_2_link.urdf", + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + joint_drive=sim_utils.UrdfConverterCfg.JointDriveCfg( + gains=sim_utils.UrdfConverterCfg.JointDriveCfg.PDGainsCfg(stiffness=None, damping=None) + ), + ), + init_state=ArticulationCfg.InitialStateCfg(), + actuators={ + "joint_1_act": ImplicitActuatorCfg(joint_names_expr=["joint_.*"], stiffness=0.0, damping=0.3), + }, + ) + + # sensors - imu (filled inside unit test) + imu_ball: ImuCfg = ImuCfg(prim_path="{ENV_REGEX_NS}/ball") + imu_cube: ImuCfg = ImuCfg(prim_path="{ENV_REGEX_NS}/cube") + imu_robot_imu_link: ImuCfg = ImuCfg(prim_path="{ENV_REGEX_NS}/robot/imu_link") + imu_robot_base: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/robot/base", + offset=ImuCfg.OffsetCfg( + pos=POS_OFFSET, + rot=ROT_OFFSET, + ), + ) + imu_robot_norb: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/robot/LF_HIP/LF_hip_fixed", + offset=ImuCfg.OffsetCfg( + pos=POS_OFFSET, + rot=ROT_OFFSET, + ), + ) + # The new URDF converter (urdf-usd-converter) places links under Geometry/ in a nested + # kinematic tree. With merge_fixed_joints=True the hierarchy for simple_2_link.urdf is: + # Geometry/world/link_1 (base merged into world, imu_link merged into link_1) + # A non-physics imu_link Xform is recreated in the test fixture (see setup_sim). + imu_indirect_pendulum_link: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/pendulum2/Geometry/world/link_1/imu_link", + ) + imu_indirect_pendulum_base: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/pendulum2/Geometry/world/link_1", + offset=ImuCfg.OffsetCfg( + pos=PEND_POS_OFFSET, + rot=PEND_ROT_OFFSET, + ), + ) + imu_pendulum_imu_link: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/pendulum/Geometry/world/link_1/imu_link", + ) + imu_pendulum_base: ImuCfg = ImuCfg( + prim_path="{ENV_REGEX_NS}/pendulum/Geometry/world/link_1", + offset=ImuCfg.OffsetCfg( + pos=PEND_POS_OFFSET, + rot=PEND_ROT_OFFSET, + ), + ) + + def __post_init__(self): + """Post initialization.""" + # change position of the robot + self.robot.init_state.pos = (0.0, 2.0, 1.0) + self.pendulum.init_state.pos = (-2.0, 1.0, 0.5) + self.pendulum2.init_state.pos = (2.0, 1.0, 0.5) + + # change asset + self.robot.spawn.usd_path = f"{ISAAC_NUCLEUS_DIR}/Robots/ANYbotics/anymal_c/anymal_c.usd" + # change iterations + self.robot.spawn.articulation_props.solver_position_iteration_count = 32 + self.robot.spawn.articulation_props.solver_velocity_iteration_count = 32 + + +@pytest.fixture +def setup_sim(): + """Create a simulation context and scene.""" + sim_cfg = sim_utils.SimulationCfg( + dt=0.001, physics=PhysxCfg(solver_type=0) + ) # 0: PGS, 1: TGS --> use PGS for more accurate results + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + # construct scene + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False, replicate_physics=False) + scene = InteractiveScene(scene_cfg) + # Both pendulum and pendulum2 use merge_fixed_joints=True, so the + # fixed-joint child link imu_link is removed from the URDF before USD + # conversion. Recreate it as a plain Xform (no RigidBodyAPI) under each + # pendulum's link_1 for every environment. The IMU sensor must then + # resolve the rigid-body ancestor (link_1) and cache the fixed offset -- + # exercising the "indirect attachment" code path. + for i in range(scene_cfg.num_envs): + for art_name in ("pendulum", "pendulum2"): + prim_path = f"/World/envs/env_{i}/{art_name}/Geometry/world/link_1/imu_link" + sim_utils.create_prim(prim_path, "Xform", translation=PEND_POS_OFFSET, orientation=PEND_ROT_OFFSET) + # Play the simulator + sim.reset() + yield sim, scene + # Cleanup is handled by build_simulation_context + + +@pytest.mark.isaacsim_ci +def test_constant_velocity(setup_sim): + """Test the Imu sensor with a constant velocity. + + Expected behavior is that the linear acceleration is approx the same at every time step as in each step we set + the same velocity and therefore reset the physx buffers. + """ + sim, scene = setup_sim + prev_lin_acc_ball = torch.zeros((scene.num_envs, 3), dtype=torch.float32, device=scene.device) + prev_lin_acc_cube = torch.zeros((scene.num_envs, 3), dtype=torch.float32, device=scene.device) + + for idx in range(200): + # set velocity + scene.rigid_objects["balls"].write_root_velocity_to_sim( + torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat( + scene.num_envs, 1 + ) + ) + scene.rigid_objects["cube"].write_root_velocity_to_sim( + torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat( + scene.num_envs, 1 + ) + ) + # write data to sim + scene.write_data_to_sim() + + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + if idx > 1: + # check the imu accelerations + torch.testing.assert_close( + scene.sensors["imu_ball"].data.lin_acc_b.torch, + prev_lin_acc_ball, + rtol=1e-3, + atol=1e-3, + ) + + torch.testing.assert_close( + scene.sensors["imu_cube"].data.lin_acc_b.torch, + prev_lin_acc_cube, + rtol=1e-3, + atol=1e-3, + ) + + # update previous values + prev_lin_acc_ball = scene.sensors["imu_ball"].data.lin_acc_b.torch.clone() + prev_lin_acc_cube = scene.sensors["imu_cube"].data.lin_acc_b.torch.clone() + + +@pytest.mark.isaacsim_ci +def test_constant_acceleration(setup_sim): + """Test the Imu sensor with a constant acceleration.""" + sim, scene = setup_sim + for idx in range(100): + # set acceleration + scene.rigid_objects["balls"].write_root_velocity_to_sim( + torch.tensor([[0.1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat( + scene.num_envs, 1 + ) + * (idx + 1) + ) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + # skip first step where initial velocity is zero + if idx < 1: + continue + + # check the imu linear acceleration data (includes gravity since IMU always measures it) + torch.testing.assert_close( + scene.sensors["imu_ball"].data.lin_acc_b.torch, + math_utils.quat_apply_inverse( + scene.rigid_objects["balls"].data.root_quat_w.torch, + torch.tensor([[0.1, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat(scene.num_envs, 1) + / sim.get_physics_dt() + + torch.tensor([[0.0, 0.0, 9.81]], dtype=torch.float32, device=scene.device).repeat(scene.num_envs, 1), + ), + rtol=1e-4, + atol=1e-4, + ) + + # check the angular velocity + torch.testing.assert_close( + scene.sensors["imu_ball"].data.ang_vel_b.torch, + scene.rigid_objects["balls"].data.root_ang_vel_b.torch, + rtol=1e-4, + atol=1e-4, + ) + + +@pytest.mark.isaacsim_ci +def test_single_dof_pendulum(setup_sim): + """Test imu against analytical pendulum problem.""" + sim, scene = setup_sim + + for idx in range(500): + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + # IMU data + imu_data = scene.sensors["imu_pendulum_imu_link"].data + base_data = scene.sensors["imu_pendulum_base"].data + + # skip first step where initial velocity is zero + if idx < 2: + continue + + # check the angular velocities of the imus between offset and direct definition + torch.testing.assert_close( + base_data.ang_vel_b.torch, + imu_data.ang_vel_b.torch, + rtol=1e-4, + atol=1e-4, + ) + + # check the linear acceleration of the imus between offset and direct definition + torch.testing.assert_close( + base_data.lin_acc_b.torch, + imu_data.lin_acc_b.torch, + rtol=1e-1, + atol=1e-1, + ) + + +@pytest.mark.isaacsim_ci +def test_indirect_attachment(setup_sim): + """Test attaching the imu through an xForm primitive configuration argument.""" + sim, scene = setup_sim + + for idx in range(500): + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + imu = scene.sensors["imu_indirect_pendulum_link"] + imu_base = scene.sensors["imu_indirect_pendulum_base"] + + torch.testing.assert_close( + wp.to_torch(imu._offset_pos_b), + wp.to_torch(imu_base._offset_pos_b), + ) + torch.testing.assert_close( + wp.to_torch(imu._offset_quat_b), + wp.to_torch(imu_base._offset_quat_b), + rtol=1e-4, + atol=1e-4, + ) + + # IMU data + imu_data = scene.sensors["imu_indirect_pendulum_link"].data + base_data = scene.sensors["imu_indirect_pendulum_base"].data + + # skip first step where initial velocity is zero + if idx < 2: + continue + + # check the angular velocities of the imus between offset and direct definition + torch.testing.assert_close( + base_data.ang_vel_b.torch, + imu_data.ang_vel_b.torch, + rtol=1e-4, + atol=1e-4, + ) + + # check the linear acceleration of the imus between offset and direct definition + torch.testing.assert_close( + base_data.lin_acc_b.torch, + imu_data.lin_acc_b.torch, + rtol=1e-1, + atol=1e-1, + ) + + +@pytest.mark.isaacsim_ci +def test_offset_calculation(setup_sim): + """Test offset configuration argument.""" + sim, scene = setup_sim + + # should achieve same results between the two imu sensors on the robot + for idx in range(500): + # set acceleration + scene.articulations["robot"].write_root_velocity_to_sim( + torch.tensor([[0.05, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat( + scene.num_envs, 1 + ) + * (idx + 1) + ) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + # skip first step where initial velocity is zero + if idx < 1: + continue + + # check the linear accelerations + torch.testing.assert_close( + scene.sensors["imu_robot_base"].data.lin_acc_b.torch, + scene.sensors["imu_robot_imu_link"].data.lin_acc_b.torch, + rtol=1e-4, + atol=1e-4, + ) + + # check the angular velocities + torch.testing.assert_close( + scene.sensors["imu_robot_base"].data.ang_vel_b.torch, + scene.sensors["imu_robot_imu_link"].data.ang_vel_b.torch, + rtol=1e-4, + atol=1e-4, + ) + + +@pytest.mark.isaacsim_ci +def test_attachment_validity(setup_sim): + """Test invalid imu attachment. An imu cannot be attached directly to the world. It must be somehow attached to + something implementing physics.""" + sim, scene = setup_sim + imu_world_cfg = ImuCfg(prim_path="/World/envs/env_0") + with pytest.raises(RuntimeError) as exc_info: + imu_world = Imu(imu_world_cfg) + imu_world._initialize_impl() + assert exc_info.type is RuntimeError and "find a rigid body ancestor prim" in str(exc_info.value) + + +@pytest.mark.isaacsim_ci +def test_env_ids_propagation(setup_sim): + """Test that env_ids argument propagates through update and reset methods""" + sim, scene = setup_sim + scene.reset() + + for idx in range(10): + # set acceleration + scene.articulations["robot"].write_root_velocity_to_sim( + torch.tensor([[0.5, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat( + scene.num_envs, 1 + ) + * (idx + 1) + ) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + # reset scene for env 1 + scene.reset(env_ids=[1]) + # read data from sim + scene.update(sim.get_physics_dt()) + # perform step + sim.step() + # read data from sim + scene.update(sim.get_physics_dt()) + + +@pytest.mark.isaacsim_ci +def test_sensor_print(setup_sim): + """Test sensor print is working correctly.""" + sim, scene = setup_sim + # Create sensor + sensor = scene.sensors["imu_ball"] + # print info + print(sensor) diff --git a/source/isaaclab_physx/test/sensors/test_joint_wrench_sensor.py b/source/isaaclab_physx/test/sensors/test_joint_wrench_sensor.py new file mode 100644 index 000000000000..4f10f2506ebb --- /dev/null +++ b/source/isaaclab_physx/test/sensors/test_joint_wrench_sensor.py @@ -0,0 +1,426 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import math + +import pytest +import torch +import warp as wp +from isaaclab_physx.physics import PhysxCfg + +from pxr import Gf, UsdPhysics + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import Articulation, ArticulationCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors import JointWrenchSensor, JointWrenchSensorCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass +from isaaclab.utils import math as math_utils +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from isaaclab_assets.robots.ant import ANT_CFG + + +def _make_single_joint_articulation_cfg() -> ArticulationCfg: + """Single-joint revolute test articulation (root ``CenterPivot`` + arm ``Arm``).""" + return ArticulationCfg( + prim_path="{ENV_REGEX_NS}/Robot", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/IsaacSim/SimpleArticulation/revolute_articulation.usd", + joint_drive_props=sim_utils.JointDrivePropertiesCfg(max_effort=80.0, max_velocity=5.0), + ), + actuators={ + "joint": ImplicitActuatorCfg( + joint_names_expr=[".*"], + stiffness=2000.0, + damping=100.0, + ), + }, + init_state=ArticulationCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +def _make_cartpole_articulation_cfg(pole_damping: float = 0.0) -> ArticulationCfg: + """Two-joint cartpole articulation (cart + pole). + + Args: + pole_damping: Damping for the cart-to-pole revolute joint. + """ + return ArticulationCfg( + prim_path="{ENV_REGEX_NS}/Robot", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd", + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 2.0), + joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}, + ), + actuators={ + "cart_actuator": ImplicitActuatorCfg( + joint_names_expr=["slider_to_cart"], effort_limit_sim=400.0, stiffness=0.0, damping=10.0 + ), + "pole_actuator": ImplicitActuatorCfg( + joint_names_expr=["cart_to_pole"], effort_limit_sim=400.0, stiffness=0.0, damping=pole_damping + ), + }, + ) + + +@configclass +class _SingleJointSceneCfg(InteractiveSceneCfg): + """Scene with a single-joint articulation and the joint-wrench sensor.""" + + env_spacing = 2.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_single_joint_articulation_cfg() + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _CartpoleSceneCfg(InteractiveSceneCfg): + """Scene with a cartpole (2-joint) articulation and the joint-wrench sensor.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_cartpole_articulation_cfg() + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _CartpoleDampedSceneCfg(InteractiveSceneCfg): + """Cartpole with pole damping for steady-state physics validation tests.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = _make_cartpole_articulation_cfg(pole_damping=10.0) + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@configclass +class _NestedRootAntSceneCfg(InteractiveSceneCfg): + """Ant USD asset whose articulation root is nested under the configured asset prim.""" + + env_spacing = 4.0 + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + robot = ANT_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + + +@pytest.fixture +def sim(): + """Simulation context using the PhysX backend.""" + sim_cfg = SimulationCfg( + dt=1.0 / 120.0, + physics=PhysxCfg(), + ) + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim_ctx: + sim_ctx._app_control_on_stop_handle = None + yield sim_ctx + + +def _physx_incoming_joint_wrench(sensor: JointWrenchSensor) -> torch.Tensor: + """Read the raw PhysX incoming joint wrench tensor. + + PhysX reports spatial vectors as force followed by torque. Shape is + ``(num_envs, num_bodies, 6)``. + """ + raw_wrench = sensor._root_view.get_link_incoming_joint_force().view(wp.spatial_vectorf) + return wp.to_torch(raw_wrench) + + +def _assert_sensor_matches_physx_tensor(sensor: JointWrenchSensor) -> None: + """Compare sensor buffers to the raw PhysX tensor transformed into joint frames.""" + raw_wrench = _physx_incoming_joint_wrench(sensor) + sensor_data = sensor.data + + expected_force, expected_torque = _physx_incoming_joint_wrench_in_joint_frame(sensor, raw_wrench) + torch.testing.assert_close(sensor_data.force.torch, expected_force) + torch.testing.assert_close(sensor_data.torque.torch, expected_torque) + + +def _physx_incoming_joint_wrench_in_joint_frame( + sensor: JointWrenchSensor, raw_wrench: torch.Tensor +) -> tuple[torch.Tensor, torch.Tensor]: + """Transform raw PhysX body-frame incoming joint wrenches into the configured convention.""" + force_b = raw_wrench[..., :3] + torque_b = raw_wrench[..., 3:] + joint_pos_b = wp.to_torch(sensor._joint_pos_b).unsqueeze(0) + joint_quat_b = wp.to_torch(sensor._joint_quat_b).unsqueeze(0) + torque_joint_anchor_b = torque_b - torch.cross(joint_pos_b.expand_as(force_b), force_b, dim=-1) + + flat_joint_quat_b = joint_quat_b.expand_as(raw_wrench[..., :4]).reshape(-1, 4) + expected_force = math_utils.quat_apply_inverse(flat_joint_quat_b, force_b.reshape(-1, 3)).reshape(force_b.shape) + expected_torque = math_utils.quat_apply_inverse(flat_joint_quat_b, torque_joint_anchor_b.reshape(-1, 3)).reshape( + torque_b.shape + ) + return expected_force, expected_torque + + +def _set_child_joint_frame(scene: InteractiveScene, child_body_name: str) -> None: + """Set a non-identity child-side joint frame for the requested body in env 0.""" + for prim in scene.stage.Traverse(): + if not prim.GetPath().pathString.startswith("/World/envs/env_0/Robot"): + continue + joint = UsdPhysics.Joint(prim) + if joint and any(target.name == child_body_name for target in joint.GetBody1Rel().GetTargets()): + joint.GetLocalPos1Attr().Set(Gf.Vec3f(0.25, -0.15, 0.1)) + joint.GetLocalRot1Attr().Set( + Gf.Quatf( + math.cos(math.pi / 4.0), + Gf.Vec3f(math.sin(math.pi / 4.0), 0.0, 0.0), + ) + ) + return + raise RuntimeError(f"Failed to find a USD joint with child body '{child_body_name}'.") + + +# --------------------------------------------------------------------------- +# Sensor data — pre-init contract +# --------------------------------------------------------------------------- + + +def test_data_before_init_is_none(): + """``force``/``torque`` return ``None`` before :meth:`create_buffers` runs.""" + from isaaclab_physx.sensors.joint_wrench import JointWrenchSensorData + + data = JointWrenchSensorData() + assert data.force is None + assert data.torque is None + + +# --------------------------------------------------------------------------- +# Initialization and shapes +# --------------------------------------------------------------------------- + + +def test_initialization_and_shapes(sim): + """Sensor initializes on sim reset and exposes correctly-shaped buffers.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + robot: Articulation = scene["robot"] + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + # PhysX reports one incoming joint wrench per articulation link, including the root link. + num_envs = 2 + num_bodies = robot.num_bodies + assert sensor.data.force.torch.shape == (num_envs, num_bodies, 3) + assert sensor.data.torque.torch.shape == (num_envs, num_bodies, 3) + assert sensor.body_names == robot.body_names + assert sensor.find_bodies("Arm") == ([robot.body_names.index("Arm")], ["Arm"]) + _assert_sensor_matches_physx_tensor(sensor) + + +def test_multi_body_articulation(sim): + """Cartpole exposes a wrench for each link labelled by body name.""" + scene = InteractiveScene(_CartpoleSceneCfg(num_envs=2)) + sim.reset() + + robot: Articulation = scene["robot"] + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + num_envs = 2 + num_bodies = robot.num_bodies + assert sensor.data.force.torch.shape == (num_envs, num_bodies, 3) + assert sensor.data.torque.torch.shape == (num_envs, num_bodies, 3) + assert sensor.body_names == robot.body_names + assert len(sensor.body_names) == num_bodies + _assert_sensor_matches_physx_tensor(sensor) + + +def test_nested_articulation_root_resolution(sim): + """Sensor accepts an asset prim path whose articulation root is nested in the USD asset.""" + scene = InteractiveScene(_NestedRootAntSceneCfg(num_envs=1)) + sim.reset() + + robot: Articulation = scene["robot"] + sensor: JointWrenchSensor = scene["wrench"] + sim.step() + scene.update(sim.get_physics_dt()) + + assert sensor.body_names == robot.body_names + assert sensor.data.force.torch.shape == (1, robot.num_bodies, 3) + assert sensor.data.torque.torch.shape == (1, robot.num_bodies, 3) + _assert_sensor_matches_physx_tensor(sensor) + + +# --------------------------------------------------------------------------- +# Physical correctness +# --------------------------------------------------------------------------- + + +def test_force_and_torque_components_at_rest(sim): + """Component-level validation of force and torque against the PhysX tensor API.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + for _ in range(400): + sim.step() + scene.update(sim.get_physics_dt()) + + _assert_sensor_matches_physx_tensor(sensor) + + arm_idx = robot.body_names.index("Arm") + raw_wrench = _physx_incoming_joint_wrench(sensor) + assert torch.any(raw_wrench[:, arm_idx, :] != 0.0) + + +def test_non_identity_joint_frame_transform(sim): + """PhysX raw body-frame wrench is converted to the child-side joint frame.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=1)) + _set_child_joint_frame(scene, "Arm") + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + arm_idx = robot.body_names.index("Arm") + + for _ in range(400): + sim.step() + scene.update(sim.get_physics_dt()) + + raw_wrench = _physx_incoming_joint_wrench(sensor) + expected_force, expected_torque = _physx_incoming_joint_wrench_in_joint_frame(sensor, raw_wrench) + torch.testing.assert_close(sensor.data.force.torch, expected_force) + torch.testing.assert_close(sensor.data.torque.torch, expected_torque) + + raw_force = raw_wrench[:, arm_idx, :3] + raw_torque = raw_wrench[:, arm_idx, 3:] + assert not torch.allclose(sensor.data.force.torch[:, arm_idx], raw_force) + assert not torch.allclose(sensor.data.torque.torch[:, arm_idx], raw_torque) + + +def test_wrench_with_external_force_and_torque(sim): + """Full wrench validation with external force and torque applied.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + arm_idx = robot.body_names.index("Arm") + + # Apply 10 N in body-Y and 10 N·m in body-Z on the arm (matches Newton test). + ext_force_b = torch.zeros((1, robot.num_bodies, 3), device=sim.device) + ext_force_b[:, arm_idx, 1] = 10.0 + ext_torque_b = torch.zeros((1, robot.num_bodies, 3), device=sim.device) + ext_torque_b[:, arm_idx, 2] = 10.0 + + for _ in range(800): + robot.permanent_wrench_composer.set_forces_and_torques_index(forces=ext_force_b, torques=ext_torque_b) + robot.write_data_to_sim() + sim.step() + scene.update(sim.get_physics_dt()) + + _assert_sensor_matches_physx_tensor(sensor) + + raw_wrench = _physx_incoming_joint_wrench(sensor) + assert torch.any(raw_wrench[:, arm_idx, :] != 0.0) + + +def test_interior_joint_wrench_at_rest(sim): + """Interior joint wrench matches the raw PhysX incoming-joint tensor. + + The cartpole has an interior joint (``slider_to_cart``) and a terminal + joint (``cart_to_pole``). PhysX reports one entry for every link, so this + test compares all link entries, including the cart link controlled by the + interior joint, against the underlying tensor API. + """ + scene = InteractiveScene(_CartpoleDampedSceneCfg(num_envs=1)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + robot: Articulation = scene["robot"] + + for _ in range(800): + sim.step() + scene.update(sim.get_physics_dt()) + + _assert_sensor_matches_physx_tensor(sensor) + + cart_idx = robot.body_names.index("cart") + raw_wrench = _physx_incoming_joint_wrench(sensor) + assert torch.any(raw_wrench[:, cart_idx, :] != 0.0) + + +# --------------------------------------------------------------------------- +# String representation +# --------------------------------------------------------------------------- + + +def test_sensor_print(sim): + """Test that the sensor string representation works.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + sensor_str = str(sensor) + assert "physx" in sensor_str + assert "Joint wrench sensor" in sensor_str + + +# --------------------------------------------------------------------------- +# Reset behavior +# --------------------------------------------------------------------------- + + +def test_reset_zeros_buffers(sim): + """Resetting the sensor clears the force / torque buffers.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=2)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + for _ in range(100): + sim.step() + scene.update(sim.get_physics_dt()) + + assert torch.any(sensor.data.force.torch != 0), "Expected non-zero data before reset" + + sensor.reset() + + # Access raw buffers to skip lazy re-population from the PhysX view on the next data read. + force_after = wp.to_torch(sensor._data._force) + torque_after = wp.to_torch(sensor._data._torque) + torch.testing.assert_close(force_after, torch.zeros_like(force_after)) + torch.testing.assert_close(torque_after, torch.zeros_like(torque_after)) + + +def test_reset_with_env_ids_only_zeros_selected_envs(sim): + """Partial reset via env_ids should zero the selected envs and preserve the others.""" + scene = InteractiveScene(_SingleJointSceneCfg(num_envs=4)) + sim.reset() + + sensor: JointWrenchSensor = scene["wrench"] + for _ in range(100): + sim.step() + scene.update(sim.get_physics_dt()) + + force_before = sensor.data.force.torch.clone() + assert torch.any(force_before != 0), "Expected non-zero data before reset" + + sensor.reset(env_ids=[0, 2]) + + force_after = wp.to_torch(sensor._data._force) + torch.testing.assert_close(force_after[0], torch.zeros_like(force_after[0])) + torch.testing.assert_close(force_after[2], torch.zeros_like(force_after[2])) + torch.testing.assert_close(force_after[1], force_before[1]) + torch.testing.assert_close(force_after[3], force_before[3]) diff --git a/source/isaaclab/test/sensors/test_imu.py b/source/isaaclab_physx/test/sensors/test_pva.py similarity index 62% rename from source/isaaclab/test/sensors/test_imu.py rename to source/isaaclab_physx/test/sensors/test_pva.py index 92c97f0c6d70..feb527f88c62 100644 --- a/source/isaaclab/test/sensors/test_imu.py +++ b/source/isaaclab_physx/test/sensors/test_pva.py @@ -17,6 +17,8 @@ import pytest import torch +import warp as wp +from isaaclab_physx.physics import PhysxCfg import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils @@ -24,7 +26,7 @@ from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.markers.config import GREEN_ARROW_X_MARKER_CFG, RED_ARROW_X_MARKER_CFG from isaaclab.scene import InteractiveScene, InteractiveSceneCfg -from isaaclab.sensors.imu import Imu, ImuCfg +from isaaclab.sensors.pva import Pva, PvaCfg from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass @@ -36,7 +38,7 @@ # offset of imu_link from base_link on anymal_c POS_OFFSET = (0.2488, 0.00835, 0.04628) -ROT_OFFSET = (0.7071068, 0, 0, 0.7071068) +ROT_OFFSET = (0, 0, 0.7071068, 0.7071068) # offset of imu_link from link_1 on simple_2_link PEND_POS_OFFSET = (0.4, 0.0, 0.1) @@ -81,12 +83,16 @@ class MySceneCfg(InteractiveSceneCfg): # articulations - robot robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/robot") - # pendulum1 + # pendulum1 - uses merge_fixed_joints=True (same as pendulum2) so that fixed-joint + # child links (base, imu_link) are merged into their parents during URDF XML + # pre-processing. This avoids fixed-joint constraint violations at velocity level + # (the solver uses velocity_iteration_count=0). A non-physics imu_link Xform is + # created programmatically in the test fixture (see setup_sim). pendulum = ArticulationCfg( prim_path="{ENV_REGEX_NS}/pendulum", spawn=sim_utils.UrdfFileCfg( fix_base=True, - merge_fixed_joints=False, + merge_fixed_joints=True, make_instanceable=False, asset_path=f"{pathlib.Path(__file__).parent.resolve()}/urdfs/simple_2_link.urdf", articulation_props=sim_utils.ArticulationRootPropertiesCfg( @@ -101,7 +107,9 @@ class MySceneCfg(InteractiveSceneCfg): "joint_1_act": ImplicitActuatorCfg(joint_names_expr=["joint_.*"], stiffness=0.0, damping=0.3), }, ) - # pendulum2 + # pendulum2 - uses merge_fixed_joints=True so that the fixed-joint child links (base, imu_link) + # are merged into their parents during URDF XML pre-processing. A non-physics imu_link Xform + # is created programmatically in the test fixture to test indirect PVA attachment (see setup_sim). pendulum2 = ArticulationCfg( prim_path="{ENV_REGEX_NS}/pendulum2", spawn=sim_utils.UrdfFileCfg( @@ -122,66 +130,55 @@ class MySceneCfg(InteractiveSceneCfg): }, ) - # sensors - imu (filled inside unit test) - imu_ball: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/ball", - gravity_bias=(0.0, 0.0, 0.0), - ) - imu_cube: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/cube", - gravity_bias=(0.0, 0.0, 0.0), - ) - imu_robot_imu_link: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/robot/imu_link", - gravity_bias=(0.0, 0.0, 0.0), - ) - imu_robot_base: ImuCfg = ImuCfg( + # sensors - pva (filled inside unit test) + pva_ball: PvaCfg = PvaCfg(prim_path="{ENV_REGEX_NS}/ball") + pva_cube: PvaCfg = PvaCfg(prim_path="{ENV_REGEX_NS}/cube") + pva_robot_imu_link: PvaCfg = PvaCfg(prim_path="{ENV_REGEX_NS}/robot/imu_link") + pva_robot_base: PvaCfg = PvaCfg( prim_path="{ENV_REGEX_NS}/robot/base", - offset=ImuCfg.OffsetCfg( + offset=PvaCfg.OffsetCfg( pos=POS_OFFSET, rot=ROT_OFFSET, ), - gravity_bias=(0.0, 0.0, 0.0), ) - imu_robot_norb: ImuCfg = ImuCfg( + pva_robot_norb: PvaCfg = PvaCfg( prim_path="{ENV_REGEX_NS}/robot/LF_HIP/LF_hip_fixed", - offset=ImuCfg.OffsetCfg( + offset=PvaCfg.OffsetCfg( pos=POS_OFFSET, rot=ROT_OFFSET, ), - gravity_bias=(0.0, 0.0, 0.0), ) - imu_indirect_pendulum_link: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/pendulum2/link_1/imu_link", + # The new URDF converter (urdf-usd-converter) places links under Geometry/ in a nested + # kinematic tree. With merge_fixed_joints=True the hierarchy for simple_2_link.urdf is: + # Geometry/world/link_1 (base merged into world, imu_link merged into link_1) + # A non-physics imu_link Xform is recreated in the test fixture (see setup_sim). + pva_indirect_pendulum_link: PvaCfg = PvaCfg( + prim_path="{ENV_REGEX_NS}/pendulum2/Geometry/world/link_1/imu_link", debug_vis=not app_launcher._headless, visualizer_cfg=RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Acceleration/imu_link"), - gravity_bias=(0.0, 0.0, 9.81), ) - imu_indirect_pendulum_base: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/pendulum2/link_1", - offset=ImuCfg.OffsetCfg( + pva_indirect_pendulum_base: PvaCfg = PvaCfg( + prim_path="{ENV_REGEX_NS}/pendulum2/Geometry/world/link_1", + offset=PvaCfg.OffsetCfg( pos=PEND_POS_OFFSET, rot=PEND_ROT_OFFSET, ), debug_vis=not app_launcher._headless, visualizer_cfg=GREEN_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Acceleration/base"), - gravity_bias=(0.0, 0.0, 9.81), ) - imu_pendulum_imu_link: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/pendulum/imu_link", + pva_pendulum_imu_link: PvaCfg = PvaCfg( + prim_path="{ENV_REGEX_NS}/pendulum/Geometry/world/link_1/imu_link", debug_vis=not app_launcher._headless, visualizer_cfg=RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Acceleration/imu_link"), - gravity_bias=(0.0, 0.0, 9.81), ) - imu_pendulum_base: ImuCfg = ImuCfg( - prim_path="{ENV_REGEX_NS}/pendulum/link_1", - offset=ImuCfg.OffsetCfg( + pva_pendulum_base: PvaCfg = PvaCfg( + prim_path="{ENV_REGEX_NS}/pendulum/Geometry/world/link_1", + offset=PvaCfg.OffsetCfg( pos=PEND_POS_OFFSET, rot=PEND_ROT_OFFSET, ), debug_vis=not app_launcher._headless, visualizer_cfg=GREEN_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Acceleration/base"), - gravity_bias=(0.0, 0.0, 9.81), ) def __post_init__(self): @@ -201,26 +198,33 @@ def __post_init__(self): @pytest.fixture def setup_sim(): """Create a simulation context and scene.""" - # Create a new stage - sim_utils.create_new_stage() - # Load simulation context - sim_cfg = sim_utils.SimulationCfg(dt=0.001) - sim_cfg.physx.solver_type = 0 # 0: PGS, 1: TGS --> use PGS for more accurate results - sim = sim_utils.SimulationContext(sim_cfg) - # construct scene - scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) - scene = InteractiveScene(scene_cfg) - # Play the simulator - sim.reset() - yield sim, scene - # Cleanup - sim.clear_all_callbacks() - sim.clear_instance() + sim_cfg = sim_utils.SimulationCfg( + dt=0.001, physics=PhysxCfg(solver_type=0) + ) # 0: PGS, 1: TGS --> use PGS for more accurate results + with sim_utils.build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + # construct scene + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False, replicate_physics=False) + scene = InteractiveScene(scene_cfg) + # Both pendulum and pendulum2 use merge_fixed_joints=True, so the + # fixed-joint child link imu_link is removed from the URDF before USD + # conversion. Recreate it as a plain Xform (no RigidBodyAPI) under each + # pendulum's link_1 for every environment. The PVA sensor must then + # resolve the rigid-body ancestor (link_1) and cache the fixed offset — + # exercising the "indirect attachment" code path. + for i in range(scene_cfg.num_envs): + for art_name in ("pendulum", "pendulum2"): + prim_path = f"/World/envs/env_{i}/{art_name}/Geometry/world/link_1/imu_link" + sim_utils.create_prim(prim_path, "Xform", translation=PEND_POS_OFFSET, orientation=PEND_ROT_OFFSET) + # Play the simulator + sim.reset() + yield sim, scene + # Cleanup is handled by build_simulation_context @pytest.mark.isaacsim_ci def test_constant_velocity(setup_sim): - """Test the Imu sensor with a constant velocity. + """Test the PVA sensor with a constant velocity. Expected behavior is that the linear and angular are approx the same at every time step as in each step we set the same velocity and therefore reset the physx buffers. @@ -252,39 +256,39 @@ def test_constant_velocity(setup_sim): scene.update(sim.get_physics_dt()) if idx > 1: - # check the imu accelerations + # check the pva accelerations torch.testing.assert_close( - scene.sensors["imu_ball"].data.lin_acc_b, + scene.sensors["pva_ball"].data.lin_acc_b.torch, prev_lin_acc_ball, rtol=1e-3, atol=1e-3, ) torch.testing.assert_close( - scene.sensors["imu_ball"].data.ang_acc_b, + scene.sensors["pva_ball"].data.ang_acc_b.torch, prev_ang_acc_ball, rtol=1e-3, atol=1e-3, ) torch.testing.assert_close( - scene.sensors["imu_cube"].data.lin_acc_b, + scene.sensors["pva_cube"].data.lin_acc_b.torch, prev_lin_acc_cube, rtol=1e-3, atol=1e-3, ) torch.testing.assert_close( - scene.sensors["imu_cube"].data.ang_acc_b, + scene.sensors["pva_cube"].data.ang_acc_b.torch, prev_ang_acc_cube, rtol=1e-3, atol=1e-3, ) - # check the imu velocities + # check the pva velocities # NOTE: the expected lin_vel_b is the same as the set velocity, as write_root_velocity_to_sim is # setting v_0 (initial velocity) and then a calculation step of v_i = v_0 + a*dt. Consequently, # the data.lin_vel_b is returning approx. v_i. torch.testing.assert_close( - scene.sensors["imu_ball"].data.lin_vel_b, + scene.sensors["pva_ball"].data.lin_vel_b.torch, torch.tensor([[1.0, 0.0, -scene.physics_dt * 9.81]], dtype=torch.float32, device=scene.device).repeat( scene.num_envs, 1 ), @@ -292,7 +296,7 @@ def test_constant_velocity(setup_sim): atol=1e-4, ) torch.testing.assert_close( - scene.sensors["imu_cube"].data.lin_vel_b, + scene.sensors["pva_cube"].data.lin_vel_b.torch, torch.tensor([[1.0, 0.0, -scene.physics_dt * 9.81]], dtype=torch.float32, device=scene.device).repeat( scene.num_envs, 1 ), @@ -301,15 +305,15 @@ def test_constant_velocity(setup_sim): ) # update previous values - prev_lin_acc_ball = scene.sensors["imu_ball"].data.lin_acc_b.clone() - prev_ang_acc_ball = scene.sensors["imu_ball"].data.ang_acc_b.clone() - prev_lin_acc_cube = scene.sensors["imu_cube"].data.lin_acc_b.clone() - prev_ang_acc_cube = scene.sensors["imu_cube"].data.ang_acc_b.clone() + prev_lin_acc_ball = scene.sensors["pva_ball"].data.lin_acc_b.torch.clone() + prev_ang_acc_ball = scene.sensors["pva_ball"].data.ang_acc_b.torch.clone() + prev_lin_acc_cube = scene.sensors["pva_cube"].data.lin_acc_b.torch.clone() + prev_ang_acc_cube = scene.sensors["pva_cube"].data.ang_acc_b.torch.clone() @pytest.mark.isaacsim_ci def test_constant_acceleration(setup_sim): - """Test the Imu sensor with a constant acceleration.""" + """Test the PVA sensor with a constant acceleration.""" sim, scene = setup_sim for idx in range(100): # set acceleration @@ -330,11 +334,11 @@ def test_constant_acceleration(setup_sim): if idx < 1: continue - # check the imu data + # check the pva data torch.testing.assert_close( - scene.sensors["imu_ball"].data.lin_acc_b, + scene.sensors["pva_ball"].data.lin_acc_b.torch, math_utils.quat_apply_inverse( - scene.rigid_objects["balls"].data.root_quat_w, + scene.rigid_objects["balls"].data.root_quat_w.torch, torch.tensor([[0.1, 0.0, 0.0]], dtype=torch.float32, device=scene.device).repeat(scene.num_envs, 1) / sim.get_physics_dt(), ), @@ -344,8 +348,8 @@ def test_constant_acceleration(setup_sim): # check the angular velocity torch.testing.assert_close( - scene.sensors["imu_ball"].data.ang_vel_b, - scene.rigid_objects["balls"].data.root_ang_vel_b, + scene.sensors["pva_ball"].data.ang_vel_b.torch, + scene.rigid_objects["balls"].data.root_ang_vel_b.torch, rtol=1e-4, atol=1e-4, ) @@ -353,12 +357,12 @@ def test_constant_acceleration(setup_sim): @pytest.mark.isaacsim_ci def test_single_dof_pendulum(setup_sim): - """Test imu against analytical pendulum problem.""" + """Test PVA against analytical pendulum problem.""" sim, scene = setup_sim # pendulum length pend_length = PEND_POS_OFFSET[0] - # should achieve same results between the two imu sensors on the robot + # should achieve same results between the two pva sensors on the robot for idx in range(500): # write data to sim scene.write_data_to_sim() @@ -368,21 +372,21 @@ def test_single_dof_pendulum(setup_sim): scene.update(sim.get_physics_dt()) # get pendulum joint state - joint_pos = scene.articulations["pendulum"].data.joint_pos - joint_vel = scene.articulations["pendulum"].data.joint_vel - joint_acc = scene.articulations["pendulum"].data.joint_acc + joint_pos = scene.articulations["pendulum"].data.joint_pos.torch + joint_vel = scene.articulations["pendulum"].data.joint_vel.torch + joint_acc = scene.articulations["pendulum"].data.joint_acc.torch - # IMU and base data - imu_data = scene.sensors["imu_pendulum_imu_link"].data - base_data = scene.sensors["imu_pendulum_base"].data + # PVA and base data + pva_data = scene.sensors["pva_pendulum_imu_link"].data + base_data = scene.sensors["pva_pendulum_base"].data - # extract imu_link imu_sensor dynamics - lin_vel_w_imu_link = math_utils.quat_apply(imu_data.quat_w, imu_data.lin_vel_b) - lin_acc_w_imu_link = math_utils.quat_apply(imu_data.quat_w, imu_data.lin_acc_b) + # extract imu_link pva_sensor dynamics + lin_vel_w_imu_link = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.lin_vel_b.torch) + lin_acc_w_imu_link = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.lin_acc_b.torch) - # calculate the joint dynamics from the imu_sensor (y axis of imu_link is parallel to joint axis of pendulum) - joint_vel_imu = math_utils.quat_apply(imu_data.quat_w, imu_data.ang_vel_b)[..., 1].unsqueeze(-1) - joint_acc_imu = math_utils.quat_apply(imu_data.quat_w, imu_data.ang_acc_b)[..., 1].unsqueeze(-1) + # calculate the joint dynamics from the pva_sensor (y axis of imu_link is parallel to joint axis of pendulum) + joint_vel_pva = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.ang_vel_b.torch)[..., 1].unsqueeze(-1) + joint_acc_pva = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.ang_acc_b.torch)[..., 1].unsqueeze(-1) # calculate analytical solution vx = -joint_vel * pend_length * torch.sin(joint_pos) @@ -392,43 +396,43 @@ def test_single_dof_pendulum(setup_sim): ax = -joint_acc * pend_length * torch.sin(joint_pos) - joint_vel**2 * pend_length * torch.cos(joint_pos) ay = torch.zeros(2, 1, device=scene.device) - az = -joint_acc * pend_length * torch.cos(joint_pos) + joint_vel**2 * pend_length * torch.sin(joint_pos) + 9.81 + az = -joint_acc * pend_length * torch.cos(joint_pos) + joint_vel**2 * pend_length * torch.sin(joint_pos) gt_linear_acc_w = torch.cat([ax, ay, az], dim=-1) # skip first step where initial velocity is zero if idx < 2: continue - # compare imu projected gravity + # compare pva projected gravity gravity_dir_w = torch.tensor((0.0, 0.0, -1.0), device=scene.device).repeat(2, 1) - gravity_dir_b = math_utils.quat_apply_inverse(imu_data.quat_w, gravity_dir_w) + gravity_dir_b = math_utils.quat_apply_inverse(pva_data.quat_w.torch, gravity_dir_w) torch.testing.assert_close( - imu_data.projected_gravity_b, + pva_data.projected_gravity_b.torch, gravity_dir_b, ) - # compare imu angular velocity with joint velocity + # compare pva angular velocity with joint velocity torch.testing.assert_close( joint_vel, - joint_vel_imu, + joint_vel_pva, rtol=1e-1, atol=1e-3, ) - # compare imu angular acceleration with joint acceleration + # compare pva angular acceleration with joint acceleration torch.testing.assert_close( joint_acc, - joint_acc_imu, + joint_acc_pva, rtol=1e-1, atol=1e-3, ) - # compare imu linear velocity with simple pendulum calculation + # compare pva linear velocity with simple pendulum calculation torch.testing.assert_close( gt_linear_vel_w, lin_vel_w_imu_link, rtol=1e-1, atol=1e-3, ) - # compare imu linear acceleration with simple pendulum calculation + # compare pva linear acceleration with simple pendulum calculation torch.testing.assert_close( gt_linear_acc_w, lin_acc_w_imu_link, @@ -436,49 +440,49 @@ def test_single_dof_pendulum(setup_sim): atol=1e0, ) - # check the position between offset and imu definition + # check the position between offset and pva definition torch.testing.assert_close( - base_data.pos_w, - imu_data.pos_w, + base_data.pos_w.torch, + pva_data.pos_w.torch, rtol=1e-5, atol=1e-5, ) - # check the orientation between offset and imu definition + # check the orientation between offset and pva definition torch.testing.assert_close( - base_data.quat_w, - imu_data.quat_w, + base_data.quat_w.torch, + pva_data.quat_w.torch, rtol=1e-4, atol=1e-4, ) - # check the angular velocities of the imus between offset and imu definition + # check the angular velocities of the pvas between offset and pva definition torch.testing.assert_close( - base_data.ang_vel_b, - imu_data.ang_vel_b, + base_data.ang_vel_b.torch, + pva_data.ang_vel_b.torch, rtol=1e-4, atol=1e-4, ) - # check the angular acceleration of the imus between offset and imu definition + # check the angular acceleration of the pvas between offset and pva definition torch.testing.assert_close( - base_data.ang_acc_b, - imu_data.ang_acc_b, + base_data.ang_acc_b.torch, + pva_data.ang_acc_b.torch, rtol=1e-4, atol=1e-4, ) - # check the linear velocity of the imus between offset and imu definition + # check the linear velocity of the pvas between offset and pva definition torch.testing.assert_close( - base_data.lin_vel_b, - imu_data.lin_vel_b, + base_data.lin_vel_b.torch, + pva_data.lin_vel_b.torch, rtol=1e-2, atol=5e-3, ) - # check the linear acceleration of the imus between offset and imu definition + # check the linear acceleration of the pvas between offset and pva definition torch.testing.assert_close( - base_data.lin_acc_b, - imu_data.lin_acc_b, + base_data.lin_acc_b.torch, + pva_data.lin_acc_b.torch, rtol=1e-1, atol=1e-1, ) @@ -486,12 +490,12 @@ def test_single_dof_pendulum(setup_sim): @pytest.mark.isaacsim_ci def test_indirect_attachment(setup_sim): - """Test attaching the imu through an xForm primitive configuration argument.""" + """Test attaching the PVA sensor through an xForm primitive configuration argument.""" sim, scene = setup_sim # pendulum length pend_length = PEND_POS_OFFSET[0] - # should achieve same results between the two imu sensors on the robot + # should achieve same results between the two pva sensors on the robot for idx in range(500): # write data to sim scene.write_data_to_sim() @@ -501,29 +505,34 @@ def test_indirect_attachment(setup_sim): scene.update(sim.get_physics_dt()) # get pendulum joint state - joint_pos = scene.articulations["pendulum2"].data.joint_pos - joint_vel = scene.articulations["pendulum2"].data.joint_vel - joint_acc = scene.articulations["pendulum2"].data.joint_acc + joint_pos = scene.articulations["pendulum2"].data.joint_pos.torch + joint_vel = scene.articulations["pendulum2"].data.joint_vel.torch + joint_acc = scene.articulations["pendulum2"].data.joint_acc.torch - imu = scene.sensors["imu_indirect_pendulum_link"] - imu_base = scene.sensors["imu_indirect_pendulum_base"] + pva = scene.sensors["pva_indirect_pendulum_link"] + pva_base = scene.sensors["pva_indirect_pendulum_base"] torch.testing.assert_close( - imu._offset_pos_b, - imu_base._offset_pos_b, + wp.to_torch(pva._offset_pos_b), + wp.to_torch(pva_base._offset_pos_b), + ) + torch.testing.assert_close( + wp.to_torch(pva._offset_quat_b), + wp.to_torch(pva_base._offset_quat_b), + rtol=1e-4, + atol=1e-4, ) - torch.testing.assert_close(imu._offset_quat_b, imu_base._offset_quat_b, rtol=1e-4, atol=1e-4) - # IMU and base data - imu_data = scene.sensors["imu_indirect_pendulum_link"].data - base_data = scene.sensors["imu_indirect_pendulum_base"].data - # extract imu_link imu_sensor dynamics - lin_vel_w_imu_link = math_utils.quat_apply(imu_data.quat_w, imu_data.lin_vel_b) - lin_acc_w_imu_link = math_utils.quat_apply(imu_data.quat_w, imu_data.lin_acc_b) + # PVA and base data + pva_data = scene.sensors["pva_indirect_pendulum_link"].data + base_data = scene.sensors["pva_indirect_pendulum_base"].data + # extract imu_link pva_sensor dynamics + lin_vel_w_imu_link = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.lin_vel_b.torch) + lin_acc_w_imu_link = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.lin_acc_b.torch) - # calculate the joint dynamics from the imu_sensor (y axis of imu_link is parallel to joint axis of pendulum) - joint_vel_imu = math_utils.quat_apply(imu_data.quat_w, imu_data.ang_vel_b)[..., 1].unsqueeze(-1) - joint_acc_imu = math_utils.quat_apply(imu_data.quat_w, imu_data.ang_acc_b)[..., 1].unsqueeze(-1) + # calculate the joint dynamics from the pva_sensor (y axis of imu_link is parallel to joint axis of pendulum) + joint_vel_pva = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.ang_vel_b.torch)[..., 1].unsqueeze(-1) + joint_acc_pva = math_utils.quat_apply(pva_data.quat_w.torch, pva_data.ang_acc_b.torch)[..., 1].unsqueeze(-1) # calculate analytical solution vx = -joint_vel * pend_length * torch.sin(joint_pos) @@ -533,43 +542,43 @@ def test_indirect_attachment(setup_sim): ax = -joint_acc * pend_length * torch.sin(joint_pos) - joint_vel**2 * pend_length * torch.cos(joint_pos) ay = torch.zeros(2, 1, device=scene.device) - az = -joint_acc * pend_length * torch.cos(joint_pos) + joint_vel**2 * pend_length * torch.sin(joint_pos) + 9.81 + az = -joint_acc * pend_length * torch.cos(joint_pos) + joint_vel**2 * pend_length * torch.sin(joint_pos) gt_linear_acc_w = torch.cat([ax, ay, az], dim=-1) # skip first step where initial velocity is zero if idx < 2: continue - # compare imu projected gravity + # compare pva projected gravity gravity_dir_w = torch.tensor((0.0, 0.0, -1.0), device=scene.device).repeat(2, 1) - gravity_dir_b = math_utils.quat_apply_inverse(imu_data.quat_w, gravity_dir_w) + gravity_dir_b = math_utils.quat_apply_inverse(pva_data.quat_w.torch, gravity_dir_w) torch.testing.assert_close( - imu_data.projected_gravity_b, + pva_data.projected_gravity_b.torch, gravity_dir_b, ) - # compare imu angular velocity with joint velocity + # compare pva angular velocity with joint velocity torch.testing.assert_close( joint_vel, - joint_vel_imu, + joint_vel_pva, rtol=1e-1, atol=1e-3, ) - # compare imu angular acceleration with joint acceleration + # compare pva angular acceleration with joint acceleration torch.testing.assert_close( joint_acc, - joint_acc_imu, + joint_acc_pva, rtol=1e-1, atol=1e-3, ) - # compare imu linear velocity with simple pendulum calculation + # compare pva linear velocity with simple pendulum calculation torch.testing.assert_close( gt_linear_vel_w, lin_vel_w_imu_link, rtol=1e-1, atol=1e-3, ) - # compare imu linear acceleration with simple pendulum calculation + # compare pva linear acceleration with simple pendulum calculation torch.testing.assert_close( gt_linear_acc_w, lin_acc_w_imu_link, @@ -577,49 +586,49 @@ def test_indirect_attachment(setup_sim): atol=1e0, ) - # check the position between offset and imu definition + # check the position between offset and pva definition torch.testing.assert_close( - base_data.pos_w, - imu_data.pos_w, + base_data.pos_w.torch, + pva_data.pos_w.torch, rtol=1e-5, atol=1e-5, ) - # check the orientation between offset and imu definition + # check the orientation between offset and pva definition torch.testing.assert_close( - base_data.quat_w, - imu_data.quat_w, + base_data.quat_w.torch, + pva_data.quat_w.torch, rtol=1e-4, atol=1e-4, ) - # check the angular velocities of the imus between offset and imu definition + # check the angular velocities of the pvas between offset and pva definition torch.testing.assert_close( - base_data.ang_vel_b, - imu_data.ang_vel_b, + base_data.ang_vel_b.torch, + pva_data.ang_vel_b.torch, rtol=1e-4, atol=1e-4, ) - # check the angular acceleration of the imus between offset and imu definition + # check the angular acceleration of the pvas between offset and pva definition torch.testing.assert_close( - base_data.ang_acc_b, - imu_data.ang_acc_b, + base_data.ang_acc_b.torch, + pva_data.ang_acc_b.torch, rtol=1e-4, atol=1e-4, ) - # check the linear velocity of the imus between offset and imu definition + # check the linear velocity of the pvas between offset and pva definition torch.testing.assert_close( - base_data.lin_vel_b, - imu_data.lin_vel_b, + base_data.lin_vel_b.torch, + pva_data.lin_vel_b.torch, rtol=1e-2, atol=5e-3, ) - # check the linear acceleration of the imus between offset and imu definition + # check the linear acceleration of the pvas between offset and pva definition torch.testing.assert_close( - base_data.lin_acc_b, - imu_data.lin_acc_b, + base_data.lin_acc_b.torch, + pva_data.lin_acc_b.torch, rtol=1e-1, atol=1e-1, ) @@ -630,7 +639,7 @@ def test_offset_calculation(setup_sim): """Test offset configuration argument.""" sim, scene = setup_sim - # should achieve same results between the two imu sensors on the robot + # should achieve same results between the two pva sensors on the robot for idx in range(500): # set acceleration scene.articulations["robot"].write_root_velocity_to_sim( @@ -652,50 +661,50 @@ def test_offset_calculation(setup_sim): # check the accelerations torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.lin_acc_b, - scene.sensors["imu_robot_imu_link"].data.lin_acc_b, + scene.sensors["pva_robot_base"].data.lin_acc_b.torch, + scene.sensors["pva_robot_imu_link"].data.lin_acc_b.torch, rtol=1e-4, atol=1e-4, ) torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.ang_acc_b, - scene.sensors["imu_robot_imu_link"].data.ang_acc_b, + scene.sensors["pva_robot_base"].data.ang_acc_b.torch, + scene.sensors["pva_robot_imu_link"].data.ang_acc_b.torch, rtol=1e-4, atol=1e-4, ) # check the velocities torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.ang_vel_b, - scene.sensors["imu_robot_imu_link"].data.ang_vel_b, + scene.sensors["pva_robot_base"].data.ang_vel_b.torch, + scene.sensors["pva_robot_imu_link"].data.ang_vel_b.torch, rtol=1e-4, atol=1e-4, ) torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.lin_vel_b, - scene.sensors["imu_robot_imu_link"].data.lin_vel_b, + scene.sensors["pva_robot_base"].data.lin_vel_b.torch, + scene.sensors["pva_robot_imu_link"].data.lin_vel_b.torch, rtol=1e-4, atol=1e-4, ) # check the orientation torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.quat_w, - scene.sensors["imu_robot_imu_link"].data.quat_w, + scene.sensors["pva_robot_base"].data.quat_w.torch, + scene.sensors["pva_robot_imu_link"].data.quat_w.torch, rtol=1e-4, atol=1e-4, ) # check the position torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.pos_w, - scene.sensors["imu_robot_imu_link"].data.pos_w, + scene.sensors["pva_robot_base"].data.pos_w.torch, + scene.sensors["pva_robot_imu_link"].data.pos_w.torch, rtol=1e-4, atol=1e-4, ) # check the projected gravity torch.testing.assert_close( - scene.sensors["imu_robot_base"].data.projected_gravity_b, - scene.sensors["imu_robot_imu_link"].data.projected_gravity_b, + scene.sensors["pva_robot_base"].data.projected_gravity_b.torch, + scene.sensors["pva_robot_imu_link"].data.projected_gravity_b.torch, rtol=1e-4, atol=1e-4, ) @@ -703,16 +712,15 @@ def test_offset_calculation(setup_sim): @pytest.mark.isaacsim_ci def test_attachment_validity(setup_sim): - """Test invalid imu attachment. An imu cannot be attached directly to the world. It must be somehow attached to - something implementing physics.""" + """Test invalid PVA attachment. A PVA sensor cannot be attached directly to the world. + + It must be somehow attached to something implementing physics. + """ sim, scene = setup_sim - imu_world_cfg = ImuCfg( - prim_path="/World/envs/env_0", - gravity_bias=(0.0, 0.0, 0.0), - ) + pva_world_cfg = PvaCfg(prim_path="/World/envs/env_0") with pytest.raises(RuntimeError) as exc_info: - imu_world = Imu(imu_world_cfg) - imu_world._initialize_impl() + pva_world = Pva(pva_world_cfg) + pva_world._initialize_impl() assert exc_info.type is RuntimeError and "find a rigid body ancestor prim" in str(exc_info.value) @@ -752,6 +760,6 @@ def test_sensor_print(setup_sim): """Test sensor print is working correctly.""" sim, scene = setup_sim # Create sensor - sensor = scene.sensors["imu_ball"] + sensor = scene.sensors["pva_ball"] # print info print(sensor) diff --git a/source/isaaclab/test/sensors/urdfs/simple_2_link.urdf b/source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf similarity index 100% rename from source/isaaclab/test/sensors/urdfs/simple_2_link.urdf rename to source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf diff --git a/source/isaaclab_physx/test/sim/__init__.py b/source/isaaclab_physx/test/sim/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_physx/test/sim/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_physx/test/sim/test_cloner.py b/source/isaaclab_physx/test/sim/test_cloner.py new file mode 100644 index 000000000000..4bfba07d99e8 --- /dev/null +++ b/source/isaaclab_physx/test/sim/test_cloner.py @@ -0,0 +1,644 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for PhysX-dependent cloner utilities.""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import pytest +import torch +import warp as wp +from isaaclab_physx.cloner import physx_replicate + +import isaaclab.sim as sim_utils +from isaaclab.cloner import ( + TemplateCloneCfg, + _fabric_notices, + clone_from_template, + disabled_fabric_change_notifies, + sequential, + usd_replicate, +) +from isaaclab.sim import build_simulation_context + +wp.init() + +pytestmark = pytest.mark.isaacsim_ci + + +@pytest.fixture(params=["cpu", "cuda"]) +def sim(request): + """Provide a fresh simulation context for each test on CPU and CUDA.""" + with build_simulation_context(device=request.param, dt=0.01, add_lighting=False) as sim: + yield sim + + +def test_physx_replicate_no_error(sim): + """PhysX replicator call runs without raising exceptions for simple mapping.""" + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/template", "Xform") + sim_utils.create_prim("/World/template/A", "Xform") + + num_envs = 2 + env_ids = torch.arange(num_envs, dtype=torch.long) + for i in range(num_envs): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform") + + mapping = torch.ones((1, num_envs), dtype=torch.bool) + + physx_replicate( + sim_utils.get_current_stage(), + sources=["/World/template/A"], + destinations=["/World/envs/env_{}/A"], + env_ids=env_ids, + mapping=mapping, + ) + + +def _make_mock_physx_rep(): + """Return (mock_rep, replicate_calls) where replicate_calls accumulates num_worlds per call. + + ``mock_rep.register_replicator`` immediately invokes attach_fn + attach_end_fn so the callbacks + fire synchronously inside ``physx_replicate``, making the calls observable in tests. + """ + from unittest.mock import MagicMock + + replicate_calls: list[int] = [] + mock_rep = MagicMock() + mock_rep.replicate.side_effect = lambda _sid, _src, num_worlds, **kw: replicate_calls.append(num_worlds) + + def _fake_register(_stage_id, attach_fn, attach_end_fn, rename_fn): + attach_fn(_stage_id) + attach_end_fn(_stage_id) + + mock_rep.register_replicator.side_effect = _fake_register + return mock_rep, replicate_calls + + +def _make_mock_physx_rep_detailed(): + """Return (mock_rep, replicate_calls, attach_excluded) for fine-grained inspection. + + ``replicate_calls`` is a list of ``(src, num_worlds)`` tuples — one entry per + ``rep.replicate`` invocation, preserving the source path for heterogeneous checks. + ``attach_excluded`` is the list of paths returned by ``attach_fn`` (i.e. the paths + that the replicator will exclude from its USD stage parse). + """ + from unittest.mock import MagicMock + + replicate_calls: list[tuple[str, int]] = [] + attach_excluded: list[str] = [] + mock_rep = MagicMock() + mock_rep.replicate.side_effect = lambda _sid, src, num_worlds, **kw: replicate_calls.append((src, num_worlds)) + + def _fake_register(_stage_id, attach_fn, attach_end_fn, rename_fn): + excluded = attach_fn(_stage_id) or [] + attach_excluded.extend(excluded) + attach_end_fn(_stage_id) + + mock_rep.register_replicator.side_effect = _fake_register + return mock_rep, replicate_calls, attach_excluded + + +@pytest.mark.parametrize( + "num_envs,src,expected_worlds", + [ + (3, "/World/envs/env_0", [2]), + (1, "/World/envs/env_0", []), + (3, "/World/template/Robot", [3]), + ], +) +def test_physx_replicate_world_counts(sim, num_envs, src, expected_worlds): + """physx_replicate calls rep.replicate with the correct world count (exclude-self). + + With ``exclude_self_replication=True`` (default), the source environment is excluded + from the replication targets when it also maps to other environments. A source at + ``env_0`` mapping to ``[0, 1, 2]`` only replicates to ``[1, 2]`` (2 worlds). + With ``num_envs == 1`` the ``num_envs > 1`` guard skips registration entirely. + Non-env sources (e.g. ``/World/template/Robot``) are never excluded because the + self-id is not a digit. + """ + from unittest.mock import patch + + stage = sim_utils.get_current_stage() + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/template", "Xform") + sim_utils.create_prim("/World/template/Robot", "Xform") + for i in range(num_envs): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform") + + mock_rep, replicate_calls = _make_mock_physx_rep() + with patch("isaaclab_physx.cloner.physx_replicate.get_physx_replicator_interface", return_value=mock_rep): + physx_replicate( + stage, + sources=[src], + destinations=["/World/envs/env_{}"], + env_ids=torch.arange(num_envs, dtype=torch.long), + mapping=torch.ones((1, num_envs), dtype=torch.bool), + ) + + assert replicate_calls == expected_worlds, ( + f"Expected replicate world counts {expected_worlds}, got {replicate_calls}" + ) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_physx_replicate_isolated_source_loaded_without_replication(sim, device): + """A single-env source (worlds=[self]) is correctly loaded after physx_replicate. + + When there is only one environment and the source maps to itself, + ``exclude_self_replication=True`` (default) causes physx_replicate to skip + replication entirely. The prim already exists from USD, so after ``sim.reset()`` + PhysX must still be able to find the rigid body at the env path. + """ + stage = sim_utils.get_current_stage() + + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/template", "Xform") + sphere_cfg = sim_utils.SphereCfg( + radius=0.1, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + sphere_cfg.func("/World/envs/env_0/Sphere", sphere_cfg) + + physx_replicate( + stage, + sources=["/World/envs/env_0/Sphere"], + destinations=["/World/envs/env_{}/Sphere"], + env_ids=torch.tensor([0], dtype=torch.long), + mapping=torch.ones((1, 1), dtype=torch.bool), + device=device, + ) + + sim.reset() + + physics_sim_view = sim.physics_manager.get_physics_sim_view() + physx_view = physics_sim_view.create_rigid_body_view("/World/envs/env_*/Sphere") + assert physx_view is not None and physx_view.count == 1, ( + f"Expected 1 rigid body at /World/envs/env_0/Sphere, got {'None' if physx_view is None else physx_view.count}." + ) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_physx_replicate_heterogeneous_isolated_sources(sim, device): + """physx_replicate handles heterogeneous sources excluding self from world lists. + + This is the Dexsuite scenario: multiple object types, each with a designated proto-env. + With ``exclude_self_replication=True`` (default), self is removed from the world list + only when the source also maps to other environments. Self-only sources keep self so + that ``rep.replicate()`` still fires and the source prim gets its physics body (since + ``/World/envs`` is excluded from normal PhysX parsing). + + Sources and expected behaviour: + env_0/Object → worlds [0, 2, 4] → exclude 0 → replicate to [2, 4] (2 worlds) + env_5/Object → worlds [5] → exclude 5 → keep [5] (1 world) + env_7/Object → worlds [7, 11] → exclude 7 → replicate to [11] (1 world) + """ + from unittest.mock import patch + + num_envs = 16 + stage = sim_utils.get_current_stage() + sim_utils.create_prim("/World/template", "Xform") + for i in range(num_envs): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform") + + mapping = torch.zeros((3, num_envs), dtype=torch.bool) + mapping[0, [0, 2, 4]] = True + mapping[1, [5]] = True + mapping[2, [7, 11]] = True + + mock_rep, replicate_calls, attach_excluded = _make_mock_physx_rep_detailed() + with patch("isaaclab_physx.cloner.physx_replicate.get_physx_replicator_interface", return_value=mock_rep): + physx_replicate( + stage, + sources=["/World/envs/env_0/Object", "/World/envs/env_5/Object", "/World/envs/env_7/Object"], + destinations=["/World/envs/env_{}/Object"] * 3, + env_ids=torch.arange(num_envs, dtype=torch.long), + mapping=mapping, + device=device, + ) + + expected = [ + ("/World/envs/env_0/Object", 2), + ("/World/envs/env_5/Object", 1), + ("/World/envs/env_7/Object", 1), + ] + assert replicate_calls == expected, f"Expected {expected}, got {replicate_calls}." + + # attach_fn always returns ["/World/template", "/World/envs"] so the replicator + # owns all env prims. Self-only sources get their physics body from the + # rep.replicate() call itself. + assert "/World/template" in attach_excluded + assert "/World/envs" in attach_excluded + + +def test_clone_from_template(sim): + """Clone prototypes via TemplateCloneCfg and clone_from_template and exercise both USD and PhysX. + + Steps: + - Create /World/template and /World/envs/env_0..env_31 + - Spawn three prototypes under /World/template/Object/proto_asset_.* + - Clone using TemplateCloneCfg with random_heterogeneous_cloning=False (modulo mapping) + - Verify modulo placement exists; then call sim.reset(), and create PhysX view + """ + num_clones = 32 + clone_cfg = TemplateCloneCfg(device=sim.cfg.device, clone_strategy=sequential) + sim_utils.create_prim(clone_cfg.template_root, "Xform") + sim_utils.create_prim(f"{clone_cfg.template_root}/Object", "Xform") + sim_utils.create_prim("/World/envs", "Xform") + for i in range(num_clones): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform", translation=(0, 0, 0)) + + cfg = sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + ), + sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2), + ), + sim_utils.SphereCfg( + radius=0.3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), + ), + ], + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + prim = cfg.func(f"{clone_cfg.template_root}/Object/{clone_cfg.template_prototype_identifier}_.*", cfg) + assert prim.IsValid() + + stage = sim_utils.get_current_stage() + clone_from_template(stage, num_clones=num_clones, template_clone_cfg=clone_cfg) + + primitive_prims = sim_utils.get_all_matching_child_prims( + "/World/envs", predicate=lambda prim: prim.GetTypeName() in ["Cone", "Cube", "Sphere"] + ) + + for i, primitive_prim in enumerate(primitive_prims): + modulus = i % 3 + if modulus == 0: + assert primitive_prim.GetTypeName() == "Cone" + elif modulus == 1: + assert primitive_prim.GetTypeName() == "Cube" + else: + assert primitive_prim.GetTypeName() == "Sphere" + + sim.reset() + object_view_regex = f"{clone_cfg.clone_regex}/Object".replace(".*", "*") + physics_sim_view = sim.physics_manager.get_physics_sim_view() + physx_view = physics_sim_view.create_rigid_body_view(object_view_regex) + assert physx_view is not None + + +def _run_colocation_collision_filter(sim, asset_cfg, expected_types, assert_count=False): + """Shared harness for colocated collision filter checks across devices.""" + num_clones = 32 + clone_cfg = TemplateCloneCfg(device=sim.cfg.device, clone_strategy=sequential) + sim_utils.create_prim(clone_cfg.template_root, "Xform") + sim_utils.create_prim(f"{clone_cfg.template_root}/Object", "Xform") + sim_utils.create_prim("/World/envs", "Xform") + for i in range(num_clones): + sim_utils.create_prim(f"/World/envs/env_{i}", "Xform", translation=(0, 0, 0)) + + prim = asset_cfg.func(f"{clone_cfg.template_root}/Object/{clone_cfg.template_prototype_identifier}_.*", asset_cfg) + assert prim.IsValid() + + stage = sim_utils.get_current_stage() + clone_from_template(stage, num_clones=num_clones, template_clone_cfg=clone_cfg) + + primitive_prims = sim_utils.get_all_matching_child_prims( + "/World/envs", predicate=lambda prim: prim.GetTypeName() in expected_types + ) + + if assert_count: + assert len(primitive_prims) == num_clones + + for i, primitive_prim in enumerate(primitive_prims): + assert primitive_prim.GetTypeName() == expected_types[i % len(expected_types)] + + sim.reset() + object_view_regex = f"{clone_cfg.clone_regex}/Object".replace(".*", "*") + physics_sim_view = sim.physics_manager.get_physics_sim_view() + physx_view = physics_sim_view.create_rigid_body_view(object_view_regex) + for _ in range(100): + sim.step() + transforms = wp.to_torch(physx_view.get_transforms()) + distance_from_origin = torch.linalg.norm(transforms[:, :2], dim=-1) + assert torch.all(distance_from_origin < 0.1) + + +def test_colocation_collision_filter_homogeneous(sim): + """Verify colocated clones of a single prototype stay stable after PhysX cloning. + + All clones are spawned at exactly the same pose; if the collision filter is wrong the pile + explodes on reset. This asserts the filter keeps the colocated objects stable while stepping + across CPU and CUDA backends. + """ + _run_colocation_collision_filter( + sim, + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + expected_types=["Cone"], + assert_count=True, + ) + + +@pytest.mark.xfail(reason="Heterogeneous cloning with collision filtering not yet fully supported") +def test_colocation_collision_filter_heterogeneous(sim): + """Verify colocated clones of multiple prototypes retain modulo ordering and remain stable. + + The cone, cube, and sphere are all spawned in the identical pose for every clone; an incorrect + collision filter would blow up the simulation on reset. This guards both modulo ordering and + that the colocated set stays stable through PhysX steps across CPU and CUDA. + """ + _run_colocation_collision_filter( + sim, + sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + ), + sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2), + ), + sim_utils.SphereCfg( + radius=0.3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), + ), + ], + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + expected_types=["Cone", "Cube", "Sphere"], + ) + + +def _run_sphere_velocity_sim(sim, use_physx_replicate: bool, num_steps: int = 10) -> torch.Tensor: + """Run a 2-env sphere simulation and return the full velocity trajectory. + + Returns a (num_steps, num_envs, 6) tensor of velocities at each step. + """ + num_envs = 2 + spacing = 5.0 + stage = sim_utils.get_current_stage() + + sim_utils.create_prim("/World/envs", "Xform") + sim_utils.create_prim("/World/envs/env_0", "Xform") + + sphere_cfg = sim_utils.SphereCfg( + radius=0.25, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.5), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + sphere_cfg.func("/World/envs/env_0/ball", sphere_cfg, translation=(0.0, 0.0, 0.5)) + + env_ids = torch.arange(num_envs, dtype=torch.long) + positions = torch.tensor([[0.0, 0.0, 0.0], [spacing, 0.0, 0.0]]) + mapping = torch.ones((1, num_envs), dtype=torch.bool) + + if use_physx_replicate: + physx_replicate( + stage, + sources=["/World/envs/env_0/ball"], + destinations=["/World/envs/env_{}/ball"], + env_ids=env_ids, + mapping=mapping, + device=sim.cfg.device, + ) + + usd_replicate( + stage, + sources=["/World/envs/env_0"], + destinations=["/World/envs/env_{}"], + env_ids=env_ids, + mask=mapping, + positions=positions, + ) + + sim.reset() + + physics_sim_view = sim.physics_manager.get_physics_sim_view() + ball_view = physics_sim_view.create_rigid_body_view("/World/envs/env_*/ball") + assert ball_view.count == num_envs, f"Expected {num_envs} balls, got {ball_view.count}" + + device = sim.cfg.device + vel = wp.from_torch(torch.tensor([[10.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * num_envs, dtype=torch.float32, device=device)) + indices = wp.from_torch(torch.arange(num_envs, dtype=torch.int32, device=device)) + + velocities = [] + for _ in range(num_steps): + ball_view.set_velocities(vel, indices) + sim.step() + v = wp.to_torch(ball_view.get_velocities()) + velocities.append(v.cpu().clone()) + + return torch.stack(velocities) + + +def test_physx_replicate_env_consistency(sim): + """Test that env_0 and env_1 produce matching velocities when using physx_replicate.""" + trajectory = _run_sphere_velocity_sim(sim, use_physx_replicate=True) + + for idx in range(trajectory.shape[0]): + v0 = trajectory[idx, 0] + v1 = trajectory[idx, 1] + diff = (v0 - v1).abs().max().item() + assert diff < 1e-3, f"step {idx}: env_0 and env_1 diverge, max_diff={diff}" + + +@pytest.mark.xfail(reason="Source env gets physics from replicator, not USD parsing; may diverge from baseline.") +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_physx_replicate_vs_no_replicate(device): + """Test that physx_replicate does not change the physics behavior of env_0. + + With ``attach_fn`` excluding ``/World/envs``, env_0 receives its physics body + from the replicator (as the source of ``rep.replicate()``) rather than from + normal USD parsing, which may produce subtly different behaviour. + """ + with build_simulation_context(device=device, dt=0.01, add_lighting=False) as sim_no_rep: + baseline = _run_sphere_velocity_sim(sim_no_rep, use_physx_replicate=False) + + with build_simulation_context(device=device, dt=0.01, add_lighting=False) as sim_rep: + with_rep = _run_sphere_velocity_sim(sim_rep, use_physx_replicate=True) + + for idx in range(baseline.shape[0]): + diff = (with_rep[idx, 0] - baseline[idx, 0]).abs().max().item() + assert diff < 1e-3, f"step {idx}: replicate vs no-replicate diverge, max_diff={diff}" + + +def test_disabled_fabric_change_notifies_toggles_ifabricusd_flag(sim): + """Regression: ``disabled_fabric_change_notifies`` actually toggles the IFabricUsd flag. + + The PR's perf win depends on ``setEnableChangeNotifies`` being driven correctly by the + ctypes binding in ``_fabric_notices.py``. That binding reads hardcoded vtable offsets + and could silently no-op if Kit's ABI shifts (offsets drift) or libcarb fails to load. + + A perf-delta assertion can't be done reliably in synthetic isolation — the listener's + cost only shows up under full Kit+PhysX integration paths that this test environment + doesn't reproduce; production-scene benchmarks are the PR's load-bearing perf evidence. + What this test guards is the mechanic itself: ``is_enabled`` flips on entry, restores + on exit when ``restore=True``, stays off when ``restore=False``, and re-entrant nested + blocks behave correctly. + """ + import usdrt + from pxr import UsdUtils + + bindings = _fabric_notices.get_bindings() + if bindings is None: + pytest.skip("omni::fabric::IFabricUsd unavailable — Fabric notice path inert here") + + stage = sim_utils.get_current_stage() + cache = UsdUtils.StageCache.Get() + cached_id = cache.GetId(stage) + stage_id = cached_id.ToLongInt() if cached_id.IsValid() else cache.Insert(stage).ToLongInt() + fabric_id = usdrt.Usd.Stage.Attach(stage_id).GetFabricId().id + + # 1. Listener starts enabled. + assert bindings.is_enabled(fabric_id), "Fabric notice listener should be enabled at test start" + + # 2. Default ``restore=True`` round-trips the flag. + with disabled_fabric_change_notifies(stage): + assert not bindings.is_enabled(fabric_id), "listener should be suspended inside the with block" + assert bindings.is_enabled(fabric_id), "listener should be restored on exit when restore=True" + + # 3. ``restore=False`` leaves the flag off. Manually re-enable to get back to a + # known state for subsequent assertions. + with disabled_fabric_change_notifies(stage, restore=False): + assert not bindings.is_enabled(fabric_id), "listener should be suspended inside the with block" + assert not bindings.is_enabled(fabric_id), "listener should remain suspended on exit when restore=False" + bindings.set_enable(fabric_id, True) + assert bindings.is_enabled(fabric_id) + + # 4. Re-entrant nesting: inner exits don't re-enable while outer still wants it suspended. + with disabled_fabric_change_notifies(stage): + assert not bindings.is_enabled(fabric_id) + with disabled_fabric_change_notifies(stage): + assert not bindings.is_enabled(fabric_id) + assert not bindings.is_enabled(fabric_id), "inner exit must not re-enable while outer is active" + assert bindings.is_enabled(fabric_id), "outer exit should restore the flag" + + +@pytest.mark.skip( + reason=( + "Local-only perf regression; correctness is covered by" + " test_disabled_fabric_change_notifies_toggles_ifabricusd_flag." + " Re-enable manually when touching listener suspension." + ) +) +def test_disabled_fabric_change_notifies_speedup_regression(): + """Local-only perf regression: listener suspension speeds up clone+reset by >= 1.2x. + + Skipped unconditionally — the suspension mechanism's correctness is covered by + :func:`test_disabled_fabric_change_notifies_toggles_ifabricusd_flag`; the wall-clock + win is platform-sensitive (deferred Fabric resync in ``sim.reset`` can offset the + scene-time savings on some hardware). Re-verify locally when touching the suspension. + + Scene knobs from bisection: ``rigid_props`` is required (plain Xforms give ~1.0x), + ``replicate_physics=True`` is required (drops to ~1.19x without), and 16 bodies x + 4096 envs ≈ 64K firings keeps listener cost above noise. See PR #5432. + """ + import time + + import isaaclab.cloner._fabric_notices as fabric_notices_mod + import isaaclab.sim as sim_utils + from isaaclab.assets import RigidObjectCfg + from isaaclab.scene import InteractiveScene, InteractiveSceneCfg + from isaaclab.utils import configclass + + if fabric_notices_mod.get_bindings() is None: + pytest.skip("omni::fabric::IFabricUsd unavailable") + + def _body(i: int) -> RigidObjectCfg: + return RigidObjectCfg( + prim_path=f"/World/envs/env_.*/Body_{i}", + spawn=sim_utils.SphereCfg(radius=0.1, rigid_props=sim_utils.RigidBodyPropertiesCfg()), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.3 * (i % 4), 0.3 * (i // 4), 0.5)), + ) + + @configclass + class _SceneCfg(InteractiveSceneCfg): + pass + + for _i in range(16): + setattr(_SceneCfg, f"body_{_i}", _body(_i)) + + def _probe_flag(stage) -> bool | None: + try: + import usdrt + from pxr import UsdUtils + + cid = UsdUtils.StageCache.Get().GetId(stage) + sid = cid.ToLongInt() if cid.IsValid() else UsdUtils.StageCache.Get().Insert(stage).ToLongInt() + fid = usdrt.Usd.Stage.Attach(sid).GetFabricId().id + b = fabric_notices_mod.get_bindings() + return None if b is None else bool(b.is_enabled(fid)) + except Exception: + return None + + def _measure(simulate_pre_pr: bool) -> tuple[float, float, bool | None]: + original = fabric_notices_mod.get_bindings + if simulate_pre_pr: + fabric_notices_mod.get_bindings = lambda: None + assert fabric_notices_mod.get_bindings() is None, "monkey-patch did not take effect" + try: + with build_simulation_context(device="cpu", dt=0.01, add_lighting=False) as sim: + t0 = time.perf_counter() + scene = InteractiveScene(_SceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)) + scene_dt = time.perf_counter() - t0 + # Probe before reset so we see whether suspension actually engaged. + fabric_notices_mod.get_bindings = original + flag = _probe_flag(scene.stage) + if simulate_pre_pr: + fabric_notices_mod.get_bindings = lambda: None + t0 = time.perf_counter() + sim.reset() + return scene_dt, time.perf_counter() - t0, flag + finally: + fabric_notices_mod.get_bindings = original + + _measure(simulate_pre_pr=False) # warmup + s_scene, s_reset, s_flag = _measure(simulate_pre_pr=False) + a_scene, a_reset, a_flag = _measure(simulate_pre_pr=True) + + suspended = s_scene + s_reset + active = a_scene + a_reset + speedup = active / suspended + print( + f"\n[fabric-notice perf] active={active:.2f}s (flag={a_flag})" + f" suspended={suspended:.2f}s (flag={s_flag}) speedup={speedup:.2f}x" + ) + assert speedup >= 1.2, f"expected >= 1.2x, got {speedup:.2f}x (active={active:.2f}s suspended={suspended:.2f}s)" diff --git a/source/isaaclab_physx/test/sim/test_spawn_materials.py b/source/isaaclab_physx/test/sim/test_spawn_materials.py new file mode 100644 index 000000000000..477132c5c5aa --- /dev/null +++ b/source/isaaclab_physx/test/sim/test_spawn_materials.py @@ -0,0 +1,55 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + + +import pytest +from isaaclab_physx.sim.spawners.materials.physics_materials_cfg import DeformableBodyMaterialCfg + +import isaaclab.sim as sim_utils +from isaaclab.sim import SimulationCfg, SimulationContext + +pytestmark = pytest.mark.isaacsim_ci + + +@pytest.fixture +def sim(): + """Create a simulation context.""" + sim_utils.create_new_stage() + dt = 0.1 + sim = SimulationContext(SimulationCfg(dt=dt)) + sim_utils.update_stage() + yield sim + sim.stop() + sim.clear_instance() + + +def test_spawn_deformable_body_material(sim): + """Test spawning a deformable body material.""" + cfg = DeformableBodyMaterialCfg( + density=1.0, + dynamic_friction=0.25, + youngs_modulus=50000000.0, + poissons_ratio=0.5, + elasticity_damping=0.005, + ) + prim = cfg.func("/Looks/DeformableBodyMaterial", cfg) + # Check validity + assert prim.IsValid() + assert sim.stage.GetPrimAtPath("/Looks/DeformableBodyMaterial").IsValid() + # Check properties + assert prim.GetAttribute("omniphysics:density").Get() == cfg.density + assert prim.GetAttribute("omniphysics:dynamicFriction").Get() == cfg.dynamic_friction + assert prim.GetAttribute("omniphysics:youngsModulus").Get() == cfg.youngs_modulus + assert prim.GetAttribute("omniphysics:poissonsRatio").Get() == cfg.poissons_ratio + assert prim.GetAttribute("physxDeformableBody:elasticityDamping").Get() == pytest.approx(cfg.elasticity_damping) diff --git a/source/isaaclab_physx/test/sim/test_spawn_meshes.py b/source/isaaclab_physx/test/sim/test_spawn_meshes.py new file mode 100644 index 000000000000..7c24e289ef9f --- /dev/null +++ b/source/isaaclab_physx/test/sim/test_spawn_meshes.py @@ -0,0 +1,142 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + + +import pytest +from isaaclab_physx.sim.schemas.schemas_cfg import DeformableBodyPropertiesCfg +from isaaclab_physx.sim.spawners.materials.physics_materials_cfg import DeformableBodyMaterialCfg + +import isaaclab.sim as sim_utils +from isaaclab.sim import SimulationCfg, SimulationContext + +pytestmark = pytest.mark.isaacsim_ci + + +@pytest.fixture +def sim(): + """Create a simulation context for testing.""" + # Create a new stage + sim_utils.create_new_stage() + # Simulation time-step + dt = 0.1 + # Load kit helper + sim = SimulationContext(SimulationCfg(dt=dt)) + # Wait for spawning + sim_utils.update_stage() + yield sim + # Cleanup + sim._disable_app_control_on_stop_handle = True # prevent timeout + sim.stop() + sim.clear_instance() + + +""" +Physics properties. +""" + + +def test_spawn_cone_with_deformable_props(sim): + """Test spawning of UsdGeomMesh prim for a cone with deformable body API.""" + # Spawn cone + cfg = sim_utils.MeshConeCfg( + radius=1.0, + height=2.0, + deformable_props=DeformableBodyPropertiesCfg(deformable_body_enabled=True), + ) + prim = cfg.func("/World/Cone", cfg) + + # Check validity + assert prim.IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() + + # Check properties + # Unlike rigid body, deformable body properties are on the mesh prim + prim = sim.stage.GetPrimAtPath("/World/Cone") + assert prim.GetAttribute("omniphysics:deformableBodyEnabled").Get() == cfg.deformable_props.deformable_body_enabled + + +def test_spawn_cone_with_deformable_and_mass_props(sim): + """Test spawning of UsdGeomMesh prim for a cone with deformable body and mass API.""" + # Spawn cone + cfg = sim_utils.MeshConeCfg( + radius=1.0, + height=2.0, + deformable_props=DeformableBodyPropertiesCfg(deformable_body_enabled=True, mass=1.0), + ) + prim = cfg.func("/World/Cone", cfg) + + # Check validity + assert prim.IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() + # Check properties + prim = sim.stage.GetPrimAtPath("/World/Cone") + assert prim.GetAttribute("omniphysics:mass").Get() == cfg.deformable_props.mass + + # check sim playing + sim.play() + for _ in range(10): + sim.step() + + +def test_spawn_cone_with_deformable_and_density_props(sim): + """Test spawning of UsdGeomMesh prim for a cone with deformable body and mass API, + specifying density instead of mass. + """ + # Spawn cone + cfg = sim_utils.MeshConeCfg( + radius=1.0, + height=2.0, + deformable_props=DeformableBodyPropertiesCfg(deformable_body_enabled=True), + physics_material=DeformableBodyMaterialCfg(density=10.0), + ) + prim = cfg.func("/World/Cone", cfg) + + # Check validity + assert prim.IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone/geometry/material").IsValid() + # Check properties + prim = sim.stage.GetPrimAtPath("/World/Cone/geometry/material") + assert prim.GetAttribute("omniphysics:density").Get() == cfg.physics_material.density + # check sim playing + sim.play() + for _ in range(10): + sim.step() + + +def test_spawn_cone_with_all_deformable_props(sim): + """Test spawning of UsdGeomMesh prim for a cone with all deformable properties.""" + # Spawn cone + cfg = sim_utils.MeshConeCfg( + radius=1.0, + height=2.0, + deformable_props=DeformableBodyPropertiesCfg(mass=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.75, 0.5)), + physics_material=DeformableBodyMaterialCfg(), + ) + prim = cfg.func("/World/Cone", cfg) + + # Check validity + assert prim.IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone").IsValid() + assert sim.stage.GetPrimAtPath("/World/Cone/geometry/material").IsValid() + # Check properties + # -- deformable body + prim = sim.stage.GetPrimAtPath("/World/Cone") + assert prim.GetAttribute("omniphysics:deformableBodyEnabled").Get() is True + + # check sim playing + sim.play() + for _ in range(10): + sim.step() diff --git a/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py b/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py new file mode 100644 index 000000000000..4376c0e0b8ea --- /dev/null +++ b/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py @@ -0,0 +1,106 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""PhysX Fabric backend tests for FrameView. + +Imports the shared contract tests and provides the Fabric-specific +``view_factory`` fixture (SimulationContext with use_fabric=True, +Camera prim type for Fabric SelectPrims compatibility). +""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[3] / "isaaclab" / "test" / "sim")) + +from isaaclab.app import AppLauncher + +simulation_app = AppLauncher(headless=True).app + +import pytest # noqa: E402 +import torch # noqa: E402 +from frame_view_contract_utils import * # noqa: F401, F403, E402 +from frame_view_contract_utils import CHILD_OFFSET, ViewBundle # noqa: E402 +from isaaclab_physx.sim.views import FabricFrameView as FrameView # noqa: E402 + +from pxr import Gf, UsdGeom # noqa: E402 + +import isaaclab.sim as sim_utils # noqa: E402 + +pytestmark = pytest.mark.isaacsim_ci +PARENT_POS = (0.0, 0.0, 1.0) + + +@pytest.fixture(autouse=True) +def test_setup_teardown(): + sim_utils.create_new_stage() + sim_utils.update_stage() + yield + sim_utils.clear_stage() + sim_utils.SimulationContext.clear_instance() + + +def _skip_if_unavailable(device: str): + if device.startswith("cuda") and not torch.cuda.is_available(): + pytest.skip("CUDA not available") + if device == "cpu": + pytest.skip("Warp fabricarray operations on CPU have known issues") + + +# ------------------------------------------------------------------ +# Parent position helpers (via USD xformOps) +# ------------------------------------------------------------------ + + +def _get_parent_positions(num_envs, device="cpu"): + stage = sim_utils.get_current_stage() + xform_cache = UsdGeom.XformCache() + positions = [] + for i in range(num_envs): + prim = stage.GetPrimAtPath(f"/World/Parent_{i}") + tf = xform_cache.GetLocalToWorldTransform(prim) + t = tf.ExtractTranslation() + positions.append([float(t[0]), float(t[1]), float(t[2])]) + return torch.tensor(positions, dtype=torch.float32, device=device) + + +def _set_parent_positions(positions, num_envs): + from pxr import Sdf # noqa: PLC0415 + + stage = sim_utils.get_current_stage() + with Sdf.ChangeBlock(): + for i in range(num_envs): + prim = stage.GetPrimAtPath(f"/World/Parent_{i}") + pos = positions[i] + prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(float(pos[0]), float(pos[1]), float(pos[2]))) + + +# ------------------------------------------------------------------ +# Contract fixture +# ------------------------------------------------------------------ + + +@pytest.fixture +def view_factory(): + """Fabric factory: Camera child at CHILD_OFFSET under parent Xforms, with Fabric enabled.""" + + def factory(num_envs: int, device: str) -> ViewBundle: + _skip_if_unavailable(device) + + stage = sim_utils.get_current_stage() + for i in range(num_envs): + sim_utils.create_prim(f"/World/Parent_{i}", "Xform", translation=PARENT_POS, stage=stage) + sim_utils.create_prim(f"/World/Parent_{i}/Child", "Camera", translation=CHILD_OFFSET, stage=stage) + + sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, device=device, use_fabric=True)) + view = FrameView("/World/Parent_.*/Child", device=device, sync_usd_on_fabric_write=True) + return ViewBundle( + view=view, + get_parent_pos=_get_parent_positions, + set_parent_pos=_set_parent_positions, + teardown=lambda: None, + ) + + return factory diff --git a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py new file mode 100644 index 000000000000..c360d6bd70c1 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for mock PhysX interfaces.""" diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_backend_factory.py b/source/isaaclab_physx/test/test_mock_interfaces/test_backend_factory.py new file mode 100644 index 000000000000..661a51a87f37 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_backend_factory.py @@ -0,0 +1,153 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for factory backend switching between torch and warp.""" + +from isaaclab_physx.test.mock_interfaces.factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, + create_mock_rigid_body_view, + create_mock_rigid_contact_view, +) +from isaaclab_physx.test.mock_interfaces.views import ( + MockArticulationView, + MockArticulationViewWarp, + MockRigidBodyView, + MockRigidBodyViewWarp, + MockRigidContactView, + MockRigidContactViewWarp, +) + + +class TestRigidBodyViewBackend: + """Tests for rigid body view backend switching.""" + + def test_factory_creates_torch_by_default(self): + """Test that factory creates torch backend by default.""" + view = create_mock_rigid_body_view(count=4) + assert isinstance(view, MockRigidBodyView) + assert not isinstance(view, MockRigidBodyViewWarp) + assert view._backend == "torch" + + def test_factory_creates_torch_when_specified(self): + """Test that factory creates torch backend when explicitly specified.""" + view = create_mock_rigid_body_view(count=4, backend="torch") + assert isinstance(view, MockRigidBodyView) + assert not isinstance(view, MockRigidBodyViewWarp) + assert view._backend == "torch" + + def test_factory_creates_warp_when_specified(self): + """Test that factory creates warp backend when specified.""" + view = create_mock_rigid_body_view(count=4, backend="warp") + assert isinstance(view, MockRigidBodyViewWarp) + assert not isinstance(view, MockRigidBodyView) + assert view._backend == "warp" + + +class TestArticulationViewBackend: + """Tests for articulation view backend switching.""" + + def test_factory_creates_torch_by_default(self): + """Test that factory creates torch backend by default.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13) + assert isinstance(view, MockArticulationView) + assert not isinstance(view, MockArticulationViewWarp) + + def test_factory_creates_torch_when_specified(self): + """Test that factory creates torch backend when explicitly specified.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13, backend="torch") + assert isinstance(view, MockArticulationView) + assert not isinstance(view, MockArticulationViewWarp) + + def test_factory_creates_warp_when_specified(self): + """Test that factory creates warp backend when specified.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13, backend="warp") + assert isinstance(view, MockArticulationViewWarp) + assert not isinstance(view, MockArticulationView) + + +class TestRigidContactViewBackend: + """Tests for rigid contact view backend switching.""" + + def test_factory_creates_torch_by_default(self): + """Test that factory creates torch backend by default.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3) + assert isinstance(view, MockRigidContactView) + assert not isinstance(view, MockRigidContactViewWarp) + + def test_factory_creates_torch_when_specified(self): + """Test that factory creates torch backend when explicitly specified.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3, backend="torch") + assert isinstance(view, MockRigidContactView) + assert not isinstance(view, MockRigidContactViewWarp) + + def test_factory_creates_warp_when_specified(self): + """Test that factory creates warp backend when specified.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3, backend="warp") + assert isinstance(view, MockRigidContactViewWarp) + assert not isinstance(view, MockRigidContactView) + + +class TestQuadrupedViewBackend: + """Tests for quadruped view backend switching.""" + + def test_factory_creates_torch_by_default(self): + """Test that factory creates torch backend by default.""" + view = create_mock_quadruped_view(count=4) + assert isinstance(view, MockArticulationView) + assert not isinstance(view, MockArticulationViewWarp) + + def test_factory_creates_warp_when_specified(self): + """Test that factory creates warp backend when specified.""" + view = create_mock_quadruped_view(count=4, backend="warp") + assert isinstance(view, MockArticulationViewWarp) + assert not isinstance(view, MockArticulationView) + # Verify quadruped structure is preserved + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + + +class TestHumanoidViewBackend: + """Tests for humanoid view backend switching.""" + + def test_factory_creates_torch_by_default(self): + """Test that factory creates torch backend by default.""" + view = create_mock_humanoid_view(count=2) + assert isinstance(view, MockArticulationView) + assert not isinstance(view, MockArticulationViewWarp) + + def test_factory_creates_warp_when_specified(self): + """Test that factory creates warp backend when specified.""" + view = create_mock_humanoid_view(count=2, backend="warp") + assert isinstance(view, MockArticulationViewWarp) + assert not isinstance(view, MockArticulationView) + # Verify humanoid structure is preserved + assert view.shared_metatype.dof_count == 21 + assert view.shared_metatype.link_count == 22 + + +class TestBackendConsistency: + """Tests for backend consistency across views.""" + + def test_warp_views_have_backend_attribute(self): + """Test that warp view types have _backend attribute set to 'warp'.""" + warp_rb = create_mock_rigid_body_view(count=1, backend="warp") + warp_art = create_mock_articulation_view(count=1, backend="warp") + warp_contact = create_mock_rigid_contact_view(count=1, num_bodies=1, filter_count=0, backend="warp") + + assert hasattr(warp_rb, "_backend") + assert hasattr(warp_art, "_backend") + assert hasattr(warp_contact, "_backend") + + assert warp_rb._backend == "warp" + assert warp_art._backend == "warp" + assert warp_contact._backend == "warp" + + def test_torch_rigid_body_view_has_backend_attribute(self): + """Test that torch MockRigidBodyView has _backend attribute.""" + torch_rb = create_mock_rigid_body_view(count=1, backend="torch") + assert hasattr(torch_rb, "_backend") + assert torch_rb._backend == "torch" diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py new file mode 100644 index 000000000000..76f25f9c23aa --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -0,0 +1,332 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockArticulationView.""" + +import pytest +import torch +from isaaclab_physx.test.mock_interfaces.factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, +) +from isaaclab_physx.test.mock_interfaces.views import MockArticulationView + + +class TestMockArticulationViewInit: + """Tests for MockArticulationView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockArticulationView() + assert view.count == 1 + assert view.shared_metatype.dof_count == 1 + assert view.shared_metatype.link_count == 2 + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockArticulationView( + count=4, + num_dofs=12, + num_links=13, + fixed_base=True, + ) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is True + + def test_custom_names(self): + """Test initialization with custom DOF and link names.""" + dof_names = ["joint_0", "joint_1"] + link_names = ["base", "link_1", "link_2"] + view = MockArticulationView( + num_dofs=2, + num_links=3, + dof_names=dof_names, + link_names=link_names, + ) + assert view.shared_metatype.dof_names == dof_names + assert view.shared_metatype.link_names == link_names + + def test_tendon_properties(self): + """Test tendon properties are zero.""" + view = MockArticulationView() + assert view.max_fixed_tendons == 0 + assert view.max_spatial_tendons == 0 + + +class TestMockArticulationViewRootGetters: + """Tests for MockArticulationView root getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_root_transforms_shape(self, view): + """Test root transforms shape.""" + transforms = view.get_root_transforms() + assert transforms.shape == (4, 7) + + def test_get_root_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_root_transforms() + assert torch.allclose(transforms[:, 6], torch.ones(4)) # w = 1 + + def test_get_root_velocities_shape(self, view): + """Test root velocities shape.""" + velocities = view.get_root_velocities() + assert velocities.shape == (4, 6) + + +class TestMockArticulationViewLinkGetters: + """Tests for MockArticulationView link getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_link_transforms_shape(self, view): + """Test link transforms shape.""" + transforms = view.get_link_transforms() + assert transforms.shape == (4, 13, 7) + + def test_get_link_velocities_shape(self, view): + """Test link velocities shape.""" + velocities = view.get_link_velocities() + assert velocities.shape == (4, 13, 6) + + +class TestMockArticulationViewDOFGetters: + """Tests for MockArticulationView DOF getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_dof_positions_shape(self, view): + """Test DOF positions shape.""" + positions = view.get_dof_positions() + assert positions.shape == (4, 12) + + def test_get_dof_velocities_shape(self, view): + """Test DOF velocities shape.""" + velocities = view.get_dof_velocities() + assert velocities.shape == (4, 12) + + def test_get_dof_projected_joint_forces_shape(self, view): + """Test projected joint forces shape.""" + forces = view.get_dof_projected_joint_forces() + assert forces.shape == (4, 12) + + def test_get_dof_limits_shape(self, view): + """Test DOF limits shape.""" + limits = view.get_dof_limits() + assert limits.shape == (4, 12, 2) + + def test_get_dof_limits_default_values(self, view): + """Test that default limits are infinite.""" + limits = view.get_dof_limits() + assert torch.all(limits[:, :, 0] == float("-inf")) # lower + assert torch.all(limits[:, :, 1] == float("inf")) # upper + + def test_get_dof_stiffnesses_shape(self, view): + """Test DOF stiffnesses shape.""" + stiffnesses = view.get_dof_stiffnesses() + assert stiffnesses.shape == (4, 12) + + def test_get_dof_dampings_shape(self, view): + """Test DOF dampings shape.""" + dampings = view.get_dof_dampings() + assert dampings.shape == (4, 12) + + def test_get_dof_max_forces_shape(self, view): + """Test DOF max forces shape.""" + max_forces = view.get_dof_max_forces() + assert max_forces.shape == (4, 12) + + def test_get_dof_max_velocities_shape(self, view): + """Test DOF max velocities shape.""" + max_velocities = view.get_dof_max_velocities() + assert max_velocities.shape == (4, 12) + + def test_get_dof_armatures_shape(self, view): + """Test DOF armatures shape.""" + armatures = view.get_dof_armatures() + assert armatures.shape == (4, 12) + + def test_get_dof_friction_coefficients_shape(self, view): + """Test DOF friction coefficients shape.""" + friction = view.get_dof_friction_coefficients() + assert friction.shape == (4, 12) + + +class TestMockArticulationViewMassGetters: + """Tests for MockArticulationView mass property getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 13) + + def test_get_coms_shape(self, view): + """Test centers of mass shape.""" + coms = view.get_coms() + assert coms.shape == (4, 13, 7) + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 13, 9) + + +class TestMockArticulationViewSetters: + """Tests for MockArticulationView setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_root_transforms(self, view): + """Test setting root transforms.""" + new_data = torch.randn(4, 7) + view.set_root_transforms(new_data) + result = view.get_root_transforms() + assert torch.allclose(result, new_data) + + def test_set_dof_positions(self, view): + """Test setting DOF positions.""" + new_data = torch.randn(4, 12) + view.set_dof_positions(new_data) + result = view.get_dof_positions() + assert torch.allclose(result, new_data) + + def test_set_dof_positions_with_indices(self, view): + """Test setting DOF positions with indices.""" + new_data = torch.randn(2, 12) + indices = torch.tensor([0, 2]) + view.set_dof_positions(new_data, indices=indices) + result = view.get_dof_positions() + assert torch.allclose(result[0], new_data[0]) + assert torch.allclose(result[2], new_data[1]) + + def test_set_dof_velocities(self, view): + """Test setting DOF velocities.""" + new_data = torch.randn(4, 12) + view.set_dof_velocities(new_data) + result = view.get_dof_velocities() + assert torch.allclose(result, new_data) + + def test_set_dof_limits(self, view): + """Test setting DOF limits.""" + new_data = torch.randn(4, 12, 2) + view.set_dof_limits(new_data) + result = view.get_dof_limits() + assert torch.allclose(result, new_data) + + def test_set_dof_stiffnesses(self, view): + """Test setting DOF stiffnesses.""" + new_data = torch.randn(4, 12).abs() + view.set_dof_stiffnesses(new_data) + result = view.get_dof_stiffnesses() + assert torch.allclose(result, new_data) + + +class TestMockArticulationViewNoopSetters: + """Tests for MockArticulationView no-op setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs.""" + return MockArticulationView(count=4, num_dofs=12, device="cpu") + + def test_set_dof_position_targets_noop(self, view): + """Test that set_dof_position_targets is a no-op.""" + view.set_dof_position_targets(torch.randn(4, 12)) + + def test_set_dof_velocity_targets_noop(self, view): + """Test that set_dof_velocity_targets is a no-op.""" + view.set_dof_velocity_targets(torch.randn(4, 12)) + + def test_set_dof_actuation_forces_noop(self, view): + """Test that set_dof_actuation_forces is a no-op.""" + view.set_dof_actuation_forces(torch.randn(4, 12)) + + +class TestMockArticulationViewMockSetters: + """Tests for MockArticulationView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_mock_root_transforms(self, view): + """Test mock root transform setter.""" + mock_data = torch.randn(4, 7) + view.set_mock_root_transforms(mock_data) + result = view.get_root_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_link_transforms(self, view): + """Test mock link transform setter.""" + mock_data = torch.randn(4, 13, 7) + view.set_mock_link_transforms(mock_data) + result = view.get_link_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_dof_positions(self, view): + """Test mock DOF position setter.""" + mock_data = torch.randn(4, 12) + view.set_mock_dof_positions(mock_data) + result = view.get_dof_positions() + assert torch.allclose(result, mock_data) + + def test_set_mock_dof_velocities(self, view): + """Test mock DOF velocity setter.""" + mock_data = torch.randn(4, 12) + view.set_mock_dof_velocities(mock_data) + result = view.get_dof_velocities() + assert torch.allclose(result, mock_data) + + +class TestMockArticulationViewFactories: + """Tests for articulation view factory functions.""" + + def test_create_mock_articulation_view_basic(self): + """Test basic factory usage.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + + def test_create_mock_quadruped_view(self): + """Test quadruped factory.""" + view = create_mock_quadruped_view(count=4) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is False + assert "FL_hip_joint" in view.shared_metatype.dof_names + assert "base" in view.shared_metatype.link_names + + def test_create_mock_humanoid_view(self): + """Test humanoid factory.""" + view = create_mock_humanoid_view(count=2) + assert view.count == 2 + assert view.shared_metatype.dof_count == 21 + assert view.shared_metatype.link_count == 22 + assert view.shared_metatype.fixed_base is False + assert "left_elbow" in view.shared_metatype.dof_names + assert "pelvis" in view.shared_metatype.link_names diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view_warp.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view_warp.py new file mode 100644 index 000000000000..41ad540eb8b9 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view_warp.py @@ -0,0 +1,399 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockArticulationViewWarp.""" + +import numpy as np +import pytest +import warp as wp +from isaaclab_physx.test.mock_interfaces.factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, +) +from isaaclab_physx.test.mock_interfaces.views import MockArticulationViewWarp + + +class TestMockArticulationViewWarpInit: + """Tests for MockArticulationViewWarp initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockArticulationViewWarp() + assert view.count == 1 + assert view.shared_metatype.dof_count == 1 + assert view.shared_metatype.link_count == 2 + assert view._backend == "warp" + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockArticulationViewWarp( + count=4, + num_dofs=12, + num_links=13, + fixed_base=True, + ) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is True + + def test_custom_names(self): + """Test initialization with custom DOF and link names.""" + dof_names = ["joint_0", "joint_1"] + link_names = ["base", "link_1", "link_2"] + view = MockArticulationViewWarp( + num_dofs=2, + num_links=3, + dof_names=dof_names, + link_names=link_names, + ) + assert view.shared_metatype.dof_names == dof_names + assert view.shared_metatype.link_names == link_names + + def test_tendon_properties(self): + """Test tendon properties are zero.""" + view = MockArticulationViewWarp() + assert view.max_fixed_tendons == 0 + assert view.max_spatial_tendons == 0 + + +class TestMockArticulationViewWarpRootGetters: + """Tests for MockArticulationViewWarp root getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_root_transforms_shape(self, view): + """Test root transforms shape - should be (N,) with wp.transformf dtype.""" + transforms = view.get_root_transforms() + assert transforms.shape == (4,) + assert transforms.dtype == wp.transformf + + def test_get_root_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_root_transforms() + transforms_np = transforms.numpy() + for i in range(4): + quat = transforms_np[i, 3:] # xyzw + np.testing.assert_allclose(quat[3], 1.0) # w = 1 + + def test_get_root_velocities_shape(self, view): + """Test root velocities shape - should be (N,) with wp.spatial_vectorf dtype.""" + velocities = view.get_root_velocities() + assert velocities.shape == (4,) + assert velocities.dtype == wp.spatial_vectorf + + +class TestMockArticulationViewWarpLinkGetters: + """Tests for MockArticulationViewWarp link getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_link_transforms_shape(self, view): + """Test link transforms shape - should be (N, L) with wp.transformf dtype.""" + transforms = view.get_link_transforms() + assert transforms.shape == (4, 13) + assert transforms.dtype == wp.transformf + + def test_get_link_velocities_shape(self, view): + """Test link velocities shape - should be (N, L) with wp.spatial_vectorf dtype.""" + velocities = view.get_link_velocities() + assert velocities.shape == (4, 13) + assert velocities.dtype == wp.spatial_vectorf + + def test_get_link_accelerations_shape(self, view): + """Test link accelerations shape - should be (N, L) with wp.spatial_vectorf dtype.""" + accelerations = view.get_link_accelerations() + assert accelerations.shape == (4, 13) + assert accelerations.dtype == wp.spatial_vectorf + + def test_get_link_incoming_joint_force_shape(self, view): + """Test link incoming joint force shape - should be (N, L) with wp.spatial_vectorf dtype.""" + forces = view.get_link_incoming_joint_force() + assert forces.shape == (4, 13) + assert forces.dtype == wp.spatial_vectorf + + +class TestMockArticulationViewWarpDOFGetters: + """Tests for MockArticulationViewWarp DOF getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_dof_positions_shape(self, view): + """Test DOF positions shape.""" + positions = view.get_dof_positions() + assert positions.shape == (4, 12) + assert positions.dtype == wp.float32 + + def test_get_dof_velocities_shape(self, view): + """Test DOF velocities shape.""" + velocities = view.get_dof_velocities() + assert velocities.shape == (4, 12) + assert velocities.dtype == wp.float32 + + def test_get_dof_projected_joint_forces_shape(self, view): + """Test projected joint forces shape.""" + forces = view.get_dof_projected_joint_forces() + assert forces.shape == (4, 12) + assert forces.dtype == wp.float32 + + def test_get_dof_limits_shape(self, view): + """Test DOF limits shape - should be (N, J) with wp.vec2f dtype.""" + limits = view.get_dof_limits() + assert limits.shape == (4, 12) + assert limits.dtype == wp.vec2f + + def test_get_dof_limits_default_values(self, view): + """Test that default limits are infinite.""" + limits = view.get_dof_limits() + limits_np = limits.numpy() + assert np.all(limits_np[:, :, 0] == float("-inf")) # lower + assert np.all(limits_np[:, :, 1] == float("inf")) # upper + + def test_get_dof_stiffnesses_shape(self, view): + """Test DOF stiffnesses shape.""" + stiffnesses = view.get_dof_stiffnesses() + assert stiffnesses.shape == (4, 12) + assert stiffnesses.dtype == wp.float32 + + def test_get_dof_dampings_shape(self, view): + """Test DOF dampings shape.""" + dampings = view.get_dof_dampings() + assert dampings.shape == (4, 12) + assert dampings.dtype == wp.float32 + + def test_get_dof_max_forces_shape(self, view): + """Test DOF max forces shape.""" + max_forces = view.get_dof_max_forces() + assert max_forces.shape == (4, 12) + assert max_forces.dtype == wp.float32 + + def test_get_dof_max_velocities_shape(self, view): + """Test DOF max velocities shape.""" + max_velocities = view.get_dof_max_velocities() + assert max_velocities.shape == (4, 12) + assert max_velocities.dtype == wp.float32 + + def test_get_dof_armatures_shape(self, view): + """Test DOF armatures shape.""" + armatures = view.get_dof_armatures() + assert armatures.shape == (4, 12) + assert armatures.dtype == wp.float32 + + def test_get_dof_friction_coefficients_shape(self, view): + """Test DOF friction coefficients shape.""" + friction = view.get_dof_friction_coefficients() + assert friction.shape == (4, 12) + assert friction.dtype == wp.float32 + + def test_get_dof_friction_properties_shape(self, view): + """Test DOF friction properties shape.""" + friction_props = view.get_dof_friction_properties() + assert friction_props.shape == (4, 12, 3) + assert friction_props.dtype == wp.float32 + + +class TestMockArticulationViewWarpMassGetters: + """Tests for MockArticulationViewWarp mass property getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 13) + assert masses.dtype == wp.float32 + + def test_get_coms_shape(self, view): + """Test centers of mass shape - should be (N, L) with wp.transformf dtype.""" + coms = view.get_coms() + assert coms.shape == (4, 13) + assert coms.dtype == wp.transformf + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 13, 9) + assert inertias.dtype == wp.float32 + + +class TestMockArticulationViewWarpSetters: + """Tests for MockArticulationViewWarp setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_root_transforms(self, view): + """Test setting root transforms.""" + new_data = np.random.randn(4, 7).astype(np.float32) + new_transforms = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_root_transforms(new_transforms) + result = view.get_root_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_dof_positions(self, view): + """Test setting DOF positions.""" + new_data = np.random.randn(4, 12).astype(np.float32) + new_positions = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_dof_positions(new_positions) + result = view.get_dof_positions() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_dof_positions_with_indices(self, view): + """Test setting DOF positions with indices.""" + new_data = np.random.randn(2, 12).astype(np.float32) + new_positions = wp.array(new_data, dtype=wp.float32, device="cpu") + indices = wp.array([0, 2], dtype=wp.int32, device="cpu") + view.set_dof_positions(new_positions, indices=indices) + result = view.get_dof_positions() + result_np = result.numpy() + np.testing.assert_allclose(result_np[0], new_data[0], rtol=1e-5) + np.testing.assert_allclose(result_np[2], new_data[1], rtol=1e-5) + + def test_set_dof_velocities(self, view): + """Test setting DOF velocities.""" + new_data = np.random.randn(4, 12).astype(np.float32) + new_velocities = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_dof_velocities(new_velocities) + result = view.get_dof_velocities() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_dof_limits(self, view): + """Test setting DOF limits.""" + new_data = np.random.randn(4, 12, 2).astype(np.float32) + new_limits = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_dof_limits(new_limits) + result = view.get_dof_limits() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_dof_stiffnesses(self, view): + """Test setting DOF stiffnesses.""" + new_data = np.abs(np.random.randn(4, 12)).astype(np.float32) + new_stiffnesses = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_dof_stiffnesses(new_stiffnesses) + result = view.get_dof_stiffnesses() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + +class TestMockArticulationViewWarpNoopSetters: + """Tests for MockArticulationViewWarp no-op setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs.""" + return MockArticulationViewWarp(count=4, num_dofs=12, device="cpu") + + def test_set_dof_position_targets_noop(self, view): + """Test that set_dof_position_targets is a no-op.""" + targets = wp.zeros((4, 12), dtype=wp.float32, device="cpu") + view.set_dof_position_targets(targets) + + def test_set_dof_velocity_targets_noop(self, view): + """Test that set_dof_velocity_targets is a no-op.""" + targets = wp.zeros((4, 12), dtype=wp.float32, device="cpu") + view.set_dof_velocity_targets(targets) + + def test_set_dof_actuation_forces_noop(self, view): + """Test that set_dof_actuation_forces is a no-op.""" + forces = wp.zeros((4, 12), dtype=wp.float32, device="cpu") + view.set_dof_actuation_forces(forces) + + +class TestMockArticulationViewWarpMockSetters: + """Tests for MockArticulationViewWarp mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationViewWarp(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_mock_root_transforms(self, view): + """Test mock root transform setter.""" + mock_data = np.random.randn(4, 7).astype(np.float32) + mock_transforms = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_root_transforms(mock_transforms) + result = view.get_root_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_link_transforms(self, view): + """Test mock link transform setter.""" + mock_data = np.random.randn(4, 13, 7).astype(np.float32) + mock_transforms = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_link_transforms(mock_transforms) + result = view.get_link_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_dof_positions(self, view): + """Test mock DOF position setter.""" + mock_data = np.random.randn(4, 12).astype(np.float32) + mock_positions = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_dof_positions(mock_positions) + result = view.get_dof_positions() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_dof_velocities(self, view): + """Test mock DOF velocity setter.""" + mock_data = np.random.randn(4, 12).astype(np.float32) + mock_velocities = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_dof_velocities(mock_velocities) + result = view.get_dof_velocities() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + +class TestMockArticulationViewWarpFactories: + """Tests for articulation view factory functions with warp backend.""" + + def test_create_mock_articulation_view_basic(self): + """Test basic factory usage with warp backend.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13, backend="warp") + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert isinstance(view, MockArticulationViewWarp) + + def test_create_mock_quadruped_view(self): + """Test quadruped factory with warp backend.""" + view = create_mock_quadruped_view(count=4, backend="warp") + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is False + assert "FL_hip_joint" in view.shared_metatype.dof_names + assert "base" in view.shared_metatype.link_names + assert isinstance(view, MockArticulationViewWarp) + + def test_create_mock_humanoid_view(self): + """Test humanoid factory with warp backend.""" + view = create_mock_humanoid_view(count=2, backend="warp") + assert view.count == 2 + assert view.shared_metatype.dof_count == 21 + assert view.shared_metatype.link_count == 22 + assert view.shared_metatype.fixed_base is False + assert "left_elbow" in view.shared_metatype.dof_names + assert "pelvis" in view.shared_metatype.link_names + assert isinstance(view, MockArticulationViewWarp) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py new file mode 100644 index 000000000000..04423ce60088 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -0,0 +1,217 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidBodyView.""" + +import pytest +import torch +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_body_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyView + + +class TestMockRigidBodyViewInit: + """Tests for MockRigidBodyView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidBodyView() + assert view.count == 1 + assert len(view.prim_paths) == 1 + assert view._backend == "torch" + + def test_custom_count(self): + """Test initialization with custom count.""" + view = MockRigidBodyView(count=4) + assert view.count == 4 + assert len(view.prim_paths) == 4 + + def test_custom_prim_paths(self): + """Test initialization with custom prim paths.""" + paths = ["/World/Body_A", "/World/Body_B"] + view = MockRigidBodyView(count=2, prim_paths=paths) + assert view.prim_paths == paths + + +class TestMockRigidBodyViewGetters: + """Tests for MockRigidBodyView getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_get_transforms_shape(self, view): + """Test transforms shape.""" + transforms = view.get_transforms() + assert transforms.shape == (4, 7) + + def test_get_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_transforms() + # xyzw format: [x, y, z, w] = [0, 0, 0, 1] for identity + assert torch.allclose(transforms[:, 3:6], torch.zeros(4, 3)) # xyz = 0 + assert torch.allclose(transforms[:, 6], torch.ones(4)) # w = 1 + + def test_get_velocities_shape(self, view): + """Test velocities shape.""" + velocities = view.get_velocities() + assert velocities.shape == (4, 6) + + def test_get_accelerations_shape(self, view): + """Test accelerations shape.""" + accelerations = view.get_accelerations() + assert accelerations.shape == (4, 6) + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 1) + + def test_get_masses_default_value(self, view): + """Test that default mass is 1.""" + masses = view.get_masses() + assert torch.allclose(masses, torch.ones(4, 1)) + + def test_get_coms_shape(self, view): + """Test centers of mass shape.""" + coms = view.get_coms() + assert coms.shape == (4, 7) + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 9) + + def test_get_inertias_default_value(self, view): + """Test that default inertia is identity.""" + inertias = view.get_inertias() + expected = torch.eye(3).repeat(4, 1).reshape(4, 9) + assert torch.allclose(inertias, expected) + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + transforms1 = view.get_transforms() + transforms1[0, 0] = 999.0 + transforms2 = view.get_transforms() + assert transforms2[0, 0] != 999.0 + + +class TestMockRigidBodyViewSetters: + """Tests for MockRigidBodyView setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_set_transforms(self, view): + """Test setting transforms.""" + new_transforms = torch.randn(4, 7) + view.set_transforms(new_transforms) + result = view.get_transforms() + assert torch.allclose(result, new_transforms) + + def test_set_transforms_with_indices(self, view): + """Test setting transforms with indices.""" + new_transforms = torch.randn(2, 7) + indices = torch.tensor([0, 2]) + view.set_transforms(new_transforms, indices=indices) + result = view.get_transforms() + assert torch.allclose(result[0], new_transforms[0]) + assert torch.allclose(result[2], new_transforms[1]) + + def test_set_velocities(self, view): + """Test setting velocities.""" + new_velocities = torch.randn(4, 6) + view.set_velocities(new_velocities) + result = view.get_velocities() + assert torch.allclose(result, new_velocities) + + def test_set_masses(self, view): + """Test setting masses.""" + new_masses = torch.randn(4, 1, 1).abs() + view.set_masses(new_masses) + result = view.get_masses() + assert torch.allclose(result, new_masses) + + +class TestMockRigidBodyViewMockSetters: + """Tests for MockRigidBodyView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_set_mock_transforms(self, view): + """Test mock transform setter.""" + mock_data = torch.randn(4, 7) + view.set_mock_transforms(mock_data) + result = view.get_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_velocities(self, view): + """Test mock velocity setter.""" + mock_data = torch.randn(4, 6) + view.set_mock_velocities(mock_data) + result = view.get_velocities() + assert torch.allclose(result, mock_data) + + def test_set_mock_accelerations(self, view): + """Test mock acceleration setter.""" + mock_data = torch.randn(4, 6) + view.set_mock_accelerations(mock_data) + result = view.get_accelerations() + assert torch.allclose(result, mock_data) + + def test_set_mock_masses(self, view): + """Test mock mass setter.""" + mock_data = torch.randn(4, 1, 1).abs() + view.set_mock_masses(mock_data) + result = view.get_masses() + assert torch.allclose(result, mock_data) + + def test_set_mock_coms(self, view): + """Test mock center of mass setter.""" + mock_data = torch.randn(4, 1, 7) + view.set_mock_coms(mock_data) + result = view.get_coms() + assert torch.allclose(result, mock_data) + + def test_set_mock_inertias(self, view): + """Test mock inertia setter.""" + mock_data = torch.randn(4, 1, 3, 3) + view.set_mock_inertias(mock_data) + result = view.get_inertias() + assert torch.allclose(result, mock_data) + + +class TestMockRigidBodyViewActions: + """Tests for MockRigidBodyView action methods.""" + + def test_apply_forces_and_torques_at_position_noop(self): + """Test that apply_forces_and_torques_at_position is a no-op.""" + view = MockRigidBodyView(count=4) + # Should not raise + view.apply_forces_and_torques_at_position( + force_data=torch.randn(4, 3), + torque_data=torch.randn(4, 3), + position_data=torch.randn(4, 3), + ) + + +class TestMockRigidBodyViewFactory: + """Tests for create_mock_rigid_body_view factory function.""" + + def test_factory_basic(self): + """Test basic factory usage.""" + view = create_mock_rigid_body_view(count=4) + assert view.count == 4 + + def test_factory_with_prim_paths(self): + """Test factory with custom prim paths.""" + paths = ["/World/A", "/World/B"] + view = create_mock_rigid_body_view(count=2, prim_paths=paths) + assert view.prim_paths == paths diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view_warp.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view_warp.py new file mode 100644 index 000000000000..2e752052e9ac --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view_warp.py @@ -0,0 +1,259 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidBodyViewWarp.""" + +import numpy as np +import pytest +import warp as wp +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_body_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyViewWarp + + +class TestMockRigidBodyViewWarpInit: + """Tests for MockRigidBodyViewWarp initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidBodyViewWarp() + assert view.count == 1 + assert len(view.prim_paths) == 1 + assert view._backend == "warp" + + def test_custom_count(self): + """Test initialization with custom count.""" + view = MockRigidBodyViewWarp(count=4) + assert view.count == 4 + assert len(view.prim_paths) == 4 + + def test_custom_prim_paths(self): + """Test initialization with custom prim paths.""" + paths = ["/World/Body_A", "/World/Body_B"] + view = MockRigidBodyViewWarp(count=2, prim_paths=paths) + assert view.prim_paths == paths + + +class TestMockRigidBodyViewWarpGetters: + """Tests for MockRigidBodyViewWarp getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyViewWarp(count=4, device="cpu") + + def test_get_transforms_shape(self, view): + """Test transforms shape - should be (N, 7) with wp.float32 dtype.""" + transforms = view.get_transforms() + assert transforms.shape == (4, 7) + assert transforms.dtype == wp.float32 + + def test_get_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_transforms() + transforms_np = transforms.numpy() + # (N, 7) float32: [pos(3), quat_xyzw(4)] + for i in range(4): + pos = transforms_np[i, :3] + quat = transforms_np[i, 3:] # xyzw + np.testing.assert_allclose(pos, [0.0, 0.0, 0.0]) + np.testing.assert_allclose(quat[:3], [0.0, 0.0, 0.0]) # xyz = 0 + np.testing.assert_allclose(quat[3], 1.0) # w = 1 + + def test_get_velocities_shape(self, view): + """Test velocities shape - should be (N, 6) with wp.float32 dtype.""" + velocities = view.get_velocities() + assert velocities.shape == (4, 6) + assert velocities.dtype == wp.float32 + + def test_get_accelerations_shape(self, view): + """Test accelerations shape - should be (N, 6) with wp.float32 dtype.""" + accelerations = view.get_accelerations() + assert accelerations.shape == (4, 6) + assert accelerations.dtype == wp.float32 + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 1) + assert masses.dtype == wp.float32 + + def test_get_masses_default_value(self, view): + """Test that default mass is 1.""" + masses = view.get_masses() + masses_np = masses.numpy() + np.testing.assert_allclose(masses_np, np.ones((4, 1))) + + def test_get_coms_shape(self, view): + """Test centers of mass shape - should be (N, 7) with wp.float32 dtype.""" + coms = view.get_coms() + assert coms.shape == (4, 7) + assert coms.dtype == wp.float32 + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 9) + assert inertias.dtype == wp.float32 + + def test_get_inertias_default_value(self, view): + """Test that default inertia is identity.""" + inertias = view.get_inertias() + inertias_np = inertias.numpy() + expected = np.tile(np.eye(3).flatten(), (4, 1)) + np.testing.assert_allclose(inertias_np, expected) + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + transforms1 = view.get_transforms() + transforms1_np = transforms1.numpy() + transforms1_np[0, 0] = 999.0 + transforms2 = view.get_transforms() + transforms2_np = transforms2.numpy() + assert transforms2_np[0, 0] != 999.0 + + +class TestMockRigidBodyViewWarpSetters: + """Tests for MockRigidBodyViewWarp setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyViewWarp(count=4, device="cpu") + + def test_set_transforms(self, view): + """Test setting transforms.""" + # Create new transforms + new_data = np.random.randn(4, 7).astype(np.float32) + new_transforms = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_transforms(new_transforms) + result = view.get_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_transforms_with_indices(self, view): + """Test setting transforms with indices.""" + new_data = np.random.randn(2, 7).astype(np.float32) + new_transforms = wp.array(new_data, dtype=wp.float32, device="cpu") + indices = wp.array([0, 2], dtype=wp.int32, device="cpu") + view.set_transforms(new_transforms, indices=indices) + result = view.get_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np[0], new_data[0], rtol=1e-5) + np.testing.assert_allclose(result_np[2], new_data[1], rtol=1e-5) + + def test_set_velocities(self, view): + """Test setting velocities.""" + new_data = np.random.randn(4, 6).astype(np.float32) + new_velocities = wp.array(new_data, dtype=wp.float32, device="cpu") + view.set_velocities(new_velocities) + result = view.get_velocities() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_data, rtol=1e-5) + + def test_set_masses(self, view): + """Test setting masses.""" + new_masses_np = np.abs(np.random.randn(4, 1)).astype(np.float32) + new_masses = wp.array(new_masses_np, dtype=wp.float32, device="cpu") + view.set_masses(new_masses) + result = view.get_masses() + result_np = result.numpy() + np.testing.assert_allclose(result_np, new_masses_np, rtol=1e-5) + + +class TestMockRigidBodyViewWarpMockSetters: + """Tests for MockRigidBodyViewWarp mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyViewWarp(count=4, device="cpu") + + def test_set_mock_transforms(self, view): + """Test mock transform setter.""" + mock_data = np.random.randn(4, 7).astype(np.float32) + mock_transforms = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_transforms(mock_transforms) + result = view.get_transforms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_velocities(self, view): + """Test mock velocity setter.""" + mock_data = np.random.randn(4, 6).astype(np.float32) + mock_velocities = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_velocities(mock_velocities) + result = view.get_velocities() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_accelerations(self, view): + """Test mock acceleration setter.""" + mock_data = np.random.randn(4, 6).astype(np.float32) + mock_accelerations = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_accelerations(mock_accelerations) + result = view.get_accelerations() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_masses(self, view): + """Test mock mass setter.""" + mock_data = np.abs(np.random.randn(4, 1)).astype(np.float32) + mock_masses = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_masses(mock_masses) + result = view.get_masses() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_coms(self, view): + """Test mock center of mass setter.""" + mock_data = np.random.randn(4, 7).astype(np.float32) + mock_coms = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_coms(mock_coms) + result = view.get_coms() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_inertias(self, view): + """Test mock inertia setter.""" + mock_data = np.random.randn(4, 9).astype(np.float32) + mock_inertias = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_inertias(mock_inertias) + result = view.get_inertias() + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + +class TestMockRigidBodyViewWarpActions: + """Tests for MockRigidBodyViewWarp action methods.""" + + def test_apply_forces_and_torques_at_position_noop(self): + """Test that apply_forces_and_torques_at_position is a no-op.""" + view = MockRigidBodyViewWarp(count=4) + # Should not raise + forces = wp.zeros((4, 3), dtype=wp.float32, device="cpu") + torques = wp.zeros((4, 3), dtype=wp.float32, device="cpu") + positions = wp.zeros((4, 3), dtype=wp.float32, device="cpu") + view.apply_forces_and_torques_at_position( + force_data=forces, + torque_data=torques, + position_data=positions, + ) + + +class TestMockRigidBodyViewWarpFactory: + """Tests for create_mock_rigid_body_view factory with warp backend.""" + + def test_factory_basic(self): + """Test basic factory usage with warp backend.""" + view = create_mock_rigid_body_view(count=4, backend="warp") + assert view.count == 4 + assert isinstance(view, MockRigidBodyViewWarp) + + def test_factory_with_prim_paths(self): + """Test factory with custom prim paths.""" + paths = ["/World/A", "/World/B"] + view = create_mock_rigid_body_view(count=2, prim_paths=paths, backend="warp") + assert view.prim_paths == paths + assert isinstance(view, MockRigidBodyViewWarp) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py new file mode 100644 index 000000000000..4c2503531da5 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py @@ -0,0 +1,196 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidContactView.""" + +import pytest +import torch +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_contact_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidContactView + + +class TestMockRigidContactViewInit: + """Tests for MockRigidContactView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidContactView() + assert view.count == 1 + assert view.num_bodies == 1 + assert view.filter_count == 0 + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockRigidContactView( + count=4, + num_bodies=5, + filter_count=3, + ) + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + +class TestMockRigidContactViewGetters: + """Tests for MockRigidContactView getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactView(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_get_net_contact_forces_shape(self, view): + """Test net contact forces shape.""" + forces = view.get_net_contact_forces(dt=0.01) + # Shape: (N*B, 3) = (4*5, 3) = (20, 3) + assert forces.shape == (20, 3) + + def test_get_contact_force_matrix_shape(self, view): + """Test contact force matrix shape.""" + matrix = view.get_contact_force_matrix(dt=0.01) + # Shape: (N*B, F, 3) = (4*5, 3, 3) = (20, 3, 3) + assert matrix.shape == (20, 3, 3) + + def test_get_contact_data_shapes(self, view): + """Test contact data tuple shapes.""" + data = view.get_contact_data(dt=0.01) + assert len(data) == 6 + positions, normals, impulses, separations, num_found, patch_id = data + + total_bodies = 4 * 5 # count * num_bodies + max_contacts = 16 # default + + assert positions.shape == (total_bodies, max_contacts, 3) + assert normals.shape == (total_bodies, max_contacts, 3) + assert impulses.shape == (total_bodies, max_contacts, 3) + assert separations.shape == (total_bodies, max_contacts) + assert num_found.shape == (total_bodies,) + assert patch_id.shape == (total_bodies, max_contacts) + + def test_get_friction_data_shapes(self, view): + """Test friction data tuple shapes.""" + data = view.get_friction_data(dt=0.01) + assert len(data) == 4 + forces, impulses, points, patch_id = data + + total_bodies = 4 * 5 + max_contacts = 16 + + assert forces.shape == (total_bodies, max_contacts, 3) + assert impulses.shape == (total_bodies, max_contacts, 3) + assert points.shape == (total_bodies, max_contacts, 3) + assert patch_id.shape == (total_bodies, max_contacts) + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + forces1 = view.get_net_contact_forces(0.01) + forces1[0, 0] = 999.0 + forces2 = view.get_net_contact_forces(0.01) + assert forces2[0, 0] != 999.0 + + +class TestMockRigidContactViewMockSetters: + """Tests for MockRigidContactView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactView(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_set_mock_net_contact_forces(self, view): + """Test mock net contact forces setter.""" + mock_data = torch.randn(20, 3) + view.set_mock_net_contact_forces(mock_data) + result = view.get_net_contact_forces(0.01) + assert torch.allclose(result, mock_data) + + def test_set_mock_contact_force_matrix(self, view): + """Test mock contact force matrix setter.""" + mock_data = torch.randn(20, 3, 3) + view.set_mock_contact_force_matrix(mock_data) + result = view.get_contact_force_matrix(0.01) + assert torch.allclose(result, mock_data) + + def test_set_mock_contact_data_partial(self, view): + """Test setting partial contact data.""" + mock_positions = torch.randn(20, 16, 3) + mock_normals = torch.randn(20, 16, 3) + view.set_mock_contact_data(positions=mock_positions, normals=mock_normals) + + positions, normals, _, _, _, _ = view.get_contact_data(0.01) + assert torch.allclose(positions, mock_positions) + assert torch.allclose(normals, mock_normals) + + def test_set_mock_contact_data_full(self, view): + """Test setting full contact data.""" + total_bodies = 20 + max_contacts = 16 + + mock_positions = torch.randn(total_bodies, max_contacts, 3) + mock_normals = torch.randn(total_bodies, max_contacts, 3) + mock_impulses = torch.randn(total_bodies, max_contacts, 3) + mock_separations = torch.randn(total_bodies, max_contacts) + mock_num_found = torch.randint(0, max_contacts, (total_bodies,), dtype=torch.int32) + mock_patch_id = torch.randint(0, 10, (total_bodies, max_contacts), dtype=torch.int32) + + view.set_mock_contact_data( + positions=mock_positions, + normals=mock_normals, + impulses=mock_impulses, + separations=mock_separations, + num_found=mock_num_found, + patch_id=mock_patch_id, + ) + + positions, normals, impulses, separations, num_found, patch_id = view.get_contact_data(0.01) + + assert torch.allclose(positions, mock_positions) + assert torch.allclose(normals, mock_normals) + assert torch.allclose(impulses, mock_impulses) + assert torch.allclose(separations, mock_separations) + assert torch.equal(num_found, mock_num_found) + assert torch.equal(patch_id, mock_patch_id) + + def test_set_mock_friction_data(self, view): + """Test setting friction data.""" + total_bodies = 20 + max_contacts = 16 + + mock_forces = torch.randn(total_bodies, max_contacts, 3) + mock_impulses = torch.randn(total_bodies, max_contacts, 3) + + view.set_mock_friction_data(forces=mock_forces, impulses=mock_impulses) + + forces, impulses, _, _ = view.get_friction_data(0.01) + assert torch.allclose(forces, mock_forces) + assert torch.allclose(impulses, mock_impulses) + + +class TestMockRigidContactViewFactory: + """Tests for create_mock_rigid_contact_view factory function.""" + + def test_factory_basic(self): + """Test basic factory usage.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3) + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + def test_factory_custom_max_contacts(self): + """Test factory with custom max contact data count.""" + view = create_mock_rigid_contact_view(count=2, num_bodies=3, max_contact_data_count=32) + _, _, _, separations, _, _ = view.get_contact_data(0.01) + assert separations.shape[1] == 32 + + +class TestMockRigidContactViewZeroFilterCount: + """Tests for MockRigidContactView with zero filter count.""" + + def test_contact_force_matrix_zero_filters(self): + """Test contact force matrix with zero filters.""" + view = MockRigidContactView(count=4, num_bodies=5, filter_count=0) + matrix = view.get_contact_force_matrix(0.01) + # Shape: (N*B, F, 3) = (20, 0, 3) + assert matrix.shape == (20, 0, 3) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view_warp.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view_warp.py new file mode 100644 index 000000000000..a119db3dff2b --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view_warp.py @@ -0,0 +1,232 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidContactViewWarp.""" + +import numpy as np +import pytest +import warp as wp +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_contact_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidContactViewWarp + + +class TestMockRigidContactViewWarpInit: + """Tests for MockRigidContactViewWarp initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidContactViewWarp() + assert view.count == 1 + assert view.num_bodies == 1 + assert view.filter_count == 0 + assert view._backend == "warp" + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockRigidContactViewWarp( + count=4, + num_bodies=5, + filter_count=3, + ) + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + +class TestMockRigidContactViewWarpGetters: + """Tests for MockRigidContactViewWarp getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactViewWarp(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_get_net_contact_forces_shape(self, view): + """Test net contact forces shape - should be (N*B, 3) with wp.float32 dtype.""" + forces = view.get_net_contact_forces(dt=0.01) + # Shape: (N*B, 3) = (4*5, 3) = (20, 3) + assert forces.shape == (20, 3) + assert forces.dtype == wp.float32 + + def test_get_contact_force_matrix_shape(self, view): + """Test contact force matrix shape - should be (N*B, F, 3) with wp.float32 dtype.""" + matrix = view.get_contact_force_matrix(dt=0.01) + # Shape: (N*B, F, 3) = (4*5, 3, 3) = (20, 3, 3) + assert matrix.shape == (20, 3, 3) + assert matrix.dtype == wp.float32 + + def test_get_contact_data_shapes(self, view): + """Test contact data tuple shapes.""" + data = view.get_contact_data(dt=0.01) + assert len(data) == 6 + positions, normals, impulses, separations, num_found, patch_id = data + + total_bodies = 4 * 5 # count * num_bodies + max_contacts = 16 # default + + assert positions.shape == (total_bodies, max_contacts, 3) + assert positions.dtype == wp.float32 + assert normals.shape == (total_bodies, max_contacts, 3) + assert normals.dtype == wp.float32 + assert impulses.shape == (total_bodies, max_contacts, 3) + assert impulses.dtype == wp.float32 + assert separations.shape == (total_bodies, max_contacts) + assert separations.dtype == wp.float32 + assert num_found.shape == (total_bodies,) + assert num_found.dtype == wp.int32 + assert patch_id.shape == (total_bodies, max_contacts) + assert patch_id.dtype == wp.int32 + + def test_get_friction_data_shapes(self, view): + """Test friction data tuple shapes.""" + data = view.get_friction_data(dt=0.01) + assert len(data) == 4 + forces, impulses, points, patch_id = data + + total_bodies = 4 * 5 + max_contacts = 16 + + assert forces.shape == (total_bodies, max_contacts, 3) + assert forces.dtype == wp.float32 + assert impulses.shape == (total_bodies, max_contacts, 3) + assert impulses.dtype == wp.float32 + assert points.shape == (total_bodies, max_contacts, 3) + assert points.dtype == wp.float32 + assert patch_id.shape == (total_bodies, max_contacts) + assert patch_id.dtype == wp.int32 + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + forces1 = view.get_net_contact_forces(0.01) + forces1_np = forces1.numpy() + forces1_np[0, 0] = 999.0 + forces2 = view.get_net_contact_forces(0.01) + forces2_np = forces2.numpy() + assert forces2_np[0, 0] != 999.0 + + +class TestMockRigidContactViewWarpMockSetters: + """Tests for MockRigidContactViewWarp mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactViewWarp(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_set_mock_net_contact_forces(self, view): + """Test mock net contact forces setter.""" + mock_data = np.random.randn(20, 3).astype(np.float32) + mock_forces = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_net_contact_forces(mock_forces) + result = view.get_net_contact_forces(0.01) + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_contact_force_matrix(self, view): + """Test mock contact force matrix setter.""" + mock_data = np.random.randn(20, 3, 3).astype(np.float32) + mock_matrix = wp.array(mock_data, dtype=wp.float32, device="cpu") + view.set_mock_contact_force_matrix(mock_matrix) + result = view.get_contact_force_matrix(0.01) + result_np = result.numpy() + np.testing.assert_allclose(result_np, mock_data, rtol=1e-5) + + def test_set_mock_contact_data_partial(self, view): + """Test setting partial contact data.""" + mock_positions_data = np.random.randn(20, 16, 3).astype(np.float32) + mock_normals_data = np.random.randn(20, 16, 3).astype(np.float32) + mock_positions = wp.array(mock_positions_data, dtype=wp.float32, device="cpu") + mock_normals = wp.array(mock_normals_data, dtype=wp.float32, device="cpu") + view.set_mock_contact_data(positions=mock_positions, normals=mock_normals) + + positions, normals, _, _, _, _ = view.get_contact_data(0.01) + positions_np = positions.numpy() + normals_np = normals.numpy() + np.testing.assert_allclose(positions_np, mock_positions_data, rtol=1e-5) + np.testing.assert_allclose(normals_np, mock_normals_data, rtol=1e-5) + + def test_set_mock_contact_data_full(self, view): + """Test setting full contact data.""" + total_bodies = 20 + max_contacts = 16 + + mock_positions_data = np.random.randn(total_bodies, max_contacts, 3).astype(np.float32) + mock_normals_data = np.random.randn(total_bodies, max_contacts, 3).astype(np.float32) + mock_impulses_data = np.random.randn(total_bodies, max_contacts, 3).astype(np.float32) + mock_separations_data = np.random.randn(total_bodies, max_contacts).astype(np.float32) + mock_num_found_data = np.random.randint(0, max_contacts, (total_bodies,), dtype=np.int32) + mock_patch_id_data = np.random.randint(0, 10, (total_bodies, max_contacts), dtype=np.int32) + + mock_positions = wp.array(mock_positions_data, dtype=wp.float32, device="cpu") + mock_normals = wp.array(mock_normals_data, dtype=wp.float32, device="cpu") + mock_impulses = wp.array(mock_impulses_data, dtype=wp.float32, device="cpu") + mock_separations = wp.array(mock_separations_data, dtype=wp.float32, device="cpu") + mock_num_found = wp.array(mock_num_found_data, dtype=wp.int32, device="cpu") + mock_patch_id = wp.array(mock_patch_id_data, dtype=wp.int32, device="cpu") + + view.set_mock_contact_data( + positions=mock_positions, + normals=mock_normals, + impulses=mock_impulses, + separations=mock_separations, + num_found=mock_num_found, + patch_id=mock_patch_id, + ) + + positions, normals, impulses, separations, num_found, patch_id = view.get_contact_data(0.01) + + np.testing.assert_allclose(positions.numpy(), mock_positions_data, rtol=1e-5) + np.testing.assert_allclose(normals.numpy(), mock_normals_data, rtol=1e-5) + np.testing.assert_allclose(impulses.numpy(), mock_impulses_data, rtol=1e-5) + np.testing.assert_allclose(separations.numpy(), mock_separations_data, rtol=1e-5) + np.testing.assert_array_equal(num_found.numpy(), mock_num_found_data) + np.testing.assert_array_equal(patch_id.numpy(), mock_patch_id_data) + + def test_set_mock_friction_data(self, view): + """Test setting friction data.""" + total_bodies = 20 + max_contacts = 16 + + mock_forces_data = np.random.randn(total_bodies, max_contacts, 3).astype(np.float32) + mock_impulses_data = np.random.randn(total_bodies, max_contacts, 3).astype(np.float32) + + mock_forces = wp.array(mock_forces_data, dtype=wp.float32, device="cpu") + mock_impulses = wp.array(mock_impulses_data, dtype=wp.float32, device="cpu") + + view.set_mock_friction_data(forces=mock_forces, impulses=mock_impulses) + + forces, impulses, _, _ = view.get_friction_data(0.01) + np.testing.assert_allclose(forces.numpy(), mock_forces_data, rtol=1e-5) + np.testing.assert_allclose(impulses.numpy(), mock_impulses_data, rtol=1e-5) + + +class TestMockRigidContactViewWarpFactory: + """Tests for create_mock_rigid_contact_view factory with warp backend.""" + + def test_factory_basic(self): + """Test basic factory usage with warp backend.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3, backend="warp") + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + assert isinstance(view, MockRigidContactViewWarp) + + def test_factory_custom_max_contacts(self): + """Test factory with custom max contact data count.""" + view = create_mock_rigid_contact_view(count=2, num_bodies=3, max_contact_data_count=32, backend="warp") + _, _, _, separations, _, _ = view.get_contact_data(0.01) + assert separations.shape[1] == 32 + assert isinstance(view, MockRigidContactViewWarp) + + +class TestMockRigidContactViewWarpZeroFilterCount: + """Tests for MockRigidContactViewWarp with zero filter count.""" + + def test_contact_force_matrix_zero_filters(self): + """Test contact force matrix with zero filters.""" + view = MockRigidContactViewWarp(count=4, num_bodies=5, filter_count=0) + matrix = view.get_contact_force_matrix(0.01) + # Shape: (N*B, F, 3) = (20, 0, 3) + assert matrix.shape == (20, 0, 3) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py new file mode 100644 index 000000000000..7c49afdef342 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py @@ -0,0 +1,180 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for patching utilities.""" + +from isaaclab_physx.test.mock_interfaces.utils import ( + mock_articulation_view, + mock_rigid_body_view, + mock_rigid_contact_view, + patch_articulation_view, + patch_rigid_body_view, + patch_rigid_contact_view, +) + + +class TestPatchRigidBodyView: + """Tests for patch_rigid_body_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality. + + Note: Patching works when the target module imports the class dynamically. + In tests where we import at module level, we need to import inside the + context to get the patched version. + """ + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + with patch_rigid_body_view(target, count=4): + # Import inside context to get patched version + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_body_view + + view = mock_rigid_body_view.MockRigidBodyView() + assert view.count == 4 + + def test_patching_preserves_configuration(self): + """Test that patching preserves configuration.""" + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + prim_paths = ["/World/A", "/World/B", "/World/C", "/World/D"] + with patch_rigid_body_view(target, count=4, prim_paths=prim_paths): + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_body_view + + view = mock_rigid_body_view.MockRigidBodyView() + assert view.prim_paths == prim_paths + + +class TestPatchArticulationView: + """Tests for patch_articulation_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality.""" + target = "isaaclab_physx.test.mock_interfaces.views.mock_articulation_view.MockArticulationView" + with patch_articulation_view(target, count=4, num_dofs=12, num_links=13): + from isaaclab_physx.test.mock_interfaces.views import mock_articulation_view + + view = mock_articulation_view.MockArticulationView() + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + + def test_patching_with_names(self): + """Test patching with custom names.""" + target = "isaaclab_physx.test.mock_interfaces.views.mock_articulation_view.MockArticulationView" + dof_names = ["joint_a", "joint_b"] + with patch_articulation_view(target, num_dofs=2, dof_names=dof_names): + from isaaclab_physx.test.mock_interfaces.views import mock_articulation_view + + view = mock_articulation_view.MockArticulationView() + assert view.shared_metatype.dof_names == dof_names + + +class TestPatchRigidContactView: + """Tests for patch_rigid_contact_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality.""" + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_contact_view.MockRigidContactView" + with patch_rigid_contact_view(target, count=4, num_bodies=5, filter_count=3): + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_contact_view + + view = mock_rigid_contact_view.MockRigidContactView() + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + +class TestDecoratorUtilities: + """Tests for decorator utilities. + + Note: These decorators are designed for wrapping functions, not pytest tests. + Pytest inspects function signatures for fixtures, which conflicts with our + decorator pattern. We test them by manually invoking the decorated functions. + """ + + def test_mock_rigid_body_view_decorator(self): + """Test mock_rigid_body_view decorator injects mock view.""" + + @mock_rigid_body_view(count=4) + def my_function(view): + return view.count, view.get_transforms().shape + + count, shape = my_function() + assert count == 4 + assert shape == (4, 7) + + def test_mock_articulation_view_decorator(self): + """Test mock_articulation_view decorator injects mock view.""" + + @mock_articulation_view(count=4, num_dofs=12, num_links=13) + def my_function(view): + return ( + view.count, + view.shared_metatype.dof_count, + view.get_dof_positions().shape, + ) + + count, dof_count, shape = my_function() + assert count == 4 + assert dof_count == 12 + assert shape == (4, 12) + + def test_mock_rigid_contact_view_decorator(self): + """Test mock_rigid_contact_view decorator injects mock view.""" + + @mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3) + def my_function(view): + return ( + view.count, + view.num_bodies, + view.get_net_contact_forces(0.01).shape, + ) + + count, num_bodies, shape = my_function() + assert count == 4 + assert num_bodies == 5 + assert shape == (20, 3) # 4 * 5 = 20 + + def test_decorator_with_prim_paths(self): + """Test decorator with custom prim paths.""" + + @mock_rigid_body_view(count=2, prim_paths=["/World/A", "/World/B"]) + def my_function(view): + return view.prim_paths + + paths = my_function() + assert paths == ["/World/A", "/World/B"] + + def test_decorator_with_fixed_base(self): + """Test decorator with fixed base.""" + + @mock_articulation_view(count=2, num_dofs=6, fixed_base=True) + def my_function(view): + return view.shared_metatype.fixed_base + + fixed_base = my_function() + assert fixed_base is True + + +class TestDecoratorFunctionPreservation: + """Tests that decorators preserve function metadata.""" + + def test_function_name_preserved(self): + """Test that function name is preserved.""" + + @mock_rigid_body_view(count=4) + def my_documented_function(view): + """This is a documented function.""" + pass + + assert my_documented_function.__name__ == "my_documented_function" + + def test_docstring_preserved(self): + """Test that docstring is preserved.""" + + @mock_rigid_body_view(count=4) + def my_documented_function(view): + """This is a documented function.""" + pass + + assert "documented function" in my_documented_function.__doc__ diff --git a/source/isaaclab_rl/changelog.d/.gitkeep b/source/isaaclab_rl/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_rl/changelog.d/leapp_export_integration.rst b/source/isaaclab_rl/changelog.d/leapp_export_integration.rst new file mode 100644 index 000000000000..8a9a65b18d7f --- /dev/null +++ b/source/isaaclab_rl/changelog.d/leapp_export_integration.rst @@ -0,0 +1,5 @@ +Added +^^^^^ + +* Added RSL-RL LEAPP export scripts and integration tests for exporting trained + policies with semantic input, output, and state annotations. diff --git a/source/isaaclab_rl/config/extension.toml b/source/isaaclab_rl/config/extension.toml index 35ce26490606..6b5ae668f03e 100644 --- a/source/isaaclab_rl/config/extension.toml +++ b/source/isaaclab_rl/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.5.0" +version = "0.5.1" # Description title = "Isaac Lab RL" diff --git a/source/isaaclab_rl/docs/CHANGELOG.rst b/source/isaaclab_rl/docs/CHANGELOG.rst index 9666e7214e81..0c4c4323ced7 100644 --- a/source/isaaclab_rl/docs/CHANGELOG.rst +++ b/source/isaaclab_rl/docs/CHANGELOG.rst @@ -1,6 +1,16 @@ Changelog --------- +0.5.1 (2026-04-21) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Locked h5py dependency to last stable version 3.15.1 to prevent package import errors on Windows with version 3.16.0. +* Updated skrl wrapper to support the new version of skrl 2.0. + + 0.5.0 (2026-3-04) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py index f1d006ae6cc9..802dbfc77d8b 100644 --- a/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.py @@ -5,5 +5,6 @@ """Wrappers and utilities to configure an environment for rl-games library.""" -from .pbt import * -from .rl_games import * +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.pyi b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.pyi new file mode 100644 index 000000000000..e413da6ebe2b --- /dev/null +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MultiObserver", + "PbtAlgoObserver", + "PbtCfg", + "RlGamesGpuEnv", + "RlGamesVecEnvWrapper", + "make_concat_plan", +] + +from .pbt import MultiObserver, PbtAlgoObserver, PbtCfg +from .rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper, make_concat_plan diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py index c56bf4f40e51..e14e0f6d52c5 100644 --- a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.py @@ -3,5 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -from .pbt import MultiObserver, PbtAlgoObserver -from .pbt_cfg import PbtCfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.pyi b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.pyi new file mode 100644 index 000000000000..c1bedec66818 --- /dev/null +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MultiObserver", + "PbtAlgoObserver", + "PbtCfg", +] + +from .pbt import MultiObserver, PbtAlgoObserver +from .pbt_cfg import PbtCfg diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py index b494dd1fdefe..2fdf88badb61 100644 --- a/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/pbt/pbt_cfg.py @@ -53,11 +53,13 @@ class PbtCfg: """Lower and upper bound of multiplicative change factor (sampled in [change_min, change_max]).""" mutation: dict[str, str] = {} - """Mutation strings indicating which parameter will be mutated when pbt restart - example: + """Mutation strings indicating which parameter will be mutated when pbt restart. + + Example:: + { - "agent.params.config.learning_rate": "mutate_float" - "agent.params.config.grad_norm": "mutate_float" - "agent.params.config.entropy_coef": "mutate_float" + "agent.params.config.learning_rate": "mutate_float", + "agent.params.config.grad_norm": "mutate_float", + "agent.params.config.entropy_coef": "mutate_float", } """ diff --git a/source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py b/source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py index d5c786c7c9e7..ccc16d29abaf 100644 --- a/source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py +++ b/source/isaaclab_rl/isaaclab_rl/rl_games/rl_games.py @@ -34,7 +34,9 @@ # needed to import for allowing type-hinting:gym.spaces.Box | None from __future__ import annotations +import contextlib from collections.abc import Callable +from typing import TYPE_CHECKING import gym.spaces # needed for rl-games incompatibility: https://github.com/Denys88/rl_games/issues/261 import gymnasium @@ -42,7 +44,16 @@ from rl_games.common import env_configurations from rl_games.common.vecenv import IVecEnv -from isaaclab.envs import DirectRLEnv, ManagerBasedRLEnv, VecEnvObs +from isaaclab.envs import VecEnvObs + +if TYPE_CHECKING: + from isaaclab.envs import ( + DirectRLEnv, + ManagerBasedRLEnv, + ) + + with contextlib.suppress(ImportError): + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp """ Vectorized environment wrapper. @@ -108,9 +119,25 @@ def __init__( ValueError: If specified, the privileged observations (critic) are not of type :obj:`gym.spaces.Box`. """ # check that input is valid - if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv): + # NOTE: import here (not at module level) to avoid loading heavy env classes before Isaac Sim is initialized. + from isaaclab.envs import DirectMARLEnv, DirectRLEnv, ManagerBasedRLEnv + + try: + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp + except ImportError: + DirectRLEnvWarp = None + ManagerBasedRLEnvWarp = None + + allowed_types = (ManagerBasedRLEnv, DirectRLEnv, DirectMARLEnv) + if DirectRLEnvWarp is not None: + allowed_types += (DirectRLEnvWarp,) + if ManagerBasedRLEnvWarp is not None: + allowed_types += (ManagerBasedRLEnvWarp,) + + if not isinstance(env.unwrapped, allowed_types): raise ValueError( - "The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type:" + "The environment must be inherited from ManagerBasedRLEnv / DirectRLEnv / DirectRLEnvWarp /" + " ManagerBasedRLEnvWarp. Environment type:" f" {type(env)}" ) # initialize the wrapper @@ -210,7 +237,7 @@ def class_name(cls) -> str: return cls.__name__ @property - def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv: + def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv | DirectRLEnvWarp | ManagerBasedRLEnvWarp: """Returns the base environment of the wrapper. This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers. diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.py index 31c0865f4566..517733e19374 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.py @@ -15,10 +15,6 @@ """ -from .distillation_cfg import * -from .exporter import export_policy_as_jit, export_policy_as_onnx -from .rl_cfg import * -from .rnd_cfg import RslRlRndCfg -from .symmetry_cfg import RslRlSymmetryCfg -from .vecenv_wrapper import RslRlVecEnvWrapper -from .utils import handle_deprecated_rsl_rl_cfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.pyi b/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.pyi new file mode 100644 index 000000000000..3eae1d119b5b --- /dev/null +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/__init__.pyi @@ -0,0 +1,47 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "RslRlDistillationAlgorithmCfg", + "RslRlDistillationRunnerCfg", + "RslRlDistillationStudentTeacherCfg", + "RslRlDistillationStudentTeacherRecurrentCfg", + "export_policy_as_jit", + "export_policy_as_onnx", + "handle_deprecated_rsl_rl_cfg", + "RslRlBaseRunnerCfg", + "RslRlCNNModelCfg", + "RslRlMLPModelCfg", + "RslRlOnPolicyRunnerCfg", + "RslRlPpoActorCriticCfg", + "RslRlPpoActorCriticRecurrentCfg", + "RslRlPpoAlgorithmCfg", + "RslRlRNNModelCfg", + "RslRlRndCfg", + "RslRlSymmetryCfg", + "RslRlVecEnvWrapper", +] + +from .distillation_cfg import ( + RslRlDistillationAlgorithmCfg, + RslRlDistillationRunnerCfg, + RslRlDistillationStudentTeacherCfg, + RslRlDistillationStudentTeacherRecurrentCfg, +) +from .exporter import export_policy_as_jit, export_policy_as_onnx +from .rl_cfg import ( + RslRlBaseRunnerCfg, + RslRlCNNModelCfg, + RslRlMLPModelCfg, + RslRlOnPolicyRunnerCfg, + RslRlPpoActorCriticCfg, + RslRlPpoActorCriticRecurrentCfg, + RslRlPpoAlgorithmCfg, + RslRlRNNModelCfg, +) +from .rnd_cfg import RslRlRndCfg +from .symmetry_cfg import RslRlSymmetryCfg +from .utils import handle_deprecated_rsl_rl_cfg +from .vecenv_wrapper import RslRlVecEnvWrapper diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/distillation_cfg.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/distillation_cfg.py index 8e81aae4d1f2..f94e94112901 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/distillation_cfg.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/distillation_cfg.py @@ -22,7 +22,7 @@ class RslRlDistillationAlgorithmCfg: """Configuration for the distillation algorithm.""" class_name: str = "Distillation" - """The algorithm class name. Default is Distillation.""" + """The algorithm class name. Defaults to Distillation.""" num_learning_epochs: int = MISSING """The number of updates performed with each sample.""" @@ -34,13 +34,13 @@ class RslRlDistillationAlgorithmCfg: """The number of environment steps the gradient flows back.""" max_grad_norm: None | float = None - """The maximum norm the gradient is clipped to.""" + """The maximum norm the gradient is clipped to. Defaults to None.""" optimizer: Literal["adam", "adamw", "sgd", "rmsprop"] = "adam" - """The optimizer to use for the student policy.""" + """The optimizer to use for the student policy. Defaults to adam.""" loss_type: Literal["mse", "huber"] = "mse" - """The loss type to use for the student policy.""" + """The loss type to use for the student policy. Defaults to mse.""" ######################### @@ -53,7 +53,7 @@ class RslRlDistillationRunnerCfg(RslRlBaseRunnerCfg): """Configuration of the runner for distillation algorithms.""" class_name: str = "DistillationRunner" - """The runner class name. Default is DistillationRunner.""" + """The runner class name. Defaults to DistillationRunner.""" student: RslRlMLPModelCfg = MISSING """The student configuration.""" @@ -85,13 +85,13 @@ class RslRlDistillationStudentTeacherCfg: """ class_name: str = "StudentTeacher" - """The policy class name. Default is StudentTeacher.""" + """The policy class name. Defaults to StudentTeacher.""" init_noise_std: float = MISSING """The initial noise standard deviation for the student policy.""" noise_std_type: Literal["scalar", "log"] = "scalar" - """The type of noise standard deviation for the policy. Default is scalar.""" + """The type of noise standard deviation for the policy. Defaults to scalar.""" student_obs_normalization: bool = MISSING """Whether to normalize the observation for the student network.""" @@ -117,7 +117,7 @@ class RslRlDistillationStudentTeacherRecurrentCfg(RslRlDistillationStudentTeache """ class_name: str = "StudentTeacherRecurrent" - """The policy class name. Default is StudentTeacherRecurrent.""" + """The policy class name. Defaults to StudentTeacherRecurrent.""" rnn_type: str = MISSING """The type of the RNN network. Either "lstm" or "gru".""" diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/rl_cfg.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/rl_cfg.py index 0b9f14bf6b2e..6a57d492b7f3 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/rl_cfg.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/rl_cfg.py @@ -23,7 +23,7 @@ class RslRlMLPModelCfg: """Configuration for the MLP model.""" class_name: str = "MLPModel" - """The model class name. Default is MLPModel.""" + """The model class name. Defaults to MLPModel.""" hidden_dims: list[int] = MISSING """The hidden dimensions of the MLP network.""" @@ -32,10 +32,10 @@ class RslRlMLPModelCfg: """The activation function for the MLP network.""" obs_normalization: bool = False - """Whether to normalize the observation for the model. Default is False.""" + """Whether to normalize the observation for the model. Defaults to False.""" distribution_cfg: DistributionCfg | None = None - """The configuration for the output distribution. Default is None, in which case no distribution is used.""" + """The configuration for the output distribution. Defaults to None, in which case no distribution is used.""" @configclass class DistributionCfg: @@ -79,14 +79,14 @@ class HeteroscedasticGaussianDistributionCfg(GaussianDistributionCfg): """ noise_std_type: Literal["scalar", "log"] = "scalar" - """The type of noise standard deviation for the model. Default is scalar. + """The type of noise standard deviation for the model. Defaults to scalar. For rsl-rl >= 5.0.0, this configuration is is deprecated. Please use `distribution_cfg` instead and use the `std_type` field of the distribution configuration to specify the type of noise standard deviation. """ state_dependent_std: bool = False - """Whether to use state-dependent standard deviation for the policy. Default is False. + """Whether to use state-dependent standard deviation for the policy. Defaults to False. For rsl-rl >= 5.0.0, this configuration is is deprecated. Please use `distribution_cfg` instead and use the `HeteroscedasticGaussianDistributionCfg` if state-dependent standard deviation is desired. @@ -98,7 +98,7 @@ class RslRlRNNModelCfg(RslRlMLPModelCfg): """Configuration for RNN model.""" class_name: str = "RNNModel" - """The model class name. Default is RNNModel.""" + """The model class name. Defaults to RNNModel.""" rnn_type: str = MISSING """The type of RNN to use. Either "lstm" or "gru".""" @@ -115,7 +115,7 @@ class RslRlCNNModelCfg(RslRlMLPModelCfg): """Configuration for CNN model.""" class_name: str = "CNNModel" - """The model class name. Default is CNNModel.""" + """The model class name. Defaults to CNNModel.""" @configclass class CNNCfg: @@ -126,28 +126,28 @@ class CNNCfg: """The kernel size for the CNN.""" stride: int | tuple[int] | list[int] = 1 - """The stride for the CNN.""" + """The stride for the CNN. Defaults to 1.""" dilation: int | tuple[int] | list[int] = 1 - """The dilation for the CNN.""" + """The dilation for the CNN. Defaults to 1.""" padding: Literal["none", "zeros", "reflect", "replicate", "circular"] = "none" - """The padding for the CNN.""" + """The padding for the CNN. Defaults to none.""" norm: Literal["none", "batch", "layer"] | tuple[str] | list[str] = "none" - """The normalization for the CNN.""" + """The normalization for the CNN. Defaults to none.""" activation: str = MISSING """The activation function for the CNN.""" max_pool: bool | tuple[bool] | list[bool] = False - """Whether to use max pooling for the CNN.""" + """Whether to use max pooling for the CNN. Defaults to False.""" global_pool: Literal["none", "max", "avg"] = "none" - """The global pooling for the CNN.""" + """The global pooling for the CNN. Defaults to none.""" flatten: bool = True - """Whether to flatten the output of the CNN.""" + """Whether to flatten the output of the CNN. Defaults to True.""" cnn_cfg: CNNCfg = MISSING """The configuration for the CNN(s).""" @@ -163,7 +163,7 @@ class RslRlPpoAlgorithmCfg: """Configuration for the PPO algorithm.""" class_name: str = "PPO" - """The algorithm class name. Default is PPO.""" + """The algorithm class name. Defaults to PPO.""" num_learning_epochs: int = MISSING """The number of learning epochs per update.""" @@ -193,7 +193,7 @@ class RslRlPpoAlgorithmCfg: """The maximum gradient norm.""" optimizer: Literal["adam", "adamw", "sgd", "rmsprop"] = "adam" - """The optimizer to use.""" + """The optimizer to use. Defaults to adam.""" value_loss_coef: float = MISSING """The coefficient for the value loss.""" @@ -205,7 +205,7 @@ class RslRlPpoAlgorithmCfg: """The clipping parameter for the policy.""" normalize_advantage_per_mini_batch: bool = False - """Whether to normalize the advantage per mini-batch. Default is False. + """Whether to normalize the advantage per mini-batch. Defaults to False. If True, the advantage is normalized over the mini-batches only. Otherwise, the advantage is normalized over the entire collected trajectories. @@ -215,10 +215,10 @@ class RslRlPpoAlgorithmCfg: """Whether to share the CNN networks between actor and critic, in case CNNModels are used. Defaults to False.""" rnd_cfg: RslRlRndCfg | None = None - """The RND configuration. Default is None, in which case RND is not used.""" + """The RND configuration. Defaults to None, in which case RND is not used.""" symmetry_cfg: RslRlSymmetryCfg | None = None - """The symmetry configuration. Default is None, in which case symmetry is not used.""" + """The symmetry configuration. Defaults to None, in which case symmetry is not used.""" ######################### @@ -231,10 +231,10 @@ class RslRlBaseRunnerCfg: """Base configuration of the runner.""" seed: int = 42 - """The seed for the experiment. Default is 42.""" + """The seed for the experiment. Defaults to 42.""" device: str = "cuda:0" - """The device for the rl-agent. Default is cuda:0.""" + """The device for the rl-agent. Defaults to cuda:0.""" num_steps_per_env: int = MISSING """The number of steps per environment per update.""" @@ -288,7 +288,7 @@ class RslRlBaseRunnerCfg: """The experiment name.""" run_name: str = "" - """The run name. Default is empty string. + """The run name. Defaults to empty string. The name of the run directory is typically the time-stamp at execution. If the run name is not empty, then it is appended to the run directory's name, i.e. the logging directory's name will become @@ -296,28 +296,28 @@ class RslRlBaseRunnerCfg: """ logger: Literal["tensorboard", "neptune", "wandb"] = "tensorboard" - """The logger to use. Default is tensorboard.""" + """The logger to use. Defaults to tensorboard.""" neptune_project: str = "isaaclab" - """The neptune project name. Default is "isaaclab".""" + """The neptune project name. Defaults to "isaaclab".""" wandb_project: str = "isaaclab" - """The wandb project name. Default is "isaaclab".""" + """The wandb project name. Defaults to "isaaclab".""" resume: bool = False - """Whether to resume a previous training. Default is False. + """Whether to resume a previous training. Defaults to False. This flag will be ignored for distillation. """ load_run: str = ".*" - """The run directory to load. Default is ".*" (all). + """The run directory to load. Defaults to ".*" (all). If regex expression, the latest (alphabetical order) matching run will be loaded. """ load_checkpoint: str = "model_.*.pt" - """The checkpoint file to load. Default is ``"model_.*.pt"`` (all). + """The checkpoint file to load. Defaults to ``"model_.*.pt"`` (all). If regex expression, the latest (alphabetical order) matching file will be loaded. """ @@ -328,7 +328,7 @@ class RslRlOnPolicyRunnerCfg(RslRlBaseRunnerCfg): """Configuration of the runner for on-policy algorithms.""" class_name: str = "OnPolicyRunner" - """The runner class name. Default is OnPolicyRunner.""" + """The runner class name. Defaults to OnPolicyRunner.""" actor: RslRlMLPModelCfg = MISSING """The actor configuration.""" @@ -360,16 +360,16 @@ class RslRlPpoActorCriticCfg: """ class_name: str = "ActorCritic" - """The policy class name. Default is ActorCritic.""" + """The policy class name. Defaults to ActorCritic.""" init_noise_std: float = MISSING """The initial noise standard deviation for the policy.""" noise_std_type: Literal["scalar", "log"] = "scalar" - """The type of noise standard deviation for the policy. Default is scalar.""" + """The type of noise standard deviation for the policy. Defaults to scalar.""" state_dependent_std: bool = False - """Whether to use state-dependent standard deviation for the policy. Default is False.""" + """Whether to use state-dependent standard deviation for the policy. Defaults to False.""" actor_obs_normalization: bool = MISSING """Whether to normalize the observation for the actor network.""" @@ -395,7 +395,7 @@ class RslRlPpoActorCriticRecurrentCfg(RslRlPpoActorCriticCfg): """ class_name: str = "ActorCriticRecurrent" - """The policy class name. Default is ActorCriticRecurrent.""" + """The policy class name. Defaults to ActorCriticRecurrent.""" rnn_type: str = MISSING """The type of RNN to use. Either "lstm" or "gru".""" diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/rnd_cfg.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/rnd_cfg.py index 0cc698dc8813..ed8f41e8d55a 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/rnd_cfg.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/rnd_cfg.py @@ -20,7 +20,7 @@ class WeightScheduleCfg: """Configuration for the weight schedule.""" mode: str = "constant" - """The type of weight schedule. Default is "constant".""" + """The type of weight schedule. Defaults to "constant".""" @configclass class LinearWeightScheduleCfg(WeightScheduleCfg): @@ -31,6 +31,7 @@ class LinearWeightScheduleCfg(WeightScheduleCfg): """ mode: str = "linear" + """The type of weight schedule. Defaults to "linear".""" final_value: float = MISSING """The final value of the weight parameter.""" @@ -55,6 +56,7 @@ class StepWeightScheduleCfg(WeightScheduleCfg): """ mode: str = "step" + """The type of weight schedule. Defaults to "step".""" final_step: int = MISSING """The final step of the weight schedule. @@ -66,34 +68,34 @@ class StepWeightScheduleCfg(WeightScheduleCfg): """The final value of the weight parameter.""" weight: float = 0.0 - """The weight for the RND reward (also known as intrinsic reward). Default is 0.0. + """The weight for the RND reward (also known as intrinsic reward). Defaults to 0.0. Similar to other reward terms, the RND reward is scaled by this weight. """ weight_schedule: WeightScheduleCfg | None = None - """The weight schedule for the RND reward. Default is None, which means the weight is constant.""" + """The weight schedule for the RND reward. Defaults to None, which means the weight is constant.""" reward_normalization: bool = False - """Whether to normalize the RND reward. Default is False.""" + """Whether to normalize the RND reward. Defaults to False.""" state_normalization: bool = False - """Whether to normalize the RND state. Default is False.""" + """Whether to normalize the RND state. Defaults to False.""" learning_rate: float = 1e-3 - """The learning rate for the RND module. Default is 1e-3.""" + """The learning rate for the RND module. Defaults to 1e-3.""" num_outputs: int = 1 - """The number of outputs for the RND module. Default is 1.""" + """The number of outputs for the RND module. Defaults to 1.""" predictor_hidden_dims: list[int] = [-1] - """The hidden dimensions for the RND predictor network. Default is [-1]. + """The hidden dimensions for the RND predictor network. Defaults to [-1]. If the list contains -1, then the hidden dimensions are the same as the input dimensions. """ target_hidden_dims: list[int] = [-1] - """The hidden dimensions for the RND target network. Default is [-1]. + """The hidden dimensions for the RND target network. Defaults to [-1]. If the list contains -1, then the hidden dimensions are the same as the input dimensions. """ diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/symmetry_cfg.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/symmetry_cfg.py index 8f60c7430686..535308c2c348 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/symmetry_cfg.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/symmetry_cfg.py @@ -26,10 +26,10 @@ class RslRlSymmetryCfg: """ use_data_augmentation: bool = False - """Whether to use symmetry-based data augmentation. Default is False.""" + """Whether to use symmetry-based data augmentation. Defaults to False.""" use_mirror_loss: bool = False - """Whether to use the symmetry-augmentation loss. Default is False.""" + """Whether to use the symmetry-augmentation loss. Defaults to False.""" data_augmentation_func: callable = MISSING """The symmetry data augmentation function. @@ -48,4 +48,4 @@ class RslRlSymmetryCfg: """ mirror_loss_coeff: float = 0.0 - """The weight for the symmetry-mirror loss. Default is 0.0.""" + """The weight for the symmetry-mirror loss. Defaults to 0.0.""" diff --git a/source/isaaclab_rl/isaaclab_rl/rsl_rl/vecenv_wrapper.py b/source/isaaclab_rl/isaaclab_rl/rsl_rl/vecenv_wrapper.py index dde20f2bb165..5a3fdcd47164 100644 --- a/source/isaaclab_rl/isaaclab_rl/rsl_rl/vecenv_wrapper.py +++ b/source/isaaclab_rl/isaaclab_rl/rsl_rl/vecenv_wrapper.py @@ -2,13 +2,24 @@ # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +import contextlib +from typing import TYPE_CHECKING import gymnasium as gym import torch from rsl_rl.env import VecEnv from tensordict import TensorDict -from isaaclab.envs import DirectRLEnv, ManagerBasedRLEnv +if TYPE_CHECKING: + from isaaclab.envs import ( + DirectRLEnv, + ManagerBasedRLEnv, + ) + + with contextlib.suppress(ImportError): + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp class RslRlVecEnvWrapper(VecEnv): @@ -36,11 +47,29 @@ def __init__(self, env: ManagerBasedRLEnv | DirectRLEnv, clip_actions: float | N Raises: ValueError: When the environment is not an instance of :class:`ManagerBasedRLEnv` or :class:`DirectRLEnv`. """ - # check that input is valid - if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv): + # NOTE: import here (not at module level) to avoid loading heavy env classes before Isaac Sim is initialized. + from isaaclab.envs import DirectRLEnv, ManagerBasedEnv, ManagerBasedRLEnv + + try: + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedEnvWarp, ManagerBasedRLEnvWarp + except ImportError: + DirectRLEnvWarp = None + ManagerBasedEnvWarp = None + ManagerBasedRLEnvWarp = None + + allowed_types = (ManagerBasedRLEnv, ManagerBasedEnv, DirectRLEnv) + if DirectRLEnvWarp is not None: + allowed_types += (DirectRLEnvWarp,) + if ManagerBasedEnvWarp is not None: + allowed_types += (ManagerBasedEnvWarp,) + if ManagerBasedRLEnvWarp is not None: + allowed_types += (ManagerBasedRLEnvWarp,) + + if not isinstance(env.unwrapped, allowed_types): raise ValueError( - "The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type:" + "The environment must be inherited from ManagerBasedRLEnv / DirectRLEnv / DirectRLEnvWarp /" + " ManagerBasedRLEnvWarp. Environment type:" f" {type(env)}" ) @@ -103,7 +132,7 @@ def class_name(cls) -> str: return cls.__name__ @property - def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv: + def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv | DirectRLEnvWarp | ManagerBasedRLEnvWarp: """Returns the base environment of the wrapper. This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers. diff --git a/source/isaaclab_rl/isaaclab_rl/sb3.py b/source/isaaclab_rl/isaaclab_rl/sb3.py index 2177bc6252c4..90ca0fd3b854 100644 --- a/source/isaaclab_rl/isaaclab_rl/sb3.py +++ b/source/isaaclab_rl/isaaclab_rl/sb3.py @@ -18,8 +18,9 @@ # needed to import for allowing type-hinting: torch.Tensor | dict[str, torch.Tensor] from __future__ import annotations +import contextlib import warnings -from typing import Any +from typing import TYPE_CHECKING, Any import gymnasium as gym import numpy as np @@ -29,7 +30,11 @@ from stable_baselines3.common.utils import constant_fn from stable_baselines3.common.vec_env.base_vec_env import VecEnv, VecEnvObs, VecEnvStepReturn -from isaaclab.envs import DirectRLEnv, ManagerBasedRLEnv +if TYPE_CHECKING: + from isaaclab.envs import DirectRLEnv, ManagerBasedRLEnv + + with contextlib.suppress(ImportError): + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp # remove SB3 warnings because PPO with bigger net actually benefits from GPU warnings.filterwarnings("ignore", message="You are trying to run PPO on the GPU") @@ -146,9 +151,25 @@ def __init__(self, env: ManagerBasedRLEnv | DirectRLEnv, fast_variant: bool = Tr ValueError: When the environment is not an instance of :class:`ManagerBasedRLEnv` or :class:`DirectRLEnv`. """ # check that input is valid - if not isinstance(env.unwrapped, ManagerBasedRLEnv) and not isinstance(env.unwrapped, DirectRLEnv): + # NOTE: import here (not at module level) to avoid loading heavy env classes before Isaac Sim is initialized. + from isaaclab.envs import DirectRLEnv, ManagerBasedRLEnv + + try: + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp + except ImportError: + DirectRLEnvWarp = None + ManagerBasedRLEnvWarp = None + + allowed_types = (ManagerBasedRLEnv, DirectRLEnv) + if DirectRLEnvWarp is not None: + allowed_types += (DirectRLEnvWarp,) + if ManagerBasedRLEnvWarp is not None: + allowed_types += (ManagerBasedRLEnvWarp,) + + if not isinstance(env.unwrapped, allowed_types): raise ValueError( - "The environment must be inherited from ManagerBasedRLEnv or DirectRLEnv. Environment type:" + "The environment must be inherited from ManagerBasedRLEnv / DirectRLEnv / DirectRLEnvWarp /" + " ManagerBasedRLEnvWarp. Environment type:" f" {type(env)}" ) # initialize the wrapper @@ -182,7 +203,7 @@ def class_name(cls) -> str: return cls.__name__ @property - def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv: + def unwrapped(self) -> ManagerBasedRLEnv | DirectRLEnv | DirectRLEnvWarp | ManagerBasedRLEnvWarp: """Returns the base environment of the wrapper. This will be the bare :class:`gymnasium.Env` environment, underneath all layers of wrappers. diff --git a/source/isaaclab_rl/isaaclab_rl/skrl.py b/source/isaaclab_rl/isaaclab_rl/skrl.py index 5aba121523f2..ebd66697fbb1 100644 --- a/source/isaaclab_rl/isaaclab_rl/skrl.py +++ b/source/isaaclab_rl/isaaclab_rl/skrl.py @@ -27,9 +27,14 @@ # needed to import for type hinting: Agent | list[Agent] from __future__ import annotations -from typing import Literal +from typing import TYPE_CHECKING, Literal -from isaaclab.envs import DirectMARLEnv, DirectRLEnv, ManagerBasedRLEnv +if TYPE_CHECKING: + from isaaclab.envs import ( + DirectMARLEnv, + DirectRLEnv, + ManagerBasedRLEnv, + ) """ Vectorized environment wrapper. @@ -38,7 +43,7 @@ def SkrlVecEnvWrapper( env: ManagerBasedRLEnv | DirectRLEnv | DirectMARLEnv, - ml_framework: Literal["torch", "jax", "jax-numpy"] = "torch", + ml_framework: Literal["torch", "jax", "warp"] = "torch", wrapper: Literal["auto", "isaaclab", "isaaclab-single-agent", "isaaclab-multi-agent"] = "isaaclab", ): """Wraps around Isaac Lab environment for skrl. @@ -62,14 +67,25 @@ def SkrlVecEnvWrapper( https://skrl.readthedocs.io/en/latest/api/envs/wrapping.html """ # check that input is valid - if ( - not isinstance(env.unwrapped, ManagerBasedRLEnv) - and not isinstance(env.unwrapped, DirectRLEnv) - and not isinstance(env.unwrapped, DirectMARLEnv) - ): + # NOTE: import here (not at module level) to avoid loading heavy env classes before Isaac Sim is initialized. + from isaaclab.envs import DirectMARLEnv, DirectRLEnv, ManagerBasedRLEnv + + try: + from isaaclab_experimental.envs import DirectRLEnvWarp, ManagerBasedRLEnvWarp + except ImportError: + DirectRLEnvWarp = None + ManagerBasedRLEnvWarp = None + + allowed_types = (ManagerBasedRLEnv, DirectRLEnv, DirectMARLEnv) + if DirectRLEnvWarp is not None: + allowed_types += (DirectRLEnvWarp,) + if ManagerBasedRLEnvWarp is not None: + allowed_types += (ManagerBasedRLEnvWarp,) + + if not isinstance(env.unwrapped, allowed_types): raise ValueError( - "The environment must be inherited from ManagerBasedRLEnv, DirectRLEnv or DirectMARLEnv. Environment type:" - f" {type(env)}" + "The environment must be inherited from ManagerBasedRLEnv, DirectRLEnv, DirectMARLEnv," + f" DirectRLEnvWarp or ManagerBasedRLEnvWarp. Environment type: {type(env)}" ) # import statements according to the ML framework @@ -77,9 +93,11 @@ def SkrlVecEnvWrapper( from skrl.envs.wrappers.torch import wrap_env elif ml_framework.startswith("jax"): from skrl.envs.wrappers.jax import wrap_env + elif ml_framework.startswith("warp"): + from skrl.envs.wrappers.warp import wrap_env else: - ValueError( - f"Invalid ML framework for skrl: {ml_framework}. Available options are: 'torch', 'jax' or 'jax-numpy'" + raise ValueError( + f"Invalid ML framework for skrl: {ml_framework}. Available options are: 'torch', 'jax', 'warp'" ) # wrap and return the environment diff --git a/source/isaaclab_rl/isaaclab_rl/utils/pretrained_checkpoint.py b/source/isaaclab_rl/isaaclab_rl/utils/pretrained_checkpoint.py index c2ada0e9b5e8..3e26d8c4d4f0 100644 --- a/source/isaaclab_rl/isaaclab_rl/utils/pretrained_checkpoint.py +++ b/source/isaaclab_rl/isaaclab_rl/utils/pretrained_checkpoint.py @@ -9,18 +9,11 @@ import json import os -import carb.settings - -from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, NUCLEUS_ASSET_ROOT_DIR, retrieve_file_path from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry # noqa: F401 -PRETRAINED_CHECKPOINTS_ASSET_ROOT_DIR = carb.settings.get_settings().get( - "/persistent/isaaclab/asset_root/pretrained_checkpoints" -) -"""Path to the root directory on the Nucleus Server.""" - -PRETRAINED_CHECKPOINT_PATH = str(PRETRAINED_CHECKPOINTS_ASSET_ROOT_DIR) + "/Isaac/IsaacLab/PretrainedCheckpoints" +PRETRAINED_CHECKPOINT_PATH = NUCLEUS_ASSET_ROOT_DIR + "/IsaacLab/PretrainedCheckpoints" """URL for where we store all the pre-trained checkpoints""" WORKFLOWS = ["rl_games", "rsl_rl", "sb3", "skrl"] @@ -50,8 +43,8 @@ def has_pretrained_checkpoints_asset_root_dir() -> bool: - """Returns True if and only if /persistent/isaaclab/asset_root/pretrained_checkpoints exists""" - return PRETRAINED_CHECKPOINTS_ASSET_ROOT_DIR is not None + """Returns True if and only if the asset root directory is configured in the app kit file.""" + return bool(NUCLEUS_ASSET_ROOT_DIR) def get_log_root_path(workflow: str, task_name: str) -> str: diff --git a/source/isaaclab_rl/setup.py b/source/isaaclab_rl/setup.py index f1cf55d3222c..53255aa55b4f 100644 --- a/source/isaaclab_rl/setup.py +++ b/source/isaaclab_rl/setup.py @@ -19,21 +19,20 @@ # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ # generic - "numpy<2", - "torch>=2.7", - "torchvision>=0.14.1", # ensure compatibility with torch 1.13.1 + "numpy", + "torch>=2.10", + "torchvision>=0.25.0", # ensure compatibility with torch 2.10.0 "protobuf>=4.25.8,!=5.26.0", # configuration management "hydra-core", # data collection - "h5py", + "h5py==3.15.1", # basic logger "tensorboard", # video recording "moviepy", - # make sure this is consistent with isaac sim version - "pillow==11.3.0", "packaging<24", + "tqdm==4.67.1", # previous version was causing sys errors ] PYTORCH_INDEX_URL = ["https://download.pytorch.org/whl/cu128"] @@ -41,10 +40,12 @@ # Extra dependencies for RL agents EXTRAS_REQUIRE = { "sb3": ["stable-baselines3>=2.6", "tqdm", "rich"], # tqdm/rich for progress bar - "skrl": ["skrl>=1.4.3"], + "skrl": ["skrl>=2.0.0"], "rl-games": [ + "aiohttp==3.13.3", "rl-games @ git+https://github.com/isaac-sim/rl_games.git@python3.11", "gym", + "standard-distutils", ], # rl-games still needs gym :( "rsl-rl": ["rsl-rl-lib==5.0.1", "onnxscript>=0.5"], # linux aarch 64 requires manual onnxscript installation } @@ -67,18 +68,16 @@ description=EXTENSION_TOML_DATA["package"]["description"], keywords=EXTENSION_TOML_DATA["package"]["keywords"], include_package_data=True, - python_requires=">=3.10", + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", install_requires=INSTALL_REQUIRES, dependency_links=PYTORCH_INDEX_URL, extras_require=EXTRAS_REQUIRE, packages=["isaaclab_rl"], classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py new file mode 100644 index 000000000000..ecc155c666e5 --- /dev/null +++ b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py @@ -0,0 +1,194 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Direct-env export integration test with subprocess-side gym re-registration.""" + +from __future__ import annotations + +import argparse +import importlib +import importlib.util +import os +import runpy +import shutil +import subprocess +import sys +import tempfile +import types +from pathlib import Path + +import gymnasium as gym +import pytest + +_THIS_FILE = Path(__file__).resolve() +_REPO_ROOT = str(_THIS_FILE.parents[4]) +_EXPORT_SCRIPT = os.path.join(_REPO_ROOT, "scripts", "reinforcement_learning", "leapp", "rsl_rl", "export.py") +_THIS_SCRIPT = str(_THIS_FILE) +_TASK_NAME = "Isaac-Velocity-Flat-Anymal-C-Direct-v0" +_PACKAGE_NAME = "_isaaclab_test_tutorial_anymal_c" +_MODULE_NAME = f"{_PACKAGE_NAME}.anymal_c_env" +_CFG_MODULE_NAME = f"{_PACKAGE_NAME}.anymal_c_env_cfg" +_RUNTIME_MODULE_NAME = "_isaaclab_test_tutorial_anymal_c_runtime" +_TUTORIAL_ENV_PATH = Path(_REPO_ROOT) / "scripts" / "tutorials" / "06_deploy" / "anymal_c_env.py" + + +def _export_command(task_name: str, export_dir: str) -> list[str]: + """Build a subprocess command that runs this file in helper mode.""" + return [ + sys.executable, + _THIS_SCRIPT, + "--task", + task_name, + "--use_pretrained_checkpoint", + "--export_save_path", + export_dir, + "--disable_graph_visualization", + "--headless", + ] + + +def _artifact_dir(export_dir: str, task_name: str) -> str: + """Return the LEAPP artifact directory for the exported task.""" + return os.path.join(export_dir, task_name) + + +def _load_tutorial_env_class(): + """Load the tutorial env through a synthetic package for relative imports.""" + module = sys.modules.get(_MODULE_NAME) + if module is not None: + return module.AnymalCEnv + + package = types.ModuleType(_PACKAGE_NAME) + package.__path__ = [] # type: ignore[attr-defined] + sys.modules.setdefault(_PACKAGE_NAME, package) + + cfg_module = importlib.import_module("isaaclab_tasks.direct.anymal_c.anymal_c_env_cfg") + sys.modules[_CFG_MODULE_NAME] = cfg_module + + spec = importlib.util.spec_from_file_location(_MODULE_NAME, _TUTORIAL_ENV_PATH) + if spec is None or spec.loader is None: + raise ImportError(f"Could not create module spec for tutorial env: {_TUTORIAL_ENV_PATH}") + + module = importlib.util.module_from_spec(spec) + sys.modules[_MODULE_NAME] = module + spec.loader.exec_module(module) + return module.AnymalCEnv + + +class _LazyTutorialEnvModule(types.ModuleType): + """Resolve the tutorial env class only when gym imports the entrypoint.""" + + def __getattr__(self, name: str): + if name != "AnymalCEnv": + raise AttributeError(name) + env_class = _load_tutorial_env_class() + setattr(self, name, env_class) + return env_class + + +def _install_lazy_runtime_module() -> str: + """Install a lazy module so gym can defer tutorial env imports.""" + module = sys.modules.get(_RUNTIME_MODULE_NAME) + if module is None: + sys.modules[_RUNTIME_MODULE_NAME] = _LazyTutorialEnvModule(_RUNTIME_MODULE_NAME) + return _RUNTIME_MODULE_NAME + + +def _reregister_task(task_name: str) -> None: + """Override the direct task registration to point at the tutorial env.""" + import isaaclab_tasks.direct.anymal_c # noqa: F401 + + original_spec = gym.spec(task_name) + original_kwargs = dict(original_spec.kwargs) + runtime_module_name = _install_lazy_runtime_module() + + gym.registry.pop(task_name, None) + gym.register( + id=task_name, + entry_point=f"{runtime_module_name}:AnymalCEnv", + disable_env_checker=original_spec.disable_env_checker, + kwargs=original_kwargs, + ) + + +def _run_export_subprocess_entrypoint() -> None: + """Run export.py after re-registering the direct task in-process.""" + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--task", required=True) + args, remaining_args = parser.parse_known_args() + + _reregister_task(args.task) + export_script_dir = os.path.dirname(_EXPORT_SCRIPT) + sys.argv = [_EXPORT_SCRIPT, "--task", args.task, *remaining_args] + if export_script_dir not in sys.path: + sys.path.insert(0, export_script_dir) + runpy.run_path(_EXPORT_SCRIPT, run_name="__main__") + + +def _build_failure_context(result: subprocess.CompletedProcess[str], artifact_dir: str) -> str: + """Return debug context for subprocess and export artifacts.""" + export_dir = os.path.dirname(artifact_dir) + log_txt_path = os.path.join(artifact_dir, "log.txt") + leapp_tail = "" + if os.path.isfile(log_txt_path): + with open(log_txt_path) as file: + last_lines = file.readlines()[-50:] + leapp_tail = f"\n--- leapp log.txt (last 50 lines) ---\n{''.join(last_lines)}" + + try: + export_dir_contents = sorted(os.listdir(export_dir)) + except FileNotFoundError: + export_dir_contents = [""] + + try: + artifact_dir_contents = sorted(os.listdir(artifact_dir)) + except FileNotFoundError: + artifact_dir_contents = [""] + + return ( + f"--- export_dir ---\n{export_dir}\n" + f"--- export_dir contents ---\n{export_dir_contents}\n" + f"--- artifact_dir ---\n{artifact_dir}\n" + f"--- artifact_dir contents ---\n{artifact_dir_contents}\n" + f"--- stdout ---\n{result.stdout[-3000:]}\n" + f"--- stderr ---\n{result.stderr[-3000:]}" + f"{leapp_tail}" + ) + + +def test_direct_env_export_flow(): + """Run export.py against the tutorial direct env and assert artifacts are created.""" + export_dir = tempfile.mkdtemp(prefix="isaaclab-direct-export-") + artifact_dir = _artifact_dir(export_dir, _TASK_NAME) + shutil.rmtree(artifact_dir, ignore_errors=True) + + result = subprocess.run( + _export_command(_TASK_NAME, export_dir), + cwd=_REPO_ROOT, + capture_output=True, + text=True, + timeout=6000, + ) + + if "pre-trained checkpoint is currently unavailable" in result.stdout: + pytest.skip(f"No pretrained checkpoint available for {_TASK_NAME}") + + if result.returncode != 0: + pytest.fail(f"export.py exited with code {result.returncode}.\n{_build_failure_context(result, artifact_dir)}") + + onnx_path = os.path.join(artifact_dir, f"{_TASK_NAME}.onnx") + yaml_path = os.path.join(artifact_dir, f"{_TASK_NAME}.yaml") + log_path = os.path.join(artifact_dir, "log.txt") + + if not os.path.isfile(onnx_path): + pytest.fail(f"Missing .onnx export at {onnx_path}.\n{_build_failure_context(result, artifact_dir)}") + if not os.path.isfile(yaml_path): + pytest.fail(f"Missing .yaml export at {yaml_path}.\n{_build_failure_context(result, artifact_dir)}") + if not os.path.isfile(log_path): + pytest.fail(f"Missing log.txt at {log_path}.\n{_build_failure_context(result, artifact_dir)}") + + +if __name__ == "__main__": + _run_export_subprocess_entrypoint() diff --git a/source/isaaclab_rl/test/export/test_rsl_rl_export_flow.py b/source/isaaclab_rl/test/export/test_rsl_rl_export_flow.py new file mode 100644 index 000000000000..5606be484286 --- /dev/null +++ b/source/isaaclab_rl/test/export/test_rsl_rl_export_flow.py @@ -0,0 +1,151 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Export pipeline integration tests. + +Each test calls ``export.py`` as a subprocess so that Isaac Sim's AppLauncher +is fully isolated per task and the export logic is not duplicated here. +The export artifacts land in the default checkpoint directory; only the +per-task export subdirectory is removed after each test. +""" + +import os +import shutil +import subprocess + +import pytest + +# Root of the repository (three levels up from this file). +_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", "..")) +_EXPORT_SCRIPT = os.path.join("scripts", "reinforcement_learning", "leapp", "rsl_rl", "export.py") + + +# Tasks with confirmed pretrained checkpoints (Direct and no-checkpoint tasks excluded). +TASKS = [ + # Classic + "Isaac-Ant-v0", + "Isaac-Cartpole-v0", + # Navigation + "Isaac-Navigation-Flat-Anymal-C-v0", + "Isaac-Navigation-Flat-Anymal-C-Play-v0", + # Locomotion Velocity + "Isaac-Velocity-Flat-Anymal-B-v0", + "Isaac-Velocity-Flat-Anymal-B-Play-v0", + "Isaac-Velocity-Rough-Anymal-B-v0", + "Isaac-Velocity-Rough-Anymal-B-Play-v0", + "Isaac-Velocity-Flat-Anymal-C-v0", + "Isaac-Velocity-Flat-Anymal-C-Play-v0", + "Isaac-Velocity-Rough-Anymal-C-v0", + "Isaac-Velocity-Rough-Anymal-C-Play-v0", + "Isaac-Velocity-Flat-Anymal-D-v0", + "Isaac-Velocity-Flat-Anymal-D-Play-v0", + "Isaac-Velocity-Rough-Anymal-D-v0", + "Isaac-Velocity-Rough-Anymal-D-Play-v0", + "Isaac-Velocity-Flat-Cassie-v0", + "Isaac-Velocity-Flat-Cassie-Play-v0", + "Isaac-Velocity-Rough-Cassie-v0", + "Isaac-Velocity-Rough-Cassie-Play-v0", + "Isaac-Velocity-Flat-G1-v0", + "Isaac-Velocity-Flat-G1-Play-v0", + "Isaac-Velocity-Rough-G1-v0", + "Isaac-Velocity-Rough-G1-Play-v0", + "Isaac-Velocity-Flat-H1-v0", + "Isaac-Velocity-Flat-H1-Play-v0", + "Isaac-Velocity-Rough-H1-v0", + "Isaac-Velocity-Rough-H1-Play-v0", + "Isaac-Velocity-Flat-Spot-v0", + "Isaac-Velocity-Flat-Spot-Play-v0", + "Isaac-Velocity-Flat-Unitree-A1-v0", + "Isaac-Velocity-Flat-Unitree-A1-Play-v0", + "Isaac-Velocity-Rough-Unitree-A1-v0", + "Isaac-Velocity-Rough-Unitree-A1-Play-v0", + "Isaac-Velocity-Flat-Unitree-Go1-v0", + "Isaac-Velocity-Flat-Unitree-Go1-Play-v0", + "Isaac-Velocity-Rough-Unitree-Go1-v0", + "Isaac-Velocity-Rough-Unitree-Go1-Play-v0", + "Isaac-Velocity-Flat-Unitree-Go2-v0", + "Isaac-Velocity-Flat-Unitree-Go2-Play-v0", + "Isaac-Velocity-Rough-Unitree-Go2-v0", + "Isaac-Velocity-Rough-Unitree-Go2-Play-v0", + # Manipulation Reach + "Isaac-Reach-Franka-v0", + "Isaac-Reach-Franka-Play-v0", + "Isaac-Reach-UR10-v0", + "Isaac-Reach-UR10-Play-v0", + # Manipulation Lift + "Isaac-Lift-Cube-Franka-v0", + "Isaac-Lift-Cube-Franka-Play-v0", + # Manipulation Cabinet + "Isaac-Open-Drawer-Franka-v0", + "Isaac-Open-Drawer-Franka-Play-v0", + # Dexsuite + "Isaac-Dexsuite-Kuka-Allegro-Reorient-v0", + "Isaac-Dexsuite-Kuka-Allegro-Reorient-Play-v0", + "Isaac-Dexsuite-Kuka-Allegro-Lift-v0", + "Isaac-Dexsuite-Kuka-Allegro-Lift-Play-v0", +] + + +def _export_dir(task_name: str) -> str: + """Return the directory where export.py writes artifacts for *task_name*.""" + train_task = task_name.replace("-Play", "") + return os.path.join(_REPO_ROOT, ".pretrained_checkpoints", "rsl_rl", train_task, task_name) + + +@pytest.mark.parametrize("task_name", TASKS) +def test_export_flow(task_name): + """Run export.py for *task_name* and assert the expected artifacts are created.""" + export_dir = _export_dir(task_name) + + try: + result = subprocess.run( + [ + "./isaaclab.sh", + "-p", + _EXPORT_SCRIPT, + "--task", + task_name, + "--use_pretrained_checkpoint", + "--disable_graph_visualization", + "--headless", + ], + cwd=_REPO_ROOT, + capture_output=True, + text=True, + timeout=600, + ) + + # Gracefully skip tasks whose checkpoint isn't published yet + if "pre-trained checkpoint is currently unavailable" in result.stdout: + pytest.skip(f"No pretrained checkpoint available for {task_name.replace('-Play', '')}") + + # Skip tasks whose checkpoint was saved with an older rsl_rl architecture + # that does not use the 'actor_state_dict' key expected by the current runner + if "actor_state_dict" in result.stderr: + pytest.skip( + f"{task_name} checkpoint uses an older network architecture incompatible with the current rsl_rl runner" + ) + + # Surface stdout/stderr on failure for easier debugging + if result.returncode != 0: + log_txt_path = os.path.join(export_dir, "log.txt") + leapp_tail = "" + if os.path.isfile(log_txt_path): + with open(log_txt_path) as f: + last_lines = f.readlines()[-50:] + leapp_tail = f"\n--- leapp log.txt (last 50 lines) ---\n{''.join(last_lines)}" + pytest.fail( + f"export.py exited with code {result.returncode}.\n" + f"--- stdout ---\n{result.stdout[-3000:]}\n" + f"--- stderr ---\n{result.stderr[-3000:]}" + f"{leapp_tail}" + ) + + assert os.path.isfile(os.path.join(export_dir, f"{task_name}.onnx")), "Missing .onnx export" + assert os.path.isfile(os.path.join(export_dir, f"{task_name}.yaml")), "Missing .yaml export" + assert os.path.isfile(os.path.join(export_dir, "log.txt")), "Missing log.txt" + + finally: + shutil.rmtree(export_dir, ignore_errors=True) diff --git a/source/isaaclab_rl/test/test_rl_games_wrapper.py b/source/isaaclab_rl/test/test_rl_games_wrapper.py index e7f01a561b99..137bb1240a4a 100644 --- a/source/isaaclab_rl/test/test_rl_games_wrapper.py +++ b/source/isaaclab_rl/test/test_rl_games_wrapper.py @@ -20,9 +20,8 @@ import pytest import torch -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent from isaaclab_rl.rl_games import RlGamesVecEnvWrapper @@ -51,8 +50,7 @@ def registered_tasks(): # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) # print all existing task names print(">>> All registered environments:", registered_tasks) @@ -68,9 +66,9 @@ def test_random_actions(registered_tasks): # Use pytest's subtests print(f">>> Running test for environment: {task_name}") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) try: # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab_rl/test/test_rsl_rl_wrapper.py b/source/isaaclab_rl/test/test_rsl_rl_wrapper.py index 2ae427df2797..01edf9008641 100644 --- a/source/isaaclab_rl/test/test_rsl_rl_wrapper.py +++ b/source/isaaclab_rl/test/test_rsl_rl_wrapper.py @@ -19,9 +19,8 @@ import torch from tensordict import TensorDict -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent from isaaclab_rl.rsl_rl import RslRlVecEnvWrapper @@ -45,8 +44,7 @@ def registered_tasks(): # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) # print all existing task names print(">>> All registered environments:", registered_tasks) @@ -62,9 +60,9 @@ def test_random_actions(registered_tasks): # Use pytest's subtests print(f">>> Running test for environment: {task_name}") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) try: # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) @@ -114,7 +112,7 @@ def test_no_time_outs(registered_tasks): # Use pytest's subtests print(f">>> Running test for environment: {task_name}") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) # change to finite horizon diff --git a/source/isaaclab_rl/test/test_sb3_wrapper.py b/source/isaaclab_rl/test/test_sb3_wrapper.py index be5dc46741e7..22226aad789e 100644 --- a/source/isaaclab_rl/test/test_sb3_wrapper.py +++ b/source/isaaclab_rl/test/test_sb3_wrapper.py @@ -19,9 +19,8 @@ import pytest import torch -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent from isaaclab_rl.sb3 import Sb3VecEnvWrapper @@ -45,8 +44,7 @@ def registered_tasks(): # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) # print all existing task names print(">>> All registered environments:", registered_tasks) @@ -62,9 +60,9 @@ def test_random_actions(registered_tasks): # Use pytest's subtests print(f">>> Running test for environment: {task_name}") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) try: # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab_rl/test/test_skrl_wrapper.py b/source/isaaclab_rl/test/test_skrl_wrapper.py index 25ce35a1c476..b9a173a1a447 100644 --- a/source/isaaclab_rl/test/test_skrl_wrapper.py +++ b/source/isaaclab_rl/test/test_skrl_wrapper.py @@ -18,9 +18,8 @@ import pytest import torch -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent from isaaclab_rl.skrl import SkrlVecEnvWrapper @@ -44,8 +43,7 @@ def registered_tasks(): # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) # print all existing task names print(">>> All registered environments:", registered_tasks) @@ -61,9 +59,9 @@ def test_random_actions(registered_tasks): # Use pytest's subtests print(f">>> Running test for environment: {task_name}") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) try: # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab_tasks/changelog.d/.gitkeep b/source/isaaclab_tasks/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_tasks/changelog.d/antoiner-rename-newton-presets.rst b/source/isaaclab_tasks/changelog.d/antoiner-rename-newton-presets.rst new file mode 100644 index 000000000000..ae311dd1071b --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/antoiner-rename-newton-presets.rst @@ -0,0 +1,25 @@ +Changed +^^^^^^^ + +* **Breaking:** Renamed the Newton-backend solver presets to a ``newton_`` + prefix so they group together in autocomplete and read distinctly from the + Newton backend label, package, and visualizer. The change is shimmed by + deprecation aliases (see ``Deprecated`` below), but workflows that iterate + ``__dataclass_fields__`` directly or treat :exc:`FutureWarning` as an error + will need updates. Migration: rename the field in any + :class:`~isaaclab_tasks.utils.hydra.PresetCfg` subclass and update CLI + invocations (``presets=...`` and ``env.=...``): + + - ``newton`` -> ``newton_mjwarp`` + - ``kamino`` -> ``newton_kamino`` + +Deprecated +^^^^^^^^^^ + +* Deprecated the legacy ``newton`` and ``kamino`` preset names. They still + resolve to ``newton_mjwarp`` and ``newton_kamino`` respectively but emit a + :exc:`FutureWarning` and will be removed in a future release. Update CLI + overrides (``presets=newton`` -> ``presets=newton_mjwarp``; + ``presets=kamino`` -> ``presets=newton_kamino``) and any + :class:`~isaaclab_tasks.utils.hydra.PresetCfg` field declarations + (``newton: NewtonCfg = ...`` -> ``newton_mjwarp: NewtonCfg = ...``). diff --git a/source/isaaclab_tasks/changelog.d/g1-rough-terrain-wip.rst b/source/isaaclab_tasks/changelog.d/g1-rough-terrain-wip.rst new file mode 100644 index 000000000000..9efbf82b8bf6 --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/g1-rough-terrain-wip.rst @@ -0,0 +1,11 @@ +Added +^^^^^ + +* Added Newton rough terrain support for the G1 biped locomotion velocity + env. The only engine-specific change is a ~1.7x ``max_iterations`` preset on + :class:`~isaaclab_tasks.manager_based.locomotion.velocity.config.g1.agents.rsl_rl_ppo_cfg.G1RoughPPORunnerCfg` + (Newton = 5000, PhysX = 3000). PhysX saturates near iter 3000 on both + reward (≈ +18) and episode length (≈ 980) and does not meaningfully + improve further; Newton reaches the same (reward, ep_len) quality at + iter 5000. The iteration budget is bumped rather than tuning physics + or reward terms. diff --git a/source/isaaclab_tasks/changelog.d/huidongc-flaky-mark.skip b/source/isaaclab_tasks/changelog.d/huidongc-flaky-mark.skip new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_tasks/changelog.d/leapp_export_integration.rst b/source/isaaclab_tasks/changelog.d/leapp_export_integration.rst new file mode 100644 index 000000000000..94a128b6416f --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/leapp_export_integration.rst @@ -0,0 +1,5 @@ +Added +^^^^^ + +* Added LEAPP-compatible policy deployment tutorials and tracing-compatible task + observation helpers for exported policy workflows. diff --git a/source/isaaclab_tasks/changelog.d/mtrepte-expand_viz_markers.skip b/source/isaaclab_tasks/changelog.d/mtrepte-expand_viz_markers.skip new file mode 100644 index 000000000000..a23b7c7322b3 --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/mtrepte-expand_viz_markers.skip @@ -0,0 +1 @@ +Marker visualization changes are covered by the isaaclab fragment. diff --git a/source/isaaclab_tasks/changelog.d/pr-5458-merge-develop.rst b/source/isaaclab_tasks/changelog.d/pr-5458-merge-develop.rst new file mode 100644 index 000000000000..cd4dc7e674d4 --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/pr-5458-merge-develop.rst @@ -0,0 +1,10 @@ +Changed +^^^^^^^ + +* Updated classic Ant/Humanoid manager-based environments and direct in-hand + manipulation environments to read body incoming wrenches from + :class:`~isaaclab.sensors.JointWrenchSensor` instead of + ``ArticulationData.body_incoming_joint_wrench_b``. Add a + :class:`~isaaclab.sensors.JointWrenchSensorCfg` to the scene and pass its + :class:`~isaaclab.managers.SceneEntityCfg` as ``sensor_cfg``. The classic + Ant/Humanoid Newton presets now use the same wrench observations as PhysX. diff --git a/source/isaaclab_tasks/changelog.d/rendering-test-flakiness.skip b/source/isaaclab_tasks/changelog.d/rendering-test-flakiness.skip new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_tasks/changelog.d/rwiltz-restore-legacy-teleop.rst b/source/isaaclab_tasks/changelog.d/rwiltz-restore-legacy-teleop.rst new file mode 100644 index 000000000000..59e71ddc3984 --- /dev/null +++ b/source/isaaclab_tasks/changelog.d/rwiltz-restore-legacy-teleop.rst @@ -0,0 +1,8 @@ +Added +^^^^^ + +* Added legacy ``teleop_devices`` configuration (``OpenXRDeviceCfg``, + ``ManusViveCfg``, ``GR1T2RetargeterCfg``) to + :class:`~isaaclab_tasks.manager_based.manipulation.pick_place.pickplace_gr1t2_env_cfg.PickPlaceGR1T2EnvCfg` + alongside the existing ``isaac_teleop`` pipeline, enabling CI validation + via ``--teleop_device=handtracking``. diff --git a/source/isaaclab_tasks/config/extension.toml b/source/isaaclab_tasks/config/extension.toml index 80335e4cf123..273ae57d2cb9 100644 --- a/source/isaaclab_tasks/config/extension.toml +++ b/source/isaaclab_tasks/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.11.14" +version = "1.5.34" # Description title = "Isaac Lab Environments" diff --git a/source/isaaclab_tasks/docs/CHANGELOG.rst b/source/isaaclab_tasks/docs/CHANGELOG.rst index 56940967eed6..2044f807afc5 100644 --- a/source/isaaclab_tasks/docs/CHANGELOG.rst +++ b/source/isaaclab_tasks/docs/CHANGELOG.rst @@ -1,6 +1,598 @@ Changelog --------- +1.5.34 (2026-04-30) +~~~~~~~~~~~~~~~~~~~ +Added +^^^^^ + +* Added Flexiv Rizon 4s gear assembly environment with Grav parallel gripper, including + training, ROS inference, and deterministic play/debug configurations. +* Added EE-grasp keypoint reward terms (``keypoint_ee_grasp_error``, ``keypoint_ee_grasp_error_exp``) + for tracking end-effector alignment with the grasp-corrected pose. +* Added quaternion noise model (``ResetSampledQuaternionNoiseModelCfg``) for Rizon 4s + gear shaft orientation observations. + +Fixed +^^^^^ + +* Fixed quaternion w-component indexing in gear assembly observation functions to match XYZW convention. + + +1.5.33 (2026-04-30) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Re-enabled ``add_base_mass`` randomization on H1 and Cassie in their + rough-terrain configs (previously ``= None`` per the pre-existing biped + convention). H1 uses the shared log-uniform scale default from + ``EventsCfg``; Cassie overrides to ``(1.0, 1.25)`` asymmetric heavier-bias + (never lighter than nominal). Symmetric ±25% regressed Cassie reward by + 40% vs disabled due to closed-loop Achilles coupling destabilizing on + lighter pelvis mass; ``(1.0, 1.25)`` recovers to 90% of the + mass-rand-disabled baseline while retaining the domain-randomization + benefit. + + +1.5.32 (2026-04-30) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Refactored rendering correctness tests under ``source/isaaclab_tasks/test/``: shared ``rendering_test_utils.py``, + split ``test_rendering_*`` modules (cartpole, Dexsuite Kuka Allegro lift, shadow hand) with ``*_kitless`` variants, + and Newton + OVRTX golden images. Newton + ``ovrtx_renderer`` test cases remain skipped on GitHub Actions temporarily + until they can run on GitHub Actions. + + +1.5.31 (2026-04-29) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added benchmark extraction for ``Metrics/success_rate`` and survival + success logging for direct cartpole camera environments. + + +1.5.30 (2026-04-28) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added Kamino solver physics presets to direct and manager-based environment + configs: cartpole and ant. + +Changed +^^^^^^^ + +* Updated skrl agent configuration files to support skrl 2.0. + + +1.5.29 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Enabled Newton rough-terrain locomotion training on the remaining + quadrupeds (Go1, Go2, A1, Anymal-B, Anymal-C), bipeds (H1, Cassie), + Digit, and G1 on top of Octi's Anymal-D work cherry-picked from + PR #5225. +* Hoisted the per-env Anymal-D ``RoughPhysicsCfg`` (MJWarp solver + + collision pipeline) into the shared + :class:`~isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg.LocomotionVelocityRoughEnvCfg` + so every rough-terrain env inherits identical physics. The shared + preset opts in to ``default_shape_cfg=NewtonShapeCfg(margin=0.01)``, + which is the single most important Newton setting for rough terrain. +* Added Go1 Newton-only leg armature preset to improve rough-terrain + training stability on lightweight quadrupeds. + +Changed +^^^^^^^ + +* Replaced the additive ``(-5, 5)`` kg default on + ``EventsCfg.add_base_mass`` with a multiplicative ``(1/1.25, 1.25)`` + log-uniform scale (``operation="scale"``, + ``distribution="log_uniform"``). Scale-invariant across robot sizes + with geometric mean 1.0; removes the need for per-robot + ``(-1.0, 3.0)`` additive overrides on A1/Go1/Go2. + + +1.5.28 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated stack-event utilities to enable optional extensions via + ``isaacsim.core.experimental.utils.app.enable_extension`` (non-deprecated Isaac Sim path). + + +1.5.27 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Refactored Franka cube-stack manager-based environment configs (IK-relative, visuomotor, and joint-position + variants under ``stack/config/franka/``) to build on :class:`~isaaclab_tasks.manager_based.manipulation.stack.stack_env_cfg.StackEnvCfg`, + including explicit cube spawns with semantic tags, gripper actions where applicable, end-effector + :class:`~isaaclab.sensors.frame_transformer.frame_transformer_cfg.FrameTransformerCfg`, and default Franka poses + via articulation ``InitialStateCfg`` instead of a reset-time default-pose event. +* Changed GR1T2 and Unitree G1 Inspire pick-place environment configs to define ``idle_action`` as a plain Python + sequence instead of ``torch.tensor``, dropping the ``torch`` import from those modules. + + +1.5.26 (2026-04-27) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Migrated golden images ``source/isaaclab_tasks/test/golden_images/**/*.png`` to Git LFS. + + +1.5.25 (2026-04-23) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Migrated all data property accesses from ``wp.to_torch(data.property)`` to + ``data.property.torch`` to match the new :class:`~isaaclab.utils.warp.ProxyArray` + return type introduced in ``isaaclab`` 4.6.13. + + +1.5.24 (2026-04-22) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated locomotion :class:`~isaaclab.sensors.ray_caster.ray_caster_cfg.RayCasterCfg` + height-scanner defaults to spawn a ``raycaster`` Xform child under the robot attachment link + (using :class:`~isaaclab.sim.spawners.sensors.sensors_cfg.RayCasterXformCfg`) so the sensor + works with Newton site-based :class:`~isaaclab.sim.views.FrameView` tracking. +* Updated all sensor configurations to use :class:`~isaaclab.sim.views.FrameView` instead of + the deprecated ``XformPrimView``. + + +1.5.23 (2026-04-21) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Refreshed Newton Warp renderer golden images for Dexsuite Kuka-Allegro environment case in + ``test_rendering_correctness`` because Newton Warp renderer honors visibility of prims now. + + +1.5.22 (2026-04-20) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed rendering correctness tests to use a shared environment seed constant, pass ``seed`` into + :meth:`gymnasium.Env.reset`, and aggregate per-data-type validation failures into a single + ``pytest.fail`` message in ``test_rendering_correctness``. +* Refreshed Newton Warp renderer golden images in ``test_rendering_correctness`` so image baselines match the current + camera output after Newton shape color alignment and the clear background color change. + +1.5.21 (2026-04-13) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Replaced ``resolve_preset_defaults`` with :func:`~isaaclab_tasks.utils.hydra.resolve_presets` + which resolves all presets in one pass with CLI selection support. + +Added +^^^^^ + +* Unknown preset names now raise ``ValueError`` with a grouped listing of all + available presets and the config paths they affect. + +Fixed +^^^^^ + +* Fixed presets inside dict-valued alternatives and ``PresetCfg(default=None)`` + not being discovered or resolved, causing wrong defaults in deeply nested configs. +* Unresolvable ``PresetCfg`` (no ``default``, no matching selection) now raises + ``ValueError`` instead of silently lingering in the config tree. + + +1.5.20 (2026-04-06) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Change Franka visuomotor and GR1T2 nut pouring environments to use TiledCamera. + + +1.5.19 (2026-04-06) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Aligned :class:`~isaaclab_tasks.manager_based.manipulation.deploy.reach.config.rizon_4s.ros_inference_env_cfg.Rizon4sReachROSInferenceEnvCfg` + with the Flexiv Rizon 4s mount and workspace at NVIDIA Hubble Lab. + + +1.5.18 (2026-04-02) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Set semantic tags on the in-hand object in :class:`~isaaclab_tasks.direct.shadow_hand.shadow_hand_env_cfg.ObjectCfg` + so the object receives the same semantic labels for the Newton and PhysX backends. + + +1.5.17 (2026-03-30) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :func:`~isaaclab_tasks.utils.hydra.apply_overrides` raising a false + conflict error when two global presets resolve to the same value for a path + (e.g. ``newton`` aliased to ``cube``). + + +1.5.16 (2026-03-24) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :func:`~isaaclab_tasks.utils.hydra.collect_presets` not discovering + presets inside nested dicts (e.g. ``EventTerm.params.terms.*.params``). + + +1.5.15 (2026-03-25) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added semantic segmentation to preset data types for the Cartpole Camera environment. +* Added semantic segmentation to preset data types for the Shadow Hand environment. +* Added semantic_segmentation64 to preset data types for for the Dexsuite Kuka-Allegro environment. +* Added ``Isaac-Deploy-Reach-Rizon4s-ROS`` environments. + +1.5.13 (2026-03-18) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added consistency validation to the camera outputs in ``test_rendering_correctness.py``. + + +1.5.12 (2026-03-16) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Increased ``max_iterations`` from 200 to 300 for ``Isaac-Dexsuite-Kuka-Allegro-Lift-v0`` + in the benchmarking configuration to allow sufficient training time for convergence. + + +1.5.11 (2026-03-13) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Simplified the Hydra preset system by removing the dict-style ``presets = {...}`` + attribute in favor of :class:`~isaaclab_tasks.utils.hydra.PresetCfg` subclasses + and the new :func:`~isaaclab_tasks.utils.hydra.preset` factory for inline scalar + overrides. + + +1.5.10 (2026-03-12) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``test_rendering_correctness.py`` to validate rendering correctness of the environments. + + +1.5.9 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed ``FileNotFoundError`` for Dexsuite environments by removing stale + ``rl_games_cfg_entry_point`` from gym registrations. Benchmark config updated + to use RSL-RL. + + +1.5.8 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``validate_config`` overrides to :class:`ShadowHandVisionEnvCfg` and + :class:`DexsuiteReorientEnvCfg` to catch invalid preset combinations early + (e.g. Warp renderer with unsupported data types, Newton physics with + multi-asset spawning). + +Changed +^^^^^^^ + +* Moved :class:`ShadowHandVisionEnvCfg` validation logic from the env constructor + into :meth:`~ShadowHandVisionEnvCfg.validate_config`, leveraging the new + ``configclass`` validation hook. + + +1.5.7 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Marked ``physx-warp-rgb`` and ``physx-warp-depth`` Shadow Hand vision preset + render tests as expected failures. The standard Shadow Hand USD contains PhysX + tendon schemas that Newton's ``ModelBuilder`` cannot parse. + + +1.5.6 (2026-03-10) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Simplified Dexsuite gravity randomization to use the unified + :class:`~isaaclab.envs.mdp.randomize_physics_scene_gravity` term, removing the + backend-specific ``GravityRandomizationCfg`` preset. + +Added +^^^^^ + +* Added Dexsuite multi-hand dexterous manipulation environments with Kuka Allegro configurations for lift and + reorientation tasks. + + +1.5.5 (2026-03-07) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Simplified all task MDP ``__init__.py`` files to call ``lazy_export()`` without + arguments. Fallback packages are now inferred from ``__init__.pyi`` stubs. + +Added +^^^^^ + +* Added ``from isaaclab.envs.mdp import *`` wildcard re-exports to all task MDP + ``__init__.pyi`` stubs, fixing broken type hints for base MDP symbols. + +* Added ``test_lazy_export_stubs.py`` to enforce that ``lazy_export()`` is called + without arguments across the codebase. + + +1.5.4 (2026-03-08) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :file:`test/test_environments_newton.py` — an end-to-end CI test that auto-discovers + all environments with a ``newton`` physics preset and runs 100 random-action steps against each. + +* Added :func:`~isaaclab_tasks.utils.parse_cfg.apply_named_preset` helper that walks the full + configuration tree and applies a named preset (e.g. ``'newton'``) to every preset-wrapper + field, replacing the default-resolved value. This enables Newton preset overrides for all + scene fields (e.g. ``scene.contact_forces``) when running tests outside the Hydra pipeline. + +* Added Newton physics presets and compatibility fixes to locomotion, reach, Franka cabinet, + allegro-hand, and shadow-hand environments: replaced unsupported ``ls_iterations`` / + ``ls_parallel`` solver fields with Newton-compatible settings, and added a per-preset + :class:`~isaaclab_tasks.manager_based.manipulation.reach.reach_env_cfg.TableCfg` using box + geometry instead of a USD asset for Newton compatibility. + +* Added :class:`~isaaclab_tasks.manager_based.manipulation.cabinet.cabinet_env_cfg.CabinetSimCfg` + preset to the cabinet environment, replacing the physics-only preset with a full simulation + config preset so the Newton backend can run at a finer timestep (``dt=1/600``) while PhysX + keeps its default (``dt=1/60``). + +* Added backend-specific ``joint_gears`` to ant and humanoid direct environments. Newton and + PhysX joint orderings differ, so each backend now has its own gear ratio list resolved at + env init. + + +1.5.3 (2026-03-06) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed :class:`~isaaclab.envs.mdp.noise.NoiseModelWithAdditiveBias` shape mismatch in Newton + environment tests. :func:`~isaaclab_tasks.utils.parse_cfg.apply_named_preset` previously + replaced ``scene`` with the preset's default ``num_envs`` (e.g. 8192), overwriting the + test-requested value (e.g. 2). The ``_bias`` tensor was then allocated with 8192 rows while + action data only had 2, causing a ``RuntimeError`` on addition. The fix re-applies the + caller's ``num_envs`` after preset application. + +* Fixed in-hand manipulation goal orientation: the quaternion imaginary-component clamping used + the wrong slice (``[1:4]`` instead of ``[0:3]``), causing incorrect goal distance computation + in the Newton preset. + +* Fixed Franka cabinet direct-env initialization orientation. + +Changed +^^^^^^^ + +* Renamed ``EventCfg`` to :class:`~isaaclab_tasks.direct.shadow_hand.shadow_hand_env_cfg.NewtonEventCfg` + in the Shadow Hand env config. The new name makes explicit that this preset covers only + Newton-compatible randomizations (joint gains, joint position limits, object mass, gravity). + Material and fixed-tendon randomization remain exclusively in + :class:`~isaaclab_tasks.direct.shadow_hand.shadow_hand_env_cfg.PhysxEventCfg`. + +1.5.2 (2026-03-05) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :func:`~isaaclab_tasks.utils.sim_launcher.compute_kit_requirements` to expose the Kit + decision logic for testing (e.g. preset resolution: ``presets=newton,ovrtx_renderer`` → + ``needs_kit=False``). + +* Added :file:`test_preset_kit_decision.py` — beginner-friendly unit tests that verify + preset resolution and Kit decision. + + +1.5.1 (2026-03-03) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Resolved :class:`~isaaclab_tasks.utils.PresetCfg` fields (e.g. physics) to their default values + in :func:`~isaaclab_tasks.utils.parse_env_cfg` so environments created via ``gym.make()`` outside + the Hydra pipeline no longer fail with ``AttributeError: 'XxxPhysicsCfg' object has no attribute 'class_type'``. + + +1.5.0 (2026-03-02) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab_tasks.utils.PresetCfg` base class — a ``@configclass`` whose typed + fields represent named configuration variants (e.g. ``default``, ``physx``, ``newton``). + The active variant is selected at launch with ``presets=`` via the Hydra CLI, enabling + a single environment config to support multiple physics backends. + +* Added Newton backend support (via ``PresetCfg``) to the following environments: + + * **Direct RL**: Cartpole (camera), Ant, Humanoid + * **Manager-based classic**: Ant, Humanoid + * **Manager-based locomotion velocity**: A1, AnymalB, AnymalC, AnymalD, Cassie, G1, + Go1, Go2, H1, Spot (flat and rough configs) + * **Manager-based manipulation reach**: Franka and UR10 reach + * **Dexsuite**: Kuka Allegro Lift + + +1.4.0 (2026-03-02) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``sim_launcher`` module with ``add_launcher_args`` and ``launch_simulation`` utilities + that auto-detect the physics backend (Newton vs Kit/PhysX) from the env config and launch the + appropriate simulation runtime. Training and play scripts no longer need to import ``AppLauncher`` + directly. + + +1.3.0 (2026-02-26) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Migrated all direct and manager-based task environments to use new ``_index`` write/set + APIs with keyword-only arguments. + + +1.2.0 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Split environment configuration from implementation for the following direct RL task environments. + Each environment now has a dedicated ``*_env_cfg.py`` file containing only the configuration dataclass, + keeping ``__init__.py`` imports cfg-only and leaving the implementation file free of cfg dependencies: + +* Added strict ``TYPE_CHECKING`` guards across MDP modules (observations, rewards, terminations, + curriculums, events) so that heavy simulation-backend imports (``pxr``, ``omni``, ``carb``, + ``scipy``) are not triggered when task configs are loaded without a running simulator. + + +1.1.2 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Set replicate physics to False for GR1T2 and G1 environments. + + +1.1.1 (2026-02-23) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Update stack and pick place environments to use warp data and fix quaternion ordering. + + +1.1.0 (2026-02-13) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated all task environments to wrap warp data property accesses with ``wp.to_torch()`` + for compatibility with the new warp backend. This includes direct RL environments + and all manager-based MDP functions (actions, observations, rewards, terminations, + commands, events, and curriculums). + + +1.0.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated all task environments to use the new ``root_view`` property instead of the deprecated + ``root_physx_view`` property. This includes the following environments: + + * AutoMate Assembly and Disassembly environments + * Factory environments + * FORGE environments + * Inhand manipulation environments + * Quadcopter environments + * Shadow Hand environments + + +0.12.0 (2026-01-30) +~~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Changed the quaternion ordering to match warp, PhysX, and Newton native XYZW quaternion ordering. + + +0.11.15 (2026-03-07) +~~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``Isaac-Stack-Cube-RedGreen-Franka-IK-Rel-v0``, ``Isaac-Stack-Cube-RedGreenBlue-Franka-IK-Rel-v0``, + ``Isaac-Stack-Cube-BlueGreen-Franka-IK-Rel-v0``, and ``Isaac-Stack-Cube-BlueGreenRed-Franka-IK-Rel-v0`` environments. + + 0.11.14 (2026-02-27) ~~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_tasks/isaaclab_tasks/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/__init__.py index 60b7f852ef26..fc892b1aa159 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/__init__.py @@ -30,10 +30,15 @@ # Register Gym environments. ## +import builtins + from .utils import import_packages -# The blacklist is used to prevent importing configs from sub-packages -# TODO(@ashwinvk): Remove pick_place from the blacklist once pinocchio from Isaac Sim is compatibility -_BLACKLIST_PKGS = ["utils", ".mdp", "pick_place", "direct.humanoid_amp.motions"] -# Import all configs in this package -import_packages(__name__, _BLACKLIST_PKGS) +# Guard: AppLauncher._create_app() temporarily removes all "lab" modules from +# sys.modules while creating SimulationApp. If Kit re-imports this package +# during that window, __init__ runs again and re-registers every gym env. +# We stash a flag on builtins because it is never evicted from sys.modules. +if not getattr(builtins, "_isaaclab_tasks_registered", False): + _BLACKLIST_PKGS = ["utils", ".mdp", "direct.humanoid_amp.motions"] + import_packages(__name__, _BLACKLIST_PKGS) + builtins._isaaclab_tasks_registered = True diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/allegro_hand_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/allegro_hand_env_cfg.py index 75087bbe0199..bd982c5d7105 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/allegro_hand_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/allegro_hand/allegro_hand_env_cfg.py @@ -4,19 +4,84 @@ # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.markers import VisualizationMarkersCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab_tasks.utils import PresetCfg + from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG +@configclass +class ObjectCfg(PresetCfg): + physx = RigidObjectCfg( + prim_path="/World/envs/env_.*/object", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg( + kinematic_enabled=False, + disable_gravity=False, + enable_gyroscopic_forces=True, + solver_position_iteration_count=8, + solver_velocity_iteration_count=0, + sleep_threshold=0.005, + stabilization_threshold=0.0025, + max_depenetration_velocity=1000.0, + ), + mass_props=sim_utils.MassPropertiesCfg(density=400.0), + scale=(1.2, 1.2, 1.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.17, 0.56), rot=(0.0, 0.0, 0.0, 1.0)), + ) + newton_mjwarp = ArticulationCfg( + prim_path="/World/envs/env_.*/object", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + mass_props=sim_utils.MassPropertiesCfg(density=400.0), + scale=(1.2, 1.2, 1.2), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, -0.17, 0.565), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} + ), + actuators={}, + articulation_root_prim_path="", + ) + default = physx + + +@configclass +class PhysicsCfg(PresetCfg): + physx = PhysxCfg( + bounce_threshold_velocity=0.2, + ) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + solver="newton", + integrator="implicitfast", + njmax=80, + nconmax=70, + impratio=10.0, + cone="elliptic", + update_data_interval=2, + iterations=100, + # save_to_mjcf="AllegroHand.xml", + ), + num_substeps=2, + debug_mode=False, + ) + default = physx + + @configclass class AllegroHandEnvCfg(DirectRLEnvCfg): # env @@ -35,9 +100,7 @@ class AllegroHandEnvCfg(DirectRLEnvCfg): static_friction=1.0, dynamic_friction=1.0, ), - physx=PhysxCfg( - bounce_threshold_velocity=0.2, - ), + physics=PhysicsCfg(), ) # robot robot_cfg: ArticulationCfg = ALLEGRO_HAND_CFG.replace(prim_path="/World/envs/env_.*/Robot") @@ -68,25 +131,7 @@ class AllegroHandEnvCfg(DirectRLEnvCfg): ] # in-hand object - object_cfg: RigidObjectCfg = RigidObjectCfg( - prim_path="/World/envs/env_.*/object", - spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", - rigid_props=sim_utils.RigidBodyPropertiesCfg( - kinematic_enabled=False, - disable_gravity=False, - enable_gyroscopic_forces=True, - solver_position_iteration_count=8, - solver_velocity_iteration_count=0, - sleep_threshold=0.005, - stabilization_threshold=0.0025, - max_depenetration_velocity=1000.0, - ), - mass_props=sim_utils.MassPropertiesCfg(density=400.0), - scale=(1.2, 1.2, 1.2), - ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.17, 0.56), rot=(1.0, 0.0, 0.0, 0.0)), - ) + object_cfg: ObjectCfg = ObjectCfg() # goal object goal_object_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( prim_path="/Visuals/goal_marker", @@ -116,6 +161,8 @@ class AllegroHandEnvCfg(DirectRLEnvCfg): vel_obs_scale = 0.2 success_tolerance = 0.2 max_consecutive_success = 0 + success_count_threshold: int = 1 + """Minimum number of goals reached in an episode to count it as a successful episode.""" av_factor = 0.1 act_moving_average = 1.0 force_torque_obs_scale = 10.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/__init__.py index 9881cd66ca74..bd8e1bba2b51 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/__init__.py @@ -20,7 +20,7 @@ entry_point=f"{__name__}.ant_env:AntEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.ant_env:AntEnvCfg", + "env_cfg_entry_point": f"{__name__}.ant_env_cfg:AntEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AntPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env.py index 39ae57b29677..1facc07440d3 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env.py @@ -5,67 +5,9 @@ from __future__ import annotations -import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg -from isaaclab.envs import DirectRLEnvCfg -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import SimulationCfg -from isaaclab.terrains import TerrainImporterCfg -from isaaclab.utils import configclass - from isaaclab_tasks.direct.locomotion.locomotion_env import LocomotionEnv -from isaaclab_assets.robots.ant import ANT_CFG - - -@configclass -class AntEnvCfg(DirectRLEnvCfg): - # env - episode_length_s = 15.0 - decimation = 2 - action_scale = 0.5 - action_space = 8 - observation_space = 36 - state_space = 0 - - # simulation - sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) - terrain = TerrainImporterCfg( - prim_path="/World/ground", - terrain_type="plane", - collision_group=-1, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="average", - restitution_combine_mode="average", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - debug_vis=False, - ) - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True - ) - - # robot - robot: ArticulationCfg = ANT_CFG.replace(prim_path="/World/envs/env_.*/Robot") - joint_gears: list = [15, 15, 15, 15, 15, 15, 15, 15] - - heading_weight: float = 0.5 - up_weight: float = 0.1 - - energy_cost_scale: float = 0.05 - actions_cost_scale: float = 0.005 - alive_reward_scale: float = 0.5 - dof_vel_scale: float = 0.2 - - death_cost: float = -2.0 - termination_height: float = 0.31 - - angular_velocity_scale: float = 1.0 - contact_force_scale: float = 0.1 +from .ant_env_cfg import AntEnvCfg class AntEnv(LocomotionEnv): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env_cfg.py new file mode 100644 index 000000000000..7ee86abc0013 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/ant/ant_env_cfg.py @@ -0,0 +1,113 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import KaminoSolverCfg, MJWarpSolverCfg, NewtonCfg +from isaaclab_ovphysx.physics import OvPhysxCfg +from isaaclab_physx.physics import PhysxCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg + +from isaaclab_assets.robots.ant import ANT_CFG + + +@configclass +class AntPhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + physx: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=45, + nconmax=25, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ), + num_substeps=1, + debug_mode=False, + ) + newton_kamino: NewtonCfg = NewtonCfg( + solver_cfg=KaminoSolverCfg( + integrator="moreau", + use_collision_detector=False, + sparse_jacobian=True, + constraints_alpha=0.1, + padmm_max_iterations=100, + padmm_primal_tolerance=1e-4, + padmm_dual_tolerance=1e-4, + padmm_compl_tolerance=1e-4, + padmm_rho_0=0.05, + padmm_eta=1e-5, + padmm_use_acceleration=True, + padmm_warmstart_mode="containers", + padmm_contact_warmstart_method="geom_pair_net_force", + padmm_use_graph_conditionals=False, + collision_detector_pipeline="unified", + collision_detector_max_contacts_per_pair=8, + ), + num_substeps=2, + debug_mode=False, + use_cuda_graph=True, + ) + ovphysx: OvPhysxCfg = OvPhysxCfg() + + +@configclass +class AntEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 15.0 + decimation = 2 + action_scale = 0.5 + action_space = 8 + observation_space = 36 + state_space = 0 + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=AntPhysicsCfg()) + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="average", + restitution_combine_mode="average", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot: ArticulationCfg = ANT_CFG.replace(prim_path="/World/envs/env_.*/Robot") + joint_gears: list = [15, 15, 15, 15, 15, 15, 15, 15] + + heading_weight: float = 0.5 + up_weight: float = 0.1 + + energy_cost_scale: float = 0.05 + actions_cost_scale: float = 0.005 + alive_reward_scale: float = 0.5 + dof_vel_scale: float = 0.2 + + death_cost: float = -2.0 + termination_height: float = 0.31 + + angular_velocity_scale: float = 1.0 + contact_force_scale: float = 0.1 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env.py index 2f7e792f93ad..3c1c0aa0ef2f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env.py @@ -7,6 +7,7 @@ import gymnasium as gym import torch +import warp as wp import isaaclab.sim as sim_utils from isaaclab.assets import Articulation @@ -31,6 +32,13 @@ def __init__(self, cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg, render_mode: str # X/Y linear velocity and yaw angular velocity commands self._commands = torch.zeros(self.num_envs, 3, device=self.device) + # Per-episode tracking accumulators for the velocity-tracking success metric. + # Errors are summed over actual episode steps and the mean is finalized at reset, + # so logged values reflect the just-ended episode rather than the post-reset instant. + self._error_xy_sum = torch.zeros(self.num_envs, device=self.device) + self._error_yaw_sum = torch.zeros(self.num_envs, device=self.device) + self._step_count = torch.zeros(self.num_envs, device=self.device) + # Logging self._episode_sums = { key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) @@ -48,9 +56,9 @@ def __init__(self, cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg, render_mode: str ] } # Get specific body indices - self._base_id, _ = self._contact_sensor.find_bodies("base") - self._feet_ids, _ = self._contact_sensor.find_bodies(".*FOOT") - self._undesired_contact_body_ids, _ = self._contact_sensor.find_bodies(".*THIGH") + self._base_id, _ = self._contact_sensor.find_sensors("base") + self._feet_ids, _ = self._contact_sensor.find_sensors(".*FOOT") + self._undesired_contact_body_ids, _ = self._contact_sensor.find_sensors(".*THIGH") def _setup_scene(self): self._robot = Articulation(self.cfg.robot) @@ -75,28 +83,30 @@ def _setup_scene(self): def _pre_physics_step(self, actions: torch.Tensor): self._actions = actions.clone() - self._processed_actions = self.cfg.action_scale * self._actions + self._robot.data.default_joint_pos + self._processed_actions = self.cfg.action_scale * self._actions + self._robot.data.default_joint_pos.torch def _apply_action(self): - self._robot.set_joint_position_target(self._processed_actions) + self._robot.set_joint_position_target_index(target=self._processed_actions) def _get_observations(self) -> dict: self._previous_actions = self._actions.clone() height_data = None if isinstance(self.cfg, AnymalCRoughEnvCfg): height_data = ( - self._height_scanner.data.pos_w[:, 2].unsqueeze(1) - self._height_scanner.data.ray_hits_w[..., 2] - 0.5 + self._height_scanner.data.pos_w.torch[:, 2].unsqueeze(1) + - self._height_scanner.data.ray_hits_w.torch[..., 2] + - 0.5 ).clip(-1.0, 1.0) obs = torch.cat( [ tensor for tensor in ( - self._robot.data.root_lin_vel_b, - self._robot.data.root_ang_vel_b, - self._robot.data.projected_gravity_b, + self._robot.data.root_lin_vel_b.torch, + self._robot.data.root_ang_vel_b.torch, + self._robot.data.projected_gravity_b.torch, self._commands, - self._robot.data.joint_pos - self._robot.data.default_joint_pos, - self._robot.data.joint_vel, + self._robot.data.joint_pos.torch - self._robot.data.default_joint_pos.torch, + self._robot.data.joint_vel.torch, height_data, self._actions, ) @@ -109,35 +119,38 @@ def _get_observations(self) -> dict: def _get_rewards(self) -> torch.Tensor: # linear velocity tracking - lin_vel_error = torch.sum(torch.square(self._commands[:, :2] - self._robot.data.root_lin_vel_b[:, :2]), dim=1) + lin_vel_error = torch.sum( + torch.square(self._commands[:, :2] - self._robot.data.root_lin_vel_b.torch[:, :2]), dim=1 + ) lin_vel_error_mapped = torch.exp(-lin_vel_error / 0.25) # yaw rate tracking - yaw_rate_error = torch.square(self._commands[:, 2] - self._robot.data.root_ang_vel_b[:, 2]) + yaw_rate_error = torch.square(self._commands[:, 2] - self._robot.data.root_ang_vel_b.torch[:, 2]) yaw_rate_error_mapped = torch.exp(-yaw_rate_error / 0.25) # z velocity tracking - z_vel_error = torch.square(self._robot.data.root_lin_vel_b[:, 2]) + z_vel_error = torch.square(self._robot.data.root_lin_vel_b.torch[:, 2]) # angular velocity x/y - ang_vel_error = torch.sum(torch.square(self._robot.data.root_ang_vel_b[:, :2]), dim=1) + ang_vel_error = torch.sum(torch.square(self._robot.data.root_ang_vel_b.torch[:, :2]), dim=1) # joint torques - joint_torques = torch.sum(torch.square(self._robot.data.applied_torque), dim=1) + joint_torques = torch.sum(torch.square(self._robot.data.applied_torque.torch), dim=1) # joint acceleration - joint_accel = torch.sum(torch.square(self._robot.data.joint_acc), dim=1) + joint_accel = torch.sum(torch.square(self._robot.data.joint_acc.torch), dim=1) # action rate action_rate = torch.sum(torch.square(self._actions - self._previous_actions), dim=1) # feet air time - first_contact = self._contact_sensor.compute_first_contact(self.step_dt)[:, self._feet_ids] - last_air_time = self._contact_sensor.data.last_air_time[:, self._feet_ids] + first_contact = self._contact_sensor.compute_first_contact(self.step_dt).torch[:, self._feet_ids] + last_air_time = self._contact_sensor.data.last_air_time.torch[:, self._feet_ids] air_time = torch.sum((last_air_time - 0.5) * first_contact, dim=1) * ( - torch.norm(self._commands[:, :2], dim=1) > 0.1 + torch.linalg.norm(self._commands[:, :2], dim=1) > 0.1 ) # undesired contacts - net_contact_forces = self._contact_sensor.data.net_forces_w_history + net_contact_forces = self._contact_sensor.data.net_forces_w_history.torch is_contact = ( - torch.max(torch.norm(net_contact_forces[:, :, self._undesired_contact_body_ids], dim=-1), dim=1)[0] > 1.0 + torch.max(torch.linalg.norm(net_contact_forces[:, :, self._undesired_contact_body_ids], dim=-1), dim=1)[0] + > 1.0 ) contacts = torch.sum(is_contact, dim=1) # flat orientation - flat_orientation = torch.sum(torch.square(self._robot.data.projected_gravity_b[:, :2]), dim=1) + flat_orientation = torch.sum(torch.square(self._robot.data.projected_gravity_b.torch[:, :2]), dim=1) rewards = { "track_lin_vel_xy_exp": lin_vel_error_mapped * self.cfg.lin_vel_reward_scale * self.step_dt, @@ -152,6 +165,13 @@ def _get_rewards(self) -> torch.Tensor: "flat_orientation_l2": flat_orientation * self.cfg.flat_orientation_reward_scale * self.step_dt, } reward = torch.sum(torch.stack(list(rewards.values())), dim=0) + # Accumulate per-episode velocity tracking error sums; success is computed at reset + # as a per-env binary check on the *episode-mean* errors against the cfg thresholds. + error_xy = torch.linalg.norm(self._commands[:, :2] - self._robot.data.root_lin_vel_b.torch[:, :2], dim=-1) + error_yaw = torch.abs(self._commands[:, 2] - self._robot.data.root_ang_vel_b.torch[:, 2]) + self._error_xy_sum += error_xy + self._error_yaw_sum += error_yaw + self._step_count += 1.0 # Logging for key, value in rewards.items(): self._episode_sums[key] += value @@ -159,13 +179,15 @@ def _get_rewards(self) -> torch.Tensor: def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: time_out = self.episode_length_buf >= self.max_episode_length - 1 - net_contact_forces = self._contact_sensor.data.net_forces_w_history - died = torch.any(torch.max(torch.norm(net_contact_forces[:, :, self._base_id], dim=-1), dim=1)[0] > 1.0, dim=1) + net_contact_forces = self._contact_sensor.data.net_forces_w_history.torch + died = torch.any( + torch.max(torch.linalg.norm(net_contact_forces[:, :, self._base_id], dim=-1), dim=1)[0] > 1.0, dim=1 + ) return died, time_out def _reset_idx(self, env_ids: torch.Tensor | None): if env_ids is None or len(env_ids) == self.num_envs: - env_ids = self._robot._ALL_INDICES + env_ids = wp.to_torch(self._robot._ALL_INDICES) self._robot.reset(env_ids) super()._reset_idx(env_ids) if len(env_ids) == self.num_envs: @@ -176,13 +198,15 @@ def _reset_idx(self, env_ids: torch.Tensor | None): # Sample new commands self._commands[env_ids] = torch.zeros_like(self._commands[env_ids]).uniform_(-1.0, 1.0) # Reset robot state - joint_pos = self._robot.data.default_joint_pos[env_ids] - joint_vel = self._robot.data.default_joint_vel[env_ids] - default_root_state = self._robot.data.default_root_state[env_ids] - default_root_state[:, :3] += self._terrain.env_origins[env_ids] - self._robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self._robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] + joint_vel = self._robot.data.default_joint_vel.torch[env_ids] + default_root_pose = self._robot.data.default_root_pose.torch[env_ids] + default_root_vel = self._robot.data.default_root_vel.torch[env_ids] + default_root_pose[:, :3] += self._terrain.env_origins[env_ids] + self._robot.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self._robot.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) # Logging extras = dict() for key in self._episode_sums.keys(): @@ -194,4 +218,23 @@ def _reset_idx(self, env_ids: torch.Tensor | None): extras = dict() extras["Episode_Termination/base_contact"] = torch.count_nonzero(self.reset_terminated[env_ids]).item() extras["Episode_Termination/time_out"] = torch.count_nonzero(self.reset_time_outs[env_ids]).item() + # Flush per-episode velocity tracking metrics. ``success_rate`` is per-env binary: + # the *episode-mean* error stayed below both thresholds, mean-reduced across env_ids. + denom = self._step_count[env_ids].clamp_min(1.0) + mean_error_xy = self._error_xy_sum[env_ids] / denom + mean_error_yaw = self._error_yaw_sum[env_ids] / denom + extras["Metrics/error_vel_xy"] = mean_error_xy.mean().item() + extras["Metrics/error_vel_yaw"] = mean_error_yaw.mean().item() + extras["Metrics/success_rate"] = ( + ( + (mean_error_xy < self.cfg.vel_xy_success_threshold) + & (mean_error_yaw < self.cfg.vel_yaw_success_threshold) + ) + .float() + .mean() + .item() + ) + self._error_xy_sum[env_ids] = 0.0 + self._error_yaw_sum[env_ids] = 0.0 + self._step_count[env_ids] = 0.0 self.extras["log"].update(extras) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env_cfg.py index f5e12b599122..f2939b661508 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + import isaaclab.envs.mdp as mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg @@ -63,6 +65,7 @@ class AnymalCFlatEnvCfg(DirectRLEnvCfg): sim: SimulationCfg = SimulationCfg( dt=1 / 200, render_interval=decimation, + physics=PhysxCfg(gpu_max_rigid_patch_count=2**20), physics_material=sim_utils.RigidBodyMaterialCfg( friction_combine_mode="multiply", restitution_combine_mode="multiply", @@ -109,6 +112,15 @@ class AnymalCFlatEnvCfg(DirectRLEnvCfg): undesired_contact_reward_scale = -1.0 flat_orientation_reward_scale = -5.0 + # success criteria — episode success_rate = per-env binary check that the *episode-mean* + # error stayed below both thresholds, mean-reduced across resetting envs. Matches the + # manager-based ``UniformVelocityCommandCfg`` defaults so flat/rough/direct curves are + # comparable on the unified ``Metrics/success_rate`` card. + vel_xy_success_threshold: float = 0.5 + """Threshold on the per-episode mean XY velocity error norm [m/s].""" + vel_yaw_success_threshold: float = 0.4 + """Threshold on the per-episode mean yaw velocity error [rad/s].""" + @configclass class AnymalCRoughEnvCfg(AnymalCFlatEnvCfg): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/__init__.py index 9da64598c0c4..ef9160ac3e0e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/__init__.py @@ -16,7 +16,7 @@ entry_point=f"{__name__}.assembly_env:AssemblyEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.assembly_env:AssemblyEnvCfg", + "env_cfg_entry_point": f"{__name__}.assembly_env_cfg:AssemblyEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, ) @@ -27,7 +27,7 @@ entry_point=f"{__name__}.disassembly_env:DisassemblyEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.disassembly_env:DisassemblyEnvCfg", + "env_cfg_entry_point": f"{__name__}.disassembly_env_cfg:DisassemblyEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", }, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/agents/rl_games_ppo_cfg.yaml index 3d8d070248cf..48d051a540e0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/agents/rl_games_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/agents/rl_games_ppo_cfg.yaml @@ -45,7 +45,6 @@ params: config: name: Assembly device: cuda:0 - full_experiment_name: test env_name: rlgpu multi_gpu: False ppo: True diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py index 35e999120380..438f0f80603b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py @@ -4,21 +4,25 @@ # SPDX-License-Identifier: BSD-3-Clause import json -import os import numpy as np import torch import warp as wp -import carb -import isaacsim.core.utils.torch as torch_utils - import isaaclab.sim as sim_utils from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import DirectRLEnv from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, retrieve_file_path -from isaaclab.utils.math import axis_angle_from_quat +from isaaclab.utils.math import ( + axis_angle_from_quat, + combine_frame_transforms, + euler_xyz_from_quat, + quat_conjugate, + quat_from_angle_axis, + quat_from_euler_xyz, + quat_mul, +) from . import automate_algo_utils as automate_algo from . import automate_log_utils as automate_log @@ -60,11 +64,7 @@ def __init__(self, cfg: AssemblyEnvCfg, render_mode: str | None = None, **kwargs ) # Create criterion for dynamic time warping (later used for imitation reward) - cuda_version = automate_algo.get_cuda_version() - if (cuda_version is not None) and (cuda_version < (13, 0, 0)): - self.soft_dtw_criterion = SoftDTW(use_cuda=True, device=self.device, gamma=self.cfg_task.soft_dtw_gamma) - else: - self.soft_dtw_criterion = SoftDTW(use_cuda=False, device=self.device, gamma=self.cfg_task.soft_dtw_gamma) + self.soft_dtw_criterion = SoftDTW(use_cuda=True, device=self.device, gamma=self.cfg_task.soft_dtw_gamma) # Evaluate if self.cfg_task.if_logging_eval: @@ -82,11 +82,13 @@ def _init_eval_logging(self): def _set_body_inertias(self): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = self._robot.root_physx_view.get_inertias() + inertias = wp.to_torch(self._robot.root_view.get_inertias()) offset = torch.zeros_like(inertias) offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - self._robot.root_physx_view.set_inertias(new_inertias, torch.arange(self.num_envs)) + self._robot.root_view.set_inertias( + wp.from_torch(new_inertias), wp.from_torch(torch.arange(self.num_envs, dtype=torch.int32)) + ) def _set_default_dynamics_parameters(self): """Set parameters defining dynamic interactions.""" @@ -108,16 +110,16 @@ def _set_default_dynamics_parameters(self): def _set_friction(self, asset, value): """Update material properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = wp.to_torch(asset.root_view.get_material_properties()) materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. - env_ids = torch.arange(self.scene.num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + env_ids = torch.arange(self.scene.num_envs, device="cpu", dtype=torch.int32) + asset.root_view.set_material_properties(wp.from_torch(materials), wp.from_torch(env_ids)) def _init_tensors(self): """Initialize tensors once.""" self.identity_quat = ( - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) # Control targets. @@ -154,7 +156,7 @@ def _init_tensors(self): .repeat(self.num_envs, 1) ) self.robot_to_gripper_quat = ( - torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) self.plug_grasp_pos_local = self.plug_grasps[: self.num_envs, :3] self.plug_grasp_quat_local = torch.roll(self.plug_grasps[: self.num_envs, 3:], -1, 1) @@ -195,13 +197,13 @@ def _init_tensors(self): def _load_assembly_info(self): """Load grasp pose and disassembly distance for plugs in each environment.""" - retrieve_file_path(self.cfg_task.plug_grasp_json, download_dir="./") - with open(os.path.basename(self.cfg_task.plug_grasp_json)) as f: + plug_grasp_path = retrieve_file_path(self.cfg_task.plug_grasp_json) + with open(plug_grasp_path) as f: plug_grasp_dict = json.load(f) plug_grasps = [plug_grasp_dict[f"asset_{self.cfg_task.assembly_id}"] for i in range(self.num_envs)] - retrieve_file_path(self.cfg_task.disassembly_dist_json, download_dir="./") - with open(os.path.basename(self.cfg_task.disassembly_dist_json)) as f: + disassembly_dist_path = retrieve_file_path(self.cfg_task.disassembly_dist_json) + with open(disassembly_dist_path) as f: disassembly_dist_dict = json.load(f) disassembly_dists = [disassembly_dist_dict[f"asset_{self.cfg_task.assembly_id}"] for i in range(self.num_envs)] @@ -223,8 +225,8 @@ def _get_curriculum_info(self, disassembly_dists): def _load_disassembly_data(self): """Load pre-collected disassembly trajectories (end-effector position only).""" - retrieve_file_path(self.cfg_task.disassembly_path_json, download_dir="./") - with open(os.path.basename(self.cfg_task.disassembly_path_json)) as f: + local_path = retrieve_file_path(self.cfg_task.disassembly_path_json, download_dir="./") + with open(local_path) as f: disassembly_traj = json.load(f) eef_pos_traj = [] @@ -252,7 +254,7 @@ def _setup_scene(self): # spawn a usd file of a table into the scene cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd") cfg.func( - "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.70711, 0.0, 0.0, 0.70711) + "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.0, 0.0, 0.70711, 0.70711) ) self._robot = Articulation(self.cfg.robot) @@ -273,39 +275,41 @@ def _setup_scene(self): def _compute_intermediate_values(self, dt): """Get values computed from raw tensors. This includes adding noise.""" # TODO: A lot of these can probably only be set once? - self.fixed_pos = self._fixed_asset.data.root_pos_w - self.scene.env_origins - self.fixed_quat = self._fixed_asset.data.root_quat_w + self.fixed_pos = self._fixed_asset.data.root_pos_w.torch - self.scene.env_origins + self.fixed_quat = self._fixed_asset.data.root_quat_w.torch - self.held_pos = self._held_asset.data.root_pos_w - self.scene.env_origins - self.held_quat = self._held_asset.data.root_quat_w + self.held_pos = self._held_asset.data.root_pos_w.torch - self.scene.env_origins + self.held_quat = self._held_asset.data.root_quat_w.torch - self.fingertip_midpoint_pos = self._robot.data.body_pos_w[:, self.fingertip_body_idx] - self.scene.env_origins - self.fingertip_midpoint_quat = self._robot.data.body_quat_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] + self.fingertip_midpoint_pos = ( + self._robot.data.body_pos_w.torch[:, self.fingertip_body_idx] - self.scene.env_origins + ) + self.fingertip_midpoint_quat = self._robot.data.body_quat_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w.torch[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = wp.to_torch(self._robot.root_view.get_jacobians()) self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] - self.joint_pos = self._robot.data.joint_pos.clone() - self.joint_vel = self._robot.data.joint_vel.clone() + self.arm_mass_matrix = wp.to_torch(self._robot.root_view.get_generalized_mass_matrices())[:, 0:7, 0:7] + self.joint_pos = self._robot.data.joint_pos.torch.clone() + self.joint_vel = self._robot.data.joint_vel.torch.clone() # Compute pose of gripper goal and top of socket in socket frame - self.gripper_goal_quat, self.gripper_goal_pos = torch_utils.tf_combine( - self.fixed_quat, + self.gripper_goal_pos, self.gripper_goal_quat = combine_frame_transforms( self.fixed_pos, - self.plug_grasp_quat_local, + self.fixed_quat, self.plug_grasp_pos_local, + self.plug_grasp_quat_local, ) - self.gripper_goal_quat, self.gripper_goal_pos = torch_utils.tf_combine( - self.gripper_goal_quat, + self.gripper_goal_pos, self.gripper_goal_quat = combine_frame_transforms( self.gripper_goal_pos, - self.robot_to_gripper_quat, + self.gripper_goal_quat, self.palm_to_finger_center, + self.robot_to_gripper_quat, ) # Finite-differencing results in more reliable velocity estimates. @@ -313,10 +317,8 @@ def _compute_intermediate_values(self, dt): self.prev_fingertip_pos = self.fingertip_midpoint_pos.clone() # Add state differences if velocity isn't being added. - rot_diff_quat = torch_utils.quat_mul( - self.fingertip_midpoint_quat, torch_utils.quat_conjugate(self.prev_fingertip_quat) - ) - rot_diff_quat *= torch.sign(rot_diff_quat[:, 0]).unsqueeze(-1) + rot_diff_quat = quat_mul(self.fingertip_midpoint_quat, quat_conjugate(self.prev_fingertip_quat)) + rot_diff_quat *= torch.sign(rot_diff_quat[:, 3]).unsqueeze(-1) # W component is at index 3 in XYZW format rot_diff_aa = axis_angle_from_quat(rot_diff_quat) self.ee_angvel_fd = rot_diff_aa / dt self.prev_fingertip_quat = self.fingertip_midpoint_quat.clone() @@ -326,26 +328,26 @@ def _compute_intermediate_values(self, dt): self.prev_joint_pos = self.joint_pos[:, 0:7].clone() # Keypoint tensors. - self.held_base_quat[:], self.held_base_pos[:] = torch_utils.tf_combine( - self.held_quat, self.held_pos, self.held_base_quat_local, self.held_base_pos_local + self.held_base_pos[:], self.held_base_quat[:] = combine_frame_transforms( + self.held_pos, self.held_quat, self.held_base_pos_local, self.held_base_quat_local ) - self.target_held_base_quat[:], self.target_held_base_pos[:] = torch_utils.tf_combine( - self.fixed_quat, self.fixed_pos, self.identity_quat, self.fixed_success_pos_local + self.target_held_base_pos[:], self.target_held_base_quat[:] = combine_frame_transforms( + self.fixed_pos, self.fixed_quat, self.fixed_success_pos_local, self.identity_quat ) # Compute pos of keypoints on held asset, and fixed asset in world frame for idx, keypoint_offset in enumerate(self.keypoint_offsets): - self.keypoints_held[:, idx] = torch_utils.tf_combine( - self.held_base_quat, self.held_base_pos, self.identity_quat, keypoint_offset.repeat(self.num_envs, 1) - )[1] - self.keypoints_fixed[:, idx] = torch_utils.tf_combine( - self.target_held_base_quat, + self.keypoints_held[:, idx] = combine_frame_transforms( + self.held_base_pos, self.held_base_quat, keypoint_offset.repeat(self.num_envs, 1), self.identity_quat + )[0] + self.keypoints_fixed[:, idx] = combine_frame_transforms( self.target_held_base_pos, - self.identity_quat, + self.target_held_base_quat, keypoint_offset.repeat(self.num_envs, 1), - )[1] + self.identity_quat, + )[0] - self.keypoint_dist = torch.norm(self.keypoints_held - self.keypoints_fixed, p=2, dim=-1).mean(-1) + self.keypoint_dist = torch.linalg.norm(self.keypoints_held - self.keypoints_fixed, ord=2, dim=-1).mean(-1) self.last_update_timestamp = self._robot._data._sim_timestamp def _get_observations(self): @@ -410,23 +412,23 @@ def move_gripper_in_place(self, ctrl_target_gripper_dof_pos): rot_actions = actions[:, 3:6] # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) - rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) + rot_actions_quat = quat_from_angle_axis(angle, axis) rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1.0e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) + self.ctrl_target_fingertip_midpoint_quat = quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(self.ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(euler_xyz_from_quat(self.ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 target_euler_xyz[:, 1] = 0.0 - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( + self.ctrl_target_fingertip_midpoint_quat = quat_from_euler_xyz( roll=target_euler_xyz[:, 0], pitch=target_euler_xyz[:, 1], yaw=target_euler_xyz[:, 2] ) @@ -436,7 +438,7 @@ def move_gripper_in_place(self, ctrl_target_gripper_dof_pos): def _apply_action(self): """Apply actions for policy as delta targets from current position.""" # Get current yaw for success checking. - _, _, curr_yaw = torch_utils.get_euler_xyz(self.fingertip_midpoint_quat) + _, _, curr_yaw = euler_xyz_from_quat(self.fingertip_midpoint_quat) self.curr_yaw = torch.where(curr_yaw > np.deg2rad(235), curr_yaw - 2 * np.pi, curr_yaw) # Note: We use finite-differenced velocities for control and observations. @@ -462,22 +464,22 @@ def _apply_action(self): self.ctrl_target_fingertip_midpoint_pos = self.fixed_pos_action_frame + pos_error_clipped # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) - rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) + rot_actions_quat = quat_from_angle_axis(angle, axis) rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) + self.ctrl_target_fingertip_midpoint_quat = quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(self.ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(euler_xyz_from_quat(self.ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 # Restrict actions to be upright. target_euler_xyz[:, 1] = 0.0 - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( + self.ctrl_target_fingertip_midpoint_quat = quat_from_euler_xyz( roll=target_euler_xyz[:, 0], pitch=target_euler_xyz[:, 1], yaw=target_euler_xyz[:, 2] ) @@ -513,8 +515,8 @@ def generate_ctrl_signals(self): self.ctrl_target_joint_pos[:, 7:9] = self.ctrl_target_gripper_dof_pos self.joint_torque[:, 7:9] = 0.0 - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) - self._robot.set_joint_effort_target(self.joint_torque) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) + self._robot.set_joint_effort_target_index(target=self.joint_torque) def _get_dones(self): """Update intermediate values used for rewards and observations.""" @@ -541,7 +543,9 @@ def _get_rewards(self): # Only log episode success rates at the end of an episode. if torch.any(self.reset_buf): - self.extras["successes"] = torch.count_nonzero(self.ep_succeeded) / self.num_envs + self.extras.setdefault("log", {})["Metrics/success_rate"] = ( + torch.count_nonzero(self.ep_succeeded) / self.num_envs + ).item() sbc_rwd_scale = automate_algo.get_curriculum_reward_scale( curr_max_disp=self.curr_max_disp, @@ -646,35 +650,44 @@ def _reset_idx(self, env_ids): def _set_assets_to_default_pose(self, env_ids): """Move assets to default pose before randomization.""" - held_state = self._held_asset.data.default_root_state.clone()[env_ids] + held_state = torch.cat( + [self._held_asset.data.default_root_pose.torch, self._held_asset.data.default_root_vel.torch], + dim=-1, + )[env_ids].clone() held_state[:, 0:3] += self.scene.env_origins[env_ids] held_state[:, 7:] = 0.0 - self._held_asset.write_root_pose_to_sim(held_state[:, 0:7], env_ids=env_ids) - self._held_asset.write_root_velocity_to_sim(held_state[:, 7:], env_ids=env_ids) + self._held_asset.write_root_pose_to_sim_index(root_pose=held_state[:, 0:7], env_ids=env_ids) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_state[:, 7:], env_ids=env_ids) self._held_asset.reset() - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] + fixed_state = torch.cat( + [ + self._fixed_asset.data.default_root_pose.torch, + self._fixed_asset.data.default_root_vel.torch, + ], + dim=-1, + )[env_ids].clone() fixed_state[:, 0:3] += self.scene.env_origins[env_ids] fixed_state[:, 7:] = 0.0 - self._fixed_asset.write_root_pose_to_sim(fixed_state[:, 0:7], env_ids=env_ids) - self._fixed_asset.write_root_velocity_to_sim(fixed_state[:, 7:], env_ids=env_ids) + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_state[:, 0:7], env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_state[:, 7:], env_ids=env_ids) self._fixed_asset.reset() def _move_gripper_to_grasp_pose(self, env_ids): """Define grasp pose for plug and move gripper to pose.""" - gripper_goal_quat, gripper_goal_pos = torch_utils.tf_combine( - self.held_quat, + gripper_goal_pos, gripper_goal_quat = combine_frame_transforms( self.held_pos, - self.plug_grasp_quat_local, + self.held_quat, self.plug_grasp_pos_local, + self.plug_grasp_quat_local, ) - gripper_goal_quat, gripper_goal_pos = torch_utils.tf_combine( - gripper_goal_quat, + gripper_goal_pos, gripper_goal_quat = combine_frame_transforms( gripper_goal_pos, - self.robot_to_gripper_quat, + gripper_goal_quat, self.palm_to_finger_center, + self.robot_to_gripper_quat, ) # Set target_pos @@ -714,9 +727,10 @@ def set_pos_inverse_kinematics(self, env_ids): self.ctrl_target_joint_pos[env_ids, 0:7] = self.joint_pos[env_ids, 0:7] # Update dof state. - self._robot.write_joint_state_to_sim(self.joint_pos, self.joint_vel) + self._robot.write_joint_position_to_sim_index(position=self.joint_pos) + self._robot.write_joint_velocity_to_sim_index(velocity=self.joint_vel) self._robot.reset() - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) # Simulate and update tensors. self.step_sim_no_action() @@ -727,16 +741,17 @@ def set_pos_inverse_kinematics(self, env_ids): def _set_franka_to_default_pose(self, joints, env_ids): """Return Franka to its default joint position.""" gripper_width = self.gripper_open_width - joint_pos = self._robot.data.default_joint_pos[env_ids] + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] joint_pos[:, 7:] = gripper_width # MIMIC joint_pos[:, :7] = torch.tensor(joints, device=self.device)[None, :] joint_vel = torch.zeros_like(joint_pos) joint_effort = torch.zeros_like(joint_pos) self.ctrl_target_joint_pos[env_ids, :] = joint_pos - self._robot.set_joint_position_target(self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) self._robot.reset() - self._robot.set_joint_effort_target(joint_effort, env_ids=env_ids) + self._robot.set_joint_effort_target_index(target=joint_effort, env_ids=env_ids) self.step_sim_no_action() @@ -749,7 +764,13 @@ def step_sim_no_action(self): def randomize_fixed_initial_state(self, env_ids): # (1.) Randomize fixed asset pose. - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] + fixed_state = torch.cat( + [ + self._fixed_asset.data.default_root_pose.torch, + self._fixed_asset.data.default_root_vel.torch, + ], + dim=-1, + )[env_ids].clone() # (1.a.) Position rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device) fixed_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1] @@ -766,14 +787,13 @@ def randomize_fixed_initial_state(self, env_ids): rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device) fixed_orn_euler = fixed_orn_init_yaw + fixed_orn_yaw_range * rand_sample fixed_orn_euler[:, 0:2] = 0.0 # Only change yaw. - fixed_orn_quat = torch_utils.quat_from_euler_xyz( - fixed_orn_euler[:, 0], fixed_orn_euler[:, 1], fixed_orn_euler[:, 2] - ) + fixed_orn_quat = quat_from_euler_xyz(fixed_orn_euler[:, 0], fixed_orn_euler[:, 1], fixed_orn_euler[:, 2]) fixed_state[:, 3:7] = fixed_orn_quat # (1.c.) Velocity fixed_state[:, 7:] = 0.0 # vel # (1.d.) Update values. - self._fixed_asset.write_root_state_to_sim(fixed_state, env_ids=env_ids) + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_state[:, 0:7], env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_state[:, 7:], env_ids=env_ids) self._fixed_asset.reset() # (1.e.) Noisy position observation. @@ -800,7 +820,10 @@ def randomize_held_initial_state(self, env_ids, pre_grasp): # Set plug pos to assembled state, but offset plug Z-coordinate by height of socket, # minus curriculum displacement - held_state = self._held_asset.data.default_root_state.clone() + held_state = torch.cat( + [self._held_asset.data.default_root_pose.torch, self._held_asset.data.default_root_vel.torch], + dim=-1, + ).clone() held_state[env_ids, 0:3] = self.fixed_pos[env_ids].clone() + self.scene.env_origins[env_ids] held_state[env_ids, 3:7] = self.fixed_quat[env_ids].clone() held_state[env_ids, 7:] = 0.0 @@ -810,13 +833,16 @@ def randomize_held_initial_state(self, env_ids, pre_grasp): plug_in_freespace_idx = torch.argwhere(self.curriculum_disp > self.disassembly_dists) held_state[plug_in_freespace_idx, :2] += self.held_pos_init_rand[plug_in_freespace_idx, :2] - self._held_asset.write_root_state_to_sim(held_state) + self._held_asset.write_root_pose_to_sim_index(root_pose=held_state[:, 0:7]) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_state[:, 7:]) self._held_asset.reset() self.step_sim_no_action() def randomize_initial_state(self, env_ids): """Randomize initial state and perform any episode-level randomization.""" + import carb + # Disable gravity. physics_sim_view = sim_utils.SimulationContext.instance().physics_sim_view physics_sim_view.set_gravity(carb.Float3(0.0, 0.0, 0.0)) @@ -829,8 +855,8 @@ def randomize_initial_state(self, env_ids): fixed_tip_pos_local[:, 2] += self.cfg_task.fixed_asset_cfg.height fixed_tip_pos_local[:, 2] += self.cfg_task.fixed_asset_cfg.base_height - _, fixed_tip_pos = torch_utils.tf_combine( - self.fixed_quat, self.fixed_pos, self.identity_quat, fixed_tip_pos_local + fixed_tip_pos, _ = combine_frame_transforms( + self.fixed_pos, self.fixed_quat, fixed_tip_pos_local, self.identity_quat ) self.fixed_pos_obs_frame[:] = fixed_tip_pos @@ -851,7 +877,7 @@ def randomize_initial_state(self, env_ids): self.step_sim_no_action() grasp_time = 0.0 - while grasp_time < 0.25: + while grasp_time < 1.0: self.ctrl_target_joint_pos[env_ids, 7:] = 0.0 # Close gripper. self.ctrl_target_gripper_dof_pos = 0.0 self.move_gripper_in_place(ctrl_target_gripper_dof_pos=0.0) @@ -874,4 +900,6 @@ def randomize_initial_state(self, env_ids): # Set initial gains for the episode. self._set_gains(self.default_gains) + import carb + physics_sim_view.set_gravity(carb.Float3(*self.cfg.sim.gravity)) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env_cfg.py index 5d4f66ec8969..7fae88707c5c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env_cfg.py @@ -3,12 +3,14 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass @@ -107,7 +109,7 @@ class AssemblyEnvCfg(DirectRLEnvCfg): device="cuda:0", dt=1 / 120, gravity=(0.0, 0.0, -9.81), - physx=PhysxCfg( + physics=PhysxCfg( solver_type=1, max_position_iteration_count=192, # Important to avoid interpenetration. max_velocity_iteration_count=1, @@ -163,7 +165,7 @@ class AssemblyEnvCfg(DirectRLEnvCfg): "panda_finger_joint2": 0.04, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), actuators={ "panda_arm1": ImplicitActuatorCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py index b651767f7f35..fcc965276c4e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_tasks_cfg.py @@ -232,7 +232,7 @@ class Insertion(AssemblyTask): init_state=ArticulationCfg.InitialStateCfg( # init_state=RigidObjectCfg.InitialStateCfg( pos=(0.6, 0.0, 0.05), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={}, ), @@ -262,7 +262,7 @@ class Insertion(AssemblyTask): # init_state=ArticulationCfg.InitialStateCfg( init_state=RigidObjectCfg.InitialStateCfg( pos=(0.0, 0.4, 0.1), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), # joint_pos={}, # joint_vel={} ), diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py index 4588d4e227fd..5aa584e9b65b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/automate_algo_utils.py @@ -4,8 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause import os -import re -import subprocess import sys import torch @@ -25,56 +23,9 @@ """ -def parse_cuda_version(version_string): - """ - Parse CUDA version string into comparable tuple of (major, minor, patch). - - Args: - version_string: Version string like "12.8.9" or "11.2" - - Returns: - Tuple of (major, minor, patch) as integers, where patch defaults to 0 iff - not present. - - Example: - "12.8.9" -> (12, 8, 9) - "11.2" -> (11, 2, 0) - """ - parts = version_string.split(".") - major = int(parts[0]) - minor = int(parts[1]) if len(parts) > 1 else 0 - patch = int(parts[2]) if len(parts) > 2 else 0 - return (major, minor, patch) - - -def get_cuda_version(): - try: - # Execute nvcc --version command - result = subprocess.run(["nvcc", "--version"], capture_output=True, text=True, check=True) - output = result.stdout - - # Use regex to find the CUDA version (e.g., V11.2.67) - match = re.search(r"V(\d+\.\d+(\.\d+)?)", output) - if match: - return parse_cuda_version(match.group(1)) - else: - print("CUDA version not found in output.") - return None - except FileNotFoundError: - print("nvcc command not found. Is CUDA installed and in your PATH?") - return None - except subprocess.CalledProcessError as e: - print(f"Error executing nvcc: {e.stderr}") - return None - except Exception as e: - print(f"An unexpected error occurred: {e}") - return None - - def get_gripper_open_width(obj_filepath): - retrieve_file_path(obj_filepath, download_dir="./") - obj_mesh = trimesh.load_mesh(os.path.basename(obj_filepath)) - # obj_mesh = trimesh.load_mesh(obj_filepath) + local_path = retrieve_file_path(obj_filepath, download_dir="./") + obj_mesh = trimesh.load_mesh(local_path) aabb = obj_mesh.bounds return min(0.04, (aabb[1][1] - aabb[0][1]) / 1.25) @@ -201,7 +152,7 @@ def check_plug_close_to_socket(keypoints_plug, keypoints_socket, dist_threshold, """Check if plug is close to socket.""" # Compute keypoint distance between plug and socket - keypoint_dist = torch.norm(keypoints_socket - keypoints_plug, p=2, dim=-1) + keypoint_dist = torch.linalg.norm(keypoints_socket - keypoints_plug, ord=2, dim=-1) # Check if keypoint distance is below threshold is_plug_close_to_socket = torch.where( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py index a4b454829eac..c79441f223ae 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py @@ -8,16 +8,22 @@ import numpy as np import torch - -import carb -import isaacsim.core.utils.torch as torch_utils +import warp as wp import isaaclab.sim as sim_utils from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import DirectRLEnv from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, retrieve_file_path -from isaaclab.utils.math import axis_angle_from_quat +from isaaclab.utils.math import ( + axis_angle_from_quat, + combine_frame_transforms, + euler_xyz_from_quat, + quat_conjugate, + quat_from_angle_axis, + quat_from_euler_xyz, + quat_mul, +) from . import automate_algo_utils as automate_algo from . import factory_control as fc @@ -50,11 +56,13 @@ def __init__(self, cfg: DisassemblyEnvCfg, render_mode: str | None = None, **kwa def _set_body_inertias(self): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = self._robot.root_physx_view.get_inertias() + inertias = wp.to_torch(self._robot.root_view.get_inertias()) offset = torch.zeros_like(inertias) offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - self._robot.root_physx_view.set_inertias(new_inertias, torch.arange(self.num_envs)) + self._robot.root_view.set_inertias( + wp.from_torch(new_inertias), wp.from_torch(torch.arange(self.num_envs, dtype=torch.int32)) + ) def _set_default_dynamics_parameters(self): """Set parameters defining dynamic interactions.""" @@ -76,16 +84,16 @@ def _set_default_dynamics_parameters(self): def _set_friction(self, asset, value): """Update material properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = wp.to_torch(asset.root_view.get_material_properties()) materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. - env_ids = torch.arange(self.scene.num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + env_ids = torch.arange(self.scene.num_envs, device="cpu", dtype=torch.int32) + asset.root_view.set_material_properties(wp.from_torch(materials), wp.from_torch(env_ids)) def _init_tensors(self): """Initialize tensors once.""" self.identity_quat = ( - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) # Control targets. @@ -120,7 +128,7 @@ def _init_tensors(self): .repeat(self.num_envs, 1) ) self.robot_to_gripper_quat = ( - torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) self.plug_grasp_pos_local = self.plug_grasps[: self.num_envs, :3] self.plug_grasp_quat_local = torch.roll(self.plug_grasps[: self.num_envs, 3:], -1, 1) @@ -150,13 +158,13 @@ def _init_tensors(self): def _load_assembly_info(self): """Load grasp pose and disassembly distance for plugs in each environment.""" - retrieve_file_path(self.cfg_task.plug_grasp_json, download_dir="./") - with open(os.path.basename(self.cfg_task.plug_grasp_json)) as f: + plug_grasp_path = retrieve_file_path(self.cfg_task.plug_grasp_json) + with open(plug_grasp_path) as f: plug_grasp_dict = json.load(f) plug_grasps = [plug_grasp_dict[f"asset_{self.cfg_task.assembly_id}"] for i in range(self.num_envs)] - retrieve_file_path(self.cfg_task.disassembly_dist_json, download_dir="./") - with open(os.path.basename(self.cfg_task.disassembly_dist_json)) as f: + disassembly_dist_path = retrieve_file_path(self.cfg_task.disassembly_dist_json) + with open(disassembly_dist_path) as f: disassembly_dist_dict = json.load(f) disassembly_dists = [disassembly_dist_dict[f"asset_{self.cfg_task.assembly_id}"] for i in range(self.num_envs)] @@ -169,7 +177,7 @@ def _setup_scene(self): # spawn a usd file of a table into the scene cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd") cfg.func( - "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.70711, 0.0, 0.0, 0.70711) + "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.0, 0.0, 0.70711, 0.70711) ) self._robot = Articulation(self.cfg.robot) @@ -194,39 +202,41 @@ def _setup_scene(self): def _compute_intermediate_values(self, dt): """Get values computed from raw tensors. This includes adding noise.""" # TODO: A lot of these can probably only be set once? - self.fixed_pos = self._fixed_asset.data.root_pos_w - self.scene.env_origins - self.fixed_quat = self._fixed_asset.data.root_quat_w + self.fixed_pos = self._fixed_asset.data.root_pos_w.torch - self.scene.env_origins + self.fixed_quat = self._fixed_asset.data.root_quat_w.torch - self.held_pos = self._held_asset.data.root_pos_w - self.scene.env_origins - self.held_quat = self._held_asset.data.root_quat_w + self.held_pos = self._held_asset.data.root_pos_w.torch - self.scene.env_origins + self.held_quat = self._held_asset.data.root_quat_w.torch - self.fingertip_midpoint_pos = self._robot.data.body_pos_w[:, self.fingertip_body_idx] - self.scene.env_origins - self.fingertip_midpoint_quat = self._robot.data.body_quat_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] + self.fingertip_midpoint_pos = ( + self._robot.data.body_pos_w.torch[:, self.fingertip_body_idx] - self.scene.env_origins + ) + self.fingertip_midpoint_quat = self._robot.data.body_quat_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w.torch[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = wp.to_torch(self._robot.root_view.get_jacobians()) self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] - self.joint_pos = self._robot.data.joint_pos.clone() - self.joint_vel = self._robot.data.joint_vel.clone() + self.arm_mass_matrix = wp.to_torch(self._robot.root_view.get_generalized_mass_matrices())[:, 0:7, 0:7] + self.joint_pos = self._robot.data.joint_pos.torch.clone() + self.joint_vel = self._robot.data.joint_vel.torch.clone() # Compute pose of gripper goal and top of socket in socket frame - self.gripper_goal_quat, self.gripper_goal_pos = torch_utils.tf_combine( - self.fixed_quat, + self.gripper_goal_pos, self.gripper_goal_quat = combine_frame_transforms( self.fixed_pos, - self.plug_grasp_quat_local, + self.fixed_quat, self.plug_grasp_pos_local, + self.plug_grasp_quat_local, ) - self.gripper_goal_quat, self.gripper_goal_pos = torch_utils.tf_combine( - self.gripper_goal_quat, + self.gripper_goal_pos, self.gripper_goal_quat = combine_frame_transforms( self.gripper_goal_pos, - self.robot_to_gripper_quat, + self.gripper_goal_quat, self.palm_to_finger_center, + self.robot_to_gripper_quat, ) # Finite-differencing results in more reliable velocity estimates. @@ -234,10 +244,8 @@ def _compute_intermediate_values(self, dt): self.prev_fingertip_pos = self.fingertip_midpoint_pos.clone() # Add state differences if velocity isn't being added. - rot_diff_quat = torch_utils.quat_mul( - self.fingertip_midpoint_quat, torch_utils.quat_conjugate(self.prev_fingertip_quat) - ) - rot_diff_quat *= torch.sign(rot_diff_quat[:, 0]).unsqueeze(-1) + rot_diff_quat = quat_mul(self.fingertip_midpoint_quat, quat_conjugate(self.prev_fingertip_quat)) + rot_diff_quat *= torch.sign(rot_diff_quat[:, 3]).unsqueeze(-1) # W component is at index 3 in XYZW format rot_diff_aa = axis_angle_from_quat(rot_diff_quat) self.ee_angvel_fd = rot_diff_aa / dt self.prev_fingertip_quat = self.fingertip_midpoint_quat.clone() @@ -247,11 +255,11 @@ def _compute_intermediate_values(self, dt): self.prev_joint_pos = self.joint_pos[:, 0:7].clone() # Keypoint tensors. - self.held_base_quat[:], self.held_base_pos[:] = torch_utils.tf_combine( - self.held_quat, self.held_pos, self.held_base_quat_local, self.held_base_pos_local + self.held_base_pos[:], self.held_base_quat[:] = combine_frame_transforms( + self.held_pos, self.held_quat, self.held_base_pos_local, self.held_base_quat_local ) - self.target_held_base_quat[:], self.target_held_base_pos[:] = torch_utils.tf_combine( - self.fixed_quat, self.fixed_pos, self.identity_quat, self.fixed_success_pos_local + self.target_held_base_pos[:], self.target_held_base_quat[:] = combine_frame_transforms( + self.fixed_pos, self.fixed_quat, self.fixed_success_pos_local, self.identity_quat ) self.last_update_timestamp = self._robot._data._sim_timestamp @@ -314,23 +322,23 @@ def move_gripper_in_place(self, ctrl_target_gripper_dof_pos): rot_actions = actions[:, 3:6] # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) - rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) + rot_actions_quat = quat_from_angle_axis(angle, axis) rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1.0e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) + self.ctrl_target_fingertip_midpoint_quat = quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(self.ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(euler_xyz_from_quat(self.ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 target_euler_xyz[:, 1] = 0.0 - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( + self.ctrl_target_fingertip_midpoint_quat = quat_from_euler_xyz( roll=target_euler_xyz[:, 0], pitch=target_euler_xyz[:, 1], yaw=target_euler_xyz[:, 2] ) @@ -340,7 +348,7 @@ def move_gripper_in_place(self, ctrl_target_gripper_dof_pos): def _apply_action(self): """Apply actions for policy as delta targets from current position.""" # Get current yaw for success checking. - _, _, curr_yaw = torch_utils.get_euler_xyz(self.fingertip_midpoint_quat) + _, _, curr_yaw = euler_xyz_from_quat(self.fingertip_midpoint_quat) self.curr_yaw = torch.where(curr_yaw > np.deg2rad(235), curr_yaw - 2 * np.pi, curr_yaw) # Note: We use finite-differenced velocities for control and observations. @@ -366,22 +374,22 @@ def _apply_action(self): self.ctrl_target_fingertip_midpoint_pos = self.fixed_pos_action_frame + pos_error_clipped # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) - rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) + rot_actions_quat = quat_from_angle_axis(angle, axis) rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) + self.ctrl_target_fingertip_midpoint_quat = quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(self.ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(euler_xyz_from_quat(self.ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 # Restrict actions to be upright. target_euler_xyz[:, 1] = 0.0 - self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( + self.ctrl_target_fingertip_midpoint_quat = quat_from_euler_xyz( roll=target_euler_xyz[:, 0], pitch=target_euler_xyz[:, 1], yaw=target_euler_xyz[:, 2] ) @@ -417,8 +425,8 @@ def generate_ctrl_signals(self): self.ctrl_target_joint_pos[:, 7:9] = self.ctrl_target_gripper_dof_pos self.joint_torque[:, 7:9] = 0.0 - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) - self._robot.set_joint_effort_target(self.joint_torque) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) + self._robot.set_joint_effort_target_index(target=self.joint_torque) def _get_dones(self): """Update intermediate values used for rewards and observations.""" @@ -440,9 +448,13 @@ def _get_dones(self): def _get_rewards(self): """Update rewards and compute success statistics.""" - # Get successful and failed envs at current timestep - rew_buf = self._update_rew_buf() + + if torch.any(self.reset_buf): + self.extras.setdefault("log", {})["Metrics/success_rate"] = ( + torch.count_nonzero(self.ep_succeeded) / self.num_envs + ).item() + return rew_buf def _update_rew_buf(self): @@ -471,35 +483,44 @@ def _reset_idx(self, env_ids): def _set_assets_to_default_pose(self, env_ids): """Move assets to default pose before randomization.""" - held_state = self._held_asset.data.default_root_state.clone()[env_ids] + held_state = torch.cat( + [self._held_asset.data.default_root_pose.torch, self._held_asset.data.default_root_vel.torch], + dim=-1, + )[env_ids].clone() held_state[:, 0:3] += self.scene.env_origins[env_ids] held_state[:, 7:] = 0.0 - self._held_asset.write_root_pose_to_sim(held_state[:, 0:7], env_ids=env_ids) - self._held_asset.write_root_velocity_to_sim(held_state[:, 7:], env_ids=env_ids) + self._held_asset.write_root_pose_to_sim_index(root_pose=held_state[:, 0:7], env_ids=env_ids) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_state[:, 7:], env_ids=env_ids) self._held_asset.reset() - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] + fixed_state = torch.cat( + [ + self._fixed_asset.data.default_root_pose.torch, + self._fixed_asset.data.default_root_vel.torch, + ], + dim=-1, + )[env_ids].clone() fixed_state[:, 0:3] += self.scene.env_origins[env_ids] fixed_state[:, 7:] = 0.0 - self._fixed_asset.write_root_pose_to_sim(fixed_state[:, 0:7], env_ids=env_ids) - self._fixed_asset.write_root_velocity_to_sim(fixed_state[:, 7:], env_ids=env_ids) + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_state[:, 0:7], env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_state[:, 7:], env_ids=env_ids) self._fixed_asset.reset() def _move_gripper_to_grasp_pose(self, env_ids): """Define grasp pose for plug and move gripper to pose.""" - gripper_goal_quat, gripper_goal_pos = torch_utils.tf_combine( - self.held_quat, + gripper_goal_pos, gripper_goal_quat = combine_frame_transforms( self.held_pos, - self.plug_grasp_quat_local, + self.held_quat, self.plug_grasp_pos_local, + self.plug_grasp_quat_local, ) - gripper_goal_quat, gripper_goal_pos = torch_utils.tf_combine( - gripper_goal_quat, + gripper_goal_pos, gripper_goal_quat = combine_frame_transforms( gripper_goal_pos, - self.robot_to_gripper_quat, + gripper_goal_quat, self.palm_to_finger_center, + self.robot_to_gripper_quat, ) # Set target_pos @@ -539,9 +560,10 @@ def set_pos_inverse_kinematics(self, env_ids): self.ctrl_target_joint_pos[env_ids, 0:7] = self.joint_pos[env_ids, 0:7] # Update dof state. - self._robot.write_joint_state_to_sim(self.joint_pos, self.joint_vel) + self._robot.write_joint_position_to_sim_index(position=self.joint_pos) + self._robot.write_joint_velocity_to_sim_index(velocity=self.joint_vel) self._robot.reset() - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) # Simulate and update tensors. self.step_sim_no_action() @@ -568,7 +590,7 @@ def _move_gripper_to_eef_pose(self, env_ids, goal_pos, goal_quat, sim_steps, if_ self.actions *= 0.0 self.actions[env_ids, :6] = delta_hand_pose - is_rendering = self.sim.has_gui() or self.sim.has_rtx_sensors() + is_rendering = self.sim.is_rendering # perform physics stepping for _ in range(self.cfg.decimation): self._sim_step_counter += 1 @@ -594,16 +616,17 @@ def _set_franka_to_default_pose(self, joints, env_ids): # gripper_width = self.cfg_task.held_asset_cfg.diameter / 2 * 1.25 # gripper_width = self.cfg_task.hand_width_max / 3.0 gripper_width = self.gripper_open_width - joint_pos = self._robot.data.default_joint_pos[env_ids] + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] joint_pos[:, 7:] = gripper_width # MIMIC joint_pos[:, :7] = torch.tensor(joints, device=self.device)[None, :] joint_vel = torch.zeros_like(joint_pos) joint_effort = torch.zeros_like(joint_pos) self.ctrl_target_joint_pos[env_ids, :] = joint_pos - self._robot.set_joint_position_target(self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) self._robot.reset() - self._robot.set_joint_effort_target(joint_effort, env_ids=env_ids) + self._robot.set_joint_effort_target_index(target=joint_effort, env_ids=env_ids) self.step_sim_no_action() @@ -616,7 +639,13 @@ def step_sim_no_action(self): def randomize_fixed_initial_state(self, env_ids): # (1.) Randomize fixed asset pose. - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] + fixed_state = torch.cat( + [ + self._fixed_asset.data.default_root_pose.torch, + self._fixed_asset.data.default_root_vel.torch, + ], + dim=-1, + )[env_ids].clone() # (1.a.) Position rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device) fixed_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1] @@ -633,14 +662,13 @@ def randomize_fixed_initial_state(self, env_ids): rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device) fixed_orn_euler = fixed_orn_init_yaw + fixed_orn_yaw_range * rand_sample fixed_orn_euler[:, 0:2] = 0.0 # Only change yaw. - fixed_orn_quat = torch_utils.quat_from_euler_xyz( - fixed_orn_euler[:, 0], fixed_orn_euler[:, 1], fixed_orn_euler[:, 2] - ) + fixed_orn_quat = quat_from_euler_xyz(fixed_orn_euler[:, 0], fixed_orn_euler[:, 1], fixed_orn_euler[:, 2]) fixed_state[:, 3:7] = fixed_orn_quat # (1.c.) Velocity fixed_state[:, 7:] = 0.0 # vel # (1.d.) Update values. - self._fixed_asset.write_root_state_to_sim(fixed_state, env_ids=env_ids) + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_state[:, 0:7], env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_state[:, 7:], env_ids=env_ids) self._fixed_asset.reset() # (1.e.) Noisy position observation. @@ -653,12 +681,16 @@ def randomize_fixed_initial_state(self, env_ids): def randomize_held_initial_state(self, env_ids, pre_grasp): # Set plug pos to assembled state - held_state = self._held_asset.data.default_root_state.clone() + held_state = torch.cat( + [self._held_asset.data.default_root_pose.torch, self._held_asset.data.default_root_vel.torch], + dim=-1, + ).clone() held_state[env_ids, 0:3] = self.fixed_pos[env_ids].clone() + self.scene.env_origins[env_ids] held_state[env_ids, 3:7] = self.fixed_quat[env_ids].clone() held_state[env_ids, 7:] = 0.0 - self._held_asset.write_root_state_to_sim(held_state) + self._held_asset.write_root_pose_to_sim_index(root_pose=held_state[:, 0:7]) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_state[:, 7:]) self._held_asset.reset() self.step_sim_no_action() @@ -684,6 +716,8 @@ def close_gripper(self, env_ids): def randomize_initial_state(self, env_ids): """Randomize initial state and perform any episode-level randomization.""" + import carb + # Disable gravity. physics_sim_view = sim_utils.SimulationContext.instance().physics_sim_view physics_sim_view.set_gravity(carb.Float3(0.0, 0.0, 0.0)) @@ -696,8 +730,8 @@ def randomize_initial_state(self, env_ids): fixed_tip_pos_local[:, 2] += self.cfg_task.fixed_asset_cfg.height fixed_tip_pos_local[:, 2] += self.cfg_task.fixed_asset_cfg.base_height - _, fixed_tip_pos = torch_utils.tf_combine( - self.fixed_quat, self.fixed_pos, self.identity_quat, fixed_tip_pos_local + fixed_tip_pos, _ = combine_frame_transforms( + self.fixed_pos, self.fixed_quat, fixed_tip_pos_local, self.identity_quat ) self.fixed_pos_obs_frame[:] = fixed_tip_pos @@ -725,6 +759,8 @@ def randomize_initial_state(self, env_ids): # Set initial gains for the episode. self._set_gains(self.default_gains) + import carb + physics_sim_view.set_gravity(carb.Float3(*self.cfg.sim.gravity)) def _disassemble_plug_from_socket(self): @@ -782,7 +818,7 @@ def _randomize_gripper_pose(self, sim_steps, env_ids): torch.tensor(self.cfg_task.gripper_rand_rot_noise, device=self.device) ) ctrl_target_fingertip_centered_euler += fingertip_centered_rot_noise - ctrl_tgt_quat = torch_utils.quat_from_euler_xyz( + ctrl_tgt_quat = quat_from_euler_xyz( ctrl_target_fingertip_centered_euler[:, 0], ctrl_target_fingertip_centered_euler[:, 1], ctrl_target_fingertip_centered_euler[:, 2], diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env_cfg.py index 9d17f3597587..a2b7fbe49133 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env_cfg.py @@ -3,12 +3,15 @@ # # SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass @@ -107,7 +110,7 @@ class DisassemblyEnvCfg(DirectRLEnvCfg): device="cuda:0", dt=1 / 120, gravity=(0.0, 0.0, -9.81), - physx=PhysxCfg( + physics=PhysxCfg( solver_type=1, max_position_iteration_count=192, # Important to avoid interpenetration. max_velocity_iteration_count=1, @@ -162,7 +165,7 @@ class DisassemblyEnvCfg(DirectRLEnvCfg): "panda_finger_joint2": 0.04, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), actuators={ "panda_arm1": ImplicitActuatorCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py index d21bd0166e5c..1c3037873031 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_tasks_cfg.py @@ -182,7 +182,7 @@ class Extraction(DisassemblyTask): init_state=ArticulationCfg.InitialStateCfg( # init_state=RigidObjectCfg.InitialStateCfg( pos=(0.6, 0.0, 0.05), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={}, ), @@ -212,7 +212,7 @@ class Extraction(DisassemblyTask): # init_state=ArticulationCfg.InitialStateCfg( init_state=RigidObjectCfg.InitialStateCfg( pos=(0.0, 0.4, 0.1), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), # joint_pos={}, # joint_vel={} ), diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/factory_control.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/factory_control.py index 0e51b6e41f6c..2d7d98e0fb0e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/factory_control.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/factory_control.py @@ -12,9 +12,7 @@ import torch -import isaacsim.core.utils.torch as torch_utils - -from isaaclab.utils.math import axis_angle_from_quat +from isaaclab.utils.math import axis_angle_from_quat, quat_conjugate, quat_mul def compute_dof_torque( @@ -116,13 +114,13 @@ def get_pose_error( quat_dot.expand(-1, 4) >= 0, ctrl_target_fingertip_midpoint_quat, -ctrl_target_fingertip_midpoint_quat ) - fingertip_midpoint_quat_norm = torch_utils.quat_mul( - fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat) - )[:, 0] # scalar component - fingertip_midpoint_quat_inv = torch_utils.quat_conjugate( - fingertip_midpoint_quat - ) / fingertip_midpoint_quat_norm.unsqueeze(-1) - quat_error = torch_utils.quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv) + fingertip_midpoint_quat_norm = quat_mul(fingertip_midpoint_quat, quat_conjugate(fingertip_midpoint_quat))[ + :, 3 + ] # W component is at index 3 in XYZW format + fingertip_midpoint_quat_inv = quat_conjugate(fingertip_midpoint_quat) / fingertip_midpoint_quat_norm.unsqueeze( + -1 + ) + quat_error = quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv) # Convert to axis-angle error axis_angle_error = axis_angle_from_quat(quat_error) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/industreal_algo_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/industreal_algo_utils.py index c324eb46f46f..43604bef0926 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/industreal_algo_utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/industreal_algo_utils.py @@ -44,7 +44,6 @@ # Force garbage collection for large arrays import gc -import os import numpy as np @@ -65,12 +64,10 @@ def load_asset_mesh_in_warp(held_asset_obj, fixed_asset_obj, num_samples, device): """Create mesh objects in Warp for all environments.""" - retrieve_file_path(held_asset_obj, download_dir="./") - plug_trimesh = load(os.path.basename(held_asset_obj)) - # plug_trimesh = load(held_asset_obj) - retrieve_file_path(fixed_asset_obj, download_dir="./") - socket_trimesh = load(os.path.basename(fixed_asset_obj)) - # socket_trimesh = load(fixed_asset_obj) + held_asset_local = retrieve_file_path(held_asset_obj, download_dir="./") + plug_trimesh = load(held_asset_local) + fixed_asset_local = retrieve_file_path(fixed_asset_obj, download_dir="./") + socket_trimesh = load(fixed_asset_local) plug_wp_mesh = wp.Mesh( points=wp.array(plug_trimesh.vertices, dtype=wp.vec3, device=device), @@ -234,7 +231,7 @@ def check_plug_close_to_socket(keypoints_plug, keypoints_socket, dist_threshold, """Check if plug is close to socket.""" # Compute keypoint distance between plug and socket - keypoint_dist = torch.norm(keypoints_socket - keypoints_plug, p=2, dim=-1) + keypoint_dist = torch.linalg.norm(keypoints_socket - keypoints_plug, ord=2, dim=-1) # Check if keypoint distance is below threshold is_plug_close_to_socket = torch.where( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_disassembly_w_id.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_disassembly_w_id.py index 962162174503..9c7c6b2e712e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_disassembly_w_id.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_disassembly_w_id.py @@ -51,7 +51,6 @@ def main(): parser.add_argument("--assembly_id", type=str, default="00731", help="New assembly ID to set.") parser.add_argument("--num_envs", type=int, default=128, help="Number of parallel environment.") parser.add_argument("--seed", type=int, default=-1, help="Random seed.") - parser.add_argument("--headless", action="store_true", help="Run in headless mode.") args = parser.parse_args() os.makedirs(args.disassembly_dir, exist_ok=True) @@ -77,9 +76,6 @@ def main(): ] ) - if args.headless: - command.append("--headless") - # Run the command subprocess.run(command, check=True) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py index abfdf1fe7313..0411f8008f68 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/run_w_id.py @@ -54,7 +54,6 @@ def main(): parser.add_argument("--seed", type=int, default=-1, help="Random seed.") parser.add_argument("--train", action="store_true", help="Run training mode.") parser.add_argument("--log_eval", action="store_true", help="Log evaluation results.") - parser.add_argument("--headless", action="store_true", help="Run in headless mode.") parser.add_argument("--max_iterations", type=int, default=1500, help="Number of iteration for policy learning.") args = parser.parse_args() @@ -91,9 +90,6 @@ def main(): if args.checkpoint: command.append(f"--checkpoint={args.checkpoint}") - if args.headless: - command.append("--headless") - # Run the command subprocess.run(command, env=env, check=True) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py index a979ec449381..9c0f7a3d4b03 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/soft_dtw_cuda.py @@ -28,10 +28,11 @@ import math +import numba.cuda as cuda import numpy as np import torch import torch.cuda -from numba import cuda, jit, prange +from numba import jit, prange from torch.autograd import Function diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/__init__.py index b4913fb2c3a9..cfdb8af38f4b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/__init__.py @@ -20,7 +20,7 @@ entry_point=f"{__name__}.cart_double_pendulum_env:CartDoublePendulumEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.cart_double_pendulum_env:CartDoublePendulumEnvCfg", + "env_cfg_entry_point": f"{__name__}.cart_double_pendulum_env_cfg:CartDoublePendulumEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", "skrl_ippo_cfg_entry_point": f"{agents.__name__}:skrl_ippo_cfg.yaml", diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml index f9298c9252ac..606d5634e92e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_ippo_cfg.yaml @@ -54,7 +54,9 @@ agent: learning_rate_scheduler: KLAdaptiveLR learning_rate_scheduler_kwargs: kl_threshold: 0.008 - state_preprocessor: RunningStandardScaler + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null + state_preprocessor: null state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml index 8f192cf2988d..aef5480970a9 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/agents/skrl_mappo_cfg.yaml @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: OBSERVATIONS + input: STATES layers: [32, 32] activations: elu output: ONE @@ -54,10 +54,10 @@ agent: learning_rate_scheduler: KLAdaptiveLR learning_rate_scheduler_kwargs: kl_threshold: 0.008 + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null state_preprocessor: RunningStandardScaler state_preprocessor_kwargs: null - shared_state_preprocessor: RunningStandardScaler - shared_state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null random_timesteps: 0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env.py index e0464a7201c8..f146ef56330f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env.py @@ -74,8 +74,8 @@ def __init__(self, cfg: CartDoublePendulumEnvCfg, render_mode: str | None = None self._pole_dof_idx, _ = self.robot.find_joints(self.cfg.pole_dof_name) self._pendulum_dof_idx, _ = self.robot.find_joints(self.cfg.pendulum_dof_name) - self.joint_pos = self.robot.data.joint_pos - self.joint_vel = self.robot.data.joint_vel + self.joint_pos = self.robot.data.joint_pos.torch + self.joint_vel = self.robot.data.joint_vel.torch def _setup_scene(self): self.robot = Articulation(self.cfg.robot_cfg) @@ -96,11 +96,11 @@ def _pre_physics_step(self, actions: dict[str, torch.Tensor]) -> None: self.actions = actions def _apply_action(self) -> None: - self.robot.set_joint_effort_target( - self.actions["cart"] * self.cfg.cart_action_scale, joint_ids=self._cart_dof_idx + self.robot.set_joint_effort_target_index( + target=self.actions["cart"] * self.cfg.cart_action_scale, joint_ids=self._cart_dof_idx ) - self.robot.set_joint_effort_target( - self.actions["pendulum"] * self.cfg.pendulum_action_scale, joint_ids=self._pendulum_dof_idx + self.robot.set_joint_effort_target_index( + target=self.actions["pendulum"] * self.cfg.pendulum_action_scale, joint_ids=self._pendulum_dof_idx ) def _get_observations(self) -> dict[str, torch.Tensor]: @@ -148,8 +148,8 @@ def _get_rewards(self) -> dict[str, torch.Tensor]: return total_reward def _get_dones(self) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: - self.joint_pos = self.robot.data.joint_pos - self.joint_vel = self.robot.data.joint_vel + self.joint_pos = self.robot.data.joint_pos.torch + self.joint_vel = self.robot.data.joint_vel.torch time_out = self.episode_length_buf >= self.max_episode_length - 1 out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1) @@ -162,9 +162,16 @@ def _get_dones(self) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: def _reset_idx(self, env_ids: Sequence[int] | None): if env_ids is None: env_ids = self.robot._ALL_INDICES + # Episode success = timed out without leaving the cart/pole bounds. All agents share + # the same termination signal in this task, so reading any agent's flag is fine. + # DirectMARLEnv only sets these dicts inside step(), so skip on the initial reset. + if hasattr(self, "time_out_dict"): + any_agent = next(iter(self.cfg.possible_agents)) + survived = self.time_out_dict[any_agent][env_ids] & ~self.terminated_dict[any_agent][env_ids] + self.extras.setdefault("log", {})["Metrics/success_rate"] = survived.float().mean().item() super()._reset_idx(env_ids) - joint_pos = self.robot.data.default_joint_pos[env_ids] + joint_pos = self.robot.data.default_joint_pos.torch[env_ids] joint_pos[:, self._pole_dof_idx] += sample_uniform( self.cfg.initial_pole_angle_range[0] * math.pi, self.cfg.initial_pole_angle_range[1] * math.pi, @@ -177,17 +184,19 @@ def _reset_idx(self, env_ids: Sequence[int] | None): joint_pos[:, self._pendulum_dof_idx].shape, joint_pos.device, ) - joint_vel = self.robot.data.default_joint_vel[env_ids] + joint_vel = self.robot.data.default_joint_vel.torch[env_ids] - default_root_state = self.robot.data.default_root_state[env_ids] - default_root_state[:, :3] += self.scene.env_origins[env_ids] + default_root_pose = self.robot.data.default_root_pose.torch[env_ids] + default_root_vel = self.robot.data.default_root_vel.torch[env_ids] + default_root_pose[:, :3] += self.scene.env_origins[env_ids] self.joint_pos[env_ids] = joint_pos self.joint_vel[env_ids] = joint_vel - self.robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self.robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self.robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + self.robot.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self.robot.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self.robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) @torch.jit.script diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env_cfg.py new file mode 100644 index 000000000000..048750158cdc --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cart_double_pendulum/cart_double_pendulum_env_cfg.py @@ -0,0 +1,56 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectMARLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from isaaclab_assets.robots.cart_double_pendulum import CART_DOUBLE_PENDULUM_CFG + + +@configclass +class CartDoublePendulumEnvCfg(DirectMARLEnvCfg): + # env + decimation = 2 + episode_length_s = 5.0 + possible_agents = ["cart", "pendulum"] + action_spaces = {"cart": 1, "pendulum": 1} + observation_spaces = {"cart": 4, "pendulum": 3} + state_space = -1 + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) + + # robot + robot_cfg: ArticulationCfg = CART_DOUBLE_PENDULUM_CFG.replace(prim_path="/World/envs/env_.*/Robot") + cart_dof_name = "slider_to_cart" + pole_dof_name = "cart_to_pole" + pendulum_dof_name = "pole_to_pendulum" + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True) + + # reset + max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] + initial_pole_angle_range = [-0.25, 0.25] # the range in which the pole angle is sampled from on reset [rad] + initial_pendulum_angle_range = [-0.25, 0.25] # the range in which the pendulum angle is sampled from on reset [rad] + + # action scales + cart_action_scale = 100.0 # [N] + pendulum_action_scale = 50.0 # [Nm] + + # reward scales + rew_scale_alive = 1.0 + rew_scale_terminated = -2.0 + rew_scale_cart_pos = 0 + rew_scale_cart_vel = -0.01 + rew_scale_pole_pos = -1.0 + rew_scale_pole_vel = -0.01 + rew_scale_pendulum_pos = -1.0 + rew_scale_pendulum_vel = -0.01 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/__init__.py index f72ee0a6f8dc..599cfa9d51a2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/__init__.py @@ -20,7 +20,7 @@ entry_point=f"{__name__}.cartpole_env:CartpoleEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.cartpole_env:CartpoleEnvCfg", + "env_cfg_entry_point": f"{__name__}.cartpole_env_cfg:CartpoleEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:CartpolePPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", @@ -33,7 +33,51 @@ entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.cartpole_camera_env:CartpoleRGBCameraEnvCfg", + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleRGBCameraEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Cartpole-Albedo-Camera-Direct-v0", + entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleAlbedoCameraEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Cartpole-SimpleShading-Constant-Camera-Direct-v0", + entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleSimpleShadingConstantCameraEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Cartpole-SimpleShading-Diffuse-Camera-Direct-v0", + entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleSimpleShadingDiffuseCameraEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Cartpole-SimpleShading-Full-Camera-Direct-v0", + entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleSimpleShadingFullCameraEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", }, @@ -44,7 +88,18 @@ entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.cartpole_camera_env:CartpoleDepthCameraEnvCfg", + "env_cfg_entry_point": f"{__name__}.cartpole_camera_env_cfg:CartpoleDepthCameraEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Cartpole-Camera-Presets-Direct-v0", + entry_point=f"{__name__}.cartpole_camera_env:CartpoleCameraEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_camera_presets_env_cfg:CartpoleCameraPresetsEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_camera_ppo_cfg.yaml", }, diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env.py index 9606008ccf19..807c0cc09d91 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env.py @@ -7,94 +7,46 @@ import math from collections.abc import Sequence +from typing import TYPE_CHECKING import torch import isaaclab.sim as sim_utils -from isaaclab.assets import Articulation, ArticulationCfg -from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg, ViewerCfg -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sensors import TiledCamera, TiledCameraCfg, save_images_to_file -from isaaclab.sim import SimulationCfg -from isaaclab.utils import configclass +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnv +from isaaclab.sensors import Camera, save_images_to_file from isaaclab.utils.math import sample_uniform -from isaaclab_assets.robots.cartpole import CARTPOLE_CFG - - -@configclass -class CartpoleRGBCameraEnvCfg(DirectRLEnvCfg): - # env - decimation = 2 - episode_length_s = 5.0 - action_scale = 100.0 # [N] - - # simulation - sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) - - # robot - robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") - cart_dof_name = "slider_to_cart" - pole_dof_name = "cart_to_pole" - - # camera - tiled_camera: TiledCameraCfg = TiledCameraCfg( - prim_path="/World/envs/env_.*/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), - data_types=["rgb"], - spawn=sim_utils.PinholeCameraCfg( - focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) - ), - width=100, - height=100, - ) - write_image_to_file = False - - # spaces - action_space = 1 - state_space = 0 - observation_space = [tiled_camera.height, tiled_camera.width, 3] - - # change viewer settings - viewer = ViewerCfg(eye=(20.0, 20.0, 20.0)) - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=512, env_spacing=20.0, replicate_physics=True) - - # reset - max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] - initial_pole_angle_range = [-0.125, 0.125] # the range in which the pole angle is sampled from on reset [rad] - - # reward scales - rew_scale_alive = 1.0 - rew_scale_terminated = -2.0 - rew_scale_pole_pos = -1.0 - rew_scale_cart_vel = -0.01 - rew_scale_pole_vel = -0.005 - - -@configclass -class CartpoleDepthCameraEnvCfg(CartpoleRGBCameraEnvCfg): - # camera - tiled_camera: TiledCameraCfg = TiledCameraCfg( - prim_path="/World/envs/env_.*/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), - data_types=["depth"], - spawn=sim_utils.PinholeCameraCfg( - focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) - ), - width=100, - height=100, +if TYPE_CHECKING: + from .cartpole_camera_env_cfg import ( + CartpoleAlbedoCameraEnvCfg, + CartpoleDepthCameraEnvCfg, + CartpoleRGBCameraEnvCfg, + CartpoleSimpleShadingConstantCameraEnvCfg, + CartpoleSimpleShadingDiffuseCameraEnvCfg, + CartpoleSimpleShadingFullCameraEnvCfg, ) + from .cartpole_camera_env_preset_cfg import CartpoleCameraEnvCfg - # spaces - observation_space = [tiled_camera.height, tiled_camera.width, 1] +SIMPLE_SHADING_TYPES = { + "simple_shading_constant_diffuse", + "simple_shading_diffuse_mdl", + "simple_shading_full_mdl", +} class CartpoleCameraEnv(DirectRLEnv): """Cartpole Camera Environment.""" - cfg: CartpoleRGBCameraEnvCfg | CartpoleDepthCameraEnvCfg + cfg: ( + CartpoleRGBCameraEnvCfg + | CartpoleDepthCameraEnvCfg + | CartpoleAlbedoCameraEnvCfg + | CartpoleSimpleShadingConstantCameraEnvCfg + | CartpoleSimpleShadingDiffuseCameraEnvCfg + | CartpoleSimpleShadingFullCameraEnvCfg + | CartpoleCameraEnvCfg + ) def __init__( self, cfg: CartpoleRGBCameraEnvCfg | CartpoleDepthCameraEnvCfg, render_mode: str | None = None, **kwargs @@ -105,8 +57,8 @@ def __init__( self._pole_dof_idx, _ = self._cartpole.find_joints(self.cfg.pole_dof_name) self.action_scale = self.cfg.action_scale - self.joint_pos = self._cartpole.data.joint_pos - self.joint_vel = self._cartpole.data.joint_vel + self.joint_pos = self._cartpole.data.joint_pos.torch + self.joint_vel = self._cartpole.data.joint_vel.torch if len(self.cfg.tiled_camera.data_types) != 1: raise ValueError( @@ -121,7 +73,7 @@ def close(self): def _setup_scene(self): """Setup the scene with the cartpole and camera.""" self._cartpole = Articulation(self.cfg.robot_cfg) - self._tiled_camera = TiledCamera(self.cfg.tiled_camera) + self._tiled_camera = Camera(self.cfg.tiled_camera) # clone and replicate self.scene.clone_environments(copy_from_source=False) @@ -140,22 +92,35 @@ def _pre_physics_step(self, actions: torch.Tensor) -> None: self.actions = self.action_scale * actions.clone() def _apply_action(self) -> None: - self._cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx) + self._cartpole.set_joint_effort_target_index(target=self.actions, joint_ids=self._cart_dof_idx) def _get_observations(self) -> dict: - data_type = "rgb" if "rgb" in self.cfg.tiled_camera.data_types else "depth" + data_type = self.cfg.tiled_camera.data_types[0] if "rgb" in self.cfg.tiled_camera.data_types: camera_data = self._tiled_camera.data.output[data_type] / 255.0 # normalize the camera data for better training results mean_tensor = torch.mean(camera_data, dim=(1, 2), keepdim=True) camera_data -= mean_tensor + elif "albedo" in self.cfg.tiled_camera.data_types: + camera_data = self._tiled_camera.data.output[data_type][..., :3] / 255.0 + # normalize the camera data for better training results + mean_tensor = torch.mean(camera_data, dim=(1, 2), keepdim=True) + camera_data -= mean_tensor + elif data_type in SIMPLE_SHADING_TYPES: + camera_data = self._tiled_camera.data.output[data_type] / 255.0 + # normalize the camera data for better training results + mean_tensor = torch.mean(camera_data, dim=(1, 2), keepdim=True) + camera_data -= mean_tensor elif "depth" in self.cfg.tiled_camera.data_types: camera_data = self._tiled_camera.data.output[data_type] camera_data[camera_data == float("inf")] = 0 + elif "semantic_segmentation" in self.cfg.tiled_camera.data_types: + camera_data = self._tiled_camera.data.output[data_type] + observations = {"policy": camera_data.clone()} if self.cfg.write_image_to_file: - save_images_to_file(observations["policy"], f"cartpole_{data_type}.png") + save_images_to_file(self._tiled_camera.data.output[data_type] / 255.0, f"cartpole_{data_type}.png") return observations @@ -175,8 +140,8 @@ def _get_rewards(self) -> torch.Tensor: return total_reward def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: - self.joint_pos = self._cartpole.data.joint_pos - self.joint_vel = self._cartpole.data.joint_vel + self.joint_pos = self._cartpole.data.joint_pos.torch + self.joint_vel = self._cartpole.data.joint_vel.torch time_out = self.episode_length_buf >= self.max_episode_length - 1 out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1) @@ -186,26 +151,33 @@ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: def _reset_idx(self, env_ids: Sequence[int] | None): if env_ids is None: env_ids = self._cartpole._ALL_INDICES + + # Log survival success rate before resetting. + survived = self.reset_time_outs[env_ids].float() + self.extras.setdefault("log", {})["Metrics/success_rate"] = survived.mean().item() + super()._reset_idx(env_ids) - joint_pos = self._cartpole.data.default_joint_pos[env_ids] + joint_pos = self._cartpole.data.default_joint_pos.torch[env_ids] joint_pos[:, self._pole_dof_idx] += sample_uniform( self.cfg.initial_pole_angle_range[0] * math.pi, self.cfg.initial_pole_angle_range[1] * math.pi, joint_pos[:, self._pole_dof_idx].shape, joint_pos.device, ) - joint_vel = self._cartpole.data.default_joint_vel[env_ids] + joint_vel = self._cartpole.data.default_joint_vel.torch[env_ids] - default_root_state = self._cartpole.data.default_root_state[env_ids] - default_root_state[:, :3] += self.scene.env_origins[env_ids] + default_root_pose = self._cartpole.data.default_root_pose.torch[env_ids] + default_root_pose[:, :3] += self.scene.env_origins[env_ids] + default_root_vel = self._cartpole.data.default_root_vel.torch[env_ids] self.joint_pos[env_ids] = joint_pos self.joint_vel[env_ids] = joint_vel - self._cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self._cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self._cartpole.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + self._cartpole.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self._cartpole.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self._cartpole.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._cartpole.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) @torch.jit.script diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env_cfg.py new file mode 100644 index 000000000000..9539b99bfb89 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_env_cfg.py @@ -0,0 +1,160 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg, ViewerCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import CameraCfg +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils.presets import MultiBackendRendererCfg + +from isaaclab_assets.robots.cartpole import CARTPOLE_CFG + + +@configclass +class CartpoleRGBCameraEnvCfg(DirectRLEnvCfg): + # env + decimation = 2 + episode_length_s = 5.0 + action_scale = 100.0 # [N] + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) + + # robot + robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") + cart_dof_name = "slider_to_cart" + pole_dof_name = "cart_to_pole" + + # camera: default=RTX, newton_renderer=Warp. Override: env.tiled_camera.renderer_cfg=newton_renderer + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + renderer_cfg=MultiBackendRendererCfg(), + ) + write_image_to_file = False + + # spaces + action_space = 1 + state_space = 0 + observation_space = [tiled_camera.height, tiled_camera.width, 3] + + # change viewer settings + viewer = ViewerCfg(eye=(20.0, 20.0, 20.0)) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=512, env_spacing=20.0, replicate_physics=True) + + # reset + max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] + initial_pole_angle_range = [-0.125, 0.125] # the range in which the pole angle is sampled from on reset [rad] + + # reward scales + rew_scale_alive = 1.0 + rew_scale_terminated = -2.0 + rew_scale_pole_pos = -1.0 + rew_scale_cart_vel = -0.01 + rew_scale_pole_vel = -0.005 + + +@configclass +class CartpoleDepthCameraEnvCfg(CartpoleRGBCameraEnvCfg): + # camera + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), + data_types=["depth"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + ) + + # spaces + observation_space = [tiled_camera.height, tiled_camera.width, 1] + + +@configclass +class CartpoleAlbedoCameraEnvCfg(CartpoleRGBCameraEnvCfg): + # camera + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + data_types=["albedo"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + ) + + # spaces + observation_space = [tiled_camera.height, tiled_camera.width, 3] + + +@configclass +class CartpoleSimpleShadingConstantCameraEnvCfg(CartpoleRGBCameraEnvCfg): + # camera + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + data_types=["simple_shading_constant_diffuse"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + ) + + # spaces + observation_space = [tiled_camera.height, tiled_camera.width, 3] + + +@configclass +class CartpoleSimpleShadingDiffuseCameraEnvCfg(CartpoleRGBCameraEnvCfg): + # camera + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + data_types=["simple_shading_diffuse_mdl"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + ) + + # spaces + observation_space = [tiled_camera.height, tiled_camera.width, 3] + + +@configclass +class CartpoleSimpleShadingFullCameraEnvCfg(CartpoleRGBCameraEnvCfg): + # camera + tiled_camera: CameraCfg = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + data_types=["simple_shading_full_mdl"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ), + width=100, + height=100, + ) + + # spaces + observation_space = [tiled_camera.height, tiled_camera.width, 3] diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_presets_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_presets_env_cfg.py new file mode 100644 index 000000000000..4c27674ff7c6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_camera_presets_env_cfg.py @@ -0,0 +1,107 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg, ViewerCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import CameraCfg +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg +from isaaclab_tasks.utils.presets import MultiBackendRendererCfg + +from isaaclab_assets.robots.cartpole import CARTPOLE_CFG + + +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg() + physx = PhysxCfg() + newton_mjwarp = NewtonCfg() + + +@configclass +class MultiDataTypeCartpoleTiledCameraCfg(PresetCfg): + @configclass + class CartpoleTiledCameraCfg(CameraCfg): + prim_path: str = "/World/envs/env_.*/Camera" + offset: CameraCfg.OffsetCfg = CameraCfg.OffsetCfg( + pos=(-5.0, 0.0, 2.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world" + ) + data_types: list[str] = [] + spawn: sim_utils.PinholeCameraCfg = sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ) + width: int = 100 + height: int = 100 + renderer_cfg: MultiBackendRendererCfg = MultiBackendRendererCfg() + + default = CartpoleTiledCameraCfg(data_types=["rgb"]) + depth = CartpoleTiledCameraCfg(data_types=["depth"]) + albedo = CartpoleTiledCameraCfg(data_types=["albedo"]) + semantic_segmentation = CartpoleTiledCameraCfg(data_types=["semantic_segmentation"]) + simple_shading_constant_diffuse = CartpoleTiledCameraCfg(data_types=["simple_shading_constant_diffuse"]) + simple_shading_diffuse_mdl = CartpoleTiledCameraCfg(data_types=["simple_shading_diffuse_mdl"]) + simple_shading_full_mdl = CartpoleTiledCameraCfg(data_types=["simple_shading_full_mdl"]) + rgb = default + + +@configclass +class CartpoleCameraPresetsEnvCfg(PresetCfg): + @configclass + class BaseCartpoleCameraEnvCfg(DirectRLEnvCfg): + # env + decimation = 2 + episode_length_s = 5.0 + action_scale = 100.0 # [N] + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=PhysicsCfg()) + + # robot + robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") + cart_dof_name = "slider_to_cart" + pole_dof_name = "cart_to_pole" + + # camera + tiled_camera: MultiDataTypeCartpoleTiledCameraCfg = MultiDataTypeCartpoleTiledCameraCfg() + write_image_to_file = False + + # spaces + action_space = 1 + state_space = 0 + observation_space = [100, 100, 3] + + # change viewer settings + viewer = ViewerCfg(eye=(20.0, 20.0, 20.0)) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=512, env_spacing=20.0, replicate_physics=True) + + # reset + max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] + initial_pole_angle_range = [-0.125, 0.125] # the range in which the pole angle is sampled from on reset [rad] + + # reward scales + rew_scale_alive = 1.0 + rew_scale_terminated = -2.0 + rew_scale_pole_pos = -1.0 + rew_scale_cart_vel = -0.01 + rew_scale_pole_vel = -0.005 + + default = BaseCartpoleCameraEnvCfg() + depth = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 1]) + albedo = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 3]) + semantic_segmentation = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 4]) + simple_shading_constant_diffuse = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 3]) + simple_shading_diffuse_mdl = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 3]) + simple_shading_full_mdl = BaseCartpoleCameraEnvCfg(observation_space=[100, 100, 3]) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env.py index f897b64f3ec9..10442ef08cc8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env.py @@ -7,54 +7,18 @@ import math from collections.abc import Sequence +from typing import TYPE_CHECKING import torch import isaaclab.sim as sim_utils -from isaaclab.assets import Articulation, ArticulationCfg -from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import SimulationCfg +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnv from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane -from isaaclab.utils import configclass from isaaclab.utils.math import sample_uniform -from isaaclab_assets.robots.cartpole import CARTPOLE_CFG - - -@configclass -class CartpoleEnvCfg(DirectRLEnvCfg): - # env - decimation = 2 - episode_length_s = 5.0 - action_scale = 100.0 # [N] - action_space = 1 - observation_space = 4 - state_space = 0 - - # simulation - sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) - - # robot - robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") - cart_dof_name = "slider_to_cart" - pole_dof_name = "cart_to_pole" - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True - ) - - # reset - max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] - initial_pole_angle_range = [-0.25, 0.25] # the range in which the pole angle is sampled from on reset [rad] - - # reward scales - rew_scale_alive = 1.0 - rew_scale_terminated = -2.0 - rew_scale_pole_pos = -1.0 - rew_scale_cart_vel = -0.01 - rew_scale_pole_vel = -0.005 +if TYPE_CHECKING: + from .cartpole_env_cfg import CartpoleEnvCfg class CartpoleEnv(DirectRLEnv): @@ -67,8 +31,8 @@ def __init__(self, cfg: CartpoleEnvCfg, render_mode: str | None = None, **kwargs self._pole_dof_idx, _ = self.cartpole.find_joints(self.cfg.pole_dof_name) self.action_scale = self.cfg.action_scale - self.joint_pos = self.cartpole.data.joint_pos - self.joint_vel = self.cartpole.data.joint_vel + self.joint_pos = self.cartpole.data.joint_pos.torch + self.joint_vel = self.cartpole.data.joint_vel.torch def _setup_scene(self): self.cartpole = Articulation(self.cfg.robot_cfg) @@ -89,7 +53,7 @@ def _pre_physics_step(self, actions: torch.Tensor) -> None: self.actions = self.action_scale * actions.clone() def _apply_action(self) -> None: - self.cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx) + self.cartpole.set_joint_effort_target_index(target=self.actions, joint_ids=self._cart_dof_idx) def _get_observations(self) -> dict: obs = torch.cat( @@ -120,8 +84,8 @@ def _get_rewards(self) -> torch.Tensor: return total_reward def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: - self.joint_pos = self.cartpole.data.joint_pos - self.joint_vel = self.cartpole.data.joint_vel + self.joint_pos = self.cartpole.data.joint_pos.torch + self.joint_vel = self.cartpole.data.joint_vel.torch time_out = self.episode_length_buf >= self.max_episode_length - 1 out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1) @@ -131,26 +95,33 @@ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: def _reset_idx(self, env_ids: Sequence[int] | None): if env_ids is None: env_ids = self.cartpole._ALL_INDICES + + # Log survival success rate before resetting + survived = self.reset_time_outs[env_ids].float() + self.extras.setdefault("log", {})["Metrics/success_rate"] = survived.mean().item() + super()._reset_idx(env_ids) - joint_pos = self.cartpole.data.default_joint_pos[env_ids] + joint_pos = self.cartpole.data.default_joint_pos.torch[env_ids].clone() joint_pos[:, self._pole_dof_idx] += sample_uniform( self.cfg.initial_pole_angle_range[0] * math.pi, self.cfg.initial_pole_angle_range[1] * math.pi, joint_pos[:, self._pole_dof_idx].shape, joint_pos.device, ) - joint_vel = self.cartpole.data.default_joint_vel[env_ids] + joint_vel = self.cartpole.data.default_joint_vel.torch[env_ids].clone() - default_root_state = self.cartpole.data.default_root_state[env_ids] - default_root_state[:, :3] += self.scene.env_origins[env_ids] + default_root_pose = self.cartpole.data.default_root_pose.torch[env_ids].clone() + default_root_pose[:, :3] += self.scene.env_origins[env_ids] + default_root_vel = self.cartpole.data.default_root_vel.torch[env_ids].clone() self.joint_pos[env_ids] = joint_pos self.joint_vel[env_ids] = joint_vel - self.cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self.cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self.cartpole.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + self.cartpole.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self.cartpole.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self.cartpole.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.cartpole.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) @torch.jit.script diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env_cfg.py new file mode 100644 index 000000000000..2b93d0f0ed79 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole/cartpole_env_cfg.py @@ -0,0 +1,97 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import KaminoSolverCfg, MJWarpSolverCfg, NewtonCfg +from isaaclab_ovphysx.physics import OvPhysxCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg + +from isaaclab_assets.robots.cartpole import CARTPOLE_CFG + + +@configclass +class CartpolePhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + physx: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=5, + nconmax=3, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + newton_kamino: NewtonCfg = NewtonCfg( + solver_cfg=KaminoSolverCfg( + integrator="moreau", + use_collision_detector=True, + sparse_jacobian=True, + constraints_alpha=0.1, + padmm_max_iterations=100, + padmm_primal_tolerance=1e-4, + padmm_dual_tolerance=1e-4, + padmm_compl_tolerance=1e-4, + padmm_rho_0=0.05, + padmm_eta=1e-5, + padmm_use_acceleration=True, + padmm_warmstart_mode="containers", + padmm_contact_warmstart_method="geom_pair_net_force", + padmm_use_graph_conditionals=False, + collision_detector_pipeline="unified", + collision_detector_max_contacts_per_pair=8, + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + ovphysx: OvPhysxCfg = OvPhysxCfg() + + +@configclass +class CartpoleEnvCfg(DirectRLEnvCfg): + # env + decimation = 2 + episode_length_s = 5.0 + action_scale = 100.0 # [N] + action_space = 1 + observation_space = 4 + state_space = 0 + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=CartpolePhysicsCfg()) + + # robot + robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") + cart_dof_name = "slider_to_cart" + pole_dof_name = "cart_to_pole" + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # reset + max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] + initial_pole_angle_range = [-0.25, 0.25] # the range in which the pole angle is sampled from on reset [rad] + + # reward scales + rew_scale_alive = 1.0 + rew_scale_terminated = -2.0 + rew_scale_pole_pos = -1.0 + rew_scale_cart_vel = -0.01 + rew_scale_pole_vel = -0.005 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/__init__.py index d401c413967d..b3f8d0b43b68 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/__init__.py @@ -4,6 +4,3 @@ # SPDX-License-Identifier: BSD-3-Clause """Cartpole environment showcase for the supported Gymnasium spaces.""" - -from .cartpole import * # noqa -from .cartpole_camera import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env.py index dc03eb299d0d..52042de66cf4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env.py @@ -8,7 +8,8 @@ import gymnasium as gym import torch -from isaaclab_tasks.direct.cartpole.cartpole_env import CartpoleEnv, CartpoleEnvCfg +from isaaclab_tasks.direct.cartpole.cartpole_env import CartpoleEnv +from isaaclab_tasks.direct.cartpole.cartpole_env_cfg import CartpoleEnvCfg class CartpoleShowcaseEnv(CartpoleEnv): @@ -39,7 +40,7 @@ def _apply_action(self) -> None: raise NotImplementedError(f"Action space {type(self.single_action_space)} not implemented") # set target - self.cartpole.set_joint_effort_target(target, joint_ids=self._cart_dof_idx) + self.cartpole.set_joint_effort_target_index(target=target, joint_ids=self._cart_dof_idx) def _get_observations(self) -> dict: # fundamental spaces diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env_cfg.py index e6e8169b1ba0..9384305a07ed 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole/cartpole_env_cfg.py @@ -9,7 +9,7 @@ from isaaclab.utils import configclass -from isaaclab_tasks.direct.cartpole.cartpole_env import CartpoleEnvCfg +from isaaclab_tasks.direct.cartpole.cartpole_env_cfg import CartpoleEnvCfg ### # Observation space as Box diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env.py index 1c8dc6b4a078..d06d5c595c69 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env.py @@ -8,7 +8,8 @@ import gymnasium as gym import torch -from isaaclab_tasks.direct.cartpole.cartpole_camera_env import CartpoleCameraEnv, CartpoleRGBCameraEnvCfg +from isaaclab_tasks.direct.cartpole.cartpole_camera_env import CartpoleCameraEnv +from isaaclab_tasks.direct.cartpole.cartpole_camera_env_cfg import CartpoleRGBCameraEnvCfg class CartpoleCameraShowcaseEnv(CartpoleCameraEnv): @@ -39,7 +40,7 @@ def _apply_action(self) -> None: raise NotImplementedError(f"Action space {type(self.single_action_space)} not implemented") # set target - self._cartpole.set_joint_effort_target(target, joint_ids=self._cart_dof_idx) + self._cartpole.set_joint_effort_target_index(target=target, joint_ids=self._cart_dof_idx) def _get_observations(self) -> dict: # get camera data diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env_cfg.py index 5e146041b79b..c81fe197bab1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/cartpole_showcase/cartpole_camera/cartpole_camera_env_cfg.py @@ -8,16 +8,16 @@ from gymnasium import spaces import isaaclab.sim as sim_utils -from isaaclab.sensors import TiledCameraCfg +from isaaclab.sensors import CameraCfg from isaaclab.utils import configclass -from isaaclab_tasks.direct.cartpole.cartpole_camera_env import CartpoleRGBCameraEnvCfg as CartpoleCameraEnvCfg +from isaaclab_tasks.direct.cartpole.cartpole_camera_env_cfg import CartpoleRGBCameraEnvCfg as CartpoleCameraEnvCfg -def get_tiled_camera_cfg(data_type: str, width: int = 100, height: int = 100) -> TiledCameraCfg: - return TiledCameraCfg( +def get_tiled_camera_cfg(data_type: str, width: int = 100, height: int = 100) -> CameraCfg: + return CameraCfg( prim_path="/World/envs/env_.*/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(1.0, 0.0, 0.0, 0.0), convention="world"), + offset=CameraCfg.OffsetCfg(pos=(-5.0, 0.0, 2.0), rot=(0.0, 0.0, 0.0, 1.0), convention="world"), data_types=[data_type], spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) @@ -53,7 +53,7 @@ class BoxBoxEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Box( @@ -85,7 +85,7 @@ class BoxDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Box( @@ -124,7 +124,7 @@ class BoxMultiDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Box( @@ -160,7 +160,7 @@ class DictBoxEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Dict( @@ -198,7 +198,7 @@ class DictDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Dict( @@ -243,7 +243,7 @@ class DictMultiDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Dict( @@ -284,7 +284,7 @@ class TupleBoxEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Tuple( @@ -320,7 +320,7 @@ class TupleDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Tuple( @@ -363,7 +363,7 @@ class TupleMultiDiscreteEnvCfg(CartpoleCameraEnvCfg): """ # camera - tiled_camera: TiledCameraCfg = get_tiled_camera_cfg("rgb") + tiled_camera: CameraCfg = get_tiled_camera_cfg("rgb") # spaces observation_space = spaces.Tuple( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/agents/rl_games_ppo_cfg.yaml index f11f6d996745..f37faaa8ce4e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/agents/rl_games_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/agents/rl_games_ppo_cfg.yaml @@ -52,7 +52,6 @@ params: config: name: Factory device: cuda:0 - full_experiment_name: test env_name: rlgpu multi_gpu: False ppo: True diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_control.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_control.py index f8ccb0e13451..a43b7ffcc743 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_control.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_control.py @@ -12,9 +12,7 @@ import torch -import isaacsim.core.utils.torch as torch_utils - -from isaaclab.utils.math import axis_angle_from_quat +from isaaclab.utils import math as torch_utils def compute_dof_torque( @@ -128,14 +126,14 @@ def get_pose_error( fingertip_midpoint_quat_norm = torch_utils.quat_mul( fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat) - )[:, 0] # scalar component + )[:, 3] # scalar component (W is at index 3 in XYZW format) fingertip_midpoint_quat_inv = torch_utils.quat_conjugate( fingertip_midpoint_quat ) / fingertip_midpoint_quat_norm.unsqueeze(-1) quat_error = torch_utils.quat_mul(ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv) # Convert to axis-angle error - axis_angle_error = axis_angle_from_quat(quat_error) + axis_angle_error = torch_utils.axis_angle_from_quat(quat_error) if rot_error_type == "quat": return pos_error, quat_error diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py index a4e9c6d9ece9..ecc1ef33a038 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py @@ -5,16 +5,16 @@ import numpy as np import torch +import warp as wp import carb -import isaacsim.core.utils.torch as torch_utils import isaaclab.sim as sim_utils from isaaclab.assets import Articulation from isaaclab.envs import DirectRLEnv from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane +from isaaclab.utils import math as torch_utils from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.math import axis_angle_from_quat from . import factory_control, factory_utils from .factory_env_cfg import OBS_DIM_CFG, STATE_DIM_CFG, FactoryEnvCfg @@ -75,7 +75,7 @@ def _init_tensors(self): self.last_update_timestamp = 0.0 # Note: This is for finite differencing body velocities. self.prev_fingertip_pos = torch.zeros((self.num_envs, 3), device=self.device) self.prev_fingertip_quat = ( - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) self.prev_joint_pos = torch.zeros((self.num_envs, 7), device=self.device) @@ -89,7 +89,7 @@ def _setup_scene(self): # spawn a usd file of a table into the scene cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd") cfg.func( - "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.70711, 0.0, 0.0, 0.70711) + "/World/envs/env_.*/Table", cfg, translation=(0.55, 0.0, 0.0), orientation=(0.0, 0.0, 0.70711, 0.70711) ) self._robot = Articulation(self.cfg.robot) @@ -118,25 +118,27 @@ def _setup_scene(self): def _compute_intermediate_values(self, dt): """Get values computed from raw tensors. This includes adding noise.""" # TODO: A lot of these can probably only be set once? - self.fixed_pos = self._fixed_asset.data.root_pos_w - self.scene.env_origins - self.fixed_quat = self._fixed_asset.data.root_quat_w + self.fixed_pos = self._fixed_asset.data.root_pos_w.torch - self.scene.env_origins + self.fixed_quat = self._fixed_asset.data.root_quat_w.torch - self.held_pos = self._held_asset.data.root_pos_w - self.scene.env_origins - self.held_quat = self._held_asset.data.root_quat_w + self.held_pos = self._held_asset.data.root_pos_w.torch - self.scene.env_origins + self.held_quat = self._held_asset.data.root_quat_w.torch - self.fingertip_midpoint_pos = self._robot.data.body_pos_w[:, self.fingertip_body_idx] - self.scene.env_origins - self.fingertip_midpoint_quat = self._robot.data.body_quat_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] - self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] + self.fingertip_midpoint_pos = ( + self._robot.data.body_pos_w.torch[:, self.fingertip_body_idx] - self.scene.env_origins + ) + self.fingertip_midpoint_quat = self._robot.data.body_quat_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w.torch[:, self.fingertip_body_idx] + self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w.torch[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = wp.to_torch(self._robot.root_view.get_jacobians()) self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] - self.joint_pos = self._robot.data.joint_pos.clone() - self.joint_vel = self._robot.data.joint_vel.clone() + self.arm_mass_matrix = wp.to_torch(self._robot.root_view.get_generalized_mass_matrices())[:, 0:7, 0:7] + self.joint_pos = self._robot.data.joint_pos.torch.clone() + self.joint_vel = self._robot.data.joint_vel.torch.clone() # Finite-differencing results in more reliable velocity estimates. self.ee_linvel_fd = (self.fingertip_midpoint_pos - self.prev_fingertip_pos) / dt @@ -146,8 +148,8 @@ def _compute_intermediate_values(self, dt): rot_diff_quat = torch_utils.quat_mul( self.fingertip_midpoint_quat, torch_utils.quat_conjugate(self.prev_fingertip_quat) ) - rot_diff_quat *= torch.sign(rot_diff_quat[:, 0]).unsqueeze(-1) - rot_diff_aa = axis_angle_from_quat(rot_diff_quat) + rot_diff_quat *= torch.sign(rot_diff_quat[:, 3]).unsqueeze(-1) # W component is at index 3 in XYZW format + rot_diff_aa = torch_utils.axis_angle_from_quat(rot_diff_quat) self.ee_angvel_fd = rot_diff_aa / dt self.prev_fingertip_quat = self.fingertip_midpoint_quat.clone() @@ -224,7 +226,7 @@ def close_gripper_in_place(self): rot_actions = actions[:, 3:6] # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) @@ -232,11 +234,11 @@ def close_gripper_in_place(self): rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1.0e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(torch_utils.euler_xyz_from_quat(ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 target_euler_xyz[:, 1] = 0.0 @@ -276,18 +278,18 @@ def _apply_action(self): ctrl_target_fingertip_midpoint_pos = fixed_pos_action_frame + pos_error_clipped # Convert to quat and set rot target - angle = torch.norm(rot_actions, p=2, dim=-1) + angle = torch.linalg.norm(rot_actions, ord=2, dim=-1) axis = rot_actions / angle.unsqueeze(-1) rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis) rot_actions_quat = torch.where( angle.unsqueeze(-1).repeat(1, 4) > 1e-6, rot_actions_quat, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1), + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs, 1), ) ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat) - target_euler_xyz = torch.stack(torch_utils.get_euler_xyz(ctrl_target_fingertip_midpoint_quat), dim=1) + target_euler_xyz = torch.stack(torch_utils.euler_xyz_from_quat(ctrl_target_fingertip_midpoint_quat), dim=1) target_euler_xyz[:, 0] = 3.14159 # Restrict actions to be upright. target_euler_xyz[:, 1] = 0.0 @@ -327,8 +329,8 @@ def generate_ctrl_signals( self.ctrl_target_joint_pos[:, 7:9] = ctrl_target_gripper_dof_pos self.joint_torque[:, 7:9] = 0.0 - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) - self._robot.set_joint_effort_target(self.joint_torque) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) + self._robot.set_joint_effort_target_index(target=self.joint_torque) def _get_dones(self): """Check which environments are terminated. @@ -374,7 +376,7 @@ def _get_curr_successes(self, success_threshold, check_rot=False): curr_successes = torch.logical_and(is_centered, is_close_or_below) if check_rot: - _, _, curr_yaw = torch_utils.get_euler_xyz(self.fingertip_midpoint_quat) + _, _, curr_yaw = torch_utils.euler_xyz_from_quat(self.fingertip_midpoint_quat) curr_yaw = factory_utils.wrap_yaw(curr_yaw) is_rotated = curr_yaw < self.cfg_task.ee_success_yaw curr_successes = torch.logical_and(curr_successes, is_rotated) @@ -385,7 +387,9 @@ def _log_factory_metrics(self, rew_dict, curr_successes): """Keep track of episode statistics and log rewards.""" # Only log episode success rates at the end of an episode. if torch.any(self.reset_buf): - self.extras["successes"] = torch.count_nonzero(curr_successes) / self.num_envs + self.extras.setdefault("log", {})["Metrics/success_rate"] = ( + torch.count_nonzero(curr_successes) / self.num_envs + ).item() # Get the time at which an episode first succeeds. first_success = torch.logical_and(curr_successes, torch.logical_not(self.ep_succeeded)) @@ -443,26 +447,26 @@ def _get_factory_rew_dict(self, curr_successes): offsets = factory_utils.get_keypoint_offsets(self.cfg_task.num_keypoints, self.device) keypoint_offsets = offsets * self.cfg_task.keypoint_scale for idx, keypoint_offset in enumerate(keypoint_offsets): - keypoints_held[:, idx] = torch_utils.tf_combine( - held_base_quat, + keypoints_held[:, idx], _ = torch_utils.combine_frame_transforms( held_base_pos, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), + held_base_quat, keypoint_offset.repeat(self.num_envs, 1), - )[1] - keypoints_fixed[:, idx] = torch_utils.tf_combine( - target_held_base_quat, + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), + ) + keypoints_fixed[:, idx], _ = torch_utils.combine_frame_transforms( target_held_base_pos, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), + target_held_base_quat, keypoint_offset.repeat(self.num_envs, 1), - )[1] - keypoint_dist = torch.norm(keypoints_held - keypoints_fixed, p=2, dim=-1).mean(-1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), + ) + keypoint_dist = torch.linalg.norm(keypoints_held - keypoints_fixed, ord=2, dim=-1).mean(-1) a0, b0 = self.cfg_task.keypoint_coef_baseline a1, b1 = self.cfg_task.keypoint_coef_coarse a2, b2 = self.cfg_task.keypoint_coef_fine # Action penalties. - action_penalty_ee = torch.norm(self.actions, p=2) - action_grad_penalty = torch.norm(self.actions - self.prev_actions, p=2, dim=-1) + action_penalty_ee = torch.linalg.norm(self.actions, ord=2) + action_grad_penalty = torch.linalg.norm(self.actions - self.prev_actions, ord=2, dim=-1) curr_engaged = self._get_curr_successes(success_threshold=self.cfg_task.engage_threshold, check_rot=False) rew_dict = { @@ -497,18 +501,20 @@ def _reset_idx(self, env_ids): def _set_assets_to_default_pose(self, env_ids): """Move assets to default pose before randomization.""" - held_state = self._held_asset.data.default_root_state.clone()[env_ids] - held_state[:, 0:3] += self.scene.env_origins[env_ids] - held_state[:, 7:] = 0.0 - self._held_asset.write_root_pose_to_sim(held_state[:, 0:7], env_ids=env_ids) - self._held_asset.write_root_velocity_to_sim(held_state[:, 7:], env_ids=env_ids) + held_pose = self._held_asset.data.default_root_pose.torch.clone()[env_ids] + held_vel = self._held_asset.data.default_root_vel.torch.clone()[env_ids] + held_pose[:, 0:3] += self.scene.env_origins[env_ids] + held_vel[:] = 0.0 + self._held_asset.write_root_pose_to_sim_index(root_pose=held_pose, env_ids=env_ids) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_vel, env_ids=env_ids) self._held_asset.reset() - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] - fixed_state[:, 0:3] += self.scene.env_origins[env_ids] - fixed_state[:, 7:] = 0.0 - self._fixed_asset.write_root_pose_to_sim(fixed_state[:, 0:7], env_ids=env_ids) - self._fixed_asset.write_root_velocity_to_sim(fixed_state[:, 7:], env_ids=env_ids) + fixed_pose = self._fixed_asset.data.default_root_pose.torch.clone()[env_ids] + fixed_vel = self._fixed_asset.data.default_root_vel.torch.clone()[env_ids] + fixed_pose[:, 0:3] += self.scene.env_origins[env_ids] + fixed_vel[:] = 0.0 + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_pose, env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_vel, env_ids=env_ids) self._fixed_asset.reset() def set_pos_inverse_kinematics( @@ -541,8 +547,9 @@ def set_pos_inverse_kinematics( self.ctrl_target_joint_pos[env_ids, 0:7] = self.joint_pos[env_ids, 0:7] # Update dof state. - self._robot.write_joint_state_to_sim(self.joint_pos, self.joint_vel) - self._robot.set_joint_position_target(self.ctrl_target_joint_pos) + self._robot.write_joint_position_to_sim_index(position=self.joint_pos) + self._robot.write_joint_velocity_to_sim_index(velocity=self.joint_vel) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos) # Simulate and update tensors. self.step_sim_no_action() @@ -570,7 +577,7 @@ def get_handheld_asset_relative_pose(self): raise NotImplementedError("Task not implemented") held_asset_relative_quat = ( - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) ) if self.cfg_task.name == "nut_thread": # Rotate along z-axis of frame for default position. @@ -587,16 +594,17 @@ def get_handheld_asset_relative_pose(self): def _set_franka_to_default_pose(self, joints, env_ids): """Return Franka to its default joint position.""" gripper_width = self.cfg_task.held_asset_cfg.diameter / 2 * 1.25 - joint_pos = self._robot.data.default_joint_pos[env_ids] + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] joint_pos[:, 7:] = gripper_width # MIMIC joint_pos[:, :7] = torch.tensor(joints, device=self.device)[None, :] joint_vel = torch.zeros_like(joint_pos) joint_effort = torch.zeros_like(joint_pos) self.ctrl_target_joint_pos[env_ids, :] = joint_pos - self._robot.set_joint_position_target(self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self._robot.set_joint_position_target_index(target=self.ctrl_target_joint_pos[env_ids], env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) self._robot.reset() - self._robot.set_joint_effort_target(joint_effort, env_ids=env_ids) + self._robot.set_joint_effort_target_index(target=joint_effort, env_ids=env_ids) self.step_sim_no_action() @@ -618,7 +626,8 @@ def randomize_initial_state(self, env_ids): physics_sim_view.set_gravity(carb.Float3(0.0, 0.0, 0.0)) # (1.) Randomize fixed asset pose. - fixed_state = self._fixed_asset.data.default_root_state.clone()[env_ids] + fixed_pose = self._fixed_asset.data.default_root_pose.torch.clone()[env_ids] + fixed_vel = self._fixed_asset.data.default_root_vel.torch.clone()[env_ids] # (1.a.) Position rand_sample = torch.rand((len(env_ids), 3), dtype=torch.float32, device=self.device) fixed_pos_init_rand = 2 * (rand_sample - 0.5) # [-1, 1] @@ -626,7 +635,7 @@ def randomize_initial_state(self, env_ids): self.cfg_task.fixed_asset_init_pos_noise, dtype=torch.float32, device=self.device ) fixed_pos_init_rand = fixed_pos_init_rand @ torch.diag(fixed_asset_init_pos_rand) - fixed_state[:, 0:3] += fixed_pos_init_rand + self.scene.env_origins[env_ids] + fixed_pose[:, 0:3] += fixed_pos_init_rand + self.scene.env_origins[env_ids] # (1.b.) Orientation fixed_orn_init_yaw = np.deg2rad(self.cfg_task.fixed_asset_init_orn_deg) fixed_orn_yaw_range = np.deg2rad(self.cfg_task.fixed_asset_init_orn_range_deg) @@ -636,12 +645,12 @@ def randomize_initial_state(self, env_ids): fixed_orn_quat = torch_utils.quat_from_euler_xyz( fixed_orn_euler[:, 0], fixed_orn_euler[:, 1], fixed_orn_euler[:, 2] ) - fixed_state[:, 3:7] = fixed_orn_quat + fixed_pose[:, 3:7] = fixed_orn_quat # (1.c.) Velocity - fixed_state[:, 7:] = 0.0 # vel + fixed_vel[:] = 0.0 # vel # (1.d.) Update values. - self._fixed_asset.write_root_pose_to_sim(fixed_state[:, 0:7], env_ids=env_ids) - self._fixed_asset.write_root_velocity_to_sim(fixed_state[:, 7:], env_ids=env_ids) + self._fixed_asset.write_root_pose_to_sim_index(root_pose=fixed_pose, env_ids=env_ids) + self._fixed_asset.write_root_velocity_to_sim_index(root_velocity=fixed_vel, env_ids=env_ids) self._fixed_asset.reset() # (1.e.) Noisy position observation. @@ -660,11 +669,11 @@ def randomize_initial_state(self, env_ids): if self.cfg_task.name == "gear_mesh": fixed_tip_pos_local[:, 0] = self.cfg_task.fixed_asset_cfg.medium_gear_base_offset[0] - _, fixed_tip_pos = torch_utils.tf_combine( - self.fixed_quat, + fixed_tip_pos, _ = torch_utils.combine_frame_transforms( self.fixed_pos, - torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), + self.fixed_quat, fixed_tip_pos_local, + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), ) self.fixed_pos_obs_frame[:] = fixed_tip_pos @@ -707,7 +716,7 @@ def randomize_initial_state(self, env_ids): env_ids=bad_envs, ) pos_error = torch.linalg.norm(pos_error, dim=1) > 1e-3 - angle_error = torch.norm(aa_error, dim=1) > 1e-3 + angle_error = torch.linalg.norm(aa_error, dim=1) > 1e-3 any_error = torch.logical_or(pos_error, angle_error) bad_envs = bad_envs[any_error.nonzero(as_tuple=False).squeeze(-1)] @@ -725,38 +734,40 @@ def randomize_initial_state(self, env_ids): # Add flanking gears after servo (so arm doesn't move them). if self.cfg_task.name == "gear_mesh" and self.cfg_task.add_flanking_gears: - small_gear_state = self._small_gear_asset.data.default_root_state.clone()[env_ids] - small_gear_state[:, 0:7] = fixed_state[:, 0:7] - small_gear_state[:, 7:] = 0.0 # vel - self._small_gear_asset.write_root_pose_to_sim(small_gear_state[:, 0:7], env_ids=env_ids) - self._small_gear_asset.write_root_velocity_to_sim(small_gear_state[:, 7:], env_ids=env_ids) + small_gear_pose = self._small_gear_asset.data.default_root_pose.torch.clone()[env_ids] + small_gear_vel = self._small_gear_asset.data.default_root_vel.torch.clone()[env_ids] + small_gear_pose[:, 0:7] = fixed_pose[:, 0:7] + small_gear_vel[:] = 0.0 # vel + self._small_gear_asset.write_root_pose_to_sim_index(root_pose=small_gear_pose, env_ids=env_ids) + self._small_gear_asset.write_root_velocity_to_sim_index(root_velocity=small_gear_vel, env_ids=env_ids) self._small_gear_asset.reset() - large_gear_state = self._large_gear_asset.data.default_root_state.clone()[env_ids] - large_gear_state[:, 0:7] = fixed_state[:, 0:7] - large_gear_state[:, 7:] = 0.0 # vel - self._large_gear_asset.write_root_pose_to_sim(large_gear_state[:, 0:7], env_ids=env_ids) - self._large_gear_asset.write_root_velocity_to_sim(large_gear_state[:, 7:], env_ids=env_ids) + large_gear_pose = self._large_gear_asset.data.default_root_pose.torch.clone()[env_ids] + large_gear_vel = self._large_gear_asset.data.default_root_vel.torch.clone()[env_ids] + large_gear_pose[:, 0:7] = fixed_pose[:, 0:7] + large_gear_vel[:] = 0.0 # vel + self._large_gear_asset.write_root_pose_to_sim_index(root_pose=large_gear_pose, env_ids=env_ids) + self._large_gear_asset.write_root_velocity_to_sim_index(root_velocity=large_gear_vel, env_ids=env_ids) self._large_gear_asset.reset() # (3) Randomize asset-in-gripper location. # flip gripper z orientation - flip_z_quat = torch.tensor([0.0, 0.0, 1.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) - fingertip_flipped_quat, fingertip_flipped_pos = torch_utils.tf_combine( - q1=self.fingertip_midpoint_quat, - t1=self.fingertip_midpoint_pos, - q2=flip_z_quat, - t2=torch.zeros((self.num_envs, 3), device=self.device), + flip_z_quat = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + fingertip_flipped_pos, fingertip_flipped_quat = torch_utils.combine_frame_transforms( + self.fingertip_midpoint_pos, + self.fingertip_midpoint_quat, + torch.zeros((self.num_envs, 3), device=self.device), + flip_z_quat, ) # get default gripper in asset transform held_asset_relative_pos, held_asset_relative_quat = self.get_handheld_asset_relative_pose() - asset_in_hand_quat, asset_in_hand_pos = torch_utils.tf_inverse( - held_asset_relative_quat, held_asset_relative_pos - ) + # Compute inverse of relative transform: inv(quat, pos) = (quat_conjugate, -quat_rotate_inverse(quat, pos)) + asset_in_hand_quat = torch_utils.quat_inv(held_asset_relative_quat) + asset_in_hand_pos = torch_utils.quat_apply_inverse(held_asset_relative_quat, -held_asset_relative_pos) - translated_held_asset_quat, translated_held_asset_pos = torch_utils.tf_combine( - q1=fingertip_flipped_quat, t1=fingertip_flipped_pos, q2=asset_in_hand_quat, t2=asset_in_hand_pos + translated_held_asset_pos, translated_held_asset_quat = torch_utils.combine_frame_transforms( + fingertip_flipped_pos, fingertip_flipped_quat, asset_in_hand_pos, asset_in_hand_quat ) # Add asset in hand randomization @@ -767,19 +778,20 @@ def randomize_initial_state(self, env_ids): held_asset_pos_noise_level = torch.tensor(self.cfg_task.held_asset_pos_noise, device=self.device) held_asset_pos_noise = held_asset_pos_noise @ torch.diag(held_asset_pos_noise_level) - translated_held_asset_quat, translated_held_asset_pos = torch_utils.tf_combine( - q1=translated_held_asset_quat, - t1=translated_held_asset_pos, - q2=torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), - t2=held_asset_pos_noise, + translated_held_asset_pos, translated_held_asset_quat = torch_utils.combine_frame_transforms( + translated_held_asset_pos, + translated_held_asset_quat, + held_asset_pos_noise, + torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1), ) - held_state = self._held_asset.data.default_root_state.clone() - held_state[:, 0:3] = translated_held_asset_pos + self.scene.env_origins - held_state[:, 3:7] = translated_held_asset_quat - held_state[:, 7:] = 0.0 - self._held_asset.write_root_pose_to_sim(held_state[:, 0:7]) - self._held_asset.write_root_velocity_to_sim(held_state[:, 7:]) + held_pose = self._held_asset.data.default_root_pose.torch.clone() + held_vel = self._held_asset.data.default_root_vel.torch.clone() + held_pose[:, 0:3] = translated_held_asset_pos + self.scene.env_origins + held_pose[:, 3:7] = translated_held_asset_quat + held_vel[:] = 0.0 + self._held_asset.write_root_pose_to_sim_index(root_pose=held_pose) + self._held_asset.write_root_velocity_to_sim_index(root_velocity=held_vel) self._held_asset.reset() # Close hand diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env_cfg.py index 0e6b5db0a73c..028184ff7ba7 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env_cfg.py @@ -3,12 +3,14 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass @@ -98,7 +100,7 @@ class FactoryEnvCfg(DirectRLEnvCfg): device="cuda:0", dt=1 / 120, gravity=(0.0, 0.0, -9.81), - physx=PhysxCfg( + physics=PhysxCfg( solver_type=1, max_position_iteration_count=192, # Important to avoid interpenetration. max_velocity_iteration_count=1, @@ -154,7 +156,7 @@ class FactoryEnvCfg(DirectRLEnvCfg): "panda_finger_joint2": 0.04, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), actuators={ "panda_arm1": ImplicitActuatorCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_tasks_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_tasks_cfg.py index c631856816cb..24b35617bce4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_tasks_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_tasks_cfg.py @@ -152,7 +152,7 @@ class PegInsert(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.6, 0.0, 0.05), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.6, 0.0, 0.05), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -177,7 +177,7 @@ class PegInsert(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.4, 0.1), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.0, 0.4, 0.1), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -232,7 +232,7 @@ class GearMesh(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.4, 0.1), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.0, 0.4, 0.1), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -258,7 +258,7 @@ class GearMesh(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.4, 0.1), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.0, 0.4, 0.1), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -310,7 +310,7 @@ class GearMesh(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.6, 0.0, 0.05), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.6, 0.0, 0.05), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -335,7 +335,7 @@ class GearMesh(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.4, 0.1), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.0, 0.4, 0.1), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -416,7 +416,7 @@ class NutThread(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.6, 0.0, 0.05), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.6, 0.0, 0.05), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) @@ -441,7 +441,7 @@ class NutThread(FactoryTask): collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.4, 0.1), rot=(1.0, 0.0, 0.0, 0.0), joint_pos={}, joint_vel={} + pos=(0.0, 0.4, 0.1), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} ), actuators={}, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py index 962b3872bf09..ea88e806dba4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py @@ -5,8 +5,9 @@ import numpy as np import torch +import warp as wp -import isaacsim.core.utils.torch as torch_utils +from isaaclab.utils import math as torch_utils def get_keypoint_offsets(num_keypoints, device): @@ -30,20 +31,22 @@ def wrap_yaw(angle): def set_friction(asset, value, num_envs): """Update material properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = wp.to_torch(asset.root_view.get_material_properties()) materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. env_ids = torch.arange(num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + asset.root_view.set_material_properties( + wp.from_torch(materials.contiguous()), wp.from_torch(env_ids.to(torch.int32)) + ) def set_body_inertias(robot, num_envs): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = robot.root_physx_view.get_inertias() + inertias = wp.to_torch(robot.root_view.get_inertias()) offset = torch.zeros_like(inertias) offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - robot.root_physx_view.set_inertias(new_inertias, torch.arange(num_envs)) + robot.root_view.set_inertias(wp.from_torch(new_inertias), wp.from_torch(torch.arange(num_envs, dtype=torch.int32))) def get_held_base_pos_local(task_name, fixed_asset_cfg, num_envs, device): @@ -70,10 +73,10 @@ def get_held_base_pos_local(task_name, fixed_asset_cfg, num_envs, device): def get_held_base_pose(held_pos, held_quat, task_name, fixed_asset_cfg, num_envs, device): """Get current poses for keypoint and success computation.""" held_base_pos_local = get_held_base_pos_local(task_name, fixed_asset_cfg, num_envs, device) - held_base_quat_local = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).unsqueeze(0).repeat(num_envs, 1) + held_base_quat_local = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).unsqueeze(0).repeat(num_envs, 1) - held_base_quat, held_base_pos = torch_utils.tf_combine( - held_quat, held_pos, held_base_quat_local, held_base_pos_local + held_base_pos, held_base_quat = torch_utils.combine_frame_transforms( + held_pos, held_quat, held_base_pos_local, held_base_quat_local ) return held_base_pos, held_base_quat @@ -94,10 +97,10 @@ def get_target_held_base_pose(fixed_pos, fixed_quat, task_name, fixed_asset_cfg, fixed_success_pos_local[:, 2] = head_height + shank_length - thread_pitch * 1.5 else: raise NotImplementedError("Task not implemented") - fixed_success_quat_local = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).unsqueeze(0).repeat(num_envs, 1) + fixed_success_quat_local = torch.tensor([0.0, 0.0, 0.0, 1.0], device=device).unsqueeze(0).repeat(num_envs, 1) - target_held_base_quat, target_held_base_pos = torch_utils.tf_combine( - fixed_quat, fixed_pos, fixed_success_quat_local, fixed_success_pos_local + target_held_base_pos, target_held_base_quat = torch_utils.combine_frame_transforms( + fixed_pos, fixed_quat, fixed_success_pos_local, fixed_success_quat_local ) return target_held_base_pos, target_held_base_quat diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg.yaml index 08081f97e035..d995081b0cb9 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg.yaml @@ -52,7 +52,6 @@ params: config: name: Forge device: cuda:0 - full_experiment_name: test env_name: rlgpu multi_gpu: False ppo: True diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg_nut_thread.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg_nut_thread.yaml index a73dd178f6e0..b7d446f64f3a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg_nut_thread.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/agents/rl_games_ppo_cfg_nut_thread.yaml @@ -52,7 +52,6 @@ params: config: name: Forge device: cuda:0 - full_experiment_name: test env_name: rlgpu multi_gpu: False ppo: True diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py index 75484cbd8f17..c59de96552e0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py @@ -4,10 +4,16 @@ # SPDX-License-Identifier: BSD-3-Clause import numpy as np import torch +import warp as wp -import isaacsim.core.utils.torch as torch_utils - -from isaaclab.utils.math import axis_angle_from_quat +from isaaclab.utils.math import ( + axis_angle_from_quat, + euler_xyz_from_quat, + quat_conjugate, + quat_from_angle_axis, + quat_from_euler_xyz, + quat_mul, +) from isaaclab_tasks.direct.factory import factory_utils from isaaclab_tasks.direct.factory.factory_env import FactoryEnv @@ -71,8 +77,8 @@ def _compute_intermediate_values(self, dt): rot_noise_angle = torch.randn((self.num_envs,), dtype=torch.float32, device=self.device) * np.deg2rad( rot_noise_level_deg ) - self.noisy_fingertip_quat = torch_utils.quat_mul( - self.fingertip_midpoint_quat, torch_utils.quat_from_angle_axis(rot_noise_angle, rot_noise_axis) + self.noisy_fingertip_quat = quat_mul( + self.fingertip_midpoint_quat, quat_from_angle_axis(rot_noise_angle, rot_noise_axis) ) self.noisy_fingertip_quat[:, [0, 3]] = 0.0 self.noisy_fingertip_quat = self.noisy_fingertip_quat * self.flip_quats.unsqueeze(-1) @@ -82,17 +88,15 @@ def _compute_intermediate_values(self, dt): self.prev_fingertip_pos = self.noisy_fingertip_pos.clone() # Add state differences if velocity isn't being added. - rot_diff_quat = torch_utils.quat_mul( - self.noisy_fingertip_quat, torch_utils.quat_conjugate(self.prev_fingertip_quat) - ) - rot_diff_quat *= torch.sign(rot_diff_quat[:, 0]).unsqueeze(-1) + rot_diff_quat = quat_mul(self.noisy_fingertip_quat, quat_conjugate(self.prev_fingertip_quat)) + rot_diff_quat *= torch.sign(rot_diff_quat[:, 3]).unsqueeze(-1) # W component is at index 3 in XYZW format rot_diff_aa = axis_angle_from_quat(rot_diff_quat) self.ee_angvel_fd = rot_diff_aa / dt self.ee_angvel_fd[:, 0:2] = 0.0 self.prev_fingertip_quat = self.noisy_fingertip_quat.clone() # Update and smooth force values. - self.force_sensor_world = self._robot.root_physx_view.get_link_incoming_joint_force()[ + self.force_sensor_world = wp.to_torch(self._robot.root_view.get_link_incoming_joint_force())[ :, self.force_sensor_body_idx ] @@ -100,7 +104,7 @@ def _compute_intermediate_values(self, dt): self.force_sensor_world_smooth = alpha * self.force_sensor_world + (1 - alpha) * self.force_sensor_world_smooth self.force_sensor_smooth = torch.zeros_like(self.force_sensor_world) - identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) + identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1) self.force_sensor_smooth[:, :3], self.force_sensor_smooth[:, 3:6] = forge_utils.change_FT_frame( self.force_sensor_world_smooth[:, 0:3], self.force_sensor_world_smooth[:, 3:6], @@ -167,16 +171,14 @@ def _apply_action(self): # Assumes joint limit is in (+x, -y)-quadrant of world frame. rot_actions[:, 2] = np.deg2rad(-180.0) + np.deg2rad(270.0) * (rot_actions[:, 2] + 1.0) / 2.0 # Joint limit. # (1.c) Get desired orientation target. - bolt_frame_quat = torch_utils.quat_from_euler_xyz( - roll=rot_actions[:, 0], pitch=rot_actions[:, 1], yaw=rot_actions[:, 2] - ) + bolt_frame_quat = quat_from_euler_xyz(roll=rot_actions[:, 0], pitch=rot_actions[:, 1], yaw=rot_actions[:, 2]) rot_180_euler = torch.tensor([np.pi, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1) - quat_bolt_to_ee = torch_utils.quat_from_euler_xyz( + quat_bolt_to_ee = quat_from_euler_xyz( roll=rot_180_euler[:, 0], pitch=rot_180_euler[:, 1], yaw=rot_180_euler[:, 2] ) - ctrl_target_fingertip_preclipped_quat = torch_utils.quat_mul(quat_bolt_to_ee, bolt_frame_quat) + ctrl_target_fingertip_preclipped_quat = quat_mul(quat_bolt_to_ee, bolt_frame_quat) # Step (2): Clip targets if they are too far from current EE pose. # (2.a): Clip position targets. @@ -189,8 +191,8 @@ def _apply_action(self): # sure we avoid the joint limit. # (2.b.i) Get current and desired Euler angles. - curr_roll, curr_pitch, curr_yaw = torch_utils.get_euler_xyz(self.fingertip_midpoint_quat) - desired_roll, desired_pitch, desired_yaw = torch_utils.get_euler_xyz(ctrl_target_fingertip_preclipped_quat) + curr_roll, curr_pitch, curr_yaw = euler_xyz_from_quat(self.fingertip_midpoint_quat) + desired_roll, desired_pitch, desired_yaw = euler_xyz_from_quat(ctrl_target_fingertip_preclipped_quat) desired_xyz = torch.stack([desired_roll, desired_pitch, desired_yaw], dim=1) # (2.b.ii) Correct the direction of motion to avoid joint limit. @@ -219,7 +221,7 @@ def _apply_action(self): clipped_pitch = torch.clip(delta_pitch, -self.rot_threshold[:, 1], self.rot_threshold[:, 1]) desired_xyz[:, 1] = curr_pitch + clipped_pitch - ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz( + ctrl_target_fingertip_midpoint_quat = quat_from_euler_xyz( roll=desired_xyz[:, 0], pitch=desired_xyz[:, 1], yaw=desired_xyz[:, 2] ) @@ -236,10 +238,10 @@ def _get_rewards(self): rew_dict, rew_scales = {}, {} # Calculate action penalty for the asset-relative action space. - pos_error = torch.norm(self.delta_pos, p=2, dim=-1) / self.cfg.ctrl.pos_action_threshold[0] + pos_error = torch.linalg.norm(self.delta_pos, ord=2, dim=-1) / self.cfg.ctrl.pos_action_threshold[0] rot_error = torch.abs(self.delta_yaw) / self.cfg.ctrl.rot_action_threshold[0] # Contact penalty. - contact_force = torch.norm(self.force_sensor_smooth[:, 0:3], p=2, dim=-1, keepdim=False) + contact_force = torch.linalg.norm(self.force_sensor_smooth[:, 0:3], ord=2, dim=-1, keepdim=False) contact_penalty = torch.nn.functional.relu(contact_force - self.contact_penalty_thresholds) # Add success prediction rewards. check_rot = self.cfg_task.name == "nut_thread" @@ -282,12 +284,12 @@ def _reset_idx(self, env_ids): # Relative yaw to bolt. unrot_180_euler = torch.tensor([-np.pi, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1) - unrot_quat = torch_utils.quat_from_euler_xyz( + unrot_quat = quat_from_euler_xyz( roll=unrot_180_euler[:, 0], pitch=unrot_180_euler[:, 1], yaw=unrot_180_euler[:, 2] ) - fingertip_quat_rel_bolt = torch_utils.quat_mul(unrot_quat, self.fingertip_midpoint_quat) - fingertip_yaw_bolt = torch_utils.get_euler_xyz(fingertip_quat_rel_bolt)[-1] + fingertip_quat_rel_bolt = quat_mul(unrot_quat, self.fingertip_midpoint_quat) + fingertip_yaw_bolt = euler_xyz_from_quat(fingertip_quat_rel_bolt)[-1] fingertip_yaw_bolt = torch.where( fingertip_yaw_bolt > torch.pi / 2, fingertip_yaw_bolt - 2 * torch.pi, fingertip_yaw_bolt ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_events.py b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_events.py index 15ced1c2b1a9..f926f1c562fc 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_events.py @@ -4,9 +4,12 @@ # SPDX-License-Identifier: BSD-3-Clause from __future__ import annotations +from typing import TYPE_CHECKING + import torch -from isaaclab.envs import DirectRLEnv +if TYPE_CHECKING: + from isaaclab.envs import DirectRLEnv def randomize_dead_zone(env: DirectRLEnv, env_ids: torch.Tensor | None): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_utils.py index e966cf93f218..0e575b14c88c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_utils.py @@ -5,7 +5,7 @@ import torch -import isaacsim.core.utils.torch as torch_utils +from isaaclab.utils.math import combine_frame_transforms, quat_apply, quat_inv def get_random_prop_gains(default_values, noise_levels, num_envs, device): @@ -22,14 +22,27 @@ def get_random_prop_gains(default_values, noise_levels, num_envs, device): def change_FT_frame(source_F, source_T, source_frame, target_frame): - """Convert force/torque reading from source to target frame.""" + """Convert force/torque reading from source to target frame. + + Args: + source_F: Force in source frame. + source_T: Torque in source frame. + source_frame: Tuple of (quat_xyzw, pos) for source frame. + target_frame: Tuple of (quat_xyzw, pos) for target frame. + + Returns: + Tuple of (target_F, target_T) - force and torque in target frame. + """ # Modern Robotics eq. 3.95 - source_frame_inv = torch_utils.tf_inverse(source_frame[0], source_frame[1]) - target_T_source_quat, target_T_source_pos = torch_utils.tf_combine( - source_frame_inv[0], source_frame_inv[1], target_frame[0], target_frame[1] - ) - target_F = torch_utils.quat_apply(target_T_source_quat, source_F) - target_T = torch_utils.quat_apply( - target_T_source_quat, (source_T + torch.cross(target_T_source_pos, source_F, dim=-1)) + # Compute inverse of source frame + source_quat_inv = quat_inv(source_frame[0]) + source_pos_inv = -quat_apply(source_quat_inv, source_frame[1]) + + # Combine: source_inv * target = target_T_source + target_T_source_pos, target_T_source_quat = combine_frame_transforms( + source_pos_inv, source_quat_inv, target_frame[1], target_frame[0] ) + + target_F = quat_apply(target_T_source_quat, source_F) + target_T = quat_apply(target_T_source_quat, (source_T + torch.cross(target_T_source_pos, source_F, dim=-1))) return target_F, target_T diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/__init__.py index c282be85730f..564b1dd53bb1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/__init__.py @@ -19,7 +19,7 @@ entry_point=f"{__name__}.franka_cabinet_env:FrankaCabinetEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.franka_cabinet_env:FrankaCabinetEnvCfg", + "env_cfg_entry_point": f"{__name__}.franka_cabinet_env_cfg:FrankaCabinetEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:FrankaCabinetPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env.py index 120a1d1c3a9c..3f3d341c1aad 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env.py @@ -6,156 +6,17 @@ from __future__ import annotations import torch +import warp as wp -from isaacsim.core.utils.torch.transformations import tf_combine, tf_inverse, tf_vector from pxr import UsdGeom import isaaclab.sim as sim_utils -from isaaclab.actuators import ImplicitActuatorCfg -from isaaclab.assets import Articulation, ArticulationCfg -from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import SimulationCfg +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnv from isaaclab.sim.utils.stage import get_current_stage -from isaaclab.terrains import TerrainImporterCfg -from isaaclab.utils import configclass -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR -from isaaclab.utils.math import sample_uniform - - -@configclass -class FrankaCabinetEnvCfg(DirectRLEnvCfg): - # env - episode_length_s = 8.3333 # 500 timesteps - decimation = 2 - action_space = 9 - observation_space = 23 - state_space = 0 - - # simulation - sim: SimulationCfg = SimulationCfg( - dt=1 / 120, - render_interval=decimation, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="multiply", - restitution_combine_mode="multiply", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - ) - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=4096, env_spacing=3.0, replicate_physics=True, clone_in_fabric=True - ) - - # robot - robot = ArticulationCfg( - prim_path="/World/envs/env_.*/Robot", - spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd", - activate_contact_sensors=False, - rigid_props=sim_utils.RigidBodyPropertiesCfg( - disable_gravity=False, - max_depenetration_velocity=5.0, - ), - articulation_props=sim_utils.ArticulationRootPropertiesCfg( - enabled_self_collisions=False, solver_position_iteration_count=12, solver_velocity_iteration_count=1 - ), - ), - init_state=ArticulationCfg.InitialStateCfg( - joint_pos={ - "panda_joint1": 1.157, - "panda_joint2": -1.066, - "panda_joint3": -0.155, - "panda_joint4": -2.239, - "panda_joint5": -1.841, - "panda_joint6": 1.003, - "panda_joint7": 0.469, - "panda_finger_joint.*": 0.035, - }, - pos=(1.0, 0.0, 0.0), - rot=(0.0, 0.0, 0.0, 1.0), - ), - actuators={ - "panda_shoulder": ImplicitActuatorCfg( - joint_names_expr=["panda_joint[1-4]"], - effort_limit_sim=87.0, - stiffness=80.0, - damping=4.0, - ), - "panda_forearm": ImplicitActuatorCfg( - joint_names_expr=["panda_joint[5-7]"], - effort_limit_sim=12.0, - stiffness=80.0, - damping=4.0, - ), - "panda_hand": ImplicitActuatorCfg( - joint_names_expr=["panda_finger_joint.*"], - effort_limit_sim=200.0, - stiffness=2e3, - damping=1e2, - ), - }, - ) - - # cabinet - cabinet = ArticulationCfg( - prim_path="/World/envs/env_.*/Cabinet", - spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd", - activate_contact_sensors=False, - ), - init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0, 0.4), - rot=(0.1, 0.0, 0.0, 0.0), - joint_pos={ - "door_left_joint": 0.0, - "door_right_joint": 0.0, - "drawer_bottom_joint": 0.0, - "drawer_top_joint": 0.0, - }, - ), - actuators={ - "drawers": ImplicitActuatorCfg( - joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"], - effort_limit_sim=87.0, - stiffness=10.0, - damping=1.0, - ), - "doors": ImplicitActuatorCfg( - joint_names_expr=["door_left_joint", "door_right_joint"], - effort_limit_sim=87.0, - stiffness=10.0, - damping=2.5, - ), - }, - ) - - # ground plane - terrain = TerrainImporterCfg( - prim_path="/World/ground", - terrain_type="plane", - collision_group=-1, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="multiply", - restitution_combine_mode="multiply", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - ) - - action_scale = 7.5 - dof_velocity_scale = 0.1 - - # reward scales - dist_reward_scale = 1.5 - rot_reward_scale = 1.5 - open_reward_scale = 10.0 - action_penalty_scale = 0.05 - finger_reward_scale = 2.0 +from isaaclab.utils.math import combine_frame_transforms, quat_apply, quat_conjugate, sample_uniform + +from .franka_cabinet_env_cfg import FrankaCabinetEnvCfg # noqa: F401 class FrankaCabinetEnv(DirectRLEnv): @@ -187,13 +48,14 @@ def get_env_local_pose(env_pos: torch.Tensor, xformable: UsdGeom.Xformable, devi qz = world_quat.imaginary[2] qw = world_quat.real - return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device) + # Return pose as [pos(3), quat_xyzw(4)] + return torch.tensor([px, py, pz, qx, qy, qz, qw], device=device) self.dt = self.cfg.sim.dt * self.cfg.decimation # create auxiliary variables for computing applied action, observations and rewards - self.robot_dof_lower_limits = self._robot.data.soft_joint_pos_limits[0, :, 0].to(device=self.device) - self.robot_dof_upper_limits = self._robot.data.soft_joint_pos_limits[0, :, 1].to(device=self.device) + self.robot_dof_lower_limits = self._robot.data.soft_joint_pos_limits.torch[0, :, 0].to(device=self.device) + self.robot_dof_upper_limits = self._robot.data.soft_joint_pos_limits.torch[0, :, 1].to(device=self.device) self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits) self.robot_dof_speed_scales[self._robot.find_joints("panda_finger_joint1")[0]] = 0.1 @@ -221,16 +83,18 @@ def get_env_local_pose(env_pos: torch.Tensor, xformable: UsdGeom.Xformable, devi finger_pose = torch.zeros(7, device=self.device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] - hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3]) + hand_pose_inv_rot = quat_conjugate(hand_pose[3:7]) + hand_pose_inv_pos = -quat_apply(hand_pose_inv_rot, hand_pose[0:3]) - robot_local_grasp_pose_rot, robot_local_pose_pos = tf_combine( - hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3] + robot_local_pose_pos, robot_local_grasp_pose_rot = combine_frame_transforms( + hand_pose_inv_pos, hand_pose_inv_rot, finger_pose[0:3], finger_pose[3:7] ) - robot_local_pose_pos += torch.tensor([0, 0.04, 0], device=self.device) + robot_local_pose_pos = robot_local_pose_pos + torch.tensor([0, 0.04, 0], device=self.device) self.robot_local_grasp_pos = robot_local_pose_pos.repeat((self.num_envs, 1)) self.robot_local_grasp_rot = robot_local_grasp_pose_rot.repeat((self.num_envs, 1)) - drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self.device) + # Drawer local grasp pose: [pos(3), quat_xyzw(4)] - identity quaternion is [0,0,0,1] + drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 0.0, 0.0, 0.0, 1.0], device=self.device) self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self.num_envs, 1)) self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self.num_envs, 1)) @@ -258,6 +122,9 @@ def get_env_local_pose(env_pos: torch.Tensor, xformable: UsdGeom.Xformable, devi self.drawer_grasp_rot = torch.zeros((self.num_envs, 4), device=self.device) self.drawer_grasp_pos = torch.zeros((self.num_envs, 3), device=self.device) + # Sticky per-env flag: True once the drawer was opened past the success threshold. + self._episode_succeeded = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) + def _setup_scene(self): self._robot = Articulation(self.cfg.robot) self._cabinet = Articulation(self.cfg.cabinet) @@ -286,24 +153,24 @@ def _pre_physics_step(self, actions: torch.Tensor): self.robot_dof_targets[:] = torch.clamp(targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits) def _apply_action(self): - self._robot.set_joint_position_target(self.robot_dof_targets) + self._robot.set_joint_position_target_index(target=self.robot_dof_targets) # post-physics step calls def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: - terminated = self._cabinet.data.joint_pos[:, self.drawer_joint_idx] > 0.39 + terminated = self._cabinet.data.joint_pos.torch[:, self.drawer_joint_idx] > 0.39 truncated = self.episode_length_buf >= self.max_episode_length - 1 return terminated, truncated def _get_rewards(self) -> torch.Tensor: # Refresh the intermediate values after the physics steps self._compute_intermediate_values() - robot_left_finger_pos = self._robot.data.body_pos_w[:, self.left_finger_link_idx] - robot_right_finger_pos = self._robot.data.body_pos_w[:, self.right_finger_link_idx] + robot_left_finger_pos = self._robot.data.body_pos_w.torch[:, self.left_finger_link_idx] + robot_right_finger_pos = self._robot.data.body_pos_w.torch[:, self.right_finger_link_idx] - return self._compute_rewards( + reward = self._compute_rewards( self.actions, - self._cabinet.data.joint_pos, + self._cabinet.data.joint_pos.torch, self.robot_grasp_pos, self.drawer_grasp_pos, self.robot_grasp_rot, @@ -320,13 +187,23 @@ def _get_rewards(self) -> torch.Tensor: self.cfg.open_reward_scale, self.cfg.action_penalty_scale, self.cfg.finger_reward_scale, - self._robot.data.joint_pos, + self._robot.data.joint_pos.torch, ) + drawer_pos = self._cabinet.data.joint_pos.torch[:, self.drawer_joint_idx] + self._episode_succeeded |= drawer_pos > self.cfg.success_drawer_pos_threshold + return reward def _reset_idx(self, env_ids: torch.Tensor | None): + # Flush per-episode success (sticky binary: drawer ever opened past the cfg threshold). + drawer_pos = self._cabinet.data.joint_pos.torch[env_ids, self.drawer_joint_idx] + log = self.extras.setdefault("log", {}) + log["Metrics/success_rate"] = self._episode_succeeded[env_ids].float().mean().item() + log["Metrics/drawer_pos"] = drawer_pos.mean().item() + self._episode_succeeded[env_ids] = False + super()._reset_idx(env_ids) # robot state - joint_pos = self._robot.data.default_joint_pos[env_ids] + sample_uniform( + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] + sample_uniform( -0.125, 0.125, (len(env_ids), self._robot.num_joints), @@ -334,12 +211,14 @@ def _reset_idx(self, env_ids: torch.Tensor | None): ) joint_pos = torch.clamp(joint_pos, self.robot_dof_lower_limits, self.robot_dof_upper_limits) joint_vel = torch.zeros_like(joint_pos) - self._robot.set_joint_position_target(joint_pos, env_ids=env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self._robot.set_joint_position_target_index(target=joint_pos, env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) # cabinet state zeros = torch.zeros((len(env_ids), self._cabinet.num_joints), device=self.device) - self._cabinet.write_joint_state_to_sim(zeros, zeros, env_ids=env_ids) + self._cabinet.write_joint_position_to_sim_index(position=zeros, env_ids=env_ids) + self._cabinet.write_joint_velocity_to_sim_index(velocity=zeros, env_ids=env_ids) # Need to refresh the intermediate values so that _get_observations() can use the latest values self._compute_intermediate_values(env_ids) @@ -347,7 +226,7 @@ def _reset_idx(self, env_ids: torch.Tensor | None): def _get_observations(self) -> dict: dof_pos_scaled = ( 2.0 - * (self._robot.data.joint_pos - self.robot_dof_lower_limits) + * (self._robot.data.joint_pos.torch - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 ) @@ -356,10 +235,10 @@ def _get_observations(self) -> dict: obs = torch.cat( ( dof_pos_scaled, - self._robot.data.joint_vel * self.cfg.dof_velocity_scale, + self._robot.data.joint_vel.torch * self.cfg.dof_velocity_scale, to_target, - self._cabinet.data.joint_pos[:, self.drawer_joint_idx].unsqueeze(-1), - self._cabinet.data.joint_vel[:, self.drawer_joint_idx].unsqueeze(-1), + self._cabinet.data.joint_pos.torch[:, self.drawer_joint_idx].unsqueeze(-1), + self._cabinet.data.joint_vel.torch[:, self.drawer_joint_idx].unsqueeze(-1), ), dim=-1, ) @@ -369,12 +248,12 @@ def _get_observations(self) -> dict: def _compute_intermediate_values(self, env_ids: torch.Tensor | None = None): if env_ids is None: - env_ids = self._robot._ALL_INDICES + env_ids = wp.to_torch(self._robot._ALL_INDICES) - hand_pos = self._robot.data.body_pos_w[env_ids, self.hand_link_idx] - hand_rot = self._robot.data.body_quat_w[env_ids, self.hand_link_idx] - drawer_pos = self._cabinet.data.body_pos_w[env_ids, self.drawer_link_idx] - drawer_rot = self._cabinet.data.body_quat_w[env_ids, self.drawer_link_idx] + hand_pos = self._robot.data.body_pos_w.torch[env_ids, self.hand_link_idx] + hand_rot = self._robot.data.body_quat_w.torch[env_ids, self.hand_link_idx] + drawer_pos = self._cabinet.data.body_pos_w.torch[env_ids, self.drawer_link_idx] + drawer_rot = self._cabinet.data.body_quat_w.torch[env_ids, self.drawer_link_idx] ( self.robot_grasp_rot[env_ids], self.robot_grasp_pos[env_ids], @@ -414,15 +293,15 @@ def _compute_rewards( joint_positions, ): # distance from hand to the drawer - d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1) + d = torch.linalg.norm(franka_grasp_pos - drawer_grasp_pos, ord=2, dim=-1) dist_reward = 1.0 / (1.0 + d**2) dist_reward *= dist_reward dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward) - axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) - axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis) - axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) - axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis) + axis1 = quat_apply(franka_grasp_rot, gripper_forward_axis) + axis2 = quat_apply(drawer_grasp_rot, drawer_inward_axis) + axis3 = quat_apply(franka_grasp_rot, gripper_up_axis) + axis4 = quat_apply(drawer_grasp_rot, drawer_up_axis) dot1 = ( torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) @@ -483,11 +362,11 @@ def _compute_grasp_transforms( drawer_local_grasp_rot, drawer_local_grasp_pos, ): - global_franka_rot, global_franka_pos = tf_combine( - hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos + global_franka_pos, global_franka_rot = combine_frame_transforms( + hand_pos, hand_rot, franka_local_grasp_pos, franka_local_grasp_rot ) - global_drawer_rot, global_drawer_pos = tf_combine( - drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos + global_drawer_pos, global_drawer_rot = combine_frame_transforms( + drawer_pos, drawer_rot, drawer_local_grasp_pos, drawer_local_grasp_rot ) return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env_cfg.py new file mode 100644 index 000000000000..32b7a88d3e9b --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/franka_cabinet/franka_cabinet_env_cfg.py @@ -0,0 +1,155 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + + +@configclass +class FrankaCabinetEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 8.3333 # 500 timesteps + decimation = 2 + action_space = 9 + observation_space = 23 + state_space = 0 + + # simulation + sim: SimulationCfg = SimulationCfg( + dt=1 / 120, + render_interval=decimation, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=3.0, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot = ArticulationCfg( + prim_path="/World/envs/env_.*/Robot", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd", + activate_contact_sensors=False, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + max_depenetration_velocity=5.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, solver_position_iteration_count=12, solver_velocity_iteration_count=1 + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + "panda_joint1": 1.157, + "panda_joint2": -1.066, + "panda_joint3": -0.155, + "panda_joint4": -2.239, + "panda_joint5": -1.841, + "panda_joint6": 1.003, + "panda_joint7": 0.469, + "panda_finger_joint.*": 0.035, + }, + pos=(1.0, 0.0, 0.0), + rot=(0.0, 0.0, 1.0, 0.0), + ), + actuators={ + "panda_shoulder": ImplicitActuatorCfg( + joint_names_expr=["panda_joint[1-4]"], + effort_limit_sim=87.0, + stiffness=80.0, + damping=4.0, + ), + "panda_forearm": ImplicitActuatorCfg( + joint_names_expr=["panda_joint[5-7]"], + effort_limit_sim=12.0, + stiffness=80.0, + damping=4.0, + ), + "panda_hand": ImplicitActuatorCfg( + joint_names_expr=["panda_finger_joint.*"], + effort_limit_sim=200.0, + stiffness=2e3, + damping=1e2, + ), + }, + ) + + # cabinet + cabinet = ArticulationCfg( + prim_path="/World/envs/env_.*/Cabinet", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd", + activate_contact_sensors=False, + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0, 0.4), + rot=(0.0, 0.0, 0.0, 1.0), + joint_pos={ + "door_left_joint": 0.0, + "door_right_joint": 0.0, + "drawer_bottom_joint": 0.0, + "drawer_top_joint": 0.0, + }, + ), + actuators={ + "drawers": ImplicitActuatorCfg( + joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"], + effort_limit_sim=87.0, + stiffness=10.0, + damping=1.0, + ), + "doors": ImplicitActuatorCfg( + joint_names_expr=["door_left_joint", "door_right_joint"], + effort_limit_sim=87.0, + stiffness=10.0, + damping=2.5, + ), + }, + ) + + # ground plane + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + ) + + action_scale = 7.5 + dof_velocity_scale = 0.1 + + # reward scales + dist_reward_scale = 1.5 + rot_reward_scale = 1.5 + open_reward_scale = 10.0 + action_penalty_scale = 0.05 + finger_reward_scale = 2.0 + + # success criteria + success_drawer_pos_threshold: float = 0.30 + """Drawer joint position above which the drawer is considered successfully opened [m].""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/__init__.py index f7e0f518b666..dbaeb7a468cc 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/__init__.py @@ -20,7 +20,7 @@ entry_point=f"{__name__}.humanoid_env:HumanoidEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.humanoid_env:HumanoidEnvCfg", + "env_cfg_entry_point": f"{__name__}.humanoid_env_cfg:HumanoidEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:HumanoidPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env.py index 402409e9d35b..3b1ea9a50091 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env.py @@ -5,89 +5,9 @@ from __future__ import annotations -import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg -from isaaclab.envs import DirectRLEnvCfg -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import SimulationCfg -from isaaclab.terrains import TerrainImporterCfg -from isaaclab.utils import configclass - from isaaclab_tasks.direct.locomotion.locomotion_env import LocomotionEnv -from isaaclab_assets import HUMANOID_CFG - - -@configclass -class HumanoidEnvCfg(DirectRLEnvCfg): - # env - episode_length_s = 15.0 - decimation = 2 - action_scale = 1.0 - action_space = 21 - observation_space = 75 - state_space = 0 - - # simulation - sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation) - terrain = TerrainImporterCfg( - prim_path="/World/ground", - terrain_type="plane", - collision_group=-1, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="average", - restitution_combine_mode="average", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - debug_vis=False, - ) - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True - ) - - # robot - robot: ArticulationCfg = HUMANOID_CFG.replace(prim_path="/World/envs/env_.*/Robot") - joint_gears: list = [ - 67.5000, # lower_waist - 67.5000, # lower_waist - 67.5000, # right_upper_arm - 67.5000, # right_upper_arm - 67.5000, # left_upper_arm - 67.5000, # left_upper_arm - 67.5000, # pelvis - 45.0000, # right_lower_arm - 45.0000, # left_lower_arm - 45.0000, # right_thigh: x - 135.0000, # right_thigh: y - 45.0000, # right_thigh: z - 45.0000, # left_thigh: x - 135.0000, # left_thigh: y - 45.0000, # left_thigh: z - 90.0000, # right_knee - 90.0000, # left_knee - 22.5, # right_foot - 22.5, # right_foot - 22.5, # left_foot - 22.5, # left_foot - ] - - heading_weight: float = 0.5 - up_weight: float = 0.1 - - energy_cost_scale: float = 0.05 - actions_cost_scale: float = 0.01 - alive_reward_scale: float = 2.0 - dof_vel_scale: float = 0.1 - - death_cost: float = -1.0 - termination_height: float = 0.8 - - angular_velocity_scale: float = 0.25 - contact_force_scale: float = 0.01 +from .humanoid_env_cfg import HumanoidEnvCfg # noqa: F401 class HumanoidEnv(LocomotionEnv): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env_cfg.py new file mode 100644 index 000000000000..b2b10dc2d911 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid/humanoid_env_cfg.py @@ -0,0 +1,141 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_ovphysx.physics import OvPhysxCfg +from isaaclab_physx.physics import PhysxCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg + +from isaaclab_assets import HUMANOID_CFG + + +@configclass +class HumanoidPhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + physx: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=80, + nconmax=25, + cone="pyramidal", + update_data_interval=2, + integrator="implicitfast", + impratio=1, + ), + num_substeps=2, + debug_mode=False, + ) + ovphysx: OvPhysxCfg = OvPhysxCfg() + + +@configclass +class HumanoidEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 15.0 + decimation = 2 + action_scale = 1.0 + action_space = 21 + observation_space = 75 + state_space = 0 + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=HumanoidPhysicsCfg()) + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="average", + restitution_combine_mode="average", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot: ArticulationCfg = HUMANOID_CFG.replace(prim_path="/World/envs/env_.*/Robot") + + physx_joint_gears: list = [ + 67.5000, # lower_waist + 67.5000, # lower_waist + 67.5000, # right_upper_arm + 67.5000, # right_upper_arm + 67.5000, # left_upper_arm + 67.5000, # left_upper_arm + 67.5000, # pelvis + 45.0000, # right_lower_arm + 45.0000, # left_lower_arm + 45.0000, # right_thigh: x + 135.0000, # right_thigh: y + 45.0000, # right_thigh: z + 45.0000, # left_thigh: x + 135.0000, # left_thigh: y + 45.0000, # left_thigh: z + 90.0000, # right_knee + 90.0000, # left_knee + 22.5, # right_foot + 22.5, # right_foot + 22.5, # left_foot + 22.5, # left_foot + ] + newton_joint_gears: list = [ + 67.5000, # left_upper_arm + 67.5000, # left_upper_arm + 45.0000, # left_lower_arm + 67.5000, # lower_waist + 67.5000, # lower_waist + 67.5000, # pelvis + 45.0000, # left_thigh: x + 135.0000, # left_thigh: y + 45.0000, # left_thigh: z + 90.0000, # left_knee + 22.5, # left_foot + 22.5, # left_foot + 45.0000, # right_thigh: x + 135.0000, # right_thigh: y + 45.0000, # right_thigh: z + 90.0000, # right_knee + 22.5, # right_foot + 22.5, # right_foot + 67.5000, # right_upper_arm + 67.5000, # right_upper_arm + 45.0000, # right_lower_arm + ] + joint_gears = { + "physx": physx_joint_gears, + "newton": newton_joint_gears, + } + + heading_weight: float = 0.5 + up_weight: float = 0.1 + + energy_cost_scale: float = 0.05 + actions_cost_scale: float = 0.01 + alive_reward_scale: float = 2.0 + dof_vel_scale: float = 0.1 + + death_cost: float = -1.0 + termination_height: float = 0.8 + + angular_velocity_scale: float = 0.25 + contact_force_scale: float = 0.01 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml index 3071d039b88c..437fa80c6058 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_dance_amp_cfg.yaml @@ -75,12 +75,14 @@ agent: learning_rate: 5.0e-05 learning_rate_scheduler: null learning_rate_scheduler_kwargs: null - state_preprocessor: RunningStandardScaler + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null + state_preprocessor: null state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null - amp_state_preprocessor: RunningStandardScaler - amp_state_preprocessor_kwargs: null + amp_observation_preprocessor: RunningStandardScaler + amp_observation_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 0.0 @@ -91,10 +93,9 @@ agent: value_loss_scale: 2.5 discriminator_loss_scale: 5.0 amp_batch_size: 512 - task_reward_weight: 0.0 - style_reward_weight: 1.0 + task_reward_scale: 0.0 + style_reward_scale: 2.0 discriminator_batch_size: 4096 - discriminator_reward_scale: 2.0 discriminator_logit_regularization_scale: 0.05 discriminator_gradient_penalty_scale: 5.0 discriminator_weight_decay_scale: 1.0e-04 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml index 0f6fcdc1a03f..fa3c590a9584 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_run_amp_cfg.yaml @@ -75,12 +75,14 @@ agent: learning_rate: 5.0e-05 learning_rate_scheduler: null learning_rate_scheduler_kwargs: null - state_preprocessor: RunningStandardScaler + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null + state_preprocessor: null state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null - amp_state_preprocessor: RunningStandardScaler - amp_state_preprocessor_kwargs: null + amp_observation_preprocessor: RunningStandardScaler + amp_observation_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 0.0 @@ -91,10 +93,9 @@ agent: value_loss_scale: 2.5 discriminator_loss_scale: 5.0 amp_batch_size: 512 - task_reward_weight: 0.0 - style_reward_weight: 1.0 + task_reward_scale: 0.0 + style_reward_scale: 2.0 discriminator_batch_size: 4096 - discriminator_reward_scale: 2.0 discriminator_logit_regularization_scale: 0.05 discriminator_gradient_penalty_scale: 5.0 discriminator_weight_decay_scale: 1.0e-04 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml index efb34f0d2f5b..8b52c661d48b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/agents/skrl_walk_amp_cfg.yaml @@ -75,12 +75,14 @@ agent: learning_rate: 5.0e-05 learning_rate_scheduler: null learning_rate_scheduler_kwargs: null - state_preprocessor: RunningStandardScaler + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null + state_preprocessor: null state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null - amp_state_preprocessor: RunningStandardScaler - amp_state_preprocessor_kwargs: null + amp_observation_preprocessor: RunningStandardScaler + amp_observation_preprocessor_kwargs: null random_timesteps: 0 learning_starts: 0 grad_norm_clip: 0.0 @@ -91,10 +93,9 @@ agent: value_loss_scale: 2.5 discriminator_loss_scale: 5.0 amp_batch_size: 512 - task_reward_weight: 0.0 - style_reward_weight: 1.0 + task_reward_scale: 0.0 + style_reward_scale: 2.0 discriminator_batch_size: 4096 - discriminator_reward_scale: 2.0 discriminator_logit_regularization_scale: 0.05 discriminator_gradient_penalty_scale: 5.0 discriminator_weight_decay_scale: 1.0e-04 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env.py index 5d6a01e9d4e4..7c0fe5036132 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env.py @@ -8,6 +8,7 @@ import gymnasium as gym import numpy as np import torch +import warp as wp import isaaclab.sim as sim_utils from isaaclab.assets import Articulation @@ -26,8 +27,9 @@ def __init__(self, cfg: HumanoidAmpEnvCfg, render_mode: str | None = None, **kwa super().__init__(cfg, render_mode, **kwargs) # action offset and scale - dof_lower_limits = self.robot.data.soft_joint_pos_limits[0, :, 0] - dof_upper_limits = self.robot.data.soft_joint_pos_limits[0, :, 1] + soft_joint_pos_limits = self.robot.data.soft_joint_pos_limits.torch + dof_lower_limits = soft_joint_pos_limits[0, :, 0] + dof_upper_limits = soft_joint_pos_limits[0, :, 1] self.action_offset = 0.5 * (dof_upper_limits + dof_lower_limits) self.action_scale = dof_upper_limits - dof_lower_limits @@ -79,18 +81,18 @@ def _pre_physics_step(self, actions: torch.Tensor): def _apply_action(self): target = self.action_offset + self.action_scale * self.actions - self.robot.set_joint_position_target(target) + self.robot.set_joint_position_target_index(target=target) def _get_observations(self) -> dict: # build task observation obs = compute_obs( - self.robot.data.joint_pos, - self.robot.data.joint_vel, - self.robot.data.body_pos_w[:, self.ref_body_index], - self.robot.data.body_quat_w[:, self.ref_body_index], - self.robot.data.body_lin_vel_w[:, self.ref_body_index], - self.robot.data.body_ang_vel_w[:, self.ref_body_index], - self.robot.data.body_pos_w[:, self.key_body_indexes], + self.robot.data.joint_pos.torch, + self.robot.data.joint_vel.torch, + self.robot.data.body_pos_w.torch[:, self.ref_body_index], + self.robot.data.body_quat_w.torch[:, self.ref_body_index], + self.robot.data.body_lin_vel_w.torch[:, self.ref_body_index], + self.robot.data.body_ang_vel_w.torch[:, self.ref_body_index], + self.robot.data.body_pos_w.torch[:, self.key_body_indexes], ) # update AMP observation history @@ -108,14 +110,19 @@ def _get_rewards(self) -> torch.Tensor: def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: time_out = self.episode_length_buf >= self.max_episode_length - 1 if self.cfg.early_termination: - died = self.robot.data.body_pos_w[:, self.ref_body_index, 2] < self.cfg.termination_height + died = self.robot.data.body_pos_w.torch[:, self.ref_body_index, 2] < self.cfg.termination_height else: died = torch.zeros_like(time_out) return died, time_out def _reset_idx(self, env_ids: torch.Tensor | None): if env_ids is None or len(env_ids) == self.num_envs: - env_ids = self.robot._ALL_INDICES + # Convert warp array to torch tensor if needed + env_ids = ( + wp.to_torch(self.robot._ALL_INDICES) + if isinstance(self.robot._ALL_INDICES, wp.array) + else self.robot._ALL_INDICES + ) self.robot.reset(env_ids) super()._reset_idx(env_ids) @@ -127,17 +134,20 @@ def _reset_idx(self, env_ids: torch.Tensor | None): else: raise ValueError(f"Unknown reset strategy: {self.cfg.reset_strategy}") - self.robot.write_root_link_pose_to_sim(root_state[:, :7], env_ids) - self.robot.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids) - self.robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + self.robot.write_root_link_pose_to_sim_index(root_pose=root_state[:, :7], env_ids=env_ids) + self.robot.write_root_com_velocity_to_sim_index(root_velocity=root_state[:, 7:], env_ids=env_ids) + self.robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) # reset strategies def _reset_strategy_default(self, env_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - root_state = self.robot.data.default_root_state[env_ids].clone() - root_state[:, :3] += self.scene.env_origins[env_ids] - joint_pos = self.robot.data.default_joint_pos[env_ids].clone() - joint_vel = self.robot.data.default_joint_vel[env_ids].clone() + default_root_pose = self.robot.data.default_root_pose.torch[env_ids].clone() + default_root_vel = self.robot.data.default_root_vel.torch[env_ids].clone() + default_root_pose[:, :3] += self.scene.env_origins[env_ids] + root_state = torch.cat([default_root_pose, default_root_vel], dim=-1) + joint_pos = self.robot.data.default_joint_pos.torch[env_ids].clone() + joint_vel = self.robot.data.default_joint_vel.torch[env_ids].clone() return root_state, joint_pos, joint_vel def _reset_strategy_random( @@ -158,7 +168,13 @@ def _reset_strategy_random( # get root transforms (the humanoid torso) motion_torso_index = self._motion_loader.get_body_index(["torso"])[0] - root_state = self.robot.data.default_root_state[env_ids].clone() + root_state = torch.cat( + [ + self.robot.data.default_root_pose.torch[env_ids], + self.robot.data.default_root_vel.torch[env_ids], + ], + dim=-1, + ).clone() root_state[:, 0:3] = body_positions[:, motion_torso_index] + self.scene.env_origins[env_ids] root_state[:, 2] += 0.15 # lift the humanoid slightly to avoid collisions with the ground root_state[:, 3:7] = body_rotations[:, motion_torso_index] diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env_cfg.py index c7178f746c3f..324717dd6c18 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/humanoid_amp_env_cfg.py @@ -8,11 +8,13 @@ import os from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass from isaaclab_assets import HUMANOID_28_CFG @@ -52,10 +54,7 @@ class HumanoidAmpEnvCfg(DirectRLEnvCfg): sim: SimulationCfg = SimulationCfg( dt=1 / 60, render_interval=decimation, - physx=PhysxCfg( - gpu_found_lost_pairs_capacity=2**23, - gpu_total_aggregate_pairs_capacity=2**23, - ), + physics=PhysxCfg(gpu_found_lost_pairs_capacity=2**23, gpu_total_aggregate_pairs_capacity=2**23), ) # scene diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.py index 06a047fc65ec..a52ebe069b30 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.py @@ -7,5 +7,6 @@ AMP Motion Loader and motion files. """ -from .motion_loader import MotionLoader -from .motion_viewer import MotionViewer +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.pyi new file mode 100644 index 000000000000..38eab1bfbff9 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/humanoid_amp/motions/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "MotionLoader", + "MotionViewer", +] + +from .motion_loader import MotionLoader +from .motion_viewer import MotionViewer diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py index c8d4fbf9e2d0..5969d8c9c4be 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py @@ -16,6 +16,7 @@ from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import DirectRLEnv from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors import JointWrenchSensor, JointWrenchSensorCfg from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane from isaaclab.utils.math import quat_conjugate, quat_from_angle_axis, quat_mul, sample_uniform, saturate @@ -50,15 +51,21 @@ def __init__(self, cfg: AllegroHandEnvCfg | ShadowHandEnvCfg, render_mode: str | self.finger_bodies.sort() self.num_fingertips = len(self.finger_bodies) + self.finger_wrench_bodies = [] + if getattr(self, "_joint_wrench_sensor", None) is not None: + for body_name in self.cfg.fingertip_body_names: + self.finger_wrench_bodies.append(self._joint_wrench_sensor.body_names.index(body_name)) + self.finger_wrench_bodies.sort() + # joint limits - joint_pos_limits = self.hand.root_physx_view.get_dof_limits().to(self.device) + joint_pos_limits = self.hand.data.joint_limits.torch.to(self.device) self.hand_dof_lower_limits = joint_pos_limits[..., 0] self.hand_dof_upper_limits = joint_pos_limits[..., 1] # track goal resets self.reset_goal_buf = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) # used to compare object position - self.in_hand_pos = self.object.data.default_root_state[:, 0:3].clone() + self.in_hand_pos = self.object.data.default_root_pose.torch[:, 0:3].clone() self.in_hand_pos[:, 2] -= 0.04 # default goal positions self.goal_rot = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device) @@ -71,16 +78,35 @@ def __init__(self, cfg: AllegroHandEnvCfg | ShadowHandEnvCfg, render_mode: str | # track successes self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) + self._last_episode_success = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) # unit tensors self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) + # bind backend-optimal write methods (Newton prefers mask-based, PhysX prefers indexed) + use_mask = "newton" in self.sim.physics_manager.__name__.lower() + if use_mask: + self._set_joint_pos_target = self.hand.set_joint_position_target + self._write_obj_root_pose = self.object.write_root_pose_to_sim + self._write_obj_root_vel = self.object.write_root_velocity_to_sim + self._write_hand_joint_pos = self.hand.write_joint_position_to_sim + self._write_hand_joint_vel = self.hand.write_joint_velocity_to_sim + else: + self._set_joint_pos_target = self.hand.set_joint_position_target_index + self._write_obj_root_pose = self.object.write_root_pose_to_sim_index + self._write_obj_root_vel = self.object.write_root_velocity_to_sim_index + self._write_hand_joint_pos = self.hand.write_joint_position_to_sim_index + self._write_hand_joint_vel = self.hand.write_joint_velocity_to_sim_index + def _setup_scene(self): # add hand, in-hand object, and goal object self.hand = Articulation(self.cfg.robot_cfg) - self.object = RigidObject(self.cfg.object_cfg) + self.object: Articulation | RigidObject = self.cfg.object_cfg.class_type(self.cfg.object_cfg) + self._joint_wrench_sensor = None + if self.cfg.asymmetric_obs: + self._joint_wrench_sensor = self._create_joint_wrench_sensor() # add ground plane spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg()) # clone and replicate (no need to filter for this environment) @@ -88,10 +114,16 @@ def _setup_scene(self): # add articulation to scene - we must register to scene to randomize with EventManager self.scene.articulations["robot"] = self.hand self.scene.rigid_objects["object"] = self.object + if self._joint_wrench_sensor is not None: + self.scene.sensors["joint_wrench"] = self._joint_wrench_sensor # add lights light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) light_cfg.func("/World/Light", light_cfg) + def _create_joint_wrench_sensor(self) -> JointWrenchSensor: + """Create the joint-wrench sensor used for fingertip force/torque observations.""" + return JointWrenchSensor(JointWrenchSensorCfg(prim_path=self.cfg.robot_cfg.prim_path)) + def _pre_physics_step(self, actions: torch.Tensor) -> None: self.actions = actions.clone() @@ -113,15 +145,13 @@ def _apply_action(self) -> None: self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices] - self.hand.set_joint_position_target( - self.cur_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices + self._set_joint_pos_target( + target=self.cur_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices ) def _get_observations(self) -> dict: if self.cfg.asymmetric_obs: - self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[ - :, self.finger_bodies - ] + self._update_fingertip_force_sensors() if self.cfg.obs_type == "openai": obs = self.compute_reduced_observations() @@ -138,6 +168,27 @@ def _get_observations(self) -> dict: observations = {"policy": obs, "critic": states} return observations + def _update_fingertip_force_sensors(self) -> None: + """Update fingertip force/torque observations from the joint-wrench sensor.""" + if getattr(self, "_joint_wrench_sensor", None) is None: + self.fingertip_force_sensors = torch.zeros( + self.num_envs, len(self.finger_bodies), 6, dtype=torch.float32, device=self.device + ) + return + + sensor_data = self._joint_wrench_sensor.data + force_data = sensor_data.force + torque_data = sensor_data.torque + if force_data is None or torque_data is None: + self.fingertip_force_sensors = torch.zeros( + self.num_envs, len(self.finger_bodies), 6, dtype=torch.float32, device=self.device + ) + return + + force = force_data.torch[:, self.finger_wrench_bodies] + torque = torque_data.torch[:, self.finger_wrench_bodies] + self.fingertip_force_sensors = torch.cat((force, torque), dim=-1) + def _get_rewards(self) -> torch.Tensor: ( total_reward, @@ -181,7 +232,7 @@ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: self._compute_intermediate_values() # reset when cube has fallen - goal_dist = torch.norm(self.object_pos - self.in_hand_pos, p=2, dim=-1) + goal_dist = torch.linalg.norm(self.object_pos - self.in_hand_pos, ord=2, dim=-1) out_of_reach = goal_dist >= self.cfg.fall_dist if self.cfg.max_consecutive_success > 0: @@ -199,49 +250,54 @@ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: time_out = time_out | max_success_reached return out_of_reach, time_out - def _reset_idx(self, env_ids: Sequence[int] | None): - if env_ids is None: - env_ids = self.hand._ALL_INDICES - # resets articulation and rigid body attributes + def _reset_idx(self, env_ids: Sequence[int]): + # Episode counts as successful when goals reached >= cfg.success_count_threshold. + self._last_episode_success[env_ids] = self.successes[env_ids] >= self.cfg.success_count_threshold + self.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self._last_episode_success[env_ids].float().mean().item() + ) + super()._reset_idx(env_ids) # reset goals self._reset_target_pose(env_ids) # reset object - object_default_state = self.object.data.default_root_state.clone()[env_ids] + object_default_pose = self.object.data.default_root_pose.torch.clone()[env_ids] + object_default_vel = self.object.data.default_root_vel.torch.clone()[env_ids] pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 3), device=self.device) # global object positions - object_default_state[:, 0:3] = ( - object_default_state[:, 0:3] + self.cfg.reset_position_noise * pos_noise + self.scene.env_origins[env_ids] + object_default_pose[:, 0:3] = ( + object_default_pose[:, 0:3] + self.cfg.reset_position_noise * pos_noise + self.scene.env_origins[env_ids] ) rot_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 2), device=self.device) # noise for X and Y rotation - object_default_state[:, 3:7] = randomize_rotation( + object_default_pose[:, 3:7] = randomize_rotation( rot_noise[:, 0], rot_noise[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] ) - object_default_state[:, 7:] = torch.zeros_like(self.object.data.default_root_state[env_ids, 7:]) - self.object.write_root_pose_to_sim(object_default_state[:, :7], env_ids) - self.object.write_root_velocity_to_sim(object_default_state[:, 7:], env_ids) + object_default_vel[:] = 0.0 + self._write_obj_root_pose(root_pose=object_default_pose, env_ids=env_ids) + self._write_obj_root_vel(root_velocity=object_default_vel, env_ids=env_ids) # reset hand - delta_max = self.hand_dof_upper_limits[env_ids] - self.hand.data.default_joint_pos[env_ids] - delta_min = self.hand_dof_lower_limits[env_ids] - self.hand.data.default_joint_pos[env_ids] + delta_max = self.hand_dof_upper_limits[env_ids] - self.hand.data.default_joint_pos.torch[env_ids] + delta_min = self.hand_dof_lower_limits[env_ids] - self.hand.data.default_joint_pos.torch[env_ids] dof_pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) rand_delta = delta_min + (delta_max - delta_min) * 0.5 * dof_pos_noise - dof_pos = self.hand.data.default_joint_pos[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta + dof_pos = self.hand.data.default_joint_pos.torch[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta dof_vel_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) - dof_vel = self.hand.data.default_joint_vel[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise + dof_vel = self.hand.data.default_joint_vel.torch[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise self.prev_targets[env_ids] = dof_pos self.cur_targets[env_ids] = dof_pos self.hand_dof_targets[env_ids] = dof_pos - self.hand.set_joint_position_target(dof_pos, env_ids=env_ids) - self.hand.write_joint_state_to_sim(dof_pos, dof_vel, env_ids=env_ids) + self._set_joint_pos_target(target=dof_pos, env_ids=env_ids) + self._write_hand_joint_pos(position=dof_pos, env_ids=env_ids) + self._write_hand_joint_vel(velocity=dof_vel, env_ids=env_ids) self.successes[env_ids] = 0 self._compute_intermediate_values() @@ -262,22 +318,22 @@ def _reset_target_pose(self, env_ids): def _compute_intermediate_values(self): # data for hand - self.fingertip_pos = self.hand.data.body_pos_w[:, self.finger_bodies] - self.fingertip_rot = self.hand.data.body_quat_w[:, self.finger_bodies] + self.fingertip_pos = self.hand.data.body_pos_w.torch[:, self.finger_bodies] + self.fingertip_rot = self.hand.data.body_quat_w.torch[:, self.finger_bodies] self.fingertip_pos -= self.scene.env_origins.repeat((1, self.num_fingertips)).reshape( self.num_envs, self.num_fingertips, 3 ) - self.fingertip_velocities = self.hand.data.body_vel_w[:, self.finger_bodies] + self.fingertip_velocities = self.hand.data.body_vel_w.torch[:, self.finger_bodies] - self.hand_dof_pos = self.hand.data.joint_pos - self.hand_dof_vel = self.hand.data.joint_vel + self.hand_dof_pos = self.hand.data.joint_pos.torch + self.hand_dof_vel = self.hand.data.joint_vel.torch # data for object - self.object_pos = self.object.data.root_pos_w - self.scene.env_origins - self.object_rot = self.object.data.root_quat_w - self.object_velocities = self.object.data.root_vel_w - self.object_linvel = self.object.data.root_lin_vel_w - self.object_angvel = self.object.data.root_ang_vel_w + self.object_pos = self.object.data.root_pos_w.torch - self.scene.env_origins + self.object_rot = self.object.data.root_quat_w.torch + self.object_velocities = self.object.data.root_vel_w.torch + self.object_linvel = self.object.data.root_lin_vel_w.torch + self.object_angvel = self.object.data.root_ang_vel_w.torch def compute_reduced_observations(self): # Per https://arxiv.org/pdf/1808.00177.pdf Table 2 @@ -372,7 +428,7 @@ def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor): def rotation_distance(object_rot, target_rot): # Orientation alignment for the cube in hand and goal cube quat_diff = quat_mul(object_rot, quat_conjugate(target_rot)) - return 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)) # changed quat convention + return 2.0 * torch.asin(torch.clamp(torch.linalg.norm(quat_diff[:, 0:3], ord=2, dim=-1), max=1.0)) @torch.jit.script @@ -397,7 +453,7 @@ def compute_rewards( fall_penalty: float, av_factor: float, ): - goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1) + goal_dist = torch.linalg.norm(object_pos - target_pos, ord=2, dim=-1) rot_dist = rotation_distance(object_rot, target_rot) dist_rew = goal_dist * dist_reward_scale diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/locomotion/locomotion_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/locomotion/locomotion_env.py index faac10e1a718..f97871ca83bf 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/locomotion/locomotion_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/locomotion/locomotion_env.py @@ -6,19 +6,71 @@ from __future__ import annotations import torch - -import isaacsim.core.utils.torch as torch_utils -from isaacsim.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate +import warp as wp +from isaaclab_newton.physics import NewtonCfg +from isaaclab_ovphysx.physics import OvPhysxCfg +from isaaclab_physx.physics import PhysxCfg import isaaclab.sim as sim_utils from isaaclab.assets import Articulation from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg +from isaaclab.utils.math import ( + euler_xyz_from_quat, + normalize, + quat_apply, + quat_apply_inverse, + quat_conjugate, + quat_mul, + scale_transform, +) def normalize_angle(x): return torch.atan2(torch.sin(x), torch.cos(x)) +@torch.jit.script +def compute_heading_and_up( + torso_rotation: torch.Tensor, + inv_start_rot: torch.Tensor, + to_target: torch.Tensor, + vec0: torch.Tensor, + vec1: torch.Tensor, + up_idx: int, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute heading and up vectors for locomotion tasks.""" + num_envs = torso_rotation.shape[0] + target_dirs = normalize(to_target) + + torso_quat = quat_mul(torso_rotation, inv_start_rot) + up_vec = quat_apply(torso_quat, vec1).view(num_envs, 3) + heading_vec = quat_apply(torso_quat, vec0).view(num_envs, 3) + up_proj = up_vec[:, up_idx] + heading_proj = torch.bmm(heading_vec.view(num_envs, 1, 3), target_dirs.view(num_envs, 3, 1)).view(num_envs) + + return torso_quat, up_proj, heading_proj, up_vec, heading_vec + + +@torch.jit.script +def compute_rot( + torso_quat: torch.Tensor, + velocity: torch.Tensor, + ang_velocity: torch.Tensor, + targets: torch.Tensor, + torso_positions: torch.Tensor, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute rotation-related quantities for locomotion tasks.""" + vel_loc = quat_apply_inverse(torso_quat, velocity) + angvel_loc = quat_apply_inverse(torso_quat, ang_velocity) + + roll, pitch, yaw = euler_xyz_from_quat(torso_quat) + + walk_target_angle = torch.atan2(targets[:, 1] - torso_positions[:, 1], targets[:, 0] - torso_positions[:, 0]) + angle_to_target = walk_target_angle - yaw + + return vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target + + class LocomotionEnv(DirectRLEnv): cfg: DirectRLEnvCfg @@ -26,7 +78,17 @@ def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs super().__init__(cfg, render_mode, **kwargs) self.action_scale = self.cfg.action_scale - self.joint_gears = torch.tensor(self.cfg.joint_gears, dtype=torch.float32, device=self.sim.device) + # Resolve the joint gears based on the physics type, since they do not have the same joint ordering. + if isinstance(self.cfg.joint_gears, dict): + if isinstance(self.cfg.sim.physics, (PhysxCfg, OvPhysxCfg)): + joint_gears = self.cfg.joint_gears["physx"] + elif isinstance(self.cfg.sim.physics, NewtonCfg): + joint_gears = self.cfg.joint_gears["newton"] + else: + raise ValueError(f"Invalid physics type: {self.cfg.sim.physics}") + else: + joint_gears = self.cfg.joint_gears + self.joint_gears = torch.tensor(joint_gears, dtype=torch.float32, device=self.sim.device) self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self.sim.device) self._joint_dof_idx, _ = self.robot.find_joints(".*") @@ -36,7 +98,7 @@ def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs (self.num_envs, 1) ) self.targets += self.scene.env_origins - self.start_rotation = torch.tensor([1, 0, 0, 0], device=self.sim.device, dtype=torch.float32) + self.start_rotation = torch.tensor([0, 0, 0, 1], device=self.sim.device, dtype=torch.float32) self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self.sim.device).repeat((self.num_envs, 1)) self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self.sim.device).repeat( (self.num_envs, 1) @@ -66,13 +128,19 @@ def _pre_physics_step(self, actions: torch.Tensor): self.actions = actions.clone() def _apply_action(self): - forces = self.action_scale * self.joint_gears * self.actions - self.robot.set_joint_effort_target(forces, joint_ids=self._joint_dof_idx) + forces = self.action_scale * self.joint_gears * torch.clamp(self.actions, -1.0, 1.0) + self.robot.set_joint_effort_target_index(target=forces, joint_ids=self._joint_dof_idx) def _compute_intermediate_values(self): - self.torso_position, self.torso_rotation = self.robot.data.root_pos_w, self.robot.data.root_quat_w - self.velocity, self.ang_velocity = self.robot.data.root_lin_vel_w, self.robot.data.root_ang_vel_w - self.dof_pos, self.dof_vel = self.robot.data.joint_pos, self.robot.data.joint_vel + self.torso_position, self.torso_rotation = ( + self.robot.data.root_pos_w.torch, + self.robot.data.root_quat_w.torch, + ) + self.velocity, self.ang_velocity = ( + self.robot.data.root_lin_vel_w.torch, + self.robot.data.root_ang_vel_w.torch, + ) + self.dof_pos, self.dof_vel = self.robot.data.joint_pos.torch, self.robot.data.joint_vel.torch ( self.up_proj, @@ -95,8 +163,8 @@ def _compute_intermediate_values(self): self.velocity, self.ang_velocity, self.dof_pos, - self.robot.data.soft_joint_pos_limits[0, :, 0], - self.robot.data.soft_joint_pos_limits[0, :, 1], + self.robot.data.soft_joint_pos_limits.torch[0, :, 0], + self.robot.data.soft_joint_pos_limits.torch[0, :, 1], self.inv_start_rot, self.basis_vec0, self.basis_vec1, @@ -154,22 +222,29 @@ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: def _reset_idx(self, env_ids: torch.Tensor | None): if env_ids is None or len(env_ids) == self.num_envs: - env_ids = self.robot._ALL_INDICES + env_ids = wp.to_torch(self.robot._ALL_INDICES) + + # Log survival success rate (survived = timed out without falling) + survived = self.reset_time_outs[env_ids].float() + self.extras.setdefault("log", {})["Metrics/success_rate"] = survived.mean().item() + self.robot.reset(env_ids) super()._reset_idx(env_ids) - joint_pos = self.robot.data.default_joint_pos[env_ids] - joint_vel = self.robot.data.default_joint_vel[env_ids] - default_root_state = self.robot.data.default_root_state[env_ids] - default_root_state[:, :3] += self.scene.env_origins[env_ids] + joint_pos = self.robot.data.default_joint_pos.torch[env_ids].clone() + joint_vel = self.robot.data.default_joint_vel.torch[env_ids].clone() + default_root_pose = self.robot.data.default_root_pose.torch[env_ids].clone() + default_root_vel = self.robot.data.default_root_vel.torch[env_ids].clone() + default_root_pose[:, :3] += self.scene.env_origins[env_ids] - self.robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self.robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self.robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + self.robot.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self.robot.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self.robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) - to_target = self.targets[env_ids] - default_root_state[:, :3] + to_target = self.targets[env_ids] - default_root_pose[:, :3] to_target[:, 2] = 0.0 - self.potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.cfg.sim.dt + self.potentials[env_ids] = -torch.linalg.norm(to_target, ord=2, dim=-1) / self.cfg.sim.dt self._compute_intermediate_values() @@ -256,12 +331,12 @@ def compute_intermediate_values( torso_quat, velocity, ang_velocity, targets, torso_position ) - dof_pos_scaled = torch_utils.maths.unscale(dof_pos, dof_lower_limits, dof_upper_limits) + dof_pos_scaled = scale_transform(dof_pos, dof_lower_limits, dof_upper_limits) to_target = targets - torso_position to_target[:, 2] = 0.0 prev_potentials[:] = potentials - potentials = -torch.norm(to_target, p=2, dim=-1) / dt + potentials = -torch.linalg.norm(to_target, ord=2, dim=-1) / dt return ( up_proj, diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/__init__.py index a1a5c9ef913a..6d26209cd022 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/__init__.py @@ -20,7 +20,7 @@ entry_point=f"{__name__}.quadcopter_env:QuadcopterEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.quadcopter_env:QuadcopterEnvCfg", + "env_cfg_entry_point": f"{__name__}.quadcopter_env_cfg:QuadcopterEnvCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:QuadcopterPPORunnerCfg", "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py index 02857c63d348..c1e3a1b3c9ee 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py @@ -7,24 +7,19 @@ import gymnasium as gym import torch +import warp as wp import isaaclab.sim as sim_utils -from isaaclab.assets import Articulation, ArticulationCfg -from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnv from isaaclab.envs.ui import BaseEnvWindow from isaaclab.markers import VisualizationMarkers -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import SimulationCfg -from isaaclab.terrains import TerrainImporterCfg -from isaaclab.utils import configclass from isaaclab.utils.math import subtract_frame_transforms -## -# Pre-defined configs -## -from isaaclab_assets import CRAZYFLIE_CFG # isort: skip from isaaclab.markers import CUBOID_MARKER_CFG # isort: skip +from .quadcopter_env_cfg import QuadcopterEnvCfg # noqa: F401 + class QuadcopterEnvWindow(BaseEnvWindow): """Window manager for the Quadcopter environment.""" @@ -46,60 +41,6 @@ def __init__(self, env: QuadcopterEnv, window_name: str = "IsaacLab"): self._create_debug_vis_ui_element("targets", self.env) -@configclass -class QuadcopterEnvCfg(DirectRLEnvCfg): - # env - episode_length_s = 10.0 - decimation = 2 - action_space = 4 - observation_space = 12 - state_space = 0 - debug_vis = True - - ui_window_class_type = QuadcopterEnvWindow - - # simulation - sim: SimulationCfg = SimulationCfg( - dt=1 / 100, - render_interval=decimation, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="multiply", - restitution_combine_mode="multiply", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - ) - terrain = TerrainImporterCfg( - prim_path="/World/ground", - terrain_type="plane", - collision_group=-1, - physics_material=sim_utils.RigidBodyMaterialCfg( - friction_combine_mode="multiply", - restitution_combine_mode="multiply", - static_friction=1.0, - dynamic_friction=1.0, - restitution=0.0, - ), - debug_vis=False, - ) - - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=4096, env_spacing=2.5, replicate_physics=True, clone_in_fabric=True - ) - - # robot - robot: ArticulationCfg = CRAZYFLIE_CFG.replace(prim_path="/World/envs/env_.*/Robot") - thrust_to_weight = 1.9 - moment_scale = 0.01 - - # reward scales - lin_vel_reward_scale = -0.05 - ang_vel_reward_scale = -0.01 - distance_to_goal_reward_scale = 15.0 - - class QuadcopterEnv(DirectRLEnv): cfg: QuadcopterEnvCfg @@ -113,6 +54,9 @@ def __init__(self, cfg: QuadcopterEnvCfg, render_mode: str | None = None, **kwar # Goal position self._desired_pos_w = torch.zeros(self.num_envs, 3, device=self.device) + # per-env count of steps meeting the success condition + self._success_step_count = torch.zeros(self.num_envs, dtype=torch.long, device=self.device) + # Logging self._episode_sums = { key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) @@ -124,7 +68,7 @@ def __init__(self, cfg: QuadcopterEnvCfg, render_mode: str | None = None, **kwar } # Get specific body indices self._body_id = self._robot.find_bodies("body")[0] - self._robot_mass = self._robot.root_physx_view.get_masses()[0].sum() + self._robot_mass = wp.to_torch(self._robot.root_view.get_masses())[0].sum() self._gravity_magnitude = torch.tensor(self.sim.cfg.gravity, device=self.device).norm() self._robot_weight = (self._robot_mass * self._gravity_magnitude).item() @@ -153,19 +97,19 @@ def _pre_physics_step(self, actions: torch.Tensor): self._moment[:, 0, :] = self.cfg.moment_scale * self._actions[:, 1:] def _apply_action(self): - self._robot.permanent_wrench_composer.set_forces_and_torques( + self._robot.permanent_wrench_composer.set_forces_and_torques_index( body_ids=self._body_id, forces=self._thrust, torques=self._moment ) def _get_observations(self) -> dict: desired_pos_b, _ = subtract_frame_transforms( - self._robot.data.root_pos_w, self._robot.data.root_quat_w, self._desired_pos_w + self._robot.data.root_pos_w.torch, self._robot.data.root_quat_w.torch, self._desired_pos_w ) obs = torch.cat( [ - self._robot.data.root_lin_vel_b, - self._robot.data.root_ang_vel_b, - self._robot.data.projected_gravity_b, + self._robot.data.root_lin_vel_b.torch, + self._robot.data.root_ang_vel_b.torch, + self._robot.data.projected_gravity_b.torch, desired_pos_b, ], dim=-1, @@ -174,9 +118,9 @@ def _get_observations(self) -> dict: return observations def _get_rewards(self) -> torch.Tensor: - lin_vel = torch.sum(torch.square(self._robot.data.root_lin_vel_b), dim=1) - ang_vel = torch.sum(torch.square(self._robot.data.root_ang_vel_b), dim=1) - distance_to_goal = torch.linalg.norm(self._desired_pos_w - self._robot.data.root_pos_w, dim=1) + lin_vel = torch.sum(torch.square(self._robot.data.root_lin_vel_b.torch), dim=1) + ang_vel = torch.sum(torch.square(self._robot.data.root_ang_vel_b.torch), dim=1) + distance_to_goal = torch.linalg.norm(self._desired_pos_w - self._robot.data.root_pos_w.torch, dim=1) distance_to_goal_mapped = 1 - torch.tanh(distance_to_goal / 0.8) rewards = { "lin_vel": lin_vel * self.cfg.lin_vel_reward_scale * self.step_dt, @@ -184,6 +128,8 @@ def _get_rewards(self) -> torch.Tensor: "distance_to_goal": distance_to_goal_mapped * self.cfg.distance_to_goal_reward_scale * self.step_dt, } reward = torch.sum(torch.stack(list(rewards.values())), dim=0) + # Accumulate per-step in-region count for the hover success criterion. + self._success_step_count += (distance_to_goal < self.cfg.success_distance_threshold).long() # Logging for key, value in rewards.items(): self._episode_sums[key] += value @@ -191,16 +137,18 @@ def _get_rewards(self) -> torch.Tensor: def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: time_out = self.episode_length_buf >= self.max_episode_length - 1 - died = torch.logical_or(self._robot.data.root_pos_w[:, 2] < 0.1, self._robot.data.root_pos_w[:, 2] > 2.0) + died = torch.logical_or( + self._robot.data.root_pos_w.torch[:, 2] < 0.1, self._robot.data.root_pos_w.torch[:, 2] > 2.0 + ) return died, time_out def _reset_idx(self, env_ids: torch.Tensor | None): if env_ids is None or len(env_ids) == self.num_envs: - env_ids = self._robot._ALL_INDICES + env_ids = wp.to_torch(self._robot._ALL_INDICES) # Logging final_distance_to_goal = torch.linalg.norm( - self._desired_pos_w[env_ids] - self._robot.data.root_pos_w[env_ids], dim=1 + self._desired_pos_w[env_ids] - self._robot.data.root_pos_w.torch[env_ids], dim=1 ).mean() extras = dict() for key in self._episode_sums.keys(): @@ -213,6 +161,11 @@ def _reset_idx(self, env_ids: torch.Tensor | None): extras["Episode_Termination/died"] = torch.count_nonzero(self.reset_terminated[env_ids]).item() extras["Episode_Termination/time_out"] = torch.count_nonzero(self.reset_time_outs[env_ids]).item() extras["Metrics/final_distance_to_goal"] = final_distance_to_goal.item() + success_step_threshold = int(self.max_episode_length * self.cfg.success_step_ratio) + extras["Metrics/success_rate"] = ( + (self._success_step_count[env_ids] >= success_step_threshold).float().mean().item() + ) + self._success_step_count[env_ids] = 0 self.extras["log"].update(extras) self._robot.reset(env_ids) @@ -227,13 +180,15 @@ def _reset_idx(self, env_ids: torch.Tensor | None): self._desired_pos_w[env_ids, :2] += self._terrain.env_origins[env_ids, :2] self._desired_pos_w[env_ids, 2] = torch.zeros_like(self._desired_pos_w[env_ids, 2]).uniform_(0.5, 1.5) # Reset robot state - joint_pos = self._robot.data.default_joint_pos[env_ids] - joint_vel = self._robot.data.default_joint_vel[env_ids] - default_root_state = self._robot.data.default_root_state[env_ids] - default_root_state[:, :3] += self._terrain.env_origins[env_ids] - self._robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids) - self._robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids) - self._robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids) + joint_pos = self._robot.data.default_joint_pos.torch[env_ids] + joint_vel = self._robot.data.default_joint_vel.torch[env_ids] + default_root_pose = self._robot.data.default_root_pose.torch[env_ids] + default_root_vel = self._robot.data.default_root_vel.torch[env_ids] + default_root_pose[:, :3] += self._terrain.env_origins[env_ids] + self._robot.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self._robot.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) def _set_debug_vis_impl(self, debug_vis: bool): # create markers if necessary for the first time diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env_cfg.py new file mode 100644 index 000000000000..d893535fe210 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env_cfg.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_assets import CRAZYFLIE_CFG # isort: skip + + +@configclass +class QuadcopterEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 10.0 + decimation = 2 + action_space = 4 + observation_space = 12 + state_space = 0 + debug_vis = True + + ui_window_class_type: type | str | None = "{DIR}.quadcopter_env:QuadcopterEnvWindow" + + # simulation + sim: SimulationCfg = SimulationCfg( + dt=1 / 100, + render_interval=decimation, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + ) + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=2.5, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot: ArticulationCfg = CRAZYFLIE_CFG.replace(prim_path="/World/envs/env_.*/Robot") + thrust_to_weight = 1.9 + moment_scale = 0.01 + + # reward scales + lin_vel_reward_scale = -0.05 + ang_vel_reward_scale = -0.01 + distance_to_goal_reward_scale = 15.0 + + # success criteria (hovering task: success requires staying near goal for a fraction of the episode) + success_distance_threshold: float = 0.5 + """Distance to goal below which a step counts toward the hover success criterion [m].""" + success_step_ratio: float = 0.25 + """Fraction of episode steps that must satisfy the distance threshold for the episode to count as successful.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/__init__.py index 4d2f0f3eee50..0041b165cb0a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/__init__.py @@ -60,7 +60,7 @@ entry_point=f"{__name__}.shadow_hand_vision_env:ShadowHandVisionEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.shadow_hand_vision_env:ShadowHandVisionEnvCfg", + "env_cfg_entry_point": f"{__name__}.shadow_hand_vision_env_cfg:ShadowHandVisionEnvCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:ShadowHandVisionFFPPORunnerCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_vision_cfg.yaml", }, @@ -71,7 +71,18 @@ entry_point=f"{__name__}.shadow_hand_vision_env:ShadowHandVisionEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.shadow_hand_vision_env:ShadowHandVisionEnvPlayCfg", + "env_cfg_entry_point": f"{__name__}.shadow_hand_vision_env_cfg:ShadowHandVisionEnvPlayCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:ShadowHandVisionFFPPORunnerCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_vision_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Repose-Cube-Shadow-Vision-Benchmark-Direct-v0", + entry_point=f"{__name__}.shadow_hand_vision_env:ShadowHandVisionEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.shadow_hand_vision_env_cfg:ShadowHandVisionBenchmarkEnvCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:ShadowHandVisionFFPPORunnerCfg", "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_vision_cfg.yaml", }, diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/rl_games_ppo_cfg.yaml index 30b3b0b012f4..abd30459893d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/rl_games_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/agents/rl_games_ppo_cfg.yaml @@ -89,3 +89,25 @@ params: deterministic: True games_num: 100000 print_stats: True + +pbt: + enabled: False + policy_idx: 0 + num_policies: 8 + directory: . + workspace: "pbt_workspace" + objective: "episode.consecutive_successes" + interval_steps: 50000000 + threshold_std: 0.1 + threshold_abs: 0.025 + mutation_rate: 0.25 + change_range: [1.1, 2.0] + mutation: + agent.params.config.learning_rate: "mutate_float" + agent.params.config.grad_norm: "mutate_float" + agent.params.config.entropy_coef: "mutate_float" + agent.params.config.critic_coef: "mutate_float" + agent.params.config.bounds_loss_coef: "mutate_float" + agent.params.config.kl_threshold: "mutate_float" + agent.params.config.gamma: "mutate_discount" + agent.params.config.tau: "mutate_discount" diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py index 75a7b6d04a20..d7dedbf196a6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/feature_extractor.py @@ -13,27 +13,81 @@ from isaaclab.sensors import save_images_to_file from isaaclab.utils import configclass +# Number of output channels for each supported camera data type. +_DATA_TYPE_CHANNELS: dict[str, int] = { + "rgb": 3, + "depth": 1, + "semantic_segmentation": 3, + "albedo": 3, + "simple_shading_constant_diffuse": 3, + "simple_shading_diffuse_mdl": 3, + "simple_shading_full_mdl": 3, +} + +# Data types whose channels should receive ImageNet normalization in the CNN forward pass. +_IMAGENET_NORM_TYPES: frozenset[str] = frozenset( + { + "rgb", + "semantic_segmentation", + "albedo", + "simple_shading_constant_diffuse", + "simple_shading_diffuse_mdl", + "simple_shading_full_mdl", + } +) + + +def _conv_out(size: int, kernel: int, stride: int, padding: int = 0) -> int: + """Compute the spatial output size of a single convolutional layer.""" + return (size + 2 * padding - kernel) // stride + 1 + class FeatureExtractorNetwork(nn.Module): """CNN architecture used to regress keypoint positions of the in-hand cube from image data.""" - def __init__(self): + def __init__( + self, + num_channel: int = 7, + data_types: list[str] | None = None, + height: int = 120, + width: int = 120, + ): + """Initialize the CNN. + + Args: + num_channel: Total number of input channels across all data types. + data_types: Ordered list of camera data types that form the channel stack. + Used to determine which channel ranges receive ImageNet normalization. + Defaults to ``["rgb", "depth", "semantic_segmentation"]``. + height: Input image height [px]. Used to compute :class:`~torch.nn.LayerNorm` + spatial dimensions. Default is ``120``. + width: Input image width [px]. Used to compute :class:`~torch.nn.LayerNorm` + spatial dimensions. Default is ``120``. + """ super().__init__() - num_channel = 7 + if data_types is None: + data_types = ["rgb", "depth", "semantic_segmentation"] + + # Compute spatial sizes after each conv to build resolution-adaptive LayerNorms. + h1, w1 = _conv_out(height, 6, 2), _conv_out(width, 6, 2) + h2, w2 = _conv_out(h1, 4, 2), _conv_out(w1, 4, 2) + h3, w3 = _conv_out(h2, 4, 2), _conv_out(w2, 4, 2) + h4, w4 = _conv_out(h3, 3, 2), _conv_out(w3, 3, 2) + self.cnn = nn.Sequential( nn.Conv2d(num_channel, 16, kernel_size=6, stride=2, padding=0), nn.ReLU(), - nn.LayerNorm([16, 58, 58]), + nn.LayerNorm([16, h1, w1]), nn.Conv2d(16, 32, kernel_size=4, stride=2, padding=0), nn.ReLU(), - nn.LayerNorm([32, 28, 28]), + nn.LayerNorm([32, h2, w2]), nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0), nn.ReLU(), - nn.LayerNorm([64, 13, 13]), + nn.LayerNorm([64, h3, w3]), nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=0), nn.ReLU(), - nn.LayerNorm([128, 6, 6]), - nn.AvgPool2d(6), + nn.LayerNorm([128, h4, w4]), + nn.AdaptiveAvgPool2d(1), ) self.linear = nn.Sequential( @@ -46,10 +100,19 @@ def __init__(self): ] ) + # Pre-compute channel ranges that require ImageNet normalization. + self._imagenet_norm_ranges: list[tuple[int, int]] = [] + channel_idx = 0 + for dt in data_types: + n_ch = _DATA_TYPE_CHANNELS.get(dt, 3) + if dt in _IMAGENET_NORM_TYPES: + self._imagenet_norm_ranges.append((channel_idx, channel_idx + n_ch)) + channel_idx += n_ch + def forward(self, x): - x = x.permute(0, 3, 1, 2) - x[:, 0:3, :, :] = self.data_transforms(x[:, 0:3, :, :]) - x[:, 4:7, :, :] = self.data_transforms(x[:, 4:7, :, :]) + x = x.permute(0, 3, 1, 2).clone() + for start, end in self._imagenet_norm_ranges: + x[:, start:end, :, :] = self.data_transforms(x[:, start:end, :, :]) cnn_x = self.cnn(x) out = self.linear(cnn_x.view(-1, 128)) return out @@ -60,7 +123,7 @@ class FeatureExtractorCfg: """Configuration for the feature extractor model.""" train: bool = True - """If True, the feature extractor model is trained during the rollout process. Default is False.""" + """If True, the feature extractor model is trained during the rollout process. Default is True.""" load_checkpoint: bool = False """If True, the feature extractor model is loaded from a checkpoint. Default is False.""" @@ -68,29 +131,64 @@ class FeatureExtractorCfg: write_image_to_file: bool = False """If True, the images from the camera sensor are written to file. Default is False.""" + enabled: bool = True + """If True, the CNN forward pass is executed each step. + + Set to False to bypass the network entirely and return zero embeddings. This is useful + for benchmarking rendering throughput without CNN inference overhead. Default is True. + """ + class FeatureExtractor: """Class for extracting features from image data. - It uses a CNN to regress keypoint positions from normalized RGB, depth, and segmentation images. - If the train flag is set to True, the CNN is trained during the rollout process. + It uses a CNN to regress keypoint positions from normalized images. + If :attr:`FeatureExtractorCfg.train` is ``True``, the CNN is trained during rollouts. + If :attr:`FeatureExtractorCfg.enabled` is ``False``, the network is bypassed and zero + embeddings are returned (useful for benchmarking rendering throughput). + + The input data types (and therefore the CNN's input channel count) are determined by + the camera's ``data_types`` at construction time, passed via the ``data_types`` argument. + This means changing the camera preset (e.g. ``presets=rgb``) automatically reconfigures + the CNN without requiring a separate environment config class. """ - def __init__(self, cfg: FeatureExtractorCfg, device: str, log_dir: str | None = None): + def __init__( + self, + cfg: FeatureExtractorCfg, + device: str, + data_types: list[str], + log_dir: str | None = None, + height: int = 120, + width: int = 120, + ): """Initialize the feature extractor model. Args: cfg: Configuration for the feature extractor model. device: Device to run the model on. + data_types: Ordered list of camera data types that form the CNN input channel + stack. Should match the resolved :attr:`~isaaclab.sensors.CameraCfg.data_types` + of the camera. Total input channels are derived from + :data:`_DATA_TYPE_CHANNELS`. log_dir: Directory to save checkpoints. Default is None, which uses the local "logs" folder resolved relative to this file. + height: Camera image height [px]. Must match the camera + :attr:`~isaaclab.sensors.CameraCfg.height`. Default is ``120``. + width: Camera image width [px]. Must match the camera + :attr:`~isaaclab.sensors.CameraCfg.width`. Default is ``120``. """ - self.cfg = cfg self.device = device + self.data_types = data_types - # Feature extractor model - self.feature_extractor = FeatureExtractorNetwork() + # Compute total input channels from the camera data types. + num_channel = sum(_DATA_TYPE_CHANNELS.get(dt, 3) for dt in data_types) + + # Feature extractor model. + self.feature_extractor = FeatureExtractorNetwork( + num_channel=num_channel, data_types=data_types, height=height, width=width + ) self.feature_extractor.to(self.device) self.step_count = 0 @@ -115,66 +213,96 @@ def __init__(self, cfg: FeatureExtractorCfg, device: str, log_dir: str | None = else: self.feature_extractor.eval() - def _preprocess_images( - self, rgb_img: torch.Tensor, depth_img: torch.Tensor, segmentation_img: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Preprocesses the input images. + def _preprocess_images(self, camera_output: dict[str, torch.Tensor]) -> torch.Tensor: + """Preprocesses and concatenates camera images into a single tensor. + + Each data type in :attr:`FeatureExtractorCfg.data_types` is extracted from + ``camera_output``, normalized, and concatenated along the channel dimension. Args: - rgb_img (torch.Tensor): RGB image tensor. Shape: (N, H, W, 3). - depth_img (torch.Tensor): Depth image tensor. Shape: (N, H, W, 1). - segmentation_img (torch.Tensor): Segmentation image tensor. Shape: (N, H, W, 3) + camera_output: Dictionary mapping data type names to image tensors. Returns: - tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Preprocessed RGB, depth, and segmentation + Concatenated preprocessed image tensor of shape (N, H, W, C). """ - rgb_img = rgb_img / 255.0 - # process depth image - depth_img[depth_img == float("inf")] = 0 - depth_img /= 5.0 - depth_img /= torch.max(depth_img) - # process segmentation image - segmentation_img = segmentation_img / 255.0 - mean_tensor = torch.mean(segmentation_img, dim=(1, 2), keepdim=True) - segmentation_img -= mean_tensor - return rgb_img, depth_img, segmentation_img - - def _save_images(self, rgb_img: torch.Tensor, depth_img: torch.Tensor, segmentation_img: torch.Tensor): - """Writes image buffers to file. + tensors = [] + for dt in self.data_types: + img = camera_output[dt].float() + if dt == "rgb": + img = img / 255.0 + elif dt == "depth": + img[img == float("inf")] = 0 + img /= 5.0 + max_val = img.max() + if max_val > 0: + img /= max_val + elif dt == "semantic_segmentation": + img = img[..., :3] / 255.0 + mean_tensor = torch.mean(img, dim=(1, 2), keepdim=True) + img = img - mean_tensor + else: + # albedo and simple_shading_* are RGB-like 3-channel outputs. + img = img[..., :3] / 255.0 + tensors.append(img) + return torch.cat(tensors, dim=-1) + + def _save_images(self, camera_output: dict[str, torch.Tensor]): + """Writes configured camera data buffers to file as normalized float images. + + Raw camera tensors are converted to float ``[0, 1]`` before saving so that + :func:`~isaaclab.sensors.save_images_to_file` (which delegates to + ``torchvision.utils.save_image``) receives the expected float input. Args: - rgb_img (torch.Tensor): RGB image tensor. Shape: (N, H, W, 3). - depth_img (torch.Tensor): Depth image tensor. Shape: (N, H, W, 1). - segmentation_img (torch.Tensor): Segmentation image tensor. Shape: (N, H, W, 3). + camera_output: Dictionary mapping data type names to image tensors. """ - save_images_to_file(rgb_img, "shadow_hand_rgb.png") - save_images_to_file(depth_img, "shadow_hand_depth.png") - save_images_to_file(segmentation_img, "shadow_hand_segmentation.png") + for dt in self.data_types: + if dt not in camera_output: + continue + img = camera_output[dt].float() + if dt == "depth": + img = img.clone() + img[img == float("inf")] = 0 + max_val = img.max() + if max_val > 0: + img = img / max_val + else: + # rgb, semantic_segmentation, albedo, and simple_shading_* are uint8 [0, 255] + img = img[..., :3] / 255.0 + save_images_to_file(img, f"shadow_hand_{dt}.png") def step( - self, rgb_img: torch.Tensor, depth_img: torch.Tensor, segmentation_img: torch.Tensor, gt_pose: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor]: - """Extracts the features using the images and trains the model if the train flag is set to True. + self, camera_output: dict[str, torch.Tensor], gt_pose: torch.Tensor + ) -> tuple[torch.Tensor | None, torch.Tensor]: + """Extracts features and optionally trains the CNN. + + Image saving (when :attr:`FeatureExtractorCfg.write_image_to_file` is ``True``) always + runs first, regardless of whether the network is enabled. When + :attr:`FeatureExtractorCfg.enabled` is ``False``, the network is then bypassed and + zero embeddings are returned without any further image preprocessing. Args: - rgb_img (torch.Tensor): RGB image tensor. Shape: (N, H, W, 3). - depth_img (torch.Tensor): Depth image tensor. Shape: (N, H, W, 1). - segmentation_img (torch.Tensor): Segmentation image tensor. Shape: (N, H, W, 3). - gt_pose (torch.Tensor): Ground truth pose tensor (position and corners). Shape: (N, 27). + camera_output: Dictionary mapping data type names to image tensors from the + tiled camera sensor. + gt_pose: Ground truth pose tensor (position and keypoint corners). Shape: (N, 27). Returns: - tuple[torch.Tensor, torch.Tensor]: Pose loss and predicted pose. + tuple[torch.Tensor | None, torch.Tensor]: Pose loss (``None`` when not training + or when the network is disabled) and the predicted pose embedding of shape + (N, 27). """ + if self.cfg.write_image_to_file: + self._save_images(camera_output) - rgb_img, depth_img, segmentation_img = self._preprocess_images(rgb_img, depth_img, segmentation_img) + if not self.cfg.enabled: + batch_size = next(iter(camera_output.values())).shape[0] + return None, torch.zeros(batch_size, 27, dtype=torch.float32, device=self.device) - if self.cfg.write_image_to_file: - self._save_images(rgb_img, depth_img, segmentation_img) + img_input = self._preprocess_images(camera_output) if self.cfg.train: with torch.enable_grad(): with torch.inference_mode(False): - img_input = torch.cat((rgb_img, depth_img, segmentation_img), dim=-1) self.optimizer.zero_grad() predicted_pose = self.feature_extractor(img_input) @@ -193,6 +321,5 @@ def step( return pose_loss, predicted_pose else: - img_input = torch.cat((rgb_img, depth_img, segmentation_img), dim=-1) predicted_pose = self.feature_extractor(img_input) return None, predicted_pose diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_env_cfg.py index f9c92f18fbe1..0a2258d0b0b1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_env_cfg.py @@ -3,41 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg import isaaclab.envs.mdp as mdp import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.envs import DirectRLEnvCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import SceneEntityCfg from isaaclab.markers import VisualizationMarkersCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from isaaclab.utils.noise import GaussianNoiseCfg, NoiseModelWithAdditiveBiasCfg +from isaaclab_tasks.utils import PresetCfg + from isaaclab_assets.robots.shadow_hand import SHADOW_HAND_CFG @configclass -class EventCfg: - """Configuration for randomization.""" +class NewtonEventCfg: + """Event randomization config for the Newton physics backend. + + Includes joint-parameter, mass, and gravity randomization. + Material and tendon randomization are omitted: Newton does not expose + per-body friction-material buckets or fixed-tendon APIs. + """ - # -- robot - robot_physics_material = EventTerm( - func=mdp.randomize_rigid_body_material, - mode="reset", - min_step_count_between_reset=720, - params={ - "asset_cfg": SceneEntityCfg("robot"), - "static_friction_range": (0.7, 1.3), - "dynamic_friction_range": (1.0, 1.0), - "restitution_range": (1.0, 1.0), - "num_buckets": 250, - }, - ) robot_joint_stiffness_and_damping = EventTerm( func=mdp.randomize_actuator_gains, min_step_count_between_reset=720, @@ -50,18 +47,48 @@ class EventCfg: "distribution": "log_uniform", }, ) - robot_joint_pos_limits = EventTerm( - func=mdp.randomize_joint_parameters, + object_scale_mass = EventTerm( + func=mdp.randomize_rigid_body_mass, min_step_count_between_reset=720, mode="reset", params={ - "asset_cfg": SceneEntityCfg("robot", joint_names=".*"), - "lower_limit_distribution_params": (0.00, 0.01), - "upper_limit_distribution_params": (0.00, 0.01), + "asset_cfg": SceneEntityCfg("object"), + "mass_distribution_params": (0.5, 1.5), + "operation": "scale", + "distribution": "uniform", + "recompute_inertia": False, + }, + ) + + # -- scene + reset_gravity = EventTerm( + func=mdp.randomize_physics_scene_gravity, + mode="interval", + is_global_time=True, + interval_range_s=(36.0, 36.0), # time_s = num_steps * (decimation * dt) + params={ + "gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.4]), "operation": "add", "distribution": "gaussian", }, ) + + +@configclass +class PhysxEventCfg: + # -- robot + robot_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="reset", + min_step_count_between_reset=720, + params={ + "asset_cfg": SceneEntityCfg("robot"), + "static_friction_range": (0.7, 1.3), + "dynamic_friction_range": (1.0, 1.0), + "restitution_range": (1.0, 1.0), + "num_buckets": 250, + }, + ) robot_tendon_properties = EventTerm( func=mdp.randomize_fixed_tendon_parameters, min_step_count_between_reset=720, @@ -88,30 +115,164 @@ class EventCfg: "num_buckets": 250, }, ) - object_scale_mass = EventTerm( - func=mdp.randomize_rigid_body_mass, - min_step_count_between_reset=720, - mode="reset", - params={ - "asset_cfg": SceneEntityCfg("object"), - "mass_distribution_params": (0.5, 1.5), - "operation": "scale", - "distribution": "uniform", + + +@configclass +class ShadowHandEventCfg(PresetCfg): + physx = PhysxEventCfg() + newton_mjwarp = NewtonEventCfg() + default = physx + + +@configclass +class ShadowHandRobotCfg(PresetCfg): + physx = SHADOW_HAND_CFG.replace(prim_path="/World/envs/env_.*/Robot").replace( + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.5), + rot=(0.0, 0.0, 0.0, 1.0), + joint_pos={".*": 0.0}, + ) + ) + newton_mjwarp = ArticulationCfg( + prim_path="/World/envs/env_.*/Robot", + spawn=sim_utils.UsdFileCfg( + # newton requires implicitactuators be specified in usd and there's a bug with physx tendons + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/ShadowRobot/ShadowHand/shadow_hand_instanceable_newton.usd", + activate_contact_sensors=False, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=True, + retain_accelerations=True, + max_depenetration_velocity=1000.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg(enabled_self_collisions=True), + joint_drive_props=sim_utils.JointDrivePropertiesCfg(drive_type="force"), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.5), + # WARNING(Octi): Newton's import_usd.py bakes the USD body xformOp rotation into + # joint_X_p for the root fixed joint, which cancels with the matching localPose1 + # rotation in joint_X_c during FK (joint_X_p * inv(joint_X_c) ≈ identity). This + # discards the root body's native USD orientation, so we must re-apply it here as a + # spawn rotation. PhysX or USD does not have this issue. Remove once Newton fixes root joint + # transform handling in import_usd.py. + rot=(0.0, 0.0, 0.0, 1.0), + joint_pos={".*": 0.0}, + ), + actuators={ + "fingers": ImplicitActuatorCfg( + joint_names_expr=["robot0_WR.*", "robot0_(FF|MF|RF|LF|TH)J(3|2|1)", "robot0_(LF|TH)J4", "robot0_THJ0"], + effort_limit_sim={ + "robot0_WRJ1": 4.785, + "robot0_WRJ0": 2.175, + "robot0_(FF|MF|RF|LF)J1": 0.7245, + "robot0_FFJ(3|2)": 0.9, + "robot0_MFJ(3|2)": 0.9, + "robot0_RFJ(3|2)": 0.9, + "robot0_LFJ(4|3|2)": 0.9, + "robot0_THJ4": 2.3722, + "robot0_THJ3": 1.45, + "robot0_THJ(2|1)": 0.99, + "robot0_THJ0": 0.81, + }, + stiffness={ + "robot0_WRJ.*": 5.0, + "robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 1.0, + "robot0_(LF|TH)J4": 1.0, + "robot0_THJ0": 1.0, + }, + damping={ + "robot0_WRJ.*": 0.5, + "robot0_(FF|MF|RF|LF|TH)J(3|2|1)": 0.1, + "robot0_(LF|TH)J4": 0.1, + "robot0_THJ0": 0.1, + }, + friction=1e-2, + armature=2e-3, + ), }, + soft_joint_pos_limit_factor=1.0, ) + default = physx - # -- scene - reset_gravity = EventTerm( - func=mdp.randomize_physics_scene_gravity, - mode="interval", - is_global_time=True, - interval_range_s=(36.0, 36.0), # time_s = num_steps * (decimation * dt) - params={ - "gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.4]), - "operation": "add", - "distribution": "gaussian", - }, + +@configclass +class ObjectCfg(PresetCfg): + physx = RigidObjectCfg( + prim_path="/World/envs/env_.*/object", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg( + kinematic_enabled=False, + disable_gravity=False, + enable_gyroscopic_forces=True, + solver_position_iteration_count=8, + solver_velocity_iteration_count=0, + sleep_threshold=0.005, + stabilization_threshold=0.0025, + max_depenetration_velocity=1000.0, + ), + mass_props=sim_utils.MassPropertiesCfg(density=567.0), + semantic_tags=[("class", "cube")], + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.39, 0.6), rot=(0.0, 0.0, 0.0, 1.0)), + ) + + newton_mjwarp = ArticulationCfg( + prim_path="/World/envs/env_.*/object", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + mass_props=sim_utils.MassPropertiesCfg(density=400.0), + semantic_tags=[("class", "cube")], + scale=(0.9, 0.9, 0.9), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, -0.36, 0.535), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} + ), + actuators={}, + articulation_root_prim_path="", + ) + default = physx + + +@configclass +class ShadowHandSceneCfg(PresetCfg): + """Scene configuration presets for the shadow hand environment. + + PhysX supports ``clone_in_fabric=True`` for faster scene cloning via the Fabric layer. + Newton does not support Fabric cloning, so ``clone_in_fabric`` must be ``False``. + """ + + physx: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=8192, env_spacing=0.75, replicate_physics=True, clone_in_fabric=True + ) + newton_mjwarp: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=8192, env_spacing=0.75, replicate_physics=True, clone_in_fabric=False + ) + default: InteractiveSceneCfg = physx + + +@configclass +class PhysicsCfg(PresetCfg): + physx = PhysxCfg( + bounce_threshold_velocity=0.2, + gpu_max_rigid_contact_count=2**23, + gpu_max_rigid_patch_count=2**23, + ) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + solver="newton", + integrator="implicitfast", + njmax=200, + nconmax=70, + impratio=10.0, + cone="elliptic", + update_data_interval=2, + iterations=100, + ), + num_substeps=2, + debug_mode=False, ) + default = physx @configclass @@ -129,22 +290,11 @@ class ShadowHandEnvCfg(DirectRLEnvCfg): sim: SimulationCfg = SimulationCfg( dt=1 / 120, render_interval=decimation, - physics_material=RigidBodyMaterialCfg( - static_friction=1.0, - dynamic_friction=1.0, - ), - physx=PhysxCfg( - bounce_threshold_velocity=0.2, - ), + physics_material=RigidBodyMaterialCfg(static_friction=1.0, dynamic_friction=1.0), + physics=PhysicsCfg(), ) # robot - robot_cfg: ArticulationCfg = SHADOW_HAND_CFG.replace(prim_path="/World/envs/env_.*/Robot").replace( - init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.0, 0.5), - rot=(1.0, 0.0, 0.0, 0.0), - joint_pos={".*": 0.0}, - ) - ) + robot_cfg: ShadowHandRobotCfg = ShadowHandRobotCfg() actuated_joint_names = [ "robot0_WRJ1", "robot0_WRJ0", @@ -176,25 +326,7 @@ class ShadowHandEnvCfg(DirectRLEnvCfg): ] # in-hand object - object_cfg: RigidObjectCfg = RigidObjectCfg( - prim_path="/World/envs/env_.*/object", - spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", - rigid_props=sim_utils.RigidBodyPropertiesCfg( - kinematic_enabled=False, - disable_gravity=False, - enable_gyroscopic_forces=True, - solver_position_iteration_count=8, - solver_velocity_iteration_count=0, - sleep_threshold=0.005, - stabilization_threshold=0.0025, - max_depenetration_velocity=1000.0, - ), - mass_props=sim_utils.MassPropertiesCfg(density=567.0), - semantic_tags=[("class", "cube")], - ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.39, 0.6), rot=(1.0, 0.0, 0.0, 0.0)), - ) + object_cfg: ObjectCfg = ObjectCfg() # goal object goal_object_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( prim_path="/Visuals/goal_marker", @@ -205,10 +337,8 @@ class ShadowHandEnvCfg(DirectRLEnvCfg): ) }, ) - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg( - num_envs=8192, env_spacing=0.75, replicate_physics=True, clone_in_fabric=True - ) + # scene — use ShadowHandSceneCfg so that presets=newton_mjwarp disables clone_in_fabric automatically + scene: ShadowHandSceneCfg = ShadowHandSceneCfg() # reset reset_position_noise = 0.01 # range of position at reset @@ -225,6 +355,8 @@ class ShadowHandEnvCfg(DirectRLEnvCfg): vel_obs_scale = 0.2 success_tolerance = 0.1 max_consecutive_success = 0 + success_count_threshold: int = 1 + """Minimum number of goals reached in an episode to count it as a successful episode.""" av_factor = 0.1 act_moving_average = 1.0 force_torque_obs_scale = 10.0 @@ -244,15 +376,8 @@ class ShadowHandOpenAIEnvCfg(ShadowHandEnvCfg): sim: SimulationCfg = SimulationCfg( dt=1 / 60, render_interval=decimation, - physics_material=RigidBodyMaterialCfg( - static_friction=1.0, - dynamic_friction=1.0, - ), - physx=PhysxCfg( - bounce_threshold_velocity=0.2, - gpu_max_rigid_contact_count=2**23, - gpu_max_rigid_patch_count=2**23, - ), + physics_material=RigidBodyMaterialCfg(static_friction=1.0, dynamic_friction=1.0), + physics=PhysicsCfg(), ) # reset reset_position_noise = 0.01 # range of position at reset @@ -265,7 +390,6 @@ class ShadowHandOpenAIEnvCfg(ShadowHandEnvCfg): action_penalty_scale = -0.0002 reach_goal_bonus = 250 fall_penalty = -50 - fall_dist = 0.24 vel_obs_scale = 0.2 success_tolerance = 0.4 max_consecutive_success = 50 @@ -273,7 +397,7 @@ class ShadowHandOpenAIEnvCfg(ShadowHandEnvCfg): act_moving_average = 0.3 force_torque_obs_scale = 10.0 # domain randomization config - events: EventCfg = EventCfg() + events: ShadowHandEventCfg = ShadowHandEventCfg() # at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset action_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg( noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.05, operation="add"), diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py index b5c781c1a9f2..7da4db48e853 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py @@ -6,50 +6,21 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import torch import isaaclab.sim as sim_utils from isaaclab.assets import Articulation, RigidObject -from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sensors import TiledCamera, TiledCameraCfg -from isaaclab.utils import configclass +from isaaclab.sensors import Camera from isaaclab.utils.math import quat_apply from isaaclab_tasks.direct.inhand_manipulation.inhand_manipulation_env import InHandManipulationEnv, unscale -from .feature_extractor import FeatureExtractor, FeatureExtractorCfg -from .shadow_hand_env_cfg import ShadowHandEnvCfg - - -@configclass -class ShadowHandVisionEnvCfg(ShadowHandEnvCfg): - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=1225, env_spacing=2.0, replicate_physics=True) - - # camera - tiled_camera: TiledCameraCfg = TiledCameraCfg( - prim_path="/World/envs/env_.*/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(0, -0.35, 1.0), rot=(0.7071, 0.0, 0.7071, 0.0), convention="world"), - data_types=["rgb", "depth", "semantic_segmentation"], - spawn=sim_utils.PinholeCameraCfg( - focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) - ), - width=120, - height=120, - ) - feature_extractor = FeatureExtractorCfg() +from .feature_extractor import FeatureExtractor - # env - observation_space = 164 + 27 # state observation + vision CNN embedding - state_space = 187 + 27 # asymettric states + vision CNN embedding - - -@configclass -class ShadowHandVisionEnvPlayCfg(ShadowHandVisionEnvCfg): - # scene - scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=64, env_spacing=2.0, replicate_physics=True) - # inference for CNN - feature_extractor = FeatureExtractorCfg(train=False, load_checkpoint=True) +if TYPE_CHECKING: + from .shadow_hand_vision_env_cfg import ShadowHandVisionEnvCfg class ShadowHandVisionEnv(InHandManipulationEnv): @@ -57,8 +28,17 @@ class ShadowHandVisionEnv(InHandManipulationEnv): def __init__(self, cfg: ShadowHandVisionEnvCfg, render_mode: str | None = None, **kwargs): super().__init__(cfg, render_mode, **kwargs) - # Use the log directory from the configuration - self.feature_extractor = FeatureExtractor(self.cfg.feature_extractor, self.device, self.cfg.log_dir) + # Derive CNN input data types from the resolved camera config so that any camera + # preset (e.g. presets=rgb, presets=albedo) automatically configures the right + # network input channels without requiring a separate env config class. + self.feature_extractor = FeatureExtractor( + self.cfg.feature_extractor, + self.device, + self.cfg.tiled_camera.data_types, + self.cfg.log_dir, + height=self.cfg.tiled_camera.height, + width=self.cfg.tiled_camera.width, + ) # hide goal cubes self.goal_pos[:, :] = torch.tensor([-0.2, 0.1, 0.6], device=self.device) # keypoints buffer @@ -68,13 +48,15 @@ def __init__(self, cfg: ShadowHandVisionEnvCfg, render_mode: str | None = None, def _setup_scene(self): # add hand, in-hand object, and goal object self.hand = Articulation(self.cfg.robot_cfg) - self.object = RigidObject(self.cfg.object_cfg) - self._tiled_camera = TiledCamera(self.cfg.tiled_camera) + self.object: Articulation | RigidObject = self.cfg.object_cfg.class_type(self.cfg.object_cfg) + self._joint_wrench_sensor = self._create_joint_wrench_sensor() + self._tiled_camera = Camera(self.cfg.tiled_camera) # clone and replicate (no need to filter for this environment) self.scene.clone_environments(copy_from_source=False) # add articulation to scene - we must register to scene to randomize with EventManager self.scene.articulations["robot"] = self.hand self.scene.rigid_objects["object"] = self.object + self.scene.sensors["joint_wrench"] = self._joint_wrench_sensor self.scene.sensors["tiled_camera"] = self._tiled_camera # add lights light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) @@ -88,9 +70,7 @@ def _compute_image_observations(self): # train CNN to regress on keypoint positions pose_loss, embeddings = self.feature_extractor.step( - self._tiled_camera.data.output["rgb"], - self._tiled_camera.data.output["depth"], - self._tiled_camera.data.output["semantic_segmentation"][..., :3], + self._tiled_camera.data.output, object_pose, ) @@ -108,10 +88,11 @@ def _compute_image_observations(self): dim=-1, ) - # log pose loss from CNN training - if "log" not in self.extras: - self.extras["log"] = dict() - self.extras["log"]["pose_loss"] = pose_loss + # log pose loss from CNN training (None when disabled or in inference mode) + if pose_loss is not None: + if "log" not in self.extras: + self.extras["log"] = dict() + self.extras["log"]["pose_loss"] = pose_loss return obs @@ -148,8 +129,7 @@ def _get_observations(self) -> dict: # vision observations from CMM image_obs = self._compute_image_observations() obs = torch.cat((state_obs, image_obs), dim=-1) - # asymmetric critic states - self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[:, self.finger_bodies] + self._update_fingertip_force_sensors() state = self._compute_states() observations = {"policy": obs, "critic": state} diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env_cfg.py new file mode 100644 index 000000000000..5612b8c6d54e --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env_cfg.py @@ -0,0 +1,172 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import CameraCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg +from isaaclab_tasks.utils.presets import MultiBackendRendererCfg + +from .feature_extractor import FeatureExtractorCfg +from .shadow_hand_env_cfg import ShadowHandEnvCfg + + +@configclass +class _ShadowHandBaseTiledCameraCfg(CameraCfg): + """Base camera configuration for the shadow hand vision environment. + + This is an internal config used by :class:`ShadowHandVisionTiledCameraCfg` presets and + by derived env configs that hard-code a specific data type. It embeds + :class:`~isaaclab_tasks.utils.MultiBackendRendererCfg` so the renderer backend can + still be selected via the ``presets`` CLI argument. + """ + + prim_path: str = "/World/envs/env_.*/Camera" + offset: CameraCfg.OffsetCfg = CameraCfg.OffsetCfg( + pos=(0, -0.35, 1.0), rot=(0.0, 0.7071, 0.0, 0.7071), convention="world" + ) + data_types: list[str] = [] + spawn: sim_utils.PinholeCameraCfg = sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) + ) + width: int = 120 + height: int = 120 + renderer_cfg: MultiBackendRendererCfg = MultiBackendRendererCfg() + + +@configclass +class ShadowHandVisionTiledCameraCfg(PresetCfg): + """Camera data-type presets for the shadow hand vision environment. + + Each preset selects which image modalities are captured. The selected data types must + match :attr:`FeatureExtractorCfg.data_types` so the CNN receives the expected channels. + + Select a data-type preset via the ``presets`` CLI argument, e.g.:: + + presets = rgb # RGB only (3 channels) + presets = albedo # albedo (3 channels) + presets = simple_shading_constant_diffuse # simple shading, constant diffuse (3 channels) + + Renderer and data-type presets can be combined:: + + presets = newton_renderer, rgb + """ + + default: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["rgb", "depth", "semantic_segmentation"] + ) + """Default: RGB + depth + semantic segmentation (7 CNN input channels).""" + + full: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["rgb", "depth", "semantic_segmentation"] + ) + """Full modalities: RGB + depth + semantic segmentation (7 channels). Alias for default.""" + + rgb: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg(data_types=["rgb"]) + """RGB only (3 CNN input channels).""" + + albedo: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg(data_types=["albedo"]) + """Albedo (3 CNN input channels).""" + + simple_shading_constant_diffuse: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["simple_shading_constant_diffuse"] + ) + """Simple shading with constant diffuse (3 CNN input channels).""" + + simple_shading_diffuse_mdl: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["simple_shading_diffuse_mdl"] + ) + """Simple shading with diffuse MDL (3 CNN input channels).""" + + simple_shading_full_mdl: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["simple_shading_full_mdl"] + ) + """Simple shading with full MDL (3 CNN input channels).""" + + depth: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg(data_types=["depth"]) + """Depth only (1 channel). + + .. warning:: + This preset is intended for **benchmarking only**. The keypoint-regression CNN + cannot be meaningfully trained from depth alone. Use it with + :class:`ShadowHandVisionBenchmarkEnvCfg` (``feature_extractor.enabled=False``) + to measure pure depth-rendering throughput, e.g.:: + + presets=depth # depth rendering, default renderer + presets=depth,newton_renderer # depth rendering with Newton renderer + presets=depth,ovrtx_renderer # depth rendering with OVRTX renderer + """ + + semantic_segmentation: _ShadowHandBaseTiledCameraCfg = _ShadowHandBaseTiledCameraCfg( + data_types=["semantic_segmentation"] + ) + """Semantic segmentation (3 CNN input channels).""" + + +@configclass +class ShadowHandVisionEnvCfg(ShadowHandEnvCfg): + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=1225, env_spacing=2.0, replicate_physics=True) + + # camera — data-type and renderer backend selectable via CLI presets + tiled_camera: ShadowHandVisionTiledCameraCfg = ShadowHandVisionTiledCameraCfg() + feature_extractor: FeatureExtractorCfg = FeatureExtractorCfg() + + # env + observation_space = 164 + 27 # state observation + vision CNN embedding + state_space = 187 + 27 # asymmetric states + vision CNN embedding + + def validate_config(self): + """Check renderer/data-type and feature-extractor compatibility.""" + renderer_type = getattr(self.tiled_camera.renderer_cfg, "renderer_type", None) + warp_supported = {"rgb", "depth"} + if renderer_type == "newton_warp": + unsupported = set(self.tiled_camera.data_types) - warp_supported + if unsupported: + raise ValueError( + f"Warp renderer only supports data types {sorted(warp_supported)}, " + f"but the camera is configured with unsupported types: {sorted(unsupported)}. " + "Choose a compatible preset, e.g. presets=newton_renderer,rgb." + ) + + if set(self.tiled_camera.data_types) == {"depth"} and self.feature_extractor.enabled: + raise ValueError( + "Depth-only camera data type is intended for benchmarking only. " + "The keypoint-regression CNN cannot be meaningfully trained from depth alone. " + "Disable the feature extractor with 'feature_extractor.enabled=False' " + "(e.g. use Isaac-Repose-Cube-Shadow-Vision-Benchmark-Direct-v0), " + "or choose a data type that includes colour, e.g. presets=rgb." + ) + + +@configclass +class ShadowHandVisionEnvPlayCfg(ShadowHandVisionEnvCfg): + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=64, env_spacing=2.0, replicate_physics=True) + # inference for CNN + feature_extractor: FeatureExtractorCfg = FeatureExtractorCfg(train=False, load_checkpoint=True) + + +@configclass +class ShadowHandVisionBenchmarkEnvCfg(ShadowHandVisionEnvCfg): + """Benchmark configuration with the feature extractor CNN disabled. + + The tiled camera renders frames each step as normal, but the CNN forward pass is + bypassed — zero embeddings are returned instead. This isolates rendering throughput + from CNN inference overhead when profiling. + + The renderer backend and camera data types can still be selected via ``presets``:: + + presets = newton_renderer # benchmark with Newton renderer + presets = ovrtx_renderer # benchmark with OVRTX renderer + presets = rgb # benchmark RGB rendering only + presets = depth, newton_renderer # benchmark depth rendering with Newton + """ + + feature_extractor: FeatureExtractorCfg = FeatureExtractorCfg(enabled=False) diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml index 60be7d18110e..793f972fb7ce 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_ippo_cfg.yaml @@ -54,7 +54,9 @@ agent: learning_rate_scheduler: KLAdaptiveLR learning_rate_scheduler_kwargs: kl_threshold: 0.016 - state_preprocessor: RunningStandardScaler + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null + state_preprocessor: null state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml index 57c1c455185d..31c6087f500a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/agents/skrl_mappo_cfg.yaml @@ -28,7 +28,7 @@ models: clip_actions: False network: - name: net - input: OBSERVATIONS + input: STATES layers: [512, 512, 256, 128] activations: elu output: ONE @@ -54,10 +54,10 @@ agent: learning_rate_scheduler: KLAdaptiveLR learning_rate_scheduler_kwargs: kl_threshold: 0.016 + observation_preprocessor: RunningStandardScaler + observation_preprocessor_kwargs: null state_preprocessor: RunningStandardScaler state_preprocessor_kwargs: null - shared_state_preprocessor: RunningStandardScaler - shared_state_preprocessor_kwargs: null value_preprocessor: RunningStandardScaler value_preprocessor_kwargs: null random_timesteps: 0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py index 09bbff6e97c0..a692253cf2fb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py @@ -10,6 +10,7 @@ import numpy as np import torch +import warp as wp import isaaclab.sim as sim_utils from isaaclab.assets import Articulation, RigidObject @@ -63,12 +64,12 @@ def __init__(self, cfg: ShadowHandOverEnvCfg, render_mode: str | None = None, ** self.num_fingertips = len(self.finger_bodies) # joint limits - joint_pos_limits = self.right_hand.root_physx_view.get_dof_limits().to(self.device) + joint_pos_limits = wp.to_torch(self.right_hand.root_view.get_dof_limits()).to(self.device) self.hand_dof_lower_limits = joint_pos_limits[..., 0] self.hand_dof_upper_limits = joint_pos_limits[..., 1] # used to compare object position - self.in_hand_pos = self.object.data.default_root_state[:, 0:3].clone() + self.in_hand_pos = self.object.data.default_root_pose.torch[:, 0:3].clone() self.in_hand_pos[:, 2] -= 0.04 # default goal positions self.goal_rot = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device) @@ -78,6 +79,9 @@ def __init__(self, cfg: ShadowHandOverEnvCfg, render_mode: str | None = None, ** # initialize goal marker self.goal_markers = VisualizationMarkers(self.cfg.goal_object_cfg) + # Sticky per-env flag: True once the object reached the goal within threshold. + self._episode_succeeded = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device) + # unit tensors self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1)) @@ -145,11 +149,11 @@ def _apply_action(self) -> None: ] # set targets - self.right_hand.set_joint_position_target( - self.right_hand_curr_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices + self.right_hand.set_joint_position_target_index( + target=self.right_hand_curr_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices ) - self.left_hand.set_joint_position_target( - self.left_hand_curr_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices + self.left_hand.set_joint_position_target_index( + target=self.left_hand_curr_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices ) def _get_observations(self) -> dict[str, torch.Tensor]: @@ -277,7 +281,7 @@ def _get_states(self) -> torch.Tensor: def _get_rewards(self) -> dict[str, torch.Tensor]: # compute reward - goal_dist = torch.norm(self.object_pos - self.goal_pos, p=2, dim=-1) + goal_dist = torch.linalg.norm(self.object_pos - self.goal_pos, ord=2, dim=-1) rew_dist = 2 * torch.exp(-self.cfg.dist_reward_scale * goal_dist) # log reward components @@ -285,6 +289,9 @@ def _get_rewards(self) -> dict[str, torch.Tensor]: self.extras["log"] = dict() self.extras["log"]["dist_reward"] = rew_dist.mean() self.extras["log"]["dist_goal"] = goal_dist.mean() + self.extras["log"]["Metrics/goal_distance"] = goal_dist.mean().item() + # Sticky per-env success: True once the object reached the goal within threshold. + self._episode_succeeded |= goal_dist < self.cfg.success_distance_threshold return {"right_hand": rew_dist, "left_hand": rew_dist} @@ -303,6 +310,11 @@ def _get_dones(self) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: def _reset_idx(self, env_ids: Sequence[int] | torch.Tensor | None): if env_ids is None: env_ids = self.right_hand._ALL_INDICES + # Flush per-episode success (sticky binary: object ever reached the goal within threshold). + self.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self._episode_succeeded[env_ids].float().mean().item() + ) + self._episode_succeeded[env_ids] = False # reset articulation and rigid body attributes super()._reset_idx(env_ids) @@ -310,57 +322,60 @@ def _reset_idx(self, env_ids: Sequence[int] | torch.Tensor | None): self._reset_target_pose(env_ids) # reset object - object_default_state = self.object.data.default_root_state.clone()[env_ids] + object_default_pose = self.object.data.default_root_pose.torch.clone()[env_ids] + object_default_vel = self.object.data.default_root_vel.torch.clone()[env_ids] pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 3), device=self.device) - object_default_state[:, 0:3] = ( - object_default_state[:, 0:3] + self.cfg.reset_position_noise * pos_noise + self.scene.env_origins[env_ids] + object_default_pose[:, 0:3] = ( + object_default_pose[:, 0:3] + self.cfg.reset_position_noise * pos_noise + self.scene.env_origins[env_ids] ) rot_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 2), device=self.device) # noise for X and Y rotation - object_default_state[:, 3:7] = randomize_rotation( + object_default_pose[:, 3:7] = randomize_rotation( rot_noise[:, 0], rot_noise[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids] ) - object_default_state[:, 7:] = torch.zeros_like(self.object.data.default_root_state[env_ids, 7:]) - self.object.write_root_pose_to_sim(object_default_state[:, :7], env_ids) - self.object.write_root_velocity_to_sim(object_default_state[:, 7:], env_ids) + object_default_vel[:] = 0.0 + self.object.write_root_pose_to_sim_index(root_pose=object_default_pose, env_ids=env_ids) + self.object.write_root_velocity_to_sim_index(root_velocity=object_default_vel, env_ids=env_ids) # reset right hand - delta_max = self.hand_dof_upper_limits[env_ids] - self.right_hand.data.default_joint_pos[env_ids] - delta_min = self.hand_dof_lower_limits[env_ids] - self.right_hand.data.default_joint_pos[env_ids] + delta_max = self.hand_dof_upper_limits[env_ids] - self.right_hand.data.default_joint_pos.torch[env_ids] + delta_min = self.hand_dof_lower_limits[env_ids] - self.right_hand.data.default_joint_pos.torch[env_ids] dof_pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) rand_delta = delta_min + (delta_max - delta_min) * 0.5 * dof_pos_noise - dof_pos = self.right_hand.data.default_joint_pos[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta + dof_pos = self.right_hand.data.default_joint_pos.torch[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta dof_vel_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) - dof_vel = self.right_hand.data.default_joint_vel[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise + dof_vel = self.right_hand.data.default_joint_vel.torch[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise self.right_hand_prev_targets[env_ids] = dof_pos self.right_hand_curr_targets[env_ids] = dof_pos self.right_hand_dof_targets[env_ids] = dof_pos - self.right_hand.set_joint_position_target(dof_pos, env_ids=env_ids) - self.right_hand.write_joint_state_to_sim(dof_pos, dof_vel, env_ids=env_ids) + self.right_hand.set_joint_position_target_index(target=dof_pos, env_ids=env_ids) + self.right_hand.write_joint_position_to_sim_index(position=dof_pos, env_ids=env_ids) + self.right_hand.write_joint_velocity_to_sim_index(velocity=dof_vel, env_ids=env_ids) # reset left hand - delta_max = self.hand_dof_upper_limits[env_ids] - self.left_hand.data.default_joint_pos[env_ids] - delta_min = self.hand_dof_lower_limits[env_ids] - self.left_hand.data.default_joint_pos[env_ids] + delta_max = self.hand_dof_upper_limits[env_ids] - self.left_hand.data.default_joint_pos.torch[env_ids] + delta_min = self.hand_dof_lower_limits[env_ids] - self.left_hand.data.default_joint_pos.torch[env_ids] dof_pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) rand_delta = delta_min + (delta_max - delta_min) * 0.5 * dof_pos_noise - dof_pos = self.left_hand.data.default_joint_pos[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta + dof_pos = self.left_hand.data.default_joint_pos.torch[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta dof_vel_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device) - dof_vel = self.left_hand.data.default_joint_vel[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise + dof_vel = self.left_hand.data.default_joint_vel.torch[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise self.left_hand_prev_targets[env_ids] = dof_pos self.left_hand_curr_targets[env_ids] = dof_pos self.left_hand_dof_targets[env_ids] = dof_pos - self.left_hand.set_joint_position_target(dof_pos, env_ids=env_ids) - self.left_hand.write_joint_state_to_sim(dof_pos, dof_vel, env_ids=env_ids) + self.left_hand.set_joint_position_target_index(target=dof_pos, env_ids=env_ids) + self.left_hand.write_joint_position_to_sim_index(position=dof_pos, env_ids=env_ids) + self.left_hand.write_joint_velocity_to_sim_index(velocity=dof_vel, env_ids=env_ids) self._compute_intermediate_values() @@ -378,33 +393,33 @@ def _reset_target_pose(self, env_ids): def _compute_intermediate_values(self): # data for right hand - self.right_fingertip_pos = self.right_hand.data.body_pos_w[:, self.finger_bodies] - self.right_fingertip_rot = self.right_hand.data.body_quat_w[:, self.finger_bodies] + self.right_fingertip_pos = self.right_hand.data.body_pos_w.torch[:, self.finger_bodies] + self.right_fingertip_rot = self.right_hand.data.body_quat_w.torch[:, self.finger_bodies] self.right_fingertip_pos -= self.scene.env_origins.repeat((1, self.num_fingertips)).reshape( self.num_envs, self.num_fingertips, 3 ) - self.right_fingertip_velocities = self.right_hand.data.body_vel_w[:, self.finger_bodies] + self.right_fingertip_velocities = self.right_hand.data.body_vel_w.torch[:, self.finger_bodies] - self.right_hand_dof_pos = self.right_hand.data.joint_pos - self.right_hand_dof_vel = self.right_hand.data.joint_vel + self.right_hand_dof_pos = self.right_hand.data.joint_pos.torch + self.right_hand_dof_vel = self.right_hand.data.joint_vel.torch # data for left hand - self.left_fingertip_pos = self.left_hand.data.body_pos_w[:, self.finger_bodies] - self.left_fingertip_rot = self.left_hand.data.body_quat_w[:, self.finger_bodies] + self.left_fingertip_pos = self.left_hand.data.body_pos_w.torch[:, self.finger_bodies] + self.left_fingertip_rot = self.left_hand.data.body_quat_w.torch[:, self.finger_bodies] self.left_fingertip_pos -= self.scene.env_origins.repeat((1, self.num_fingertips)).reshape( self.num_envs, self.num_fingertips, 3 ) - self.left_fingertip_velocities = self.left_hand.data.body_vel_w[:, self.finger_bodies] + self.left_fingertip_velocities = self.left_hand.data.body_vel_w.torch[:, self.finger_bodies] - self.left_hand_dof_pos = self.left_hand.data.joint_pos - self.left_hand_dof_vel = self.left_hand.data.joint_vel + self.left_hand_dof_pos = self.left_hand.data.joint_pos.torch + self.left_hand_dof_vel = self.left_hand.data.joint_vel.torch # data for object - self.object_pos = self.object.data.root_pos_w - self.scene.env_origins - self.object_rot = self.object.data.root_quat_w - self.object_velocities = self.object.data.root_vel_w - self.object_linvel = self.object.data.root_lin_vel_w - self.object_angvel = self.object.data.root_ang_vel_w + self.object_pos = self.object.data.root_pos_w.torch - self.scene.env_origins + self.object_rot = self.object.data.root_quat_w.torch + self.object_velocities = self.object.data.root_vel_w.torch + self.object_linvel = self.object.data.root_lin_vel_w.torch + self.object_angvel = self.object.data.root_ang_vel_w.torch @torch.jit.script diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env_cfg.py index 855939392a22..acf37a54c4b0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env_cfg.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg import isaaclab.envs.mdp as mdp import isaaclab.sim as sim_utils @@ -12,7 +13,7 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.markers import VisualizationMarkersCfg from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim import PhysxCfg, SimulationCfg +from isaaclab.sim import SimulationCfg from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg from isaaclab.utils import configclass @@ -130,7 +131,7 @@ class ShadowHandOverEnvCfg(DirectMARLEnvCfg): static_friction=1.0, dynamic_friction=1.0, ), - physx=PhysxCfg( + physics=PhysxCfg( bounce_threshold_velocity=0.2, ), ) @@ -138,14 +139,14 @@ class ShadowHandOverEnvCfg(DirectMARLEnvCfg): right_robot_cfg: ArticulationCfg = SHADOW_HAND_CFG.replace(prim_path="/World/envs/env_.*/RightRobot").replace( init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.5), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), joint_pos={".*": 0.0}, ) ) left_robot_cfg: ArticulationCfg = SHADOW_HAND_CFG.replace(prim_path="/World/envs/env_.*/LeftRobot").replace( init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, -1.0, 0.5), - rot=(0.0, 0.0, 0.0, 1.0), + rot=(0.0, 0.0, 1.0, 0.0), joint_pos={".*": 0.0}, ) ) @@ -199,7 +200,7 @@ class ShadowHandOverEnvCfg(DirectMARLEnvCfg): collision_props=sim_utils.CollisionPropertiesCfg(), mass_props=sim_utils.MassPropertiesCfg(density=500.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.39, 0.54), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.39, 0.54), rot=(0.0, 0.0, 0.0, 1.0)), ) # goal object goal_object_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( @@ -222,5 +223,8 @@ class ShadowHandOverEnvCfg(DirectMARLEnvCfg): fall_dist = 0.24 vel_obs_scale = 0.2 act_moving_average = 1.0 + # success criteria + success_distance_threshold: float = 0.1 + """Object-to-goal distance below which the handover is considered successful [m].""" # reward-related scales dist_reward_scale = 20.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/ant_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/ant_env_cfg.py index 289d4f75f8c4..53781c0ced16 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/ant_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/ant/ant_env_cfg.py @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import KaminoSolverCfg, MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -13,10 +16,12 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import JointWrenchSensorCfg from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.classic.humanoid.mdp as mdp +from isaaclab_tasks.utils import PresetCfg ## # Pre-defined configs @@ -24,6 +29,46 @@ from isaaclab_assets.robots.ant import ANT_CFG # isort: skip +@configclass +class AntPhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + physx: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=38, + nconmax=15, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ), + num_substeps=1, + debug_mode=False, + ) + newton_kamino: NewtonCfg = NewtonCfg( + solver_cfg=KaminoSolverCfg( + integrator="moreau", + use_collision_detector=False, + sparse_jacobian=True, + constraints_alpha=0.1, + padmm_max_iterations=100, + padmm_primal_tolerance=1e-4, + padmm_dual_tolerance=1e-4, + padmm_compl_tolerance=1e-4, + padmm_rho_0=0.05, + padmm_eta=1e-5, + padmm_use_acceleration=True, + padmm_warmstart_mode="containers", + padmm_contact_warmstart_method="geom_pair_net_force", + padmm_use_graph_conditionals=False, + collision_detector_pipeline="unified", + collision_detector_max_contacts_per_pair=8, + ), + num_substeps=2, + debug_mode=False, + use_cuda_graph=True, + ) + + @configclass class MySceneCfg(InteractiveSceneCfg): """Configuration for the terrain scene with an ant robot.""" @@ -46,6 +91,9 @@ class MySceneCfg(InteractiveSceneCfg): # robot robot = ANT_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + # sensors + joint_wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + # lights light = AssetBaseCfg( prim_path="/World/light", @@ -86,8 +134,9 @@ class PolicyCfg(ObsGroup): func=mdp.body_incoming_wrench, scale=0.1, params={ - "asset_cfg": SceneEntityCfg( - "robot", body_names=["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"] + "sensor_cfg": SceneEntityCfg( + "joint_wrench", + body_names=["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"], ) }, ) @@ -101,6 +150,13 @@ def __post_init__(self): policy: PolicyCfg = PolicyCfg() +@configclass +class AntObservationsCfg(PresetCfg): + default: ObservationsCfg = ObservationsCfg() + physx: ObservationsCfg = ObservationsCfg() + newton_mjwarp: ObservationsCfg = ObservationsCfg() + + @configclass class EventCfg: """Configuration for events.""" @@ -162,7 +218,7 @@ class AntEnvCfg(ManagerBasedRLEnvCfg): # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0, clone_in_fabric=True) # Basic settings - observations: ObservationsCfg = ObservationsCfg() + observations: AntObservationsCfg = AntObservationsCfg() actions: ActionsCfg = ActionsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() @@ -177,7 +233,7 @@ def __post_init__(self): # simulation settings self.sim.dt = 1 / 120.0 self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 + self.sim.physics = AntPhysicsCfg() # default friction material self.sim.physics_material.static_friction = 1.0 self.sim.physics_material.dynamic_friction = 1.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py index d0840c4c1ed6..2c4092677fec 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py @@ -7,7 +7,7 @@ from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import TiledCameraCfg +from isaaclab.sensors import CameraCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.classic.cartpole.mdp as mdp @@ -24,9 +24,9 @@ class CartpoleRGBCameraSceneCfg(CartpoleSceneCfg): """Configuration for the cartpole environment with RGB camera.""" # add camera to the scene - tiled_camera: TiledCameraCfg = TiledCameraCfg( + tiled_camera: CameraCfg = CameraCfg( prim_path="{ENV_REGEX_NS}/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"), + offset=CameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.0, 0.1045, 0.0, 0.9945), convention="world"), data_types=["rgb"], spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) @@ -39,9 +39,9 @@ class CartpoleRGBCameraSceneCfg(CartpoleSceneCfg): @configclass class CartpoleDepthCameraSceneCfg(CartpoleSceneCfg): # add camera to the scene - tiled_camera: TiledCameraCfg = TiledCameraCfg( + tiled_camera: CameraCfg = CameraCfg( prim_path="{ENV_REGEX_NS}/Camera", - offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"), + offset=CameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.0, 0.1045, 0.0, 0.9945), convention="world"), data_types=["distance_to_camera"], spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py index 788920af058c..f351e635ff6a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py @@ -5,6 +5,9 @@ import math +from isaaclab_newton.physics import KaminoSolverCfg, MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -18,6 +21,7 @@ from isaaclab.utils import configclass import isaaclab_tasks.manager_based.classic.cartpole.mdp as mdp +from isaaclab_tasks.utils import PresetCfg ## # Pre-defined configs @@ -25,6 +29,52 @@ from isaaclab_assets.robots.cartpole import CARTPOLE_CFG # isort:skip +## +# Physics backend presets +## + + +@configclass +class CartpolePhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + physx: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=5, + nconmax=3, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + newton_kamino: NewtonCfg = NewtonCfg( + solver_cfg=KaminoSolverCfg( + integrator="moreau", + use_collision_detector=True, + sparse_jacobian=True, + constraints_alpha=0.1, + padmm_max_iterations=100, + padmm_primal_tolerance=1e-4, + padmm_dual_tolerance=1e-4, + padmm_compl_tolerance=1e-4, + padmm_rho_0=0.05, + padmm_eta=1e-5, + padmm_use_acceleration=True, + padmm_warmstart_mode="containers", + padmm_contact_warmstart_method="geom_pair_net_force", + padmm_use_graph_conditionals=False, + collision_detector_pipeline="unified", + collision_detector_max_contacts_per_pair=8, + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + + ## # Scene definition ## @@ -134,6 +184,8 @@ class RewardsCfg: weight=-0.005, params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"])}, ) + # (6) Success rate tracking (zero-weight, metric only) + success_rate = RewTerm(func=mdp.survival_success_rate, weight=0.0) @configclass @@ -179,3 +231,4 @@ def __post_init__(self) -> None: # simulation settings self.sim.dt = 1 / 120 self.sim.render_interval = self.decimation + self.sim.physics = CartpolePhysicsCfg() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.py index 155079c558f6..1ce92a1952e8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.py @@ -5,6 +5,6 @@ """This sub-module contains the functions that are specific to the cartpole environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .rewards import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.pyi new file mode 100644 index 000000000000..c5601c1fd946 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "joint_pos_target_l2", + "survival_success_rate", +] + +from .rewards import joint_pos_target_l2, survival_success_rate +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/rewards.py index 5500089d7f94..b2838a0cce9a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/cartpole/mdp/rewards.py @@ -9,11 +9,11 @@ import torch -from isaaclab.assets import Articulation -from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg from isaaclab.utils.math import wrap_to_pi if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv @@ -22,6 +22,25 @@ def joint_pos_target_l2(env: ManagerBasedRLEnv, target: float, asset_cfg: SceneE # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # wrap the joint positions to (-pi, pi) - joint_pos = wrap_to_pi(asset.data.joint_pos[:, asset_cfg.joint_ids]) + joint_pos = wrap_to_pi(asset.data.joint_pos.torch[:, asset_cfg.joint_ids]) # compute the reward return torch.sum(torch.square(joint_pos - target), dim=1) + + +class survival_success_rate(ManagerTermBase): + """Tracks episode survival as the success metric. + + Returns zero reward (pure metric tracking). Flushes ``Metrics/success_rate`` + into ``extras["log"]`` on episode reset, where success = timed out without + early termination. + """ + + def __init__(self, env: ManagerBasedRLEnv, cfg: RewardTermCfg): + super().__init__(cfg, env) + + def reset(self, env_ids: torch.Tensor): + survived = self._env.termination_manager.time_outs[env_ids] + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = survived.float().mean().item() + + def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: + return torch.zeros(env.num_envs, device=env.device) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/humanoid_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/humanoid_env_cfg.py index 37b9426df9b6..3058fc02caf2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/humanoid_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/humanoid_env_cfg.py @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -13,14 +16,34 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import JointWrenchSensorCfg from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.classic.humanoid.mdp as mdp +from isaaclab_tasks.utils import PresetCfg from isaaclab_assets.robots.humanoid import HUMANOID_CFG # isort:skip +@configclass +class HumanoidPhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + physx: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=80, + nconmax=25, + cone="pyramidal", + update_data_interval=2, + integrator="implicitfast", + impratio=1, + ), + num_substeps=2, + debug_mode=False, + ) + + ## # Scene definition ## @@ -42,6 +65,9 @@ class MySceneCfg(InteractiveSceneCfg): # robot robot = HUMANOID_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + # sensors + joint_wrench = JointWrenchSensorCfg(prim_path="{ENV_REGEX_NS}/Robot") + # lights light = AssetBaseCfg( prim_path="/World/light", @@ -95,7 +121,7 @@ class PolicyCfg(ObsGroup): feet_body_forces = ObsTerm( func=mdp.body_incoming_wrench, scale=0.01, - params={"asset_cfg": SceneEntityCfg("robot", body_names=["left_foot", "right_foot"])}, + params={"sensor_cfg": SceneEntityCfg("joint_wrench", body_names=["left_foot", "right_foot"])}, ) actions = ObsTerm(func=mdp.last_action) @@ -107,6 +133,13 @@ def __post_init__(self): policy: PolicyCfg = PolicyCfg() +@configclass +class HumanoidObservationsCfg(PresetCfg): + default: ObservationsCfg = ObservationsCfg() + physx: ObservationsCfg = ObservationsCfg() + newton_mjwarp: ObservationsCfg = ObservationsCfg() + + @configclass class EventCfg: """Configuration for events.""" @@ -199,7 +232,7 @@ class HumanoidEnvCfg(ManagerBasedRLEnvCfg): # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0, clone_in_fabric=True) # Basic settings - observations: ObservationsCfg = ObservationsCfg() + observations: HumanoidObservationsCfg = HumanoidObservationsCfg() actions: ActionsCfg = ActionsCfg() # MDP settings rewards: RewardsCfg = RewardsCfg() @@ -214,7 +247,7 @@ def __post_init__(self): # simulation settings self.sim.dt = 1 / 120.0 self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 + self.sim.physics = HumanoidPhysicsCfg() # default friction material self.sim.physics_material.static_friction = 1.0 self.sim.physics_material.dynamic_friction = 1.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.py index 9fd05f556350..95af9b285800 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.py @@ -5,7 +5,6 @@ """This sub-module contains the functions that are specific to the humanoid environment.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * -from .rewards import * +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.pyi new file mode 100644 index 000000000000..94bf0f7f2c5c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/__init__.pyi @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "base_angle_to_target", + "base_heading_proj", + "base_up_proj", + "base_yaw_roll", + "joint_pos_limits_penalty_ratio", + "move_to_target_bonus", + "power_consumption", + "progress_reward", + "upright_posture_bonus", +] + +from .observations import base_angle_to_target, base_heading_proj, base_up_proj, base_yaw_roll +from .rewards import ( + joint_pos_limits_penalty_ratio, + move_to_target_bonus, + power_consumption, + progress_reward, + upright_posture_bonus, +) +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py index 123c4eb7de34..d7195b0f380e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py @@ -10,10 +10,10 @@ import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedEnv @@ -22,7 +22,7 @@ def base_yaw_roll(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityC # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # extract euler angles (in world frame) - roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) + roll, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w.torch) # normalize angle to [-pi, pi] roll = torch.atan2(torch.sin(roll), torch.cos(roll)) yaw = torch.atan2(torch.sin(yaw), torch.cos(yaw)) @@ -35,7 +35,7 @@ def base_up_proj(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCf # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute base up vector - base_up_vec = -asset.data.projected_gravity_b + base_up_vec = -asset.data.projected_gravity_b.torch return base_up_vec[:, 2].unsqueeze(-1) @@ -47,11 +47,11 @@ def base_heading_proj( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction - to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] - to_target_pos[:, 2] = 0.0 + to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w.torch[:, :3] + to_target_pos = torch.cat((to_target_pos[:, :2], torch.zeros_like(to_target_pos[:, 2:3])), dim=-1) to_target_dir = math_utils.normalize(to_target_pos) # compute base forward vector - heading_vec = math_utils.quat_apply(asset.data.root_quat_w, asset.data.FORWARD_VEC_B) + heading_vec = math_utils.quat_apply(asset.data.root_quat_w.torch, asset.data.FORWARD_VEC_B.torch) # compute dot product between heading and target direction heading_proj = torch.bmm(heading_vec.view(env.num_envs, 1, 3), to_target_dir.view(env.num_envs, 3, 1)) @@ -65,10 +65,10 @@ def base_angle_to_target( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction - to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w[:, :3] + to_target_pos = torch.tensor(target_pos, device=env.device) - asset.data.root_pos_w.torch[:, :3] walk_target_angle = torch.atan2(to_target_pos[:, 1], to_target_pos[:, 0]) # compute base forward vector - _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) + _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w.torch) # normalize angle to target to [-pi, pi] angle_to_target = walk_target_angle - yaw angle_to_target = torch.atan2(torch.sin(angle_to_target), torch.cos(angle_to_target)) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/rewards.py index 51b47d11449e..18e73a4f22eb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/rewards.py @@ -11,12 +11,12 @@ import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils -from isaaclab.assets import Articulation from isaaclab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg from . import observations as obs if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv @@ -54,10 +54,13 @@ def reset(self, env_ids: torch.Tensor): asset: Articulation = self._env.scene["robot"] # compute projection of current heading to desired heading vector target_pos = torch.tensor(self.cfg.params["target_pos"], device=self.device) - to_target_pos = target_pos - asset.data.root_pos_w[env_ids, :3] + to_target_pos = target_pos - asset.data.root_pos_w.torch[env_ids, :3] # reward terms - self.potentials[env_ids] = -torch.norm(to_target_pos, p=2, dim=-1) / self._env.step_dt + self.potentials[env_ids] = -torch.linalg.norm(to_target_pos, ord=2, dim=-1) / self._env.step_dt self.prev_potentials[env_ids] = self.potentials[env_ids] + # flush survival success rate (survived = timed out without falling) + survived = self._env.termination_manager.time_outs[env_ids] + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = survived.float().mean().item() def __call__( self, @@ -69,11 +72,11 @@ def __call__( asset: Articulation = env.scene[asset_cfg.name] # compute vector to target target_pos = torch.tensor(target_pos, device=env.device) - to_target_pos = target_pos - asset.data.root_pos_w[:, :3] + to_target_pos = target_pos - asset.data.root_pos_w.torch[:, :3] to_target_pos[:, 2] = 0.0 # update history buffer and compute new potential self.prev_potentials[:] = self.potentials[:] - self.potentials[:] = -torch.norm(to_target_pos, p=2, dim=-1) / env.step_dt + self.potentials[:] = -torch.linalg.norm(to_target_pos, ord=2, dim=-1) / env.step_dt return self.potentials - self.prev_potentials @@ -106,7 +109,9 @@ def __call__( asset: Articulation = env.scene[asset_cfg.name] # compute the penalty over normalized joints joint_pos_scaled = math_utils.scale_transform( - asset.data.joint_pos, asset.data.soft_joint_pos_limits[..., 0], asset.data.soft_joint_pos_limits[..., 1] + asset.data.joint_pos.torch, + asset.data.soft_joint_pos_limits.torch[..., 0], + asset.data.soft_joint_pos_limits.torch[..., 1], ) # scale the violation amount by the gear ratio violation_amount = (torch.abs(joint_pos_scaled) - threshold) / (1 - threshold) @@ -141,4 +146,6 @@ def __call__( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # return power = torque * velocity (here actions: joint torques) - return torch.sum(torch.abs(env.action_manager.action * asset.data.joint_vel * self.gear_ratio_scaled), dim=-1) + return torch.sum( + torch.abs(env.action_manager.action * asset.data.joint_vel.torch * self.gear_ratio_scaled), dim=-1 + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.py index bc4d65f5b1f2..1a58aa96d858 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.py @@ -5,10 +5,6 @@ """This sub-module contains the functions that are specific to the drone ARL environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from isaaclab_contrib.mdp import * # noqa: F401, F403 - -from .commands import * # noqa: F401, F403 -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.pyi new file mode 100644 index 000000000000..d54142b0609d --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/__init__.pyi @@ -0,0 +1,21 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "DroneUniformPoseCommand", + "DroneUniformPoseCommandCfg", + "base_roll_pitch", + "generated_drone_commands", + "ang_vel_xyz_exp", + "distance_to_goal_exp", + "lin_vel_xyz_exp", + "yaw_aligned", +] + +from .commands import DroneUniformPoseCommand, DroneUniformPoseCommandCfg +from .observations import base_roll_pitch, generated_drone_commands +from .rewards import ang_vel_xyz_exp, distance_to_goal_exp, lin_vel_xyz_exp, yaw_aligned +from isaaclab.envs.mdp import * +from isaaclab_contrib.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.py index a7386d3ce539..1467b1d58929 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.py @@ -5,5 +5,6 @@ """Various command terms that can be used in the environment.""" -from .commands_cfg import DroneUniformPoseCommandCfg -from .drone_pose_command import DroneUniformPoseCommand +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.pyi new file mode 100644 index 000000000000..ec2c530126a6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "DroneUniformPoseCommandCfg", + "DroneUniformPoseCommand", +] + +from .commands_cfg import DroneUniformPoseCommandCfg +from .drone_pose_command import DroneUniformPoseCommand diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/commands_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/commands_cfg.py index f12cf1be082f..cbcb4577308c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/commands_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/commands_cfg.py @@ -3,14 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause +from typing import TYPE_CHECKING + from isaaclab.envs.mdp.commands.commands_cfg import UniformPoseCommandCfg from isaaclab.utils import configclass -from .drone_pose_command import DroneUniformPoseCommand +if TYPE_CHECKING: + from .drone_pose_command import DroneUniformPoseCommand @configclass class DroneUniformPoseCommandCfg(UniformPoseCommandCfg): """Configuration for uniform drone pose command generator.""" - class_type: type = DroneUniformPoseCommand + class_type: type["DroneUniformPoseCommand"] | str = "{DIR}.drone_pose_command:DroneUniformPoseCommand" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/drone_pose_command.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/drone_pose_command.py index f33aa41be4c9..a9072367c83b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/drone_pose_command.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/commands/drone_pose_command.py @@ -35,8 +35,8 @@ class DroneUniformPoseCommand(UniformPoseCommand): def _update_metrics(self): # transform command from base frame to simulation world frame self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms( - self.robot.data.root_pos_w, - self.robot.data.root_quat_w, + self.robot.data.root_pos_w.torch, + self.robot.data.root_quat_w.torch, self.pose_command_b[:, :3], self.pose_command_b[:, 3:], ) @@ -45,11 +45,11 @@ def _update_metrics(self): # Sub-terrain shift for correct position error calculation @grzemal self.pose_command_b[:, :3] + self._env.scene.env_origins, self.pose_command_w[:, 3:], - self.robot.data.body_pos_w[:, self.body_idx], - self.robot.data.body_quat_w[:, self.body_idx], + self.robot.data.body_pos_w.torch[:, self.body_idx], + self.robot.data.body_quat_w.torch[:, self.body_idx], ) - self.metrics["position_error"] = torch.norm(pos_error, dim=-1) - self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1) + self.metrics["position_error"] = torch.linalg.norm(pos_error, dim=-1) + self.metrics["orientation_error"] = torch.linalg.norm(rot_error, dim=-1) def _debug_vis_callback(self, event): # check if robot is initialized @@ -63,5 +63,5 @@ def _debug_vis_callback(self, event): self.pose_command_b[:, :3] + self._env.scene.env_origins, self.pose_command_b[:, 3:] ) # -- current body pose - body_link_pose_w = self.robot.data.body_link_pose_w[:, self.body_idx] + body_link_pose_w = self.robot.data.body_link_pose_w.torch[:, self.body_idx] self.current_pose_visualizer.visualize(body_link_pose_w[:, :3], body_link_pose_w[:, 3:7]) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/observations.py index 866eb04e00d5..c8b8048bd68b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/observations.py @@ -14,17 +14,16 @@ from typing import TYPE_CHECKING import torch -import torch.jit import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation from isaaclab.managers import SceneEntityCfg -from isaaclab_contrib.assets import Multirotor - if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedEnv, ManagerBasedRLEnv + from isaaclab_contrib.assets import Multirotor + from isaaclab.envs.utils.io_descriptors import generic_io_descriptor, record_shape """ @@ -50,7 +49,7 @@ def base_roll_pitch(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntit # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # extract euler angles (in world frame) - roll, pitch, _ = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) + roll, pitch, _ = math_utils.euler_xyz_from_quat(asset.data.root_quat_w.torch) # normalize angle to [-pi, pi] roll = math_utils.wrap_to_pi(roll) pitch = math_utils.wrap_to_pi(pitch) @@ -94,9 +93,11 @@ def generated_drone_commands( - A small epsilon (1e-8) is used to guard against zero-length direction vectors. """ asset: Multirotor = env.scene[asset_cfg.name] - current_position_w = asset.data.root_pos_w - env.scene.env_origins + current_position_w = asset.data.root_pos_w.torch - env.scene.env_origins command = env.command_manager.get_command(command_name) - current_position_b = math_utils.quat_apply_inverse(asset.data.root_link_quat_w, command[:, :3] - current_position_w) - current_position_b_dir = current_position_b / (torch.norm(current_position_b, dim=-1, keepdim=True) + 1e-8) - current_position_b_mag = torch.norm(current_position_b, dim=-1, keepdim=True) + current_position_b = math_utils.quat_apply_inverse( + asset.data.root_link_quat_w.torch, command[:, :3] - current_position_w + ) + current_position_b_dir = current_position_b / (torch.linalg.norm(current_position_b, dim=-1, keepdim=True) + 1e-8) + current_position_b_mag = torch.linalg.norm(current_position_b, dim=-1, keepdim=True) return torch.cat((current_position_b_dir, current_position_b_mag), dim=-1) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/rewards.py index 4ad040563a42..ce635cc544d8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/mdp/rewards.py @@ -10,13 +10,12 @@ import torch import isaaclab.utils.math as math_utils +from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv -from isaaclab.assets import RigidObject -from isaaclab.managers import SceneEntityCfg - """ Drone control rewards. """ @@ -51,7 +50,7 @@ def distance_to_goal_exp( command = env.command_manager.get_command(command_name) target_position_w = command[:, :3].clone() - current_position = asset.data.root_pos_w - env.scene.env_origins + current_position = asset.data.root_pos_w.torch - env.scene.env_origins # compute the error position_error_square = torch.sum(torch.square(target_position_w - current_position), dim=1) @@ -82,7 +81,7 @@ def ang_vel_xyz_exp( asset: RigidObject = env.scene[asset_cfg.name] # compute squared magnitude of angular velocity (all axes) - ang_vel_squared = torch.sum(torch.square(asset.data.root_ang_vel_b), dim=1) + ang_vel_squared = torch.sum(torch.square(asset.data.root_ang_vel_b.torch), dim=1) return torch.exp(-ang_vel_squared / std**2) @@ -109,7 +108,7 @@ def lin_vel_xyz_exp( asset: RigidObject = env.scene[asset_cfg.name] # compute squared magnitude of linear velocity (all axes) - lin_vel_squared = torch.sum(torch.square(asset.data.root_lin_vel_w), dim=1) + lin_vel_squared = torch.sum(torch.square(asset.data.root_lin_vel_w.torch), dim=1) return torch.exp(-lin_vel_squared / std**2) @@ -138,7 +137,7 @@ def yaw_aligned( asset: RigidObject = env.scene[asset_cfg.name] # extract yaw from current orientation - _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w) + _, _, yaw = math_utils.euler_xyz_from_quat(asset.data.root_quat_w.torch) # normalize yaw to [-pi, pi] (target is 0) yaw = math_utils.wrap_to_pi(yaw) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/track_position_state_based/config/arl_robot_1/track_position_state_based_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/track_position_state_based/config/arl_robot_1/track_position_state_based_env_cfg.py index 238ca65b5ef8..a78e4a151162 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/track_position_state_based/config/arl_robot_1/track_position_state_based_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/drone_arl/track_position_state_based/config/arl_robot_1/track_position_state_based_env_cfg.py @@ -6,6 +6,8 @@ import math from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -18,7 +20,7 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise from isaaclab_contrib.assets import MultirotorCfg @@ -226,4 +228,4 @@ def __post_init__(self): static_friction=1.0, dynamic_friction=1.0, ) - self.sim.physx.gpu_max_rigid_patch_count = 10 * 2**15 + self.sim.physics = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py index 72b01368b499..e1eda9c2fa98 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py @@ -5,5 +5,3 @@ """This sub-module contains the functions that are specific to the locomanipulation environments.""" - -from .tracking import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py index ff7e927b05e8..bd71c58d675b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py @@ -7,16 +7,15 @@ """This sub-module contains the functions that are specific to the locomanipulation environments.""" import gymnasium as gym -import os -from . import agents, fixed_base_upper_body_ik_g1_env_cfg, locomanipulation_g1_env_cfg +from . import agents gym.register( id="Isaac-PickPlace-Locomanipulation-G1-Abs-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": locomanipulation_g1_env_cfg.LocomanipulationG1EnvCfg, - "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + "env_cfg_entry_point": f"{__name__}.locomanipulation_g1_env_cfg:LocomanipulationG1EnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", }, disable_env_checker=True, ) @@ -25,7 +24,7 @@ id="Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", kwargs={ - "env_cfg_entry_point": fixed_base_upper_body_ik_g1_env_cfg.FixedBaseUpperBodyIKG1EnvCfg, + "env_cfg_entry_point": f"{__name__}.fixed_base_upper_body_ik_g1_env_cfg:FixedBaseUpperBodyIKG1EnvCfg", }, disable_env_checker=True, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py index b195334ba684..320d7d738ea0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py @@ -4,18 +4,22 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING +from typing import TYPE_CHECKING -from isaaclab.managers.action_manager import ActionTerm, ActionTermCfg +from isaaclab.managers.action_manager import ActionTermCfg from isaaclab.utils import configclass -from ..mdp.actions import AgileBasedLowerBodyAction +if TYPE_CHECKING: + from ..mdp.actions import AgileBasedLowerBodyAction @configclass class AgileBasedLowerBodyActionCfg(ActionTermCfg): """Configuration for the lower body action term that is based on Agile lower body RL policy.""" - class_type: type[ActionTerm] = AgileBasedLowerBodyAction + class_type: type["AgileBasedLowerBodyAction"] | str = ( + "isaaclab_tasks.manager_based.locomanipulation.pick_place.mdp.actions:AgileBasedLowerBodyAction" + ) """The class type for the lower body action term.""" joint_names: list[str] = MISSING diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py index 488d22c137b8..82532475eb69 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py @@ -9,9 +9,7 @@ including both fixed base and mobile configurations for upper body manipulation. """ -from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask -from isaaclab.controllers.pink_ik.null_space_posture_task import NullSpacePostureTask -from isaaclab.controllers.pink_ik.pink_ik_cfg import PinkIKControllerCfg +from isaaclab.controllers.pink_ik import LocalFrameTaskCfg, NullSpacePostureTaskCfg, PinkIKControllerCfg from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg ## @@ -25,25 +23,25 @@ show_ik_warnings=True, fail_on_joint_limit_violation=False, variable_input_tasks=[ - LocalFrameTask( - "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link", + LocalFrameTaskCfg( + frame="g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link", base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis", position_cost=8.0, # [cost] / [m] - orientation_cost=2.0, # [cost] / [rad] - lm_damping=10, # dampening for solver for step jumps - gain=0.5, + orientation_cost=4.0, # [cost] / [rad] + lm_damping=75, # dampening for solver for step jumps + gain=0.075, ), - LocalFrameTask( - "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link", + LocalFrameTaskCfg( + frame="g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link", base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis", position_cost=8.0, # [cost] / [m] - orientation_cost=2.0, # [cost] / [rad] - lm_damping=10, # dampening for solver for step jumps - gain=0.5, + orientation_cost=4.0, # [cost] / [rad] + lm_damping=75, # dampening for solver for step jumps + gain=0.075, ), - NullSpacePostureTask( - cost=0.5, - lm_damping=1, + NullSpacePostureTaskCfg( + cost=0.05, + lm_damping=75, controlled_frames=[ "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link", "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link", @@ -59,7 +57,7 @@ "waist_pitch_joint", "waist_roll_joint", ], - gain=0.3, + gain=0.075, ), ], fixed_input_tasks=[], diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py index 87c100a6cec3..06b64c781701 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py @@ -3,15 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_teleop import IsaacTeleopCfg, XrCfg import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.devices.device_base import DevicesCfg -from isaaclab.devices.openxr import OpenXRDeviceCfg, XrCfg -from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( - G1TriHandUpperBodyRetargeterCfg, -) from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm @@ -20,7 +16,7 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from isaaclab.utils import configclass -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp from isaaclab_tasks.manager_based.manipulation.pick_place import mdp as manip_mdp @@ -32,6 +28,203 @@ ) +def _build_g1_upper_body_pipeline(): + """Build an IsaacTeleop retargeting pipeline for G1 upper body teleoperation. + + Creates two Se3AbsRetargeters for left and right wrist pose tracking + and two TriHandMotionControllerRetargeters for left and right hand joint + control from VR controller buttons, flattened into a single action tensor + via TensorReorderer. + + Returns: + Tuple of (OutputCombiner, list): the pipeline with a single "action" + output containing the flattened 28D action tensor + [left_wrist(7), right_wrist(7), hand_joints(14)], and the list of + retargeter instances [left_se3, right_se3] for tuning UI. + """ + from isaacteleop.retargeters import ( + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + TriHandMotionControllerConfig, + TriHandMotionControllerRetargeter, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + # Create input sources (trackers are auto-discovered from pipeline) + controllers = ControllersSource(name="controllers") + + # External input: world-to-anchor 4x4 transform matrix provided by IsaacTeleopDevice + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + + # Apply the coordinate-frame transform to controller poses so that + # downstream retargeters receive data in the simulation world frame. + transformed_controllers = controllers.transformed(transform_input.output(ValueInput.VALUE)) + + # ------------------------------------------------------------------------- + # SE3 Absolute Pose Retargeters (left and right wrists) + # ------------------------------------------------------------------------- + # Rotation offsets from G1TriHandUpperBodyRetargeter._retarget_abs: + # Left: (-0.2706, 0.6533, 0.2706, 0.6533) xyzw -- 90 deg about Y then -45 deg about X + # Right: (-0.7071, 0, 0.7071, 0) xyzw + + left_se3_cfg = Se3RetargeterConfig( + input_device=ControllersSource.LEFT, + zero_out_xy_rotation=False, + use_wrist_rotation=False, + use_wrist_position=False, + target_offset_roll=45.0, + target_offset_pitch=180.0, + target_offset_yaw=-90.0, + ) + left_se3 = Se3AbsRetargeter(left_se3_cfg, name="left_ee_pose") + connected_left_se3 = left_se3.connect( + { + ControllersSource.LEFT: transformed_controllers.output(ControllersSource.LEFT), + } + ) + + right_se3_cfg = Se3RetargeterConfig( + input_device=ControllersSource.RIGHT, + zero_out_xy_rotation=False, + use_wrist_rotation=False, + use_wrist_position=False, + target_offset_roll=-135.0, + target_offset_pitch=0.0, + target_offset_yaw=90.0, + ) + right_se3 = Se3AbsRetargeter(right_se3_cfg, name="right_ee_pose") + connected_right_se3 = right_se3.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TriHand Motion Controller Retargeters (left and right hands) + # ------------------------------------------------------------------------- + # Generic joint names matching TriHand 7-DOF output order: + # [thumb_rotation, thumb_proximal, thumb_distal, + # index_proximal, index_distal, middle_proximal, middle_distal] + hand_joint_names = [ + "thumb_rotation", + "thumb_proximal", + "thumb_distal", + "index_proximal", + "index_distal", + "middle_proximal", + "middle_distal", + ] + + left_trihand_cfg = TriHandMotionControllerConfig( + hand_joint_names=hand_joint_names, + controller_side="left", + ) + left_trihand = TriHandMotionControllerRetargeter(left_trihand_cfg, name="trihand_left") + connected_left_trihand = left_trihand.connect( + { + ControllersSource.LEFT: transformed_controllers.output(ControllersSource.LEFT), + } + ) + + right_trihand_cfg = TriHandMotionControllerConfig( + hand_joint_names=hand_joint_names, + controller_side="right", + ) + right_trihand = TriHandMotionControllerRetargeter(right_trihand_cfg, name="trihand_right") + connected_right_trihand = right_trihand.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TensorReorderer: flatten into a 28D action tensor + # ------------------------------------------------------------------------- + # Se3AbsRetargeter outputs 7D arrays: [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w] + left_ee_elements = ["l_pos_x", "l_pos_y", "l_pos_z", "l_quat_x", "l_quat_y", "l_quat_z", "l_quat_w"] + right_ee_elements = ["r_pos_x", "r_pos_y", "r_pos_z", "r_quat_x", "r_quat_y", "r_quat_z", "r_quat_w"] + + # TriHand outputs 7 scalars per hand (positionally mapped): + # [thumb_rotation, thumb_proximal, thumb_distal, + # index_proximal, index_distal, middle_proximal, middle_distal] + left_hand_elements = [ + "l_thumb_rotation", + "l_thumb_proximal", + "l_thumb_distal", + "l_index_proximal", + "l_index_distal", + "l_middle_proximal", + "l_middle_distal", + ] + right_hand_elements = [ + "r_thumb_rotation", + "r_thumb_proximal", + "r_thumb_distal", + "r_index_proximal", + "r_index_distal", + "r_middle_proximal", + "r_middle_distal", + ] + + # Output order must match the PinkInverseKinematicsActionCfg expected tensor layout: + # [left_wrist(7), right_wrist(7), hand_joints(14)] + # Hand joints follow hand_joint_names order from G1_UPPER_BODY_IK_ACTION_CFG. + output_order = ( + left_ee_elements + + right_ee_elements + + [ + # hand_joint_names indices 0-5 (proximal / 0-joints) + "l_index_proximal", + "l_middle_proximal", + "l_thumb_rotation", + "r_index_proximal", + "r_middle_proximal", + "r_thumb_rotation", + # hand_joint_names indices 6-11 (distal / 1-joints) + "l_index_distal", + "l_middle_distal", + "l_thumb_proximal", + "r_index_distal", + "r_middle_distal", + "r_thumb_proximal", + # hand_joint_names indices 12-13 (thumb tip / 2-joints) + "l_thumb_distal", + "r_thumb_distal", + ] + ) + + reorderer = TensorReorderer( + input_config={ + "left_ee_pose": left_ee_elements, + "right_ee_pose": right_ee_elements, + "left_hand_joints": left_hand_elements, + "right_hand_joints": right_hand_elements, + }, + output_order=output_order, + name="action_reorderer", + input_types={ + "left_ee_pose": "array", + "right_ee_pose": "array", + "left_hand_joints": "scalar", + "right_hand_joints": "scalar", + }, + ) + connected_reorderer = reorderer.connect( + { + "left_ee_pose": connected_left_se3.output("ee_pose"), + "right_ee_pose": connected_right_se3.output("ee_pose"), + "left_hand_joints": connected_left_trihand.output("hand_joints"), + "right_hand_joints": connected_right_trihand.output("hand_joints"), + } + ) + + pipeline = OutputCombiner({"action": connected_reorderer.output("output")}) + return pipeline, [left_se3, right_se3] + + ## # Scene definition ## @@ -47,7 +240,7 @@ class FixedBaseUpperBodyIKG1SceneCfg(InteractiveSceneCfg): # Table packing_table = AssetBaseCfg( prim_path="/World/envs/env_.*/PackingTable", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), @@ -56,7 +249,7 @@ class FixedBaseUpperBodyIKG1SceneCfg(InteractiveSceneCfg): object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", scale=(0.75, 0.75, 0.75), @@ -144,7 +337,10 @@ class TerminationsCfg: func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")} ) - success = DoneTerm(func=manip_mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"}) + success = DoneTerm( + func=manip_mdp.task_done_pick_place, + params={"task_link_name": "right_wrist_yaw_link"}, + ) ## @@ -175,12 +371,6 @@ class FixedBaseUpperBodyIKG1EnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, -0.45), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - def __post_init__(self): """Post initialization.""" # general settings @@ -193,23 +383,18 @@ def __post_init__(self): # Set the URDF and mesh paths for the IK controller urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" # noqa: E501 - # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. - self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) - - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - G1TriHandUpperBodyRetargeterCfg( - enable_visualization=True, - # OpenXR hand tracking has 26 joints per hand - num_open_xr_hand_joints=2 * 26, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # Defer Nucleus path resolution to controller initialization at runtime. + self.actions.upper_body_ik.controller.urdf_path = urdf_omniverse_path + + # IsaacTeleop-based teleoperation pipeline (resolved lazily at runtime). + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, -0.30), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) + + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_g1_upper_body_pipeline()[0], + # retargeters_to_tune=lambda: _build_g1_upper_body_pipeline()[1], + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py index 9ff8102fe2e5..1a2ef2826de4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py @@ -3,22 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_teleop import IsaacTeleopCfg, XrAnchorRotationMode, XrCfg + import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.devices.device_base import DevicesCfg -from isaaclab.devices.openxr import OpenXRDeviceCfg, XrCfg -from isaaclab.devices.openxr.retargeters.humanoid.unitree.g1_lower_body_standing import G1LowerBodyStandingRetargeterCfg -from isaaclab.devices.openxr.retargeters.humanoid.unitree.g1_motion_controller_locomotion import ( - G1LowerBodyStandingMotionControllerRetargeterCfg, -) -from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_motion_ctrl_retargeter import ( - G1TriHandUpperBodyMotionControllerRetargeterCfg, -) -from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( - G1TriHandUpperBodyRetargeterCfg, -) -from isaaclab.devices.openxr.xr_cfg import XrAnchorRotationMode from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm @@ -27,7 +16,7 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from isaaclab.utils import configclass -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.action_cfg import AgileBasedLowerBodyActionCfg @@ -43,6 +32,228 @@ ) +def _build_g1_locomanipulation_pipeline(): + """Build an IsaacTeleop retargeting pipeline for G1 locomanipulation teleoperation. + + Creates two Se3AbsRetargeters for left and right wrist pose tracking, + two TriHandMotionControllerRetargeters for left and right hand joint + control from VR controller buttons, and a LocomotionRootCmdRetargeter + for base velocity commands from controller thumbsticks. All outputs + are flattened into a single action tensor via TensorReorderer. + + Returns: + OutputCombiner with a single "action" output containing the flattened + 32D action tensor: [left_wrist(7), right_wrist(7), hand_joints(14), locomotion(4)]. + """ + from isaacteleop.retargeters import ( + LocomotionRootCmdRetargeter, + LocomotionRootCmdRetargeterConfig, + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + TriHandMotionControllerConfig, + TriHandMotionControllerRetargeter, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + # Create input sources (trackers are auto-discovered from pipeline) + controllers = ControllersSource(name="controllers") + + # External input: world-to-anchor 4x4 transform matrix provided by IsaacTeleopDevice + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + + # Apply the coordinate-frame transform to controller poses so that + # downstream retargeters receive data in the simulation world frame. + transformed_controllers = controllers.transformed(transform_input.output(ValueInput.VALUE)) + + # ------------------------------------------------------------------------- + # SE3 Absolute Pose Retargeters (left and right wrists) + # ------------------------------------------------------------------------- + # Rotation offsets from G1TriHandUpperBodyRetargeter._retarget_abs: + # Left: (-0.2706, 0.6533, 0.2706, 0.6533) xyzw -- 90 deg about Y then -45 deg about X + # Right: (-0.7071, 0, 0.7071, 0) xyzw + + left_se3_cfg = Se3RetargeterConfig( + input_device=ControllersSource.LEFT, + zero_out_xy_rotation=False, + use_wrist_rotation=False, + use_wrist_position=False, + target_offset_roll=45.0, + target_offset_pitch=180.0, + target_offset_yaw=-90.0, + ) + left_se3 = Se3AbsRetargeter(left_se3_cfg, name="left_ee_pose") + connected_left_se3 = left_se3.connect( + { + ControllersSource.LEFT: transformed_controllers.output(ControllersSource.LEFT), + } + ) + + right_se3_cfg = Se3RetargeterConfig( + input_device=ControllersSource.RIGHT, + zero_out_xy_rotation=False, + use_wrist_rotation=False, + use_wrist_position=False, + target_offset_roll=-135.0, + target_offset_pitch=0.0, + target_offset_yaw=90.0, + ) + right_se3 = Se3AbsRetargeter(right_se3_cfg, name="right_ee_pose") + connected_right_se3 = right_se3.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TriHand Motion Controller Retargeters (left and right hands) + # ------------------------------------------------------------------------- + # Generic joint names matching TriHand 7-DOF output order: + # [thumb_rotation, thumb_proximal, thumb_distal, + # index_proximal, index_distal, middle_proximal, middle_distal] + hand_joint_names = [ + "thumb_rotation", + "thumb_proximal", + "thumb_distal", + "index_proximal", + "index_distal", + "middle_proximal", + "middle_distal", + ] + + left_trihand_cfg = TriHandMotionControllerConfig( + hand_joint_names=hand_joint_names, + controller_side="left", + ) + left_trihand = TriHandMotionControllerRetargeter(left_trihand_cfg, name="trihand_left") + connected_left_trihand = left_trihand.connect( + { + ControllersSource.LEFT: transformed_controllers.output(ControllersSource.LEFT), + } + ) + + right_trihand_cfg = TriHandMotionControllerConfig( + hand_joint_names=hand_joint_names, + controller_side="right", + ) + right_trihand = TriHandMotionControllerRetargeter(right_trihand_cfg, name="trihand_right") + connected_right_trihand = right_trihand.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # Locomotion Root Command Retargeter (base velocity from thumbsticks) + # ------------------------------------------------------------------------- + locomotion_cfg = LocomotionRootCmdRetargeterConfig( + initial_hip_height=0.72, + movement_scale=0.5, + rotation_scale=0.35, + dt=1.0 / 100.0, # Must match rendering dt: sim.dt (1/200) * render_interval (2) + ) + locomotion = LocomotionRootCmdRetargeter(locomotion_cfg, name="locomotion") + connected_locomotion = locomotion.connect( + { + "controller_left": controllers.output(ControllersSource.LEFT), + "controller_right": controllers.output(ControllersSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TensorReorderer: flatten into a 32D action tensor + # ------------------------------------------------------------------------- + # Se3AbsRetargeter outputs 7D arrays: [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w] + left_ee_elements = ["l_pos_x", "l_pos_y", "l_pos_z", "l_quat_x", "l_quat_y", "l_quat_z", "l_quat_w"] + right_ee_elements = ["r_pos_x", "r_pos_y", "r_pos_z", "r_quat_x", "r_quat_y", "r_quat_z", "r_quat_w"] + + # TriHand outputs 7 scalars per hand (positionally mapped): + # [thumb_rotation, thumb_proximal, thumb_distal, + # index_proximal, index_distal, middle_proximal, middle_distal] + left_hand_elements = [ + "l_thumb_rotation", + "l_thumb_proximal", + "l_thumb_distal", + "l_index_proximal", + "l_index_distal", + "l_middle_proximal", + "l_middle_distal", + ] + right_hand_elements = [ + "r_thumb_rotation", + "r_thumb_proximal", + "r_thumb_distal", + "r_index_proximal", + "r_index_distal", + "r_middle_proximal", + "r_middle_distal", + ] + + # Locomotion outputs 4D array: [vel_x, vel_y, rot_vel_z, hip_height] + locomotion_elements = ["loco_vel_x", "loco_vel_y", "loco_rot_vel_z", "loco_hip_height"] + + # Output order must match the action space layout expected by the environment: + # [left_wrist(7), right_wrist(7), hand_joints(14), locomotion(4)] + # Hand joints follow hand_joint_names order from G1_UPPER_BODY_IK_ACTION_CFG. + # Locomotion (4D) is consumed by AgileBasedLowerBodyAction. + output_order = ( + left_ee_elements + + right_ee_elements + + [ + # hand_joint_names indices 0-5 (proximal / 0-joints) + "l_index_proximal", + "l_middle_proximal", + "l_thumb_rotation", + "r_index_proximal", + "r_middle_proximal", + "r_thumb_rotation", + # hand_joint_names indices 6-11 (distal / 1-joints) + "l_index_distal", + "l_middle_distal", + "l_thumb_proximal", + "r_index_distal", + "r_middle_distal", + "r_thumb_proximal", + # hand_joint_names indices 12-13 (thumb tip / 2-joints) + "l_thumb_distal", + "r_thumb_distal", + ] + + locomotion_elements + ) + + reorderer = TensorReorderer( + input_config={ + "left_ee_pose": left_ee_elements, + "right_ee_pose": right_ee_elements, + "left_hand_joints": left_hand_elements, + "right_hand_joints": right_hand_elements, + "locomotion": locomotion_elements, + }, + output_order=output_order, + name="action_reorderer", + input_types={ + "left_ee_pose": "array", + "right_ee_pose": "array", + "left_hand_joints": "scalar", + "right_hand_joints": "scalar", + "locomotion": "array", + }, + ) + connected_reorderer = reorderer.connect( + { + "left_ee_pose": connected_left_se3.output("ee_pose"), + "right_ee_pose": connected_right_se3.output("ee_pose"), + "left_hand_joints": connected_left_trihand.output("hand_joints"), + "right_hand_joints": connected_right_trihand.output("hand_joints"), + "locomotion": connected_locomotion.output("root_command"), + } + ) + + return OutputCombiner({"action": connected_reorderer.output("output")}) + + ## # Scene definition ## @@ -58,7 +269,7 @@ class LocomanipulationG1SceneCfg(InteractiveSceneCfg): # Table packing_table = AssetBaseCfg( prim_path="/World/envs/env_.*/PackingTable", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), @@ -67,7 +278,7 @@ class LocomanipulationG1SceneCfg(InteractiveSceneCfg): object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", scale=(0.75, 0.75, 0.75), @@ -162,7 +373,21 @@ class TerminationsCfg: func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")} ) - success = DoneTerm(func=manip_mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"}) + object_too_far = DoneTerm( + func=locomanip_mdp.object_too_far_from_robot, + params={ + "robot_cfg": SceneEntityCfg("robot"), + "object_cfg": SceneEntityCfg("object"), + "max_distance": 1.0, + }, + ) + + success = DoneTerm( + func=manip_mdp.task_done_pick_place, + params={ + "task_link_name": "right_wrist_yaw_link", + }, + ) ## @@ -192,12 +417,6 @@ class LocomanipulationG1EnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, -0.35), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - def __post_init__(self): """Post initialization.""" # general settings @@ -207,48 +426,19 @@ def __post_init__(self): self.sim.dt = 1 / 200 # 200Hz self.sim.render_interval = 2 - # Set the URDF and mesh paths for the IK controller - urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" # noqa: E501 - - # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. - self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) + # Set the URDF path for the IK controller. Path resolution (Nucleus → local) happens at runtime. + self.actions.upper_body_ik.controller.urdf_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" # noqa: E501 + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, -0.95), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) self.xr.anchor_prim_path = "/World/envs/env_0/Robot/pelvis" self.xr.fixed_anchor_height = True - # Ensure XR anchor rotation follows the robot pelvis (yaw only), with smoothing for comfort self.xr.anchor_rotation_mode = XrAnchorRotationMode.FOLLOW_PRIM_SMOOTHED - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - G1TriHandUpperBodyRetargeterCfg( - enable_visualization=True, - # OpenXR hand tracking has 26 joints per hand - num_open_xr_hand_joints=2 * 26, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - G1LowerBodyStandingRetargeterCfg( - sim_device=self.sim.device, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - "motion_controllers": OpenXRDeviceCfg( - retargeters=[ - G1TriHandUpperBodyMotionControllerRetargeterCfg( - enable_visualization=True, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - G1LowerBodyStandingMotionControllerRetargeterCfg( - sim_device=self.sim.device, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=_build_g1_locomanipulation_pipeline, + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py index 7e559309b5cf..d5fea96a02fd 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py @@ -6,7 +6,6 @@ """This sub-module contains the functions that are specific to the locomanipulation environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .actions import * # noqa: F401, F403 -from .observations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.pyi new file mode 100644 index 000000000000..7398059fa3ef --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "AgileBasedLowerBodyAction", + "upper_body_last_action", + "object_too_far_from_robot", + "task_done_pick_place_table_frame", +] + +from .actions import AgileBasedLowerBodyAction +from .observations import upper_body_last_action +from .terminations import object_too_far_from_robot, task_done_pick_place_table_frame +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py index 64d27dbc2f2a..32c768981b3e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py @@ -46,7 +46,7 @@ def __init__(self, cfg: AgileBasedLowerBodyActionCfg, env: ManagerBasedEnv): # Get the scale and offset from the configuration self._policy_output_scale = torch.tensor(cfg.policy_output_scale, device=env.device) - self._policy_output_offset = self._asset.data.default_joint_pos[:, self._joint_ids].clone() + self._policy_output_offset = self._asset.data.default_joint_pos.torch[:, self._joint_ids].clone() # Create tensors to store raw and processed actions self._raw_actions = torch.zeros(self.num_envs, len(self._joint_ids), device=self.device) @@ -123,4 +123,4 @@ def process_actions(self, actions: torch.Tensor): def apply_actions(self): """Apply the actions to the environment.""" # Store the raw actions - self._asset.set_joint_position_target(self._processed_actions, joint_ids=self._joint_ids) + self._asset.set_joint_position_target_index(target=self._processed_actions, joint_ids=self._joint_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py index d4b3f2b4bdf4..4057bc6fe590 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py @@ -3,9 +3,15 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +from typing import TYPE_CHECKING + import torch -from isaaclab.envs import ManagerBasedRLEnv +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.managers import SceneEntityCfg @@ -15,7 +21,7 @@ def upper_body_last_action( ) -> torch.Tensor: """Extract the last action of the upper body.""" asset = env.scene[asset_cfg.name] - joint_pos_target = asset.data.joint_pos_target + joint_pos_target = asset.data.joint_pos_target.torch # Use joint_names from asset_cfg to find indices joint_names = asset_cfg.joint_names if hasattr(asset_cfg, "joint_names") else None diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/terminations.py new file mode 100644 index 000000000000..db89776c3e37 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/terminations.py @@ -0,0 +1,154 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Termination functions specific to locomanipulation environments.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch + +from isaaclab.managers import SceneEntityCfg +from isaaclab.utils.math import quat_apply_inverse + +if TYPE_CHECKING: + from isaaclab.assets import RigidObject + from isaaclab.envs import ManagerBasedRLEnv + + +def task_done_pick_place_table_frame( + env: ManagerBasedRLEnv, + task_link_name: str = "", + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + table_cfg: SceneEntityCfg = SceneEntityCfg("table"), + right_wrist_max_x: float = 0.26, + min_x: float = 0.40, + max_x: float = 0.85, + min_y: float = -0.20, + max_y: float = 0.05, + max_height: float = 1.10, + min_vel: float = 0.20, + debug: bool = False, +) -> torch.Tensor: + """Determine if the object placement task is complete for locomanipulation environments. + + Unlike the base pick-place termination, this version checks object position relative + to the destination table frame rather than the environment origin. This is necessary + for locomanipulation tasks where the destination table can be placed at arbitrary + world positions. + + This function checks whether all success conditions for the task have been met: + 1. Object is within the target x/y range relative to the destination table + 2. Object is below a maximum height relative to the destination table + 3. Object velocity is below threshold + 4. Right robot wrist is retracted back towards body (past a given x threshold in table frame) + + Args: + env: The RL environment instance. + task_link_name: Name of the right wrist link on the robot. + object_cfg: Configuration for the object entity. + table_cfg: Configuration for the destination table entity (must be a FrameView). + right_wrist_max_x: Maximum x position of the right wrist in table frame for task completion. + min_x: Minimum x position of the object relative to the table for task completion. + max_x: Maximum x position of the object relative to the table for task completion. + min_y: Minimum y position of the object relative to the table for task completion. + max_y: Maximum y position of the object relative to the table for task completion. + max_height: Maximum height (z) of the object relative to the table for task completion. + min_vel: Maximum velocity magnitude of the object for task completion. + debug: If True, print debug info for the first environment each step. + + Returns: + Boolean tensor indicating which environments have completed the task. + """ + if task_link_name == "": + raise ValueError("task_link_name must be provided to task_done_pick_place") + + object: RigidObject = env.scene[object_cfg.name] + table = env.scene[table_cfg.name] + # Avoid importing sim views at module-load time for pure cfg loading. + if not hasattr(table, "get_world_poses"): + raise TypeError(f"Expected table '{table_cfg.name}' to expose get_world_poses(), got {type(table)}") + + # Get table world pose + table_pos_w, table_quat_w = table.get_world_poses() + table_pos_w, table_quat_w = table_pos_w.torch, table_quat_w.torch + + # Broadcast table pose if a single table is shared across all envs + object_root_pos_w = object.data.root_pos_w.torch # [num_envs, 3] + if table_pos_w.shape[0] == 1 and object_root_pos_w.shape[0] > 1: + table_pos_w = table_pos_w.expand(object_root_pos_w.shape[0], -1) + table_quat_w = table_quat_w.expand(object_root_pos_w.shape[0], -1) + + # Object position in table frame + object_to_table_pos = quat_apply_inverse(table_quat_w, object_root_pos_w - table_pos_w) + object_x = object_to_table_pos[:, 0] + object_y = object_to_table_pos[:, 1] + object_height = object_to_table_pos[:, 2] + object_vel = torch.abs(object.data.root_vel_w.torch) + + # Right wrist position in table frame + robot_body_pos_w = env.scene["robot"].data.body_pos_w.torch + right_eef_idx = env.scene["robot"].data.body_names.index(task_link_name) + right_wrist_pos_w = robot_body_pos_w[:, right_eef_idx, :] - table_pos_w + right_wrist_x = quat_apply_inverse(table_quat_w, right_wrist_pos_w)[:, 0] + + # Check all success conditions + done = object_x < max_x + done = torch.logical_and(done, object_x > min_x) + done = torch.logical_and(done, object_y < max_y) + done = torch.logical_and(done, object_y > min_y) + done = torch.logical_and(done, object_height < max_height) + done = torch.logical_and(done, right_wrist_x < right_wrist_max_x) + done = torch.logical_and(done, object_vel[:, 0] < min_vel) + done = torch.logical_and(done, object_vel[:, 1] < min_vel) + done = torch.logical_and(done, object_vel[:, 2] < min_vel) + + if debug: + env_id = 0 + obj_vel_env = object_vel[env_id] + print( + "[task_done_pick_place] " + f"obj_pos_t=({object_x[env_id]:.3f}, {object_y[env_id]:.3f}, {object_height[env_id]:.3f}), " + f"obj_vel=({obj_vel_env[0]:.3f}, {obj_vel_env[1]:.3f}, {obj_vel_env[2]:.3f}), " + f"right_wrist_x={right_wrist_x[env_id]:.3f} | " + f"x[{min_x:.3f},{max_x:.3f}] y[{min_y:.3f},{max_y:.3f}] " + f"z<{max_height:.3f} wrist_x<{right_wrist_max_x:.3f} vel<{min_vel:.3f} " + f"=> done={bool(done[env_id])}", + flush=True, + ) + + return done + + +def object_too_far_from_robot( + env: ManagerBasedRLEnv, + robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + max_distance: float = 1.0, +) -> torch.Tensor: + """Terminate when the object is too far from the robot (failed to pick up). + + This checks the distance between the robot's root position and the object's position. + If the distance exceeds max_distance, the episode is terminated as a failure. + + Args: + env: The RL environment instance. + robot_cfg: Configuration for the robot entity. + object_cfg: Configuration for the object entity. + max_distance: Maximum allowed distance between robot and object. + + Returns: + Boolean tensor indicating which environments have exceeded the distance threshold. + """ + robot: RigidObject = env.scene[robot_cfg.name] + object: RigidObject = env.scene[object_cfg.name] + + robot_pos = robot.data.root_pos_w.torch[:, :3] + object_pos = object.data.root_pos_w.torch[:, :3] + + distance = torch.norm(robot_pos - object_pos, dim=1) + + return distance > max_distance diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py index a91ff0907dc7..a83e3e2b1e62 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/tracking/config/digit/loco_manip_env_cfg.py @@ -10,12 +10,12 @@ from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.utils import configclass -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp import isaaclab_tasks.manager_based.manipulation.reach.mdp as manipulation_mdp from isaaclab_tasks.manager_based.locomotion.velocity.config.digit.rough_env_cfg import DigitRewards, DigitRoughEnvCfg -from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import EventCfg +from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import EventsCfg from isaaclab_assets.robots.agility import ARM_JOINT_NAMES, LEG_JOINT_NAMES @@ -185,7 +185,7 @@ class DigitLocoManipCommands: @configclass -class DigitEvents(EventCfg): +class DigitEvents(EventsCfg): # Add an external force to simulate a payload being carried. left_hand_force = EventTermCfg( func=mdp.apply_external_force_torque, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/__init__.py index 5ce57dc1bd56..18091487866a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/__init__.py @@ -4,5 +4,3 @@ # SPDX-License-Identifier: BSD-3-Clause """Locomotion environments for legged robots.""" - -from .velocity import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py index eb239d363775..93541ff765ae 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import UnitreeA1RoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=30, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class UnitreeA1FlatEnvCfg(UnitreeA1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py index 371ccb5b0cd4..2857d3dc0341 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg @@ -26,27 +27,13 @@ def __post_init__(self): self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 - # reduce action scale - self.actions.joint_pos.scale = 0.25 - - # event - self.events.push_robot = None - self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0) + # A1 uses "trunk" as base body self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" - self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } - self.events.base_com = None + self.events.base_com.default.params["asset_cfg"].body_names = "trunk" + + # reduce action scale + self.actions.joint_pos.scale = 0.25 # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py index 134fd0154bf8..7b745dfe1a07 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import AnymalBRoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=75, + nconmax=15, + cone="elliptic", + impratio=100, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class AnymalBFlatEnvCfg(AnymalBRoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py index dd11ad858479..cf3fc2c3f232 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg @@ -18,7 +19,7 @@ class AnymalBRoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() - # switch robot to anymal-d + # switch robot to anymal-b self.scene.robot = ANYMAL_B_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py index 76ccb79b48ab..54c1076625a1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import AnymalCRoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=120, + nconmax=15, + cone="elliptic", + impratio=100, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class AnymalCFlatEnvCfg(AnymalCRoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py index ed62e06fc944..8c4cbb7a84c3 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py @@ -3,9 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg +from isaaclab_tasks.utils import preset ## # Pre-defined configs @@ -20,6 +22,7 @@ def __post_init__(self): super().__post_init__() # switch robot to anymal-c self.scene.robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot.actuators["legs"].armature = preset(default=0.0, newton_mjwarp=0.01, physx=0.0) @configclass diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py index 7abab44fdb9b..9a213f9d0f64 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py @@ -3,13 +3,37 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import AnymalDRoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=25, + cone="elliptic", + impratio=100.0, + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py index c672dcacc0ce..c1c49677e66a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py index 5ca23455cd0e..e9539b6551c5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import CassieRoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=52, + nconmax=15, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class CassieFlatEnvCfg(CassieRoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py index 2a13f35213ca..d8a2ef0874dd 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py @@ -3,12 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.managers import SceneEntityCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp -from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg +from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) +from isaaclab_tasks.utils import preset ## # Pre-defined configs @@ -54,31 +59,33 @@ class CassieRoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): super().__post_init__() + + # biped yaw control is harder than quadruped — relax the per-episode-mean yaw + # threshold to 0.8 rad/s (defaults work for quadrupeds). + self.commands.base_velocity.vel_yaw_success_threshold = 0.8 # scene self.scene.robot = CASSIE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + # Cassie Newton-only armature for biped stability on rough terrain; PhysX unchanged + self.scene.robot.actuators["legs"].armature = preset(default=0.0, newton_mjwarp=0.02) + self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/pelvis" + # Cassie uses "pelvis" as base body. Override the shared symmetric + # (1/1.25, 1.25) log-uniform scale with asymmetric (1.0, 1.25) — + # lighter-than-nominal pelvis destabilizes Cassie's closed-loop + # Achilles coupling + hip PD response, so only heavier perturbations + # are safe. Symmetric ±25% regressed reward 40% vs disabled; + # (1.0, 1.25) recovers to 90% of baseline. + self.events.add_base_mass.params["asset_cfg"].body_names = "pelvis" + self.events.add_base_mass.params["mass_distribution_params"] = (1.0, 1.25) + self.events.base_com = None + self.events.base_external_force_torque.params["asset_cfg"].body_names = ".*pelvis" + # Cassie has precise initial pose — don't scale joint defaults randomly on reset + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + # actions self.actions.joint_pos.scale = 0.5 - # events - self.events.push_robot = None - self.events.add_base_mass = None - self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*pelvis"] - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } - self.events.base_com = None - # terminations self.terminations.base_contact.params["sensor_cfg"].body_names = [".*pelvis"] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/digit/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/digit/rough_env_cfg.py index 792e6f639479..aa0f433e4ecc 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/digit/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/digit/rough_env_cfg.py @@ -5,9 +5,10 @@ import math +from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.managers import ObservationGroupCfg, ObservationTermCfg, RewardTermCfg, SceneEntityCfg, TerminationTermCfg from isaaclab.utils import configclass -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg @@ -179,7 +180,7 @@ def __post_init__(self): @configclass -class TerminationsCfg: +class DigitTerminationsCfg: """Termination terms for the MDP.""" time_out = TerminationTermCfg(func=mdp.time_out, time_out=True) @@ -197,7 +198,7 @@ class TerminationsCfg: @configclass -class ActionsCfg: +class DigitActionsCfg: """Action specifications for the MDP.""" joint_pos = mdp.JointPositionActionCfg( @@ -212,8 +213,8 @@ class ActionsCfg: class DigitRoughEnvCfg(LocomotionVelocityRoughEnvCfg): rewards: DigitRewards = DigitRewards() observations: DigitObservations = DigitObservations() - terminations: TerminationsCfg = TerminationsCfg() - actions: ActionsCfg = ActionsCfg() + terminations: DigitTerminationsCfg = DigitTerminationsCfg() + actions: DigitActionsCfg = DigitActionsCfg() def __post_init__(self): super().__post_init__() @@ -227,13 +228,24 @@ def __post_init__(self): self.scene.contact_forces.update_period = self.sim.dt self.scene.height_scanner.update_period = self.decimation * self.sim.dt - # Events: - self.events.add_base_mass.params["asset_cfg"] = SceneEntityCfg("robot", body_names="torso_base") - self.events.base_external_force_torque.params["asset_cfg"] = SceneEntityCfg("robot", body_names="torso_base") - # Don't randomize the initial joint positions because we have closed loops. + # Digit uses "torso_base" as base body + self.events.add_base_mass.params["asset_cfg"].body_names = "torso_base" + self.events.base_external_force_torque.params["asset_cfg"].body_names = "torso_base" + self.events.base_com.default.params["asset_cfg"].body_names = "torso_base" + # Digit has precise initial pose — don't scale joint defaults randomly on reset self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - # remove COM randomization - self.events.base_com = None + + # Override actuator to target only actuated joints. Digit has ball joints (rod constraints) + # that MuJoCo represents with 4 DoFs instead of 3, inflating joint_pos to 74 columns while + # joint_pos_target stays at 64. Using ".*" gives slice(None) which indexes both buffers + # differently. Explicit joint names produce a concrete index tensor that works correctly. + self.scene.robot.actuators = { + "legs_arms": ImplicitActuatorCfg( + joint_names_expr=LEG_JOINT_NAMES + ARM_JOINT_NAMES, + stiffness=None, + damping=None, + ), + } # Commands self.commands.base_velocity.ranges.lin_vel_x = (-0.8, 0.8) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/rsl_rl_ppo_cfg.py index 61a6d0261b9f..7b61c184d353 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/rsl_rl_ppo_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/agents/rsl_rl_ppo_cfg.py @@ -7,11 +7,19 @@ from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg +from isaaclab_tasks.utils import preset + @configclass class G1RoughPPORunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 24 - max_iterations = 3000 + # Newton needs ~1.7x the PPO iterations to match PhysX on G1. PhysX saturates near iter 3000 + # (reward ≈ +18, ep_len ≈ 980) and does not meaningfully improve on either metric past that — + # reward oscillates +16 to +19 through iter 7500, ep_len stays flat. Newton reaches the same + # (reward, ep_len) quality at iter 5000 (+16 / 984). Comparing reward alone is misleading: + # ep_len confirms the robot is stable in both cases. The gap is sample-efficiency, not a + # ceiling — no physics or reward tuning closes it. + max_iterations = preset(default=3000, newton=5000) save_interval = 50 experiment_name = "g1_rough" policy = RslRlPpoActorCriticCfg( diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py index e8d3b5edc451..1df1a91447ea 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py @@ -3,14 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + from isaaclab.managers import SceneEntityCfg +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import G1RoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=95, + nconmax=10, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + + @configclass class G1FlatEnvCfg(G1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py index 04971c3d9f20..65dbb157c177 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py @@ -3,12 +3,16 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.managers import SceneEntityCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp -from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg +from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) ## # Pre-defined configs @@ -107,27 +111,20 @@ class G1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() + + # biped yaw control is harder than quadruped — relax the per-episode-mean yaw + # threshold to 0.8 rad/s (defaults work for quadrupeds). + self.commands.base_velocity.vel_yaw_success_threshold = 0.8 # Scene self.scene.robot = G1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/torso_link" - # Randomization - self.events.push_robot = None + # G1 uses "torso_link" as base body — disable mass randomization for bipeds self.events.add_base_mass = None - self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.base_external_force_torque.params["asset_cfg"].body_names = ["torso_link"] - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } self.events.base_com = None + self.events.base_external_force_torque.params["asset_cfg"].body_names = "torso_link" + # G1 has precise initial pose — don't scale joint defaults randomly on reset + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) # Rewards self.rewards.lin_vel_z_l2.weight = 0.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py index 760c1f5f5d07..03f62cd96642 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py @@ -3,13 +3,37 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import UnitreeGo1RoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=25, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + + @configclass class UnitreeGo1FlatEnvCfg(UnitreeGo1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py index 91efcc170245..e09ceee0d49d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py @@ -3,9 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg +from isaaclab_tasks.utils import preset ## # Pre-defined configs @@ -20,33 +22,20 @@ def __post_init__(self): super().__post_init__() self.scene.robot = UNITREE_GO1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot.actuators["base_legs"].armature = preset(default=0.0, newton_mjwarp=0.02) self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/trunk" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 - # reduce action scale - self.actions.joint_pos.scale = 0.25 - - # event - self.events.push_robot = None - self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0) + # Go1 uses "trunk" as base body self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" - self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } - self.events.base_com = None + self.events.base_com.default.params["asset_cfg"].body_names = "trunk" + + # reduce action scale + self.actions.joint_pos.scale = 0.25 # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py index 8bf8bb1373f9..9a1d481876d5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import UnitreeGo2RoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=65, + nconmax=35, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class UnitreeGo2FlatEnvCfg(UnitreeGo2RoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py index 69e6adddd042..758e9c4dbb09 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py @@ -3,9 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg +from isaaclab_tasks.utils import preset ## # Pre-defined configs @@ -20,6 +22,7 @@ def __post_init__(self): super().__post_init__() self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot.actuators["base_legs"].armature = preset(default=0.0, newton_mjwarp=0.02) self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base" # scale down the terrains because the robot is small self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) @@ -29,25 +32,6 @@ def __post_init__(self): # reduce action scale self.actions.joint_pos.scale = 0.25 - # event - self.events.push_robot = None - self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0) - self.events.add_base_mass.params["asset_cfg"].body_names = "base" - self.events.base_external_force_torque.params["asset_cfg"].body_names = "base" - self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } - self.events.base_com = None - # rewards self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" self.rewards.feet_air_time.weight = 0.01 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py index e9b9e2a1fa27..eb501ad12ba7 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py @@ -3,13 +3,38 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from .rough_env_cfg import H1RoughEnvCfg +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=65, + nconmax=15, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class H1FlatEnvCfg(H1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + def __post_init__(self): # post init of parent super().__post_init__() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py index 799a7b95cc40..167141c1747e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py @@ -3,12 +3,16 @@ # # SPDX-License-Identifier: BSD-3-Clause + from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.managers import SceneEntityCfg from isaaclab.utils import configclass import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp -from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg, RewardsCfg +from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) ## # Pre-defined configs @@ -74,28 +78,22 @@ class H1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() + + # biped yaw control is harder than quadruped — relax the per-episode-mean yaw + # threshold to 0.8 rad/s (defaults work for quadrupeds). + self.commands.base_velocity.vel_yaw_success_threshold = 0.8 # Scene self.scene.robot = H1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") if self.scene.height_scanner: self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/torso_link" - # Randomization - self.events.push_robot = None - self.events.add_base_mass = None + # H1 uses "torso_link" as base body; inherits the shared log-uniform mass + # randomization scale from EventsCfg (no per-H1 override needed). + self.events.add_base_mass.params["asset_cfg"].body_names = "torso_link" + # H1 has precise initial pose — don't scale joint defaults randomly on reset self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) - self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*torso_link"] - self.events.reset_base.params = { - "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, - "velocity_range": { - "x": (0.0, 0.0), - "y": (0.0, 0.0), - "z": (0.0, 0.0), - "roll": (0.0, 0.0), - "pitch": (0.0, 0.0), - "yaw": (0.0, 0.0), - }, - } self.events.base_com = None + self.events.base_external_force_torque.params["asset_cfg"].body_names = ".*torso_link" # Rewards self.rewards.undesired_contacts = None diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/flat_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/flat_env_cfg.py index 6bf334e24536..cffe129dca7a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/flat_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/flat_env_cfg.py @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils import isaaclab.terrains as terrain_gen from isaaclab.envs import ViewerCfg @@ -11,14 +14,33 @@ from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import RewardTermCfg, SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.sim import SimulationCfg from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.locomotion.velocity.config.spot.mdp as spot_mdp import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg +from isaaclab_tasks.utils import PresetCfg + + +@configclass +class PhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=45, + nconmax=30, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ## # Pre-defined configs @@ -107,31 +129,8 @@ def __post_init__(self): @configclass -class SpotEventCfg: - """Configuration for randomization.""" - - # startup - physics_material = EventTerm( - func=mdp.randomize_rigid_body_material, - mode="startup", - params={ - "asset_cfg": SceneEntityCfg("robot", body_names=".*"), - "static_friction_range": (0.3, 1.0), - "dynamic_friction_range": (0.3, 0.8), - "restitution_range": (0.0, 0.0), - "num_buckets": 64, - }, - ) - - add_base_mass = EventTerm( - func=mdp.randomize_rigid_body_mass, - mode="startup", - params={ - "asset_cfg": SceneEntityCfg("robot", body_names="body"), - "mass_distribution_params": (-2.5, 2.5), - "operation": "add", - }, - ) +class SpotNewtonEventCfg: + """Newton event configuration for Spot (reset + interval only).""" # reset base_external_force_torque = EventTerm( @@ -183,6 +182,46 @@ class SpotEventCfg: ) +@configclass +class SpotStartupEventCfg: + """PhysX-only startup randomization for Spot.""" + + # startup + physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=".*"), + "static_friction_range": (0.3, 1.0), + "dynamic_friction_range": (0.3, 0.8), + "restitution_range": (0.0, 0.0), + "num_buckets": 64, + }, + ) + + add_base_mass = EventTerm( + func=mdp.randomize_rigid_body_mass, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names="body"), + "mass_distribution_params": (-2.5, 2.5), + "operation": "add", + }, + ) + + +@configclass +class SpotPhysxEventCfg(SpotNewtonEventCfg, SpotStartupEventCfg): + pass + + +@configclass +class SpotEventCfg(PresetCfg): + default = SpotPhysxEventCfg() + newton_mjwarp = SpotNewtonEventCfg() + physx = default + + @configclass class SpotRewardsCfg: # -- task @@ -297,6 +336,8 @@ class SpotTerminationsCfg: class SpotFlatEnvCfg(LocomotionVelocityRoughEnvCfg): """Configuration for the Spot robot in a flat environment.""" + sim: SimulationCfg = SimulationCfg(physics=PhysicsCfg()) + # Basic settings observations: SpotObservationsCfg = SpotObservationsCfg() actions: SpotActionsCfg = SpotActionsCfg() @@ -324,6 +365,7 @@ def __post_init__(self): self.sim.physics_material.dynamic_friction = 1.0 self.sim.physics_material.friction_combine_mode = "multiply" self.sim.physics_material.restitution_combine_mode = "multiply" + self.sim.physics = PhysicsCfg() # update sensor update periods # we tick all the sensors based on the smallest update period (physics update period) self.scene.contact_forces.update_period = self.sim.dt diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.py index cf460b5f33fe..7eb44219ce00 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.py @@ -6,5 +6,6 @@ """This sub-module contains the functions that are specific to the Spot locomotion task.""" -from .events import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.pyi new file mode 100644 index 000000000000..c303f58c75bd --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/__init__.pyi @@ -0,0 +1,40 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "reset_joints_around_default", + "GaitReward", + "action_smoothness_penalty", + "air_time_reward", + "air_time_variance_penalty", + "base_angular_velocity_reward", + "base_linear_velocity_reward", + "base_motion_penalty", + "base_orientation_penalty", + "foot_clearance_reward", + "foot_slip_penalty", + "joint_acceleration_penalty", + "joint_position_penalty", + "joint_torques_penalty", + "joint_velocity_penalty", +] + +from .events import reset_joints_around_default +from .rewards import ( + GaitReward, + action_smoothness_penalty, + air_time_reward, + air_time_variance_penalty, + base_angular_velocity_reward, + base_linear_velocity_reward, + base_motion_penalty, + base_orientation_penalty, + foot_clearance_reward, + foot_slip_penalty, + joint_acceleration_penalty, + joint_position_penalty, + joint_torques_penalty, + joint_velocity_penalty, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/events.py index b1a47934d95e..eb9929ada4e1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/events.py @@ -16,11 +16,11 @@ import torch -from isaaclab.assets import Articulation from isaaclab.managers import SceneEntityCfg from isaaclab.utils.math import sample_uniform if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedEnv @@ -40,20 +40,21 @@ def reset_joints_around_default( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # get default joint state - joint_min_pos = asset.data.default_joint_pos[env_ids] + position_range[0] - joint_max_pos = asset.data.default_joint_pos[env_ids] + position_range[1] - joint_min_vel = asset.data.default_joint_vel[env_ids] + velocity_range[0] - joint_max_vel = asset.data.default_joint_vel[env_ids] + velocity_range[1] + joint_min_pos = asset.data.default_joint_pos.torch[env_ids] + position_range[0] + joint_max_pos = asset.data.default_joint_pos.torch[env_ids] + position_range[1] + joint_min_vel = asset.data.default_joint_vel.torch[env_ids] + velocity_range[0] + joint_max_vel = asset.data.default_joint_vel.torch[env_ids] + velocity_range[1] # clip pos to range - joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids, ...] + joint_pos_limits = asset.data.soft_joint_pos_limits.torch[env_ids, ...] joint_min_pos = torch.clamp(joint_min_pos, min=joint_pos_limits[..., 0], max=joint_pos_limits[..., 1]) joint_max_pos = torch.clamp(joint_max_pos, min=joint_pos_limits[..., 0], max=joint_pos_limits[..., 1]) # clip vel to range - joint_vel_abs_limits = asset.data.soft_joint_vel_limits[env_ids] + joint_vel_abs_limits = asset.data.soft_joint_vel_limits.torch[env_ids] joint_min_vel = torch.clamp(joint_min_vel, min=-joint_vel_abs_limits, max=joint_vel_abs_limits) joint_max_vel = torch.clamp(joint_max_vel, min=-joint_vel_abs_limits, max=joint_vel_abs_limits) # sample these values randomly joint_pos = sample_uniform(joint_min_pos, joint_max_pos, joint_min_pos.shape, joint_min_pos.device) joint_vel = sample_uniform(joint_min_vel, joint_max_vel, joint_min_vel.shape, joint_min_vel.device) # set into the physics simulation - asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + asset.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + asset.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/rewards.py index 05680e437355..5cb88e0420aa 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/spot/mdp/rewards.py @@ -15,13 +15,13 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import ManagerTermBase, SceneEntityCfg -from isaaclab.sensors import ContactSensor if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers import RewardTermCfg + from isaaclab.sensors import ContactSensor ## @@ -43,14 +43,14 @@ def air_time_reward( if contact_sensor.cfg.track_air_time is False: raise RuntimeError("Activate ContactSensor's track_air_time!") # compute the reward - current_air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids] - current_contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids] + current_air_time = contact_sensor.data.current_air_time.torch[:, sensor_cfg.body_ids] + current_contact_time = contact_sensor.data.current_contact_time.torch[:, sensor_cfg.body_ids] t_max = torch.max(current_air_time, current_contact_time) t_min = torch.clip(t_max, max=mode_time) stance_cmd_reward = torch.clip(current_contact_time - current_air_time, -mode_time, mode_time) - cmd = torch.norm(env.command_manager.get_command("base_velocity"), dim=1).unsqueeze(dim=1).expand(-1, 4) - body_vel = torch.linalg.norm(asset.data.root_lin_vel_b[:, :2], dim=1).unsqueeze(dim=1).expand(-1, 4) + cmd = torch.linalg.norm(env.command_manager.get_command("base_velocity"), dim=1).unsqueeze(dim=1).expand(-1, 4) + body_vel = torch.linalg.norm(asset.data.root_lin_vel_b.torch[:, :2], dim=1).unsqueeze(dim=1).expand(-1, 4) reward = torch.where( torch.logical_or(cmd > 0.0, body_vel > velocity_threshold), torch.where(t_max < mode_time, t_min, 0), @@ -65,7 +65,7 @@ def base_angular_velocity_reward(env: ManagerBasedRLEnv, asset_cfg: SceneEntityC asset: RigidObject = env.scene[asset_cfg.name] # compute the error target = env.command_manager.get_command("base_velocity")[:, 2] - ang_vel_error = torch.linalg.norm((target - asset.data.root_ang_vel_b[:, 2]).unsqueeze(1), dim=1) + ang_vel_error = torch.linalg.norm((target - asset.data.root_ang_vel_b.torch[:, 2]).unsqueeze(1), dim=1) return torch.exp(-ang_vel_error / std) @@ -77,7 +77,7 @@ def base_linear_velocity_reward( asset: RigidObject = env.scene[asset_cfg.name] # compute the error target = env.command_manager.get_command("base_velocity")[:, :2] - lin_vel_error = torch.linalg.norm((target - asset.data.root_lin_vel_b[:, :2]), dim=1) + lin_vel_error = torch.linalg.norm((target - asset.data.root_lin_vel_b.torch[:, :2]), dim=1) # fixed 1.0 multiple for tracking below the ramp_at_vel value, then scale by the rate above vel_cmd_magnitude = torch.linalg.norm(target, dim=1) velocity_scaling_multiple = torch.clamp(1.0 + ramp_rate * (vel_cmd_magnitude - ramp_at_vel), min=1.0) @@ -114,8 +114,8 @@ def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): or len(synced_feet_pair_names[1]) != 2 ): raise ValueError("This reward only supports gaits with two pairs of synchronized feet, like trotting.") - synced_feet_pair_0 = self.contact_sensor.find_bodies(synced_feet_pair_names[0])[0] - synced_feet_pair_1 = self.contact_sensor.find_bodies(synced_feet_pair_names[1])[0] + synced_feet_pair_0 = self.contact_sensor.find_sensors(synced_feet_pair_names[0])[0] + synced_feet_pair_1 = self.contact_sensor.find_sensors(synced_feet_pair_names[1])[0] self.synced_feet_pairs = [synced_feet_pair_0, synced_feet_pair_1] def __call__( @@ -149,8 +149,8 @@ def __call__( async_reward_3 = self._async_reward_func(self.synced_feet_pairs[1][0], self.synced_feet_pairs[0][1]) async_reward = async_reward_0 * async_reward_1 * async_reward_2 * async_reward_3 # only enforce gait if cmd > 0 - cmd = torch.norm(env.command_manager.get_command("base_velocity"), dim=1) - body_vel = torch.linalg.norm(self.asset.data.root_lin_vel_b[:, :2], dim=1) + cmd = torch.linalg.norm(env.command_manager.get_command("base_velocity"), dim=1) + body_vel = torch.linalg.norm(self.asset.data.root_lin_vel_b.torch[:, :2], dim=1) return torch.where( torch.logical_or(cmd > 0.0, body_vel > self.velocity_threshold), sync_reward * async_reward, 0.0 ) @@ -161,8 +161,8 @@ def __call__( def _sync_reward_func(self, foot_0: int, foot_1: int) -> torch.Tensor: """Reward synchronization of two feet.""" - air_time = self.contact_sensor.data.current_air_time - contact_time = self.contact_sensor.data.current_contact_time + air_time = self.contact_sensor.data.current_air_time.torch + contact_time = self.contact_sensor.data.current_contact_time.torch # penalize the difference between the most recent air time and contact time of synced feet pairs. se_air = torch.clip(torch.square(air_time[:, foot_0] - air_time[:, foot_1]), max=self.max_err**2) se_contact = torch.clip(torch.square(contact_time[:, foot_0] - contact_time[:, foot_1]), max=self.max_err**2) @@ -170,8 +170,8 @@ def _sync_reward_func(self, foot_0: int, foot_1: int) -> torch.Tensor: def _async_reward_func(self, foot_0: int, foot_1: int) -> torch.Tensor: """Reward anti-synchronization of two feet.""" - air_time = self.contact_sensor.data.current_air_time - contact_time = self.contact_sensor.data.current_contact_time + air_time = self.contact_sensor.data.current_air_time.torch + contact_time = self.contact_sensor.data.current_contact_time.torch # penalize the difference between opposing contact modes air time of feet 1 to contact time of feet 2 # and contact time of feet 1 to air time of feet 2) of feet pairs that are not in sync with each other. se_act_0 = torch.clip(torch.square(air_time[:, foot_0] - contact_time[:, foot_1]), max=self.max_err**2) @@ -184,8 +184,10 @@ def foot_clearance_reward( ) -> torch.Tensor: """Reward the swinging feet for clearing a specified height off the ground""" asset: RigidObject = env.scene[asset_cfg.name] - foot_z_target_error = torch.square(asset.data.body_pos_w[:, asset_cfg.body_ids, 2] - target_height) - foot_velocity_tanh = torch.tanh(tanh_mult * torch.norm(asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2], dim=2)) + foot_z_target_error = torch.square(asset.data.body_pos_w.torch[:, asset_cfg.body_ids, 2] - target_height) + foot_velocity_tanh = torch.tanh( + tanh_mult * torch.linalg.norm(asset.data.body_lin_vel_w.torch[:, asset_cfg.body_ids, :2], dim=2) + ) reward = foot_z_target_error * foot_velocity_tanh return torch.exp(-torch.sum(reward, dim=1) / std) @@ -207,8 +209,8 @@ def air_time_variance_penalty(env: ManagerBasedRLEnv, sensor_cfg: SceneEntityCfg if contact_sensor.cfg.track_air_time is False: raise RuntimeError("Activate ContactSensor's track_air_time!") # compute the reward - last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids] - last_contact_time = contact_sensor.data.last_contact_time[:, sensor_cfg.body_ids] + last_air_time = contact_sensor.data.last_air_time.torch[:, sensor_cfg.body_ids] + last_contact_time = contact_sensor.data.last_contact_time.torch[:, sensor_cfg.body_ids] return torch.var(torch.clip(last_air_time, max=0.5), dim=1) + torch.var( torch.clip(last_contact_time, max=0.5), dim=1 ) @@ -219,8 +221,8 @@ def base_motion_penalty(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> to """Penalize base vertical and roll/pitch velocity""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return 0.8 * torch.square(asset.data.root_lin_vel_b[:, 2]) + 0.2 * torch.sum( - torch.abs(asset.data.root_ang_vel_b[:, :2]), dim=1 + return 0.8 * torch.square(asset.data.root_lin_vel_b.torch[:, 2]) + 0.2 * torch.sum( + torch.abs(asset.data.root_ang_vel_b.torch[:, :2]), dim=1 ) @@ -231,7 +233,7 @@ def base_orientation_penalty(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) """ # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return torch.linalg.norm((asset.data.projected_gravity_b[:, :2]), dim=1) + return torch.linalg.norm((asset.data.projected_gravity_b.torch[:, :2]), dim=1) def foot_slip_penalty( @@ -243,9 +245,11 @@ def foot_slip_penalty( contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # check if contact force is above threshold - net_contact_forces = contact_sensor.data.net_forces_w_history - is_contact = torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold - foot_planar_velocity = torch.linalg.norm(asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2], dim=2) + net_contact_forces = contact_sensor.data.net_forces_w_history.torch + is_contact = ( + torch.max(torch.linalg.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold + ) + foot_planar_velocity = torch.linalg.norm(asset.data.body_lin_vel_w.torch[:, asset_cfg.body_ids, :2], dim=2) reward = is_contact * foot_planar_velocity return torch.sum(reward, dim=1) @@ -255,7 +259,7 @@ def joint_acceleration_penalty(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg """Penalize joint accelerations on the articulation.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.linalg.norm((asset.data.joint_acc), dim=1) + return torch.linalg.norm((asset.data.joint_acc.torch), dim=1) def joint_position_penalty( @@ -265,8 +269,8 @@ def joint_position_penalty( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] cmd = torch.linalg.norm(env.command_manager.get_command("base_velocity"), dim=1) - body_vel = torch.linalg.norm(asset.data.root_lin_vel_b[:, :2], dim=1) - reward = torch.linalg.norm((asset.data.joint_pos - asset.data.default_joint_pos), dim=1) + body_vel = torch.linalg.norm(asset.data.root_lin_vel_b.torch[:, :2], dim=1) + reward = torch.linalg.norm((asset.data.joint_pos.torch - asset.data.default_joint_pos.torch), dim=1) return torch.where(torch.logical_or(cmd > 0.0, body_vel > velocity_threshold), reward, stand_still_scale * reward) @@ -274,11 +278,11 @@ def joint_torques_penalty(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> """Penalize joint torques on the articulation.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.linalg.norm((asset.data.applied_torque), dim=1) + return torch.linalg.norm((asset.data.applied_torque.torch), dim=1) def joint_velocity_penalty(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: """Penalize joint velocities on the articulation.""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] - return torch.linalg.norm((asset.data.joint_vel), dim=1) + return torch.linalg.norm((asset.data.joint_vel.torch), dim=1) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.py index 6f6cad007128..8f9a146abdc8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.py @@ -5,8 +5,6 @@ """This sub-module contains the functions that are specific to the locomotion environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .curriculums import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.pyi new file mode 100644 index 000000000000..653328b4faaf --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/__init__.pyi @@ -0,0 +1,27 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "terrain_levels_vel", + "feet_air_time", + "feet_air_time_positive_biped", + "feet_slide", + "stand_still_joint_deviation_l1", + "track_ang_vel_z_world_exp", + "track_lin_vel_xy_yaw_frame_exp", + "terrain_out_of_bounds", +] + +from .curriculums import terrain_levels_vel +from .rewards import ( + feet_air_time, + feet_air_time_positive_biped, + feet_slide, + stand_still_joint_deviation_l1, + track_ang_vel_z_world_exp, + track_lin_vel_xy_yaw_frame_exp, +) +from .terminations import terrain_out_of_bounds +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/curriculums.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/curriculums.py index 88187a6b816b..1438cbf03801 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/curriculums.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/curriculums.py @@ -16,12 +16,12 @@ import torch -from isaaclab.assets import Articulation from isaaclab.managers import SceneEntityCfg -from isaaclab.terrains import TerrainImporter if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.terrains import TerrainImporter def terrain_levels_vel( @@ -44,11 +44,11 @@ def terrain_levels_vel( terrain: TerrainImporter = env.scene.terrain command = env.command_manager.get_command("base_velocity") # compute the distance the robot walked - distance = torch.norm(asset.data.root_pos_w[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) + distance = torch.linalg.norm(asset.data.root_pos_w.torch[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) # robots that walked far enough progress to harder terrains move_up = distance > terrain.cfg.terrain_generator.size[0] / 2 # robots that walked less than half of their required distance go to simpler terrains - move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 + move_down = distance < torch.linalg.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 move_down *= ~move_up # update terrain levels terrain.update_env_origins(env_ids, move_up, move_down) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/rewards.py index f804aa6884c5..a5168f6c95d9 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/rewards.py @@ -17,11 +17,11 @@ from isaaclab.envs import mdp from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import ContactSensor from isaaclab.utils.math import quat_apply_inverse, yaw_quat if TYPE_CHECKING: from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import ContactSensor def feet_air_time( @@ -38,11 +38,11 @@ def feet_air_time( # extract the used quantities (to enable type-hinting) contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward - first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids] - last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids] + first_contact = contact_sensor.compute_first_contact(env.step_dt).torch[:, sensor_cfg.body_ids] + last_air_time = contact_sensor.data.last_air_time.torch[:, sensor_cfg.body_ids] reward = torch.sum((last_air_time - threshold) * first_contact, dim=1) # no reward for zero command - reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 + reward *= torch.linalg.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward @@ -56,15 +56,15 @@ def feet_air_time_positive_biped(env, command_name: str, threshold: float, senso """ contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] # compute the reward - air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids] - contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids] + air_time = contact_sensor.data.current_air_time.torch[:, sensor_cfg.body_ids] + contact_time = contact_sensor.data.current_contact_time.torch[:, sensor_cfg.body_ids] in_contact = contact_time > 0.0 in_mode_time = torch.where(in_contact, contact_time, air_time) single_stance = torch.sum(in_contact.int(), dim=1) == 1 reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0] reward = torch.clamp(reward, max=threshold) # no reward for zero command - reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 + reward *= torch.linalg.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1 return reward @@ -77,10 +77,12 @@ def feet_slide(env, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = Scen """ # Penalize feet sliding contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] - contacts = contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] > 1.0 + contacts = ( + contact_sensor.data.net_forces_w_history.torch[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] > 1.0 + ) asset = env.scene[asset_cfg.name] - body_vel = asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2] + body_vel = asset.data.body_lin_vel_w.torch[:, asset_cfg.body_ids, :2] reward = torch.sum(body_vel.norm(dim=-1) * contacts, dim=1) return reward @@ -93,7 +95,7 @@ def track_lin_vel_xy_yaw_frame_exp( """ # extract the used quantities (to enable type-hinting) asset = env.scene[asset_cfg.name] - vel_yaw = quat_apply_inverse(yaw_quat(asset.data.root_quat_w), asset.data.root_lin_vel_w[:, :3]) + vel_yaw = quat_apply_inverse(yaw_quat(asset.data.root_quat_w.torch), asset.data.root_lin_vel_w.torch[:, :3]) lin_vel_error = torch.sum( torch.square(env.command_manager.get_command(command_name)[:, :2] - vel_yaw[:, :2]), dim=1 ) @@ -106,7 +108,9 @@ def track_ang_vel_z_world_exp( """Reward tracking of angular velocity commands (yaw) in world frame using exponential kernel.""" # extract the used quantities (to enable type-hinting) asset = env.scene[asset_cfg.name] - ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_w[:, 2]) + ang_vel_error = torch.square( + env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_w.torch[:, 2] + ) return torch.exp(-ang_vel_error / std**2) @@ -116,4 +120,4 @@ def stand_still_joint_deviation_l1( """Penalize offsets from the default joint positions when the command is very small.""" command = env.command_manager.get_command(command_name) # Penalize motion when command is nearly zero. - return mdp.joint_deviation_l1(env, asset_cfg) * (torch.norm(command[:, :2], dim=1) < command_threshold) + return mdp.joint_deviation_l1(env, asset_cfg) * (torch.linalg.norm(command[:, :2], dim=1) < command_threshold) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/terminations.py index 6c037d01ea51..5559250d9555 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/mdp/terminations.py @@ -15,10 +15,10 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -47,8 +47,8 @@ def terrain_out_of_bounds( asset: RigidObject = env.scene[asset_cfg.name] # check if the agent is out of bounds - x_out_of_bounds = torch.abs(asset.data.root_pos_w[:, 0]) > 0.5 * map_width - distance_buffer - y_out_of_bounds = torch.abs(asset.data.root_pos_w[:, 1]) > 0.5 * map_height - distance_buffer + x_out_of_bounds = torch.abs(asset.data.root_pos_w.torch[:, 0]) > 0.5 * map_width - distance_buffer + y_out_of_bounds = torch.abs(asset.data.root_pos_w.torch[:, 1]) > 0.5 * map_height - distance_buffer return torch.logical_or(x_out_of_bounds, y_out_of_bounds) else: raise ValueError("Received unsupported terrain type, must be either 'plane' or 'generator'.") diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py index d7094e777014..7ee1d811734b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py @@ -6,6 +6,11 @@ import math from dataclasses import MISSING +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg, NewtonCollisionPipelineCfg, NewtonShapeCfg +from isaaclab_newton.sensors import ContactSensorCfg as NewtonContactSensorCfg +from isaaclab_physx.physics import PhysxCfg +from isaaclab_physx.sensors import ContactSensorCfg as PhysXContactSensorCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -17,13 +22,15 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sensors import ContactSensorCfg, RayCasterCfg, patterns +from isaaclab.sensors import RayCasterCfg, patterns +from isaaclab.sim import SimulationCfg from isaaclab.terrains import TerrainImporterCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.locomotion.velocity.mdp as mdp +from isaaclab_tasks.utils import PresetCfg, preset ## # Pre-defined configs @@ -31,11 +38,48 @@ from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip +## +# Physics presets +## + + +@configclass +class RoughPhysicsCfg(PresetCfg): + """Shared physics preset for all rough-terrain locomotion envs.""" + + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=200, + nconmax=100, + cone="pyramidal", + impratio=1.0, + integrator="implicitfast", + use_mujoco_contacts=False, + ), + collision_cfg=NewtonCollisionPipelineCfg(max_triangle_pairs=2_500_000), + num_substeps=1, + debug_mode=False, + # 1 cm shape margin is the single most important Newton setting for rough + # terrain — without it, non-Anymal-D robots fail to learn stable contact + # on triangle-mesh terrain. See isaaclab_newton 0.5.22 changelog. + default_shape_cfg=NewtonShapeCfg(margin=0.01), + ) + physx = default + + ## # Scene definition ## +@configclass +class VelocityEnvContactSensorCfg(PresetCfg): + default = PhysXContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True) + newton_mjwarp = NewtonContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True) + physx = default + + @configclass class MySceneCfg(InteractiveSceneCfg): """Configuration for the terrain scene with a legged robot.""" @@ -71,7 +115,7 @@ class MySceneCfg(InteractiveSceneCfg): debug_vis=False, mesh_prim_paths=["/World/ground"], ) - contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True) + contact_forces = VelocityEnvContactSensorCfg() # lights sky_light = AssetBaseCfg( prim_path="/World/skyLight", @@ -147,7 +191,7 @@ def __post_init__(self): @configclass -class EventCfg: +class EventsCfg: """Configuration for events.""" # startup @@ -168,18 +212,25 @@ class EventCfg: mode="startup", params={ "asset_cfg": SceneEntityCfg("robot", body_names="base"), - "mass_distribution_params": (-5.0, 5.0), - "operation": "add", + # Multiplicative ±25% log-uniform. Scale-invariant across robot sizes + # (no per-robot kg overrides needed) with geometric mean 1.0 and + # symmetric inverse perturbation (acceleration symmetric around nominal). + "mass_distribution_params": (1 / 1.25, 1.25), + "operation": "scale", + "distribution": "log_uniform", }, ) - base_com = EventTerm( - func=mdp.randomize_rigid_body_com, - mode="startup", - params={ - "asset_cfg": SceneEntityCfg("robot", body_names="base"), - "com_range": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "z": (-0.01, 0.01)}, - }, + base_com = preset( + default=EventTerm( + func=mdp.randomize_rigid_body_com, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names="base"), + "com_range": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "z": (-0.01, 0.01)}, + }, + ), + newton_mjwarp=None, ) # reset @@ -290,6 +341,8 @@ class CurriculumCfg: class LocomotionVelocityRoughEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the locomotion velocity-tracking environment.""" + # Simulation settings — shared physics preset (PhysX + MJWarp) for all rough-terrain envs + sim: SimulationCfg = SimulationCfg(physics=RoughPhysicsCfg()) # Scene settings scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5) # Basic settings @@ -299,7 +352,7 @@ class LocomotionVelocityRoughEnvCfg(ManagerBasedRLEnvCfg): # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() - events: EventCfg = EventCfg() + events: EventsCfg = EventsCfg() curriculum: CurriculumCfg = CurriculumCfg() def __post_init__(self): @@ -311,7 +364,6 @@ def __post_init__(self): self.sim.dt = 0.005 self.sim.render_interval = self.decimation self.sim.physics_material = self.scene.terrain.physics_material - self.sim.physx.gpu_max_rigid_patch_count = 10 * 2**15 # update sensor update periods # we tick all the sensors based on the smallest update period (physics update period) if self.scene.height_scanner is not None: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/__init__.py index eaf0b09fbb66..23764f265a5f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/__init__.py @@ -4,5 +4,3 @@ # SPDX-License-Identifier: BSD-3-Clause """Manipulation environments for fixed-arm robots.""" - -from .reach import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/cabinet_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/cabinet_env_cfg.py index 85b7e5ae9ba8..43601c83b95e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/cabinet_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/cabinet_env_cfg.py @@ -6,6 +6,9 @@ from dataclasses import MISSING +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg, AssetBaseCfg @@ -19,9 +22,12 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.sensors import FrameTransformerCfg from isaaclab.sensors.frame_transformer import OffsetCfg +from isaaclab.sim import SimulationCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab_tasks.utils import PresetCfg + from . import mdp ## @@ -34,6 +40,41 @@ FRAME_MARKER_SMALL_CFG.markers["frame"].scale = (0.10, 0.10, 0.10) +@configclass +class CabinetSimCfg(PresetCfg): + """Simulation configuration presets for the cabinet environment. + + Wraps the full :class:`~isaaclab.sim.SimulationCfg` so that Newton can run at a + finer physics timestep (1/200 s) while PhysX keeps its default (1/60 s). + """ + + default: SimulationCfg = SimulationCfg( + dt=1 / 60, + render_interval=1, + physics=PhysxCfg(bounce_threshold_velocity=0.01, friction_correlation_distance=0.00625), + ) + physx: SimulationCfg = SimulationCfg( + dt=1 / 60, + render_interval=1, + physics=PhysxCfg(bounce_threshold_velocity=0.01, friction_correlation_distance=0.00625), + ) + newton_mjwarp: SimulationCfg = SimulationCfg( + dt=1 / 600, + render_interval=1, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=90, + nconmax=100, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ), + num_substeps=1, + debug_mode=False, + ), + ) + + ## # Scene definition ## @@ -60,7 +101,7 @@ class CabinetSceneCfg(InteractiveSceneCfg): ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.8, 0, 0.4), - rot=(0.0, 0.0, 0.0, 1.0), + rot=(0.0, 0.0, 1.0, 0.0), joint_pos={ "door_left_joint": 0.0, "door_right_joint": 0.0, @@ -95,7 +136,7 @@ class CabinetSceneCfg(InteractiveSceneCfg): name="drawer_handle_top", offset=OffsetCfg( pos=(0.305, 0.0, 0.01), - rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame + rot=(0.5, -0.5, -0.5, 0.5), # align with end-effector frame ), ), ], @@ -199,6 +240,29 @@ class EventCfg: ) +@configclass +class _CabinetNewtonEventCfg: + """Newton-compatible events: excludes material randomization (not implemented in Newton).""" + + reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") + + reset_robot_joints = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "position_range": (-0.1, 0.1), + "velocity_range": (0.0, 0.0), + }, + ) + + +@configclass +class CabinetEventCfg(PresetCfg): + default: EventCfg = EventCfg() + physx: EventCfg = EventCfg() + newton_mjwarp: _CabinetNewtonEventCfg = _CabinetNewtonEventCfg() + + @configclass class RewardsCfg: """Reward terms for the MDP.""" @@ -221,10 +285,16 @@ class RewardsCfg: ) # 3. Open the drawer + # ``open_drawer_bonus`` doubles as the success metric host: passing ``success_threshold`` + # tells the term to flip a sticky per-env bit when the drawer crosses that joint position + # and to log the per-env mean as ``Metrics/success_rate`` on episode reset. open_drawer_bonus = RewTerm( func=mdp.open_drawer_bonus, weight=7.5, - params={"asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"])}, + params={ + "asset_cfg": SceneEntityCfg("cabinet", joint_names=["drawer_top_joint"]), + "success_threshold": 0.30, + }, ) multi_stage_open_drawer = RewTerm( func=mdp.multi_stage_open_drawer, @@ -253,6 +323,9 @@ class TerminationsCfg: class CabinetEnvCfg(ManagerBasedRLEnvCfg): """Configuration for the cabinet environment.""" + # Sim settings — override base-class SimulationCfg with a preset-aware wrapper so that + # Newton can use dt=1/200 while PhysX keeps dt=1/60. + sim: CabinetSimCfg = CabinetSimCfg() # Scene settings scene: CabinetSceneCfg = CabinetSceneCfg(num_envs=4096, env_spacing=2.0) # Basic settings @@ -261,7 +334,7 @@ class CabinetEnvCfg(ManagerBasedRLEnvCfg): # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() - events: EventCfg = EventCfg() + events: CabinetEventCfg = CabinetEventCfg() def __post_init__(self): """Post initialization.""" @@ -270,9 +343,4 @@ def __post_init__(self): self.episode_length_s = 8.0 self.viewer.eye = (-2.0, 2.0, 2.0) self.viewer.lookat = (0.8, 0.0, 0.5) - # simulation settings - self.sim.dt = 1 / 60 # 60Hz - self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.friction_correlation_distance = 0.00625 + # simulation settings are defined in CabinetSimCfg (dt/physics vary per backend) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/openarm/cabinet_openarm_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/openarm/cabinet_openarm_env_cfg.py index 6e3eecb59382..8adcddcb8b0f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/openarm/cabinet_openarm_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/config/openarm/cabinet_openarm_env_cfg.py @@ -10,8 +10,10 @@ from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils -from isaaclab.actuators.actuator_cfg import ImplicitActuatorCfg +from isaaclab.actuators import ImplicitActuatorCfg from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -63,7 +65,7 @@ class CabinetSceneCfg(InteractiveSceneCfg): ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.7, 0, 0.3), - rot=(0.0, 0.0, 0.0, 1.0), + rot=(0.0, 0.0, 1.0, 0.0), joint_pos={ "door_left_joint": 0.0, "door_right_joint": 0.0, @@ -100,7 +102,7 @@ class CabinetSceneCfg(InteractiveSceneCfg): name="drawer_handle_bottom", offset=OffsetCfg( pos=(0.222, 0.0, 0.005), - rot=(0.5, 0.5, -0.5, -0.5), # align with end-effector frame + rot=(0.5, -0.5, -0.5, 0.5), # align with end-effector frame ), ), ], @@ -278,5 +280,7 @@ def __post_init__(self): # simulation settings self.sim.dt = 1 / 60 # 60Hz self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + friction_correlation_distance=0.00625, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.py index 79a9af2f736b..3feb2d711bc5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.py @@ -5,7 +5,6 @@ """This sub-module contains the functions that are specific to the cabinet environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.pyi new file mode 100644 index 000000000000..40e046d8d22e --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/__init__.pyi @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ee_pos", + "ee_quat", + "fingertips_pos", + "rel_ee_drawer_distance", + "rel_ee_object_distance", + "align_ee_handle", + "align_grasp_around_handle", + "approach_ee_handle", + "approach_gripper_handle", + "grasp_handle", + "multi_stage_open_drawer", + "open_drawer_bonus", +] + +from .observations import ( + ee_pos, + ee_quat, + fingertips_pos, + rel_ee_drawer_distance, + rel_ee_object_distance, +) +from .rewards import ( + align_ee_handle, + align_grasp_around_handle, + approach_ee_handle, + approach_gripper_handle, + grasp_handle, + multi_stage_open_drawer, + open_drawer_bonus, +) +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/observations.py index 66fb8bb38e97..1da8557cc0ca 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/observations.py @@ -10,11 +10,11 @@ import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import ArticulationData -from isaaclab.sensors import FrameTransformerData if TYPE_CHECKING: + from isaaclab.assets import ArticulationData from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import FrameTransformerData def rel_ee_object_distance(env: ManagerBasedRLEnv) -> torch.Tensor: @@ -22,7 +22,7 @@ def rel_ee_object_distance(env: ManagerBasedRLEnv) -> torch.Tensor: ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data object_data: ArticulationData = env.scene["object"].data - return object_data.root_pos_w - ee_tf_data.target_pos_w[..., 0, :] + return object_data.root_pos_w.torch - ee_tf_data.target_pos_w.torch[..., 0, :] def rel_ee_drawer_distance(env: ManagerBasedRLEnv) -> torch.Tensor: @@ -30,13 +30,13 @@ def rel_ee_drawer_distance(env: ManagerBasedRLEnv) -> torch.Tensor: ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data cabinet_tf_data: FrameTransformerData = env.scene["cabinet_frame"].data - return cabinet_tf_data.target_pos_w[..., 0, :] - ee_tf_data.target_pos_w[..., 0, :] + return cabinet_tf_data.target_pos_w.torch[..., 0, :] - ee_tf_data.target_pos_w.torch[..., 0, :] def fingertips_pos(env: ManagerBasedRLEnv) -> torch.Tensor: """The position of the fingertips relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data - fingertips_pos = ee_tf_data.target_pos_w[..., 1:, :] - env.scene.env_origins.unsqueeze(1) + fingertips_pos = ee_tf_data.target_pos_w.torch[..., 1:, :] - env.scene.env_origins.unsqueeze(1) return fingertips_pos.view(env.num_envs, -1) @@ -44,7 +44,7 @@ def fingertips_pos(env: ManagerBasedRLEnv) -> torch.Tensor: def ee_pos(env: ManagerBasedRLEnv) -> torch.Tensor: """The position of the end-effector relative to the environment origins.""" ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data - ee_pos = ee_tf_data.target_pos_w[..., 0, :] - env.scene.env_origins + ee_pos = ee_tf_data.target_pos_w.torch[..., 0, :] - env.scene.env_origins return ee_pos @@ -55,6 +55,6 @@ def ee_quat(env: ManagerBasedRLEnv, make_quat_unique: bool = True) -> torch.Tens If :attr:`make_quat_unique` is True, the quaternion is made unique by ensuring the real part is positive. """ ee_tf_data: FrameTransformerData = env.scene["ee_frame"].data - ee_quat = ee_tf_data.target_quat_w[..., 0, :] + ee_quat = ee_tf_data.target_quat_w.torch[..., 0, :] # make first element of quaternion positive return math_utils.quat_unique(ee_quat) if make_quat_unique else ee_quat diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/rewards.py index 433a2a87732a..3847894db46a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/cabinet/mdp/rewards.py @@ -9,7 +9,7 @@ import torch -from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg from isaaclab.utils.math import matrix_from_quat if TYPE_CHECKING: @@ -29,11 +29,11 @@ def approach_ee_handle(env: ManagerBasedRLEnv, threshold: float) -> torch.Tensor \end{cases} """ - ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] - handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] + ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w.torch[..., 0, :] + handle_pos = env.scene["cabinet_frame"].data.target_pos_w.torch[..., 0, :] # Compute the distance of the end-effector to the handle - distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) + distance = torch.linalg.norm(handle_pos - ee_tcp_pos, dim=-1, ord=2) # Reward the robot for reaching the handle reward = 1.0 / (1.0 + distance**2) @@ -53,8 +53,8 @@ def align_ee_handle(env: ManagerBasedRLEnv) -> torch.Tensor: where :math:`align_z` is the dot product of the z direction of the gripper and the -x direction of the handle and :math:`align_x` is the dot product of the x direction of the gripper and the -y direction of the handle. """ - ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w[..., 0, :] - handle_quat = env.scene["cabinet_frame"].data.target_quat_w[..., 0, :] + ee_tcp_quat = env.scene["ee_frame"].data.target_quat_w.torch[..., 0, :] + handle_quat = env.scene["cabinet_frame"].data.target_quat_w.torch[..., 0, :] ee_tcp_rot_mat = matrix_from_quat(ee_tcp_quat) handle_mat = matrix_from_quat(handle_quat) @@ -79,9 +79,9 @@ def align_grasp_around_handle(env: ManagerBasedRLEnv) -> torch.Tensor: The correct hand orientation is when the left finger is above the handle and the right finger is below the handle. """ # Target object position: (num_envs, 3) - handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] + handle_pos = env.scene["cabinet_frame"].data.target_pos_w.torch[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) - ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] + ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w.torch[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] @@ -99,9 +99,9 @@ def approach_gripper_handle(env: ManagerBasedRLEnv, offset: float = 0.04) -> tor (i.e., the left finger is above the handle and the right finger is below the handle). Otherwise, it returns zero. """ # Target object position: (num_envs, 3) - handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] + handle_pos = env.scene["cabinet_frame"].data.target_pos_w.torch[..., 0, :] # Fingertips position: (num_envs, n_fingertips, 3) - ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w[..., 1:, :] + ee_fingertips_w = env.scene["ee_frame"].data.target_pos_w.torch[..., 1:, :] lfinger_pos = ee_fingertips_w[..., 0, :] rfinger_pos = ee_fingertips_w[..., 1, :] @@ -126,25 +126,47 @@ def grasp_handle( Note: It is assumed that zero joint position corresponds to the fingers being closed. """ - ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w[..., 0, :] - handle_pos = env.scene["cabinet_frame"].data.target_pos_w[..., 0, :] - gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids] + ee_tcp_pos = env.scene["ee_frame"].data.target_pos_w.torch[..., 0, :] + handle_pos = env.scene["cabinet_frame"].data.target_pos_w.torch[..., 0, :] + gripper_joint_pos = env.scene[asset_cfg.name].data.joint_pos.torch[:, asset_cfg.joint_ids] - distance = torch.norm(handle_pos - ee_tcp_pos, dim=-1, p=2) + distance = torch.linalg.norm(handle_pos - ee_tcp_pos, dim=-1, ord=2) is_close = distance <= threshold return is_close * torch.sum(open_joint_pos - gripper_joint_pos, dim=-1) -def open_drawer_bonus(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: +class open_drawer_bonus(ManagerTermBase): """Bonus for opening the drawer given by the joint position of the drawer. The bonus is given when the drawer is open. If the grasp is around the handle, the bonus is doubled. + + If ``success_threshold`` is provided in the term params, this also tracks per-episode success + (sticky binary: drawer ever opened past ``success_threshold``) and logs the mean across + environments under ``Metrics/success_rate`` on reset. """ - drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] - is_graspable = align_grasp_around_handle(env).float() - return (is_graspable + 1.0) * drawer_pos + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + self._track_success = cfg.params.get("success_threshold") is not None + if self._track_success: + self.succeeded = torch.zeros(env.num_envs, dtype=torch.bool, device=env.device) + + def reset(self, env_ids: torch.Tensor): + if self._track_success: + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self.succeeded[env_ids].float().mean().item() + ) + self.succeeded[env_ids] = False + + def __call__( + self, env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg, success_threshold: float | None = None + ) -> torch.Tensor: + drawer_pos = env.scene[asset_cfg.name].data.joint_pos.torch[:, asset_cfg.joint_ids[0]] + is_graspable = align_grasp_around_handle(env).float() + if success_threshold is not None: + self.succeeded |= drawer_pos > success_threshold + return (is_graspable + 1.0) * drawer_pos def multi_stage_open_drawer(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: @@ -153,7 +175,7 @@ def multi_stage_open_drawer(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) - Depending on the drawer's position, the reward is given in three stages: easy, medium, and hard. This helps the agent to learn to open the drawer in a controlled manner. """ - drawer_pos = env.scene[asset_cfg.name].data.joint_pos[:, asset_cfg.joint_ids[0]] + drawer_pos = env.scene[asset_cfg.name].data.joint_pos.torch[:, asset_cfg.joint_ids[0]] is_graspable = align_grasp_around_handle(env).float() open_easy = (drawer_pos > 0.01) * 0.5 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/__init__.py index 61fccf6b2c14..3de316ff3300 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/__init__.py @@ -13,5 +13,3 @@ - Reach environments for end-effector pose tracking """ - -from .reach import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/__init__.py new file mode 100644 index 000000000000..6940800ead36 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/__init__.py @@ -0,0 +1,46 @@ +# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +from . import agents + +## +# Register Gym environments. +## + + +# Flexiv Rizon 4s +gym.register( + id="Isaac-Deploy-GearAssembly-Rizon4s-Grav-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:Rizon4sGearAssemblyEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sGearAssemblyRNNPPORunnerCfg", + }, +) + +# Flexiv Rizon 4s - Play / Debug (deterministic, no randomization) +gym.register( + id="Isaac-Deploy-GearAssembly-Rizon4s-Grav-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.ros_inference_env_cfg:Rizon4sGearAssemblyEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sGearAssemblyRNNPPORunnerCfg", + }, +) + +# Flexiv Rizon 4s - ROS Inference +gym.register( + id="Isaac-Deploy-GearAssembly-Rizon4s-Grav-ROS-Inference-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.ros_inference_env_cfg:Rizon4sGearAssemblyROSInferenceEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sGearAssemblyRNNPPORunnerCfg", + }, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/__init__.py new file mode 100644 index 000000000000..cf59b16a1e2e --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/rsl_rl_ppo_cfg.py new file mode 100644 index 000000000000..06f64e731afc --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/agents/rsl_rl_ppo_cfg.py @@ -0,0 +1,49 @@ +# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticRecurrentCfg, RslRlPpoAlgorithmCfg + + +@configclass +class Rizon4sGearAssemblyRNNPPORunnerCfg(RslRlOnPolicyRunnerCfg): + num_steps_per_env = 512 + max_iterations = 1500 + save_interval = 50 + experiment_name = "gear_assembly_rizon4s" + clip_actions = 1.0 + resume = False + obs_groups = { + "policy": ["policy"], + "critic": ["critic"], + } + policy = RslRlPpoActorCriticRecurrentCfg( + state_dependent_std=True, + init_noise_std=1.0, + actor_obs_normalization=True, + critic_obs_normalization=True, + actor_hidden_dims=[256, 128, 64], + critic_hidden_dims=[256, 128, 64], + noise_std_type="log", + activation="elu", + rnn_type="lstm", + rnn_hidden_dim=256, + rnn_num_layers=2, + ) + algorithm = RslRlPpoAlgorithmCfg( + value_loss_coef=1.0, + use_clipped_value_loss=True, + clip_param=0.2, + entropy_coef=0.0, + num_learning_epochs=8, + num_mini_batches=16, + learning_rate=5.0e-4, + schedule="adaptive", + gamma=0.99, + lam=0.95, + desired_kl=0.008, + max_grad_norm=1.0, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/joint_pos_env_cfg.py new file mode 100644 index 000000000000..bd8382b55212 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/joint_pos_env_cfg.py @@ -0,0 +1,425 @@ +# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import ArticulationCfg, RigidObjectCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import RewardTermCfg as RewTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.utils import configclass +from isaaclab.utils.noise import UniformNoiseCfg + +import isaaclab_tasks.manager_based.manipulation.deploy.mdp as mdp +import isaaclab_tasks.manager_based.manipulation.deploy.mdp.events as gear_assembly_events +from isaaclab_tasks.manager_based.manipulation.deploy.gear_assembly.gear_assembly_env_cfg import GearAssemblyEnvCfg +from isaaclab_tasks.manager_based.manipulation.deploy.mdp.noise_models import ( + ResetSampledConstantNoiseModelCfg, + ResetSampledQuaternionNoiseModelCfg, +) + +## +# Pre-defined configs +## +from isaaclab_assets import FLEXIV_RIZON4S_GRAV_GRIPPER_CFG # isort: skip + + +## +# Gripper-specific helper functions +## + + +def set_finger_joint_pos_grav( + joint_pos: torch.Tensor, + reset_ind_joint_pos: list[int], + finger_joints: list[int], + finger_joint_position: float, +): + """Set finger joint positions for Grav gripper. + + Args: + joint_pos: Joint positions tensor + reset_ind_joint_pos: Row indices into the sliced joint_pos tensor + finger_joints: List of all gripper joint indices (6 joints total) + finger_joint_position: Target position for main finger joint (in radians) + + Note: + Grav gripper joint structure (indices from finger_joints list): + [0] finger_joint - main controllable joint + [1] left_inner_knuckle_joint - mimic with -1 gearing + [2] right_inner_knuckle_joint - mimic with -1 gearing + [3] right_outer_knuckle_joint - mimic with -1 gearing + [4] left_outer_finger_joint - mimic with +1 gearing + [5] right_outer_finger_joint - mimic with +1 gearing + """ + for idx in reset_ind_joint_pos: + if len(finger_joints) < 6: + raise ValueError(f"Grav gripper requires at least 6 finger joints, got {len(finger_joints)}") + + # Main controllable joint + joint_pos[idx, finger_joints[0]] = finger_joint_position + + # Mimic joints with -1 gearing + joint_pos[idx, finger_joints[1]] = finger_joint_position # left_inner_knuckle_joint + joint_pos[idx, finger_joints[2]] = finger_joint_position # right_inner_knuckle_joint + joint_pos[idx, finger_joints[3]] = finger_joint_position # right_outer_knuckle_joint + + # Mimic joints with +1 gearing + joint_pos[idx, finger_joints[4]] = -finger_joint_position # left_outer_finger_joint + joint_pos[idx, finger_joints[5]] = -finger_joint_position # right_outer_finger_joint + + +## +# Environment configuration +## + + +@configclass +class EventCfg: + """Configuration for events.""" + + small_gear_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("factory_gear_small", body_names=".*"), + "static_friction_range": (0.75, 0.75), + "dynamic_friction_range": (0.75, 0.75), + "restitution_range": (0.0, 0.0), + "num_buckets": 16, + }, + ) + + medium_gear_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("factory_gear_medium", body_names=".*"), + "static_friction_range": (0.75, 0.75), + "dynamic_friction_range": (0.75, 0.75), + "restitution_range": (0.0, 0.0), + "num_buckets": 16, + }, + ) + + large_gear_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("factory_gear_large", body_names=".*"), + "static_friction_range": (0.75, 0.75), + "dynamic_friction_range": (0.75, 0.75), + "restitution_range": (0.0, 0.0), + "num_buckets": 16, + }, + ) + + gear_base_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("factory_gear_base", body_names=".*"), + "static_friction_range": (0.0, 0.0), + "dynamic_friction_range": (0.0, 0.0), + "restitution_range": (0.0, 0.0), + "num_buckets": 16, + }, + ) + + robot_physics_material = EventTerm( + func=mdp.randomize_rigid_body_material, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=".*finger.*"), + "static_friction_range": (3.0, 3.0), + "dynamic_friction_range": (3.0, 3.0), + "restitution_range": (0.0, 0.0), + "num_buckets": 16, + }, + ) + + randomize_gear_type = EventTerm( + func=gear_assembly_events.randomize_gear_type, + mode="reset", + params={"gear_types": ["gear_small", "gear_medium", "gear_large"]}, + ) + + reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset") + + randomize_gears_and_base_pose = EventTerm( + func=gear_assembly_events.randomize_gears_and_base_pose, + mode="reset", + params={ + "pose_range": { + "x": [-0.1, 0.1], + "y": [-0.25, 0.25], + "z": [-0.1, 0.1], + "roll": [-math.pi / 90, math.pi / 90], # 2 degree + "pitch": [-math.pi / 90, math.pi / 90], # 2 degree + "yaw": [-math.pi / 6, math.pi / 6], # 30 degree + }, + "gear_pos_range": { + "x": [-0.02, 0.02], + "y": [-0.02, 0.02], + "z": [0.0575, 0.0775], + }, + "velocity_range": {}, + }, + ) + + set_robot_to_grasp_pose = EventTerm( + func=gear_assembly_events.set_robot_to_grasp_pose, + mode="reset", + params={ + "robot_asset_cfg": SceneEntityCfg("robot"), + "pos_randomization_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "z": [-0.0, 0.0]}, + }, + ) + + +@configclass +class Rizon4sGearAssemblyEnvCfg(GearAssemblyEnvCfg): + """Configuration for Flexiv Rizon 4s with Grav Gripper Gear Assembly Environment. + + The Flexiv Rizon 4s is a 7-DOF collaborative robot arm equipped with the + Flexiv Grav parallel gripper for gear manipulation tasks. + """ + + ee_grasp_weight_ramp_steps: int = 512_000 + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # Flexiv-specific observation noise overrides + self.observations.policy.gear_shaft_pos.noise = ResetSampledConstantNoiseModelCfg( + noise_cfg=UniformNoiseCfg(n_min=-0.01, n_max=0.01, operation="add") + ) + self.observations.policy.gear_shaft_quat.noise = ResetSampledQuaternionNoiseModelCfg( + roll_range=(-0.03491, 0.03491), + pitch_range=(-0.03491, 0.03491), + yaw_range=(-0.03491, 0.03491), + ) + + # Robot-specific parameters for Flexiv Rizon 4s with Grav gripper + self.end_effector_body_name = "link7" # End effector body name for IK + self.num_arm_joints = 7 # Number of arm joints (Rizon 4s has 7 DOF) + # Rotation offset for grasp pose (quaternion [x, y, z, w]) + # Computed from IK convergence for downward-facing end effector + self.grasp_rot_offset = [ + -0.707, + 0.707, + 0.0, + 0.0, + ] + self.gripper_joint_setter_func = set_finger_joint_pos_grav # Grav gripper joint setter function + + # Gear orientation termination thresholds (in degrees) + self.gear_orientation_roll_threshold_deg = 15.0 # Maximum allowed roll deviation + self.gear_orientation_pitch_threshold_deg = 15.0 # Maximum allowed pitch deviation + self.gear_orientation_yaw_threshold_deg = 180.0 # Maximum allowed yaw deviation + + # Common observation configuration for Rizon 4s joints (arm only, not gripper) + self.observations.policy.joint_pos.params["asset_cfg"].joint_names = [ + "joint1", + "joint2", + "joint3", + "joint4", + "joint5", + "joint6", + "joint7", + ] + self.observations.policy.joint_vel.params["asset_cfg"].joint_names = [ + "joint1", + "joint2", + "joint3", + "joint4", + "joint5", + "joint6", + "joint7", + ] + + # override events + self.events = EventCfg() + + # Update termination thresholds from config + self.terminations.gear_orientation_exceeded.params["roll_threshold_deg"] = ( + self.gear_orientation_roll_threshold_deg + ) + self.terminations.gear_orientation_exceeded.params["pitch_threshold_deg"] = ( + self.gear_orientation_pitch_threshold_deg + ) + self.terminations.gear_orientation_exceeded.params["yaw_threshold_deg"] = ( + self.gear_orientation_yaw_threshold_deg + ) + + # Action configuration for Rizon 4s arm + # Using smaller action scale for stability + self.joint_action_scale = 0.025 + self.actions.arm_action = mdp.RelativeJointPositionActionCfg( + asset_name="robot", + joint_names=[ + "joint1", + "joint2", + "joint3", + "joint4", + "joint5", + "joint6", + "joint7", + ], + scale=self.joint_action_scale, + use_zero_offset=True, + ) + + # Switch robot to Flexiv Rizon 4s with Grav gripper + self.scene.robot = FLEXIV_RIZON4S_GRAV_GRIPPER_CFG.replace( + prim_path="{ENV_REGEX_NS}/Robot", + spawn=FLEXIV_RIZON4S_GRAV_GRIPPER_CFG.spawn.replace( + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=True, + max_depenetration_velocity=5.0, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=3666.0, + enable_gyroscopic_forces=True, + solver_position_iteration_count=4, + solver_velocity_iteration_count=1, + max_contact_impulse=1e32, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False, solver_position_iteration_count=4, solver_velocity_iteration_count=1 + ), + collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.005, rest_offset=0.0), + ), + # Joint positions based on IK from center of distribution for randomized gear positions + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + "joint1": 0.0, + "joint2": -0.698, + "joint3": 0.0, + "joint4": 1.571, + "joint5": 0.0, + "joint6": 0.698, + "joint7": 0.0, + }, + pos=(0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), + ), + ) + + # Grav gripper actuator configuration for gear manipulation + self.scene.robot.actuators["gripper_drive"] = ImplicitActuatorCfg( + joint_names_expr=["finger_joint"], + effort_limit_sim=2.0, + velocity_limit_sim=1.0, + stiffness=2e3, + damping=1e1, + friction=0.0, + armature=0.0, + ) + + # Passive/mimic joints in the gripper - set to zero stiffness/damping + self.scene.robot.actuators["gripper_passive"] = ImplicitActuatorCfg( + joint_names_expr=[".*_knuckle_joint"], + effort_limit_sim=1.0, + velocity_limit_sim=1.0, + stiffness=0.0, + damping=0.0, + friction=0.0, + armature=0.0, + ) + + # Override gear initial states for Rizon (closer to robot, centered) + self.scene.factory_gear_base.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, 0.071), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + self.scene.factory_gear_small.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, 0.071), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + self.scene.factory_gear_medium.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, 0.071), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + self.scene.factory_gear_large.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, 0.071), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + + # Gear offsets and grasp positions for Rizon 4s with Grav gripper + # These offsets are relative to the end effector frame (link7) + # Z offset accounts for the gripper length from link7 to finger tip + self.gear_offsets_grasp = { + "gear_small": [0.0, -self.gear_offsets["gear_small"][0], -0.35], + "gear_medium": [0.0, -self.gear_offsets["gear_medium"][0], -0.35], + "gear_large": [0.0, -self.gear_offsets["gear_large"][0], -0.35], + } + + # Grasp widths for Grav gripper (raw radian values for finger_joint) + self.hand_grasp_width = { + "gear_small": 0.05, + "gear_medium": 0.2, + "gear_large": 0.28, + } + + # Close widths for Grav gripper (raw radian values for finger_joint) + self.hand_close_width = { + "gear_small": 0.0, + "gear_medium": 0.139626, + "gear_large": 0.139626, + } + + # Populate event term parameters + self.events.set_robot_to_grasp_pose.params["gear_offsets_grasp"] = self.gear_offsets_grasp + self.events.set_robot_to_grasp_pose.params["end_effector_body_name"] = self.end_effector_body_name + self.events.set_robot_to_grasp_pose.params["num_arm_joints"] = self.num_arm_joints + self.events.set_robot_to_grasp_pose.params["grasp_rot_offset"] = self.grasp_rot_offset + self.events.set_robot_to_grasp_pose.params["gripper_joint_setter_func"] = self.gripper_joint_setter_func + + # Flexiv-specific reward terms for EE-grasp keypoint tracking + self.rewards.end_effector_grasp_keypoint_tracking = RewTerm( + func=mdp.keypoint_ee_grasp_error, + weight=-0.5, + params={ + "robot_asset_cfg": SceneEntityCfg("robot"), + "keypoint_scale": 0.15, + "ee_grasp_threshold": 0.00, + "weight_ramp_start": 0.0, + "weight_ramp_steps": self.ee_grasp_weight_ramp_steps, + "end_effector_body_name": self.end_effector_body_name, + "grasp_rot_offset": self.grasp_rot_offset, + "gear_offsets_grasp": self.gear_offsets_grasp, + }, + ) + self.rewards.end_effector_grasp_keypoint_tracking_exp = RewTerm( + func=mdp.keypoint_ee_grasp_error_exp, + weight=0.5, + params={ + "robot_asset_cfg": SceneEntityCfg("robot"), + "kp_exp_coeffs": [(50, 0.0001), (300, 0.0001)], + "kp_use_sum_of_exps": False, + "keypoint_scale": 0.15, + "ee_grasp_threshold": 0.00, + "weight_ramp_start": 0.0, + "weight_ramp_steps": self.ee_grasp_weight_ramp_steps, + "end_effector_body_name": self.end_effector_body_name, + "grasp_rot_offset": self.grasp_rot_offset, + "gear_offsets_grasp": self.gear_offsets_grasp, + }, + ) + + # Populate termination term parameters + self.terminations.gear_dropped.params["gear_offsets_grasp"] = self.gear_offsets_grasp + self.terminations.gear_dropped.params["end_effector_body_name"] = self.end_effector_body_name + self.terminations.gear_dropped.params["grasp_rot_offset"] = self.grasp_rot_offset + + self.terminations.gear_orientation_exceeded.params["end_effector_body_name"] = self.end_effector_body_name + self.terminations.gear_orientation_exceeded.params["grasp_rot_offset"] = self.grasp_rot_offset diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/ros_inference_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/ros_inference_env_cfg.py new file mode 100644 index 000000000000..504a3ccda288 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/rizon_4s/ros_inference_env_cfg.py @@ -0,0 +1,197 @@ +# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +import torch + +from isaaclab.assets import RigidObjectCfg +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.utils import configclass + +from .joint_pos_env_cfg import Rizon4sGearAssemblyEnvCfg + + +def constant_obs(env, value: tuple) -> torch.Tensor: + """Observation function that returns a fixed tensor every step.""" + return torch.tensor([value], device=env.device, dtype=torch.float32).expand(env.num_envs, -1) + + +@configclass +class Rizon4sGearAssemblyROSInferenceEnvCfg(Rizon4sGearAssemblyEnvCfg): + """Configuration for ROS inference with Flexiv Rizon 4s and Grav gripper. + + This configuration: + - Exposes variables needed for ROS inference + - Overrides robot and gear initial poses for fixed/deterministic setup + """ + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # Variables used by Isaac Manipulator for on robot inference + # These parameters allow the ROS inference node to validate environment configuration, + # perform checks during inference, and correctly interpret observations and actions. + self.obs_order = ["arm_dof_pos", "arm_dof_vel", "shaft_pos", "shaft_quat"] + self.policy_action_space = "joint" + # Use inherited joint names from parent's observation configuration + self.arm_joint_names = self.observations.policy.joint_pos.params["asset_cfg"].joint_names + # Use inherited num_arm_joints from parent + self.action_space = self.num_arm_joints + # State space and observation space for Rizon 4s with Grav gripper (7 DOF arm + 1 gripper) + # State: 7 joint pos + 7 joint vel + 3 shaft pos + 4 shaft quat + 3 gear pos + 4 gear quat = 28 + # For critic: additional gear observations + self.state_space = 28 + # Observation: 7 joint pos + 7 joint vel + 3 shaft pos + 4 shaft quat = 21 + self.observation_space = 21 + + # Set joint_action_scale from the existing arm_action.scale + self.joint_action_scale = self.actions.arm_action.scale + + # Dynamically generate action_scale_joint_space based on action_space + self.action_scale_joint_space = [self.joint_action_scale] * self.action_space + + # Override robot initial pose for ROS inference (fixed pose, no randomization) + # Joint positions and pos are inherited from parent, only override rotation to be deterministic + self.scene.robot.init_state.rot = (0.0, 0.0, 0.0, 1.0) # Identity quaternion (x, y, z, w) + + # Override gear base initial pose (fixed pose for ROS inference) + # Position configured for Rizon 4s workspace + self.scene.factory_gear_base.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, -0.005), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + + # Override gear initial poses (fixed poses for ROS inference) + # Small gear + self.scene.factory_gear_small.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, -0.005), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + + # Medium gear + self.scene.factory_gear_medium.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, -0.005), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + + # Large gear + self.scene.factory_gear_large.init_state = RigidObjectCfg.InitialStateCfg( + pos=(0.481, -0.073, -0.005), + rot=(0.0, 0.0, 0.70711, -0.70711), + ) + + # Fixed asset parameters for ROS inference - derived from configuration + # These parameters are used by the ROS inference node to validate the environment setup + # and apply appropriate noise models for robust real-world deployment. + # Derive position center from gear base init state + self.fixed_asset_init_pos_center = list(self.scene.factory_gear_base.init_state.pos) + # Derive position range from parent's randomize_gears_and_base_pose event pose_range + pose_range = self.events.randomize_gears_and_base_pose.params["pose_range"] + self.fixed_asset_init_pos_range = [ + pose_range["x"][1], # max value + pose_range["y"][1], # max value + pose_range["z"][1], # max value + ] + # Orientation in degrees (quaternion (0.0, 0.0, 0.70711, -0.70711) = -90° around Z) + self.fixed_asset_init_orn_deg = [0.0, 0.0, -90.0] + # Derive orientation range from parent's pose_range (radians to degrees) + self.fixed_asset_init_orn_deg_range = [ + math.degrees(pose_range["roll"][1]), # convert radians to degrees + math.degrees(pose_range["pitch"][1]), + math.degrees(pose_range["yaw"][1]), + ] + # Derive observation noise level from parent's gear_shaft_pos noise configuration + gear_shaft_pos_noise = self.observations.policy.gear_shaft_pos.noise.noise_cfg.n_max + self.fixed_asset_pos_obs_noise_level = [ + gear_shaft_pos_noise, + gear_shaft_pos_noise, + gear_shaft_pos_noise, + ] + + +@configclass +class Rizon4sGearAssemblyEnvCfg_PLAY(Rizon4sGearAssemblyROSInferenceEnvCfg): + """Deterministic play/debug configuration for Flexiv Rizon 4s gear assembly. + + Inherits the full ROS-inference configuration and then disables all + randomization so the simulation is identical on every reset. Useful for + comparing simulated and real-world policy behavior at a known pose. + + To debug a specific real-world scenario, edit the constants below to match + the physical setup, then run:: + + python scripts/reinforcement_learning/rsl_rl/play.py \\ + --task Isaac-Deploy-GearAssembly-Rizon4s-Grav-Play-v0 \\ + --num_envs 1 --checkpoint + + Observation overrides (``OBS_SHAFT_POS``, ``OBS_SHAFT_QUAT``) let you + inject fixed values into the policy's observation tensor regardless of + simulation state. Set to ``None`` to use the simulated values. + """ + + # ╔══════════════════════════════════════════════════════════════════════╗ + # ║ SCENE SETUP — edit to match your real-world setup ║ + # ╚══════════════════════════════════════════════════════════════════════╝ + + GEAR_TYPE: str = "gear_large" + GEAR_BASE_POS: tuple = (0.481, -0.073, -0.005) + GEAR_BASE_ROT: tuple = (0.0, 0.0, -0.70711, 0.70711) + GEAR_Z_OFFSET: float = 0.0675 + + # ╔══════════════════════════════════════════════════════════════════════╗ + # ║ OBSERVATION OVERRIDES — set to None to use simulated values ║ + # ║ ║ + # ║ Obs layout: [joint_pos(7) | joint_vel(7) | shaft_pos(3) | ║ + # ║ shaft_quat(4)] ║ + # ╚══════════════════════════════════════════════════════════════════════╝ + + OBS_SHAFT_POS: tuple | None = None # e.g. (0.481, -0.028, -0.005) + OBS_SHAFT_QUAT: tuple | None = None # e.g. (0.0, 0.0, -0.70711, 0.70711) + + def __post_init__(self): + super().__post_init__() + + self.scene.num_envs = 1 + self.scene.env_spacing = 2.5 + + # ── Fix gear type (no random selection) ─────────────────────────── + self.events.randomize_gear_type.params["gear_types"] = [self.GEAR_TYPE] + + # ── Override gear base pose ─────────────────────────────────────── + self.scene.factory_gear_base.init_state = RigidObjectCfg.InitialStateCfg( + pos=self.GEAR_BASE_POS, + rot=self.GEAR_BASE_ROT, + ) + for attr in ("factory_gear_small", "factory_gear_medium", "factory_gear_large"): + getattr(self.scene, attr).init_state = RigidObjectCfg.InitialStateCfg( + pos=self.GEAR_BASE_POS, + rot=self.GEAR_BASE_ROT, + ) + + # ── Zero out all pose randomization ─────────────────────────────── + self.events.randomize_gears_and_base_pose.params["pose_range"] = { + "x": [0.0, 0.0], + "y": [0.0, 0.0], + "z": [0.0, 0.0], + "roll": [0.0, 0.0], + "pitch": [0.0, 0.0], + "yaw": [0.0, 0.0], + } + self.events.randomize_gears_and_base_pose.params["gear_pos_range"] = { + "x": [0.0, 0.0], + "y": [0.0, 0.0], + "z": [self.GEAR_Z_OFFSET, self.GEAR_Z_OFFSET], + } + + # ── Disable observation noise ───────────────────────────────────── + self.observations.policy.enable_corruption = False + + # ── Observation overrides (replace terms with constant functions) ─ + if self.OBS_SHAFT_POS is not None: + self.observations.policy.gear_shaft_pos = ObsTerm(func=constant_obs, params={"value": self.OBS_SHAFT_POS}) + if self.OBS_SHAFT_QUAT is not None: + self.observations.policy.gear_shaft_quat = ObsTerm(func=constant_obs, params={"value": self.OBS_SHAFT_QUAT}) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/joint_pos_env_cfg.py index 22921e717895..3cb8eeae956e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/joint_pos_env_cfg.py @@ -244,11 +244,11 @@ def __post_init__(self): self.end_effector_body_name = "wrist_3_link" # End effector body name for IK and termination checks self.num_arm_joints = 6 # Number of arm joints (excluding gripper) self.grasp_rot_offset = [ - 0.0, math.sqrt(2) / 2, math.sqrt(2) / 2, 0.0, - ] # Rotation offset for grasp pose (quaternion [w, x, y, z]) + 0.0, + ] # Rotation offset for grasp pose (quaternion [x, y, z, w]) self.gripper_joint_setter_func = None # Gripper-specific joint setter function (set in subclass) # Gear orientation termination thresholds (in degrees) @@ -347,7 +347,7 @@ def __post_init__(self): "wrist_3_joint": -1.9896, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), ) @@ -436,7 +436,7 @@ def __post_init__(self): "wrist_3_joint": -1.9896, }, pos=(0.0, 0.0, 0.0), - rot=(1.0, 0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), ), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/ros_inference_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/ros_inference_env_cfg.py index bdb337de5c99..cdce81364692 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/ros_inference_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/config/ur_10e/ros_inference_env_cfg.py @@ -54,31 +54,31 @@ def __post_init__(self): # (rotated 180° around Z from base_link), not the base_link frame (USD origin). # See: https://docs.universal-robots.com/Universal_Robots_ROS2_Documentation/doc/ur_description/doc/robot_frames.html # Joint positions and pos are inherited from parent, only override rotation to be deterministic - self.scene.robot.init_state.rot = (0.0, 0.0, 0.0, 1.0) + self.scene.robot.init_state.rot = (0.0, 0.0, 1.0, 0.0) # Override gear base initial pose (fixed pose for ROS inference) self.scene.factory_gear_base.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Override gear initial poses (fixed poses for ROS inference) # Small gear self.scene.factory_gear_small.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), # z = base_z + 0.1675 (above base) - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Medium gear self.scene.factory_gear_medium.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Large gear self.scene.factory_gear_large.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Fixed asset parameters for ROS inference - derived from configuration @@ -153,31 +153,31 @@ def __post_init__(self): # (rotated 180° around Z from base_link), not the base_link frame (USD origin). # See: https://docs.universal-robots.com/Universal_Robots_ROS2_Documentation/doc/ur_description/doc/robot_frames.html # Joint positions and pos are inherited from parent, only override rotation to be deterministic - self.scene.robot.init_state.rot = (0.0, 0.0, 0.0, 1.0) + self.scene.robot.init_state.rot = (0.0, 0.0, 1.0, 0.0) # Override gear base initial pose (fixed pose for ROS inference) self.scene.factory_gear_base.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Override gear initial poses (fixed poses for ROS inference) # Small gear self.scene.factory_gear_small.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), # z = base_z + 0.1675 (above base) - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Medium gear self.scene.factory_gear_medium.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Large gear self.scene.factory_gear_large.init_state = RigidObjectCfg.InitialStateCfg( pos=(1.0200, -0.2100, -0.1), - rot=(-0.70711, 0.0, 0.0, 0.70711), + rot=(0.0, 0.0, 0.70711, -0.70711), ) # Fixed asset parameters for ROS inference - derived from configuration diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/gear_assembly_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/gear_assembly_env_cfg.py index 8a15d7b3a52d..6a3fc7f080f4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/gear_assembly_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/gear_assembly/gear_assembly_env_cfg.py @@ -6,6 +6,8 @@ import os from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -17,7 +19,7 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim.simulation_cfg import PhysxCfg, SimulationCfg +from isaaclab.sim.simulation_cfg import SimulationCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from isaaclab.utils.noise import UniformNoiseCfg @@ -71,7 +73,7 @@ class GearAssemblySceneCfg(InteractiveSceneCfg): mass_props=sim_utils.MassPropertiesCfg(mass=None), collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.70711, 0.0, 0.0, 0.70711)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.0, 0.0, 0.70711, 0.70711)), ) factory_gear_small = RigidObjectCfg( @@ -96,7 +98,7 @@ class GearAssemblySceneCfg(InteractiveSceneCfg): mass_props=sim_utils.MassPropertiesCfg(mass=None), collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.70711, 0.0, 0.0, 0.70711)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.0, 0.0, 0.70711, 0.70711)), ) factory_gear_medium = RigidObjectCfg( @@ -121,7 +123,7 @@ class GearAssemblySceneCfg(InteractiveSceneCfg): mass_props=sim_utils.MassPropertiesCfg(mass=None), collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.70711, 0.0, 0.0, 0.70711)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.0, 0.0, 0.70711, 0.70711)), ) factory_gear_large = RigidObjectCfg( @@ -146,7 +148,7 @@ class GearAssemblySceneCfg(InteractiveSceneCfg): mass_props=sim_utils.MassPropertiesCfg(mass=None), collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.70711, 0.0, 0.0, 0.70711)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(-1.0200, 0.2100, -0.1), rot=(0.0, 0.0, 0.70711, 0.70711)), ) # robots @@ -301,11 +303,8 @@ class GearAssemblyEnvCfg(ManagerBasedRLEnvCfg): terminations: TerminationsCfg = TerminationsCfg() events: EventCfg = EventCfg() sim: SimulationCfg = SimulationCfg( - physx=PhysxCfg( - # Important to prevent collisionStackSize buffer overflow in contact-rich environments. - gpu_collision_stack_size=2**28, - gpu_max_rigid_contact_count=2**23, - gpu_max_rigid_patch_count=2**23, + physics=PhysxCfg( # Important to prevent collisionStackSize buffer overflow in contact-rich environments. + gpu_collision_stack_size=2**30, gpu_max_rigid_contact_count=2**23, gpu_max_rigid_patch_count=2**23 ), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.py index 10ab3ea7e7fd..81af1c012910 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.py @@ -5,10 +5,6 @@ """This sub-module contains the functions that are specific to the locomotion environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .events import * # noqa: F401, F403 -from .noise_models import * # noqa: F401, F403 -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.pyi new file mode 100644 index 000000000000..2a200c888bc0 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/__init__.pyi @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "randomize_gear_type", + "randomize_gears_and_base_pose", + "set_robot_to_grasp_pose", + "ResetSampledConstantNoiseModel", + "ResetSampledConstantNoiseModelCfg", + "gear_pos_w", + "gear_quat_w", + "gear_shaft_pos_w", + "gear_shaft_quat_w", + "keypoint_command_error", + "keypoint_command_error_exp", + "keypoint_entity_error", + "keypoint_entity_error_exp", + "keypoint_ee_grasp_error", + "keypoint_ee_grasp_error_exp", + "reset_when_gear_dropped", + "reset_when_gear_orientation_exceeds_threshold", +] + +from .events import randomize_gear_type, randomize_gears_and_base_pose, set_robot_to_grasp_pose +from .noise_models import ResetSampledConstantNoiseModel, ResetSampledConstantNoiseModelCfg +from .observations import gear_pos_w, gear_quat_w, gear_shaft_pos_w, gear_shaft_quat_w +from .rewards import ( + keypoint_command_error, + keypoint_command_error_exp, + keypoint_entity_error, + keypoint_entity_error_exp, + keypoint_ee_grasp_error, + keypoint_ee_grasp_error_exp, +) +from .terminations import reset_when_gear_dropped, reset_when_gear_orientation_exceeds_threshold +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py index 7666875435fb..b651a002966e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py @@ -11,14 +11,15 @@ from typing import TYPE_CHECKING import torch +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab_tasks.direct.automate import factory_control as fc if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedEnv @@ -132,7 +133,7 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): if "grasp_rot_offset" not in cfg.params: raise ValueError( "'grasp_rot_offset' parameter is required in set_robot_to_grasp_pose configuration. " - "It should be a quaternion [w, x, y, z]. Example: [0.0, 0.707, 0.707, 0.0]" + "It should be a quaternion [x, y, z, w]. Example: [0.707, 0.707, 0.0, 0.0]" ) if "gripper_joint_setter_func" not in cfg.params: raise ValueError( @@ -215,7 +216,7 @@ def __call__( robot_asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), pos_threshold: float = 1e-6, rot_threshold: float = 1e-6, - max_iterations: int = 10, + max_iterations: int = 50, pos_randomization_range: dict | None = None, gear_offsets_grasp: dict | None = None, end_effector_body_name: str | None = None, @@ -253,24 +254,24 @@ def __call__( # IK loop for i in range(max_iterations): # Get current joint state - joint_pos = self.robot_asset.data.joint_pos[env_ids].clone() - joint_vel = self.robot_asset.data.joint_vel[env_ids].clone() + joint_pos = self.robot_asset.data.joint_pos.torch[env_ids].clone() + joint_vel = self.robot_asset.data.joint_vel.torch[env_ids].clone() # Stack all gear positions and quaternions all_gear_pos = torch.stack( [ - env.scene["factory_gear_small"].data.root_link_pos_w, - env.scene["factory_gear_medium"].data.root_link_pos_w, - env.scene["factory_gear_large"].data.root_link_pos_w, + env.scene["factory_gear_small"].data.root_link_pos_w.torch, + env.scene["factory_gear_medium"].data.root_link_pos_w.torch, + env.scene["factory_gear_large"].data.root_link_pos_w.torch, ], dim=1, )[env_ids] all_gear_quat = torch.stack( [ - env.scene["factory_gear_small"].data.root_link_quat_w, - env.scene["factory_gear_medium"].data.root_link_quat_w, - env.scene["factory_gear_large"].data.root_link_quat_w, + env.scene["factory_gear_small"].data.root_link_quat_w.torch, + env.scene["factory_gear_medium"].data.root_link_quat_w.torch, + env.scene["factory_gear_large"].data.root_link_quat_w.torch, ], dim=1, )[env_ids] @@ -305,8 +306,8 @@ def __call__( ) # Get end effector pose - eef_pos = self.robot_asset.data.body_pos_w[env_ids, self.eef_idx] - eef_quat = self.robot_asset.data.body_quat_w[env_ids, self.eef_idx] + eef_pos = self.robot_asset.data.body_pos_w.torch[env_ids, self.eef_idx] + eef_quat = self.robot_asset.data.body_quat_w.torch[env_ids, self.eef_idx] # Compute pose error pos_error, axis_angle_error = fc.get_pose_error( @@ -320,14 +321,14 @@ def __call__( delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1) # Check convergence - pos_error_norm = torch.norm(pos_error, dim=-1) - rot_error_norm = torch.norm(axis_angle_error, dim=-1) + pos_error_norm = torch.linalg.norm(pos_error, dim=-1) + rot_error_norm = torch.linalg.norm(axis_angle_error, dim=-1) if torch.all(pos_error_norm < pos_threshold) and torch.all(rot_error_norm < rot_threshold): break # Solve IK using jacobian - jacobians = self.robot_asset.root_physx_view.get_jacobians().clone() + jacobians = wp.to_torch(self.robot_asset.root_view.get_jacobians()).clone() jacobian = jacobians[env_ids, self.jacobi_body_idx, :, :] delta_dof_pos = fc._get_delta_dof_pos( @@ -339,15 +340,35 @@ def __call__( # Update joint positions joint_pos = joint_pos + delta_dof_pos + + # Wrap arm joint positions to fall within robot's actual joint limits + joint_pos_limits = self.robot_asset.data.joint_pos_limits.torch[env_ids, : self.num_arm_joints, :] + joint_min = joint_pos_limits[:, :, 0] + joint_max = joint_pos_limits[:, :, 1] + joint_range = joint_max - joint_min + + # Wrap only the arm joint positions (not gripper joints) + arm_joint_pos = joint_pos[:, : self.num_arm_joints] + arm_joint_pos = torch.where( + joint_range > 0, + joint_min + torch.remainder(arm_joint_pos - joint_min, joint_range), + arm_joint_pos, + ) + joint_pos[:, : self.num_arm_joints] = arm_joint_pos + joint_vel = torch.zeros_like(joint_pos) # Write to sim - self.robot_asset.set_joint_position_target(joint_pos, env_ids=env_ids) - self.robot_asset.set_joint_velocity_target(joint_vel, env_ids=env_ids) - self.robot_asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self.robot_asset.set_joint_position_target_index(target=joint_pos, env_ids=env_ids) + self.robot_asset.set_joint_velocity_target_index(target=joint_vel, env_ids=env_ids) + self.robot_asset.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.robot_asset.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) + + # Reset joint velocities to zero after IK convergence + joint_vel = torch.zeros_like(self.robot_asset.data.joint_vel.torch[env_ids]) # Set gripper to grasp position - joint_pos = self.robot_asset.data.joint_pos[env_ids].clone() + joint_pos = self.robot_asset.data.joint_pos.torch[env_ids].clone() # Get gear types for all environments all_gear_types = gear_type_manager.get_all_gear_types() @@ -356,8 +377,9 @@ def __call__( hand_grasp_width = self.hand_grasp_width[gear_key] self.gripper_joint_setter_func(joint_pos, [row_idx], self.finger_joints, hand_grasp_width) - self.robot_asset.set_joint_position_target(joint_pos, joint_ids=self.all_joints, env_ids=env_ids) - self.robot_asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self.robot_asset.set_joint_position_target_index(target=joint_pos, joint_ids=self.all_joints, env_ids=env_ids) + self.robot_asset.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self.robot_asset.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) # Set gripper to closed position for row_idx, env_id in enumerate(env_ids.tolist()): @@ -365,7 +387,7 @@ def __call__( hand_close_width = self.hand_close_width[gear_key] self.gripper_joint_setter_func(joint_pos, [row_idx], self.finger_joints, hand_close_width) - self.robot_asset.set_joint_position_target(joint_pos, joint_ids=self.all_joints, env_ids=env_ids) + self.robot_asset.set_joint_position_target_index(target=joint_pos, joint_ids=self.all_joints, env_ids=env_ids) class randomize_gears_and_base_pose(ManagerTermBase): @@ -444,10 +466,11 @@ def __call__( asset_names_to_process = [self.base_asset_name] + self.gear_asset_names for asset_name in asset_names_to_process: asset: RigidObject | Articulation = env.scene[asset_name] - root_states = asset.data.default_root_state[env_ids].clone() - positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_pose_samples[:, 0:3] - orientations = math_utils.quat_mul(root_states[:, 3:7], orientations_delta) - velocities = root_states[:, 7:13] + rand_vel_samples + default_root_pose = asset.data.default_root_pose.torch[env_ids].clone() + default_root_vel = asset.data.default_root_vel.torch[env_ids].clone() + positions = default_root_pose[:, 0:3] + env.scene.env_origins[env_ids] + rand_pose_samples[:, 0:3] + orientations = math_utils.quat_mul(default_root_pose[:, 3:7], orientations_delta) + velocities = default_root_vel + rand_vel_samples positions_by_asset[asset_name] = positions orientations_by_asset[asset_name] = orientations velocities_by_asset[asset_name] = velocities @@ -477,5 +500,5 @@ def __call__( positions = positions_by_asset[asset_name] orientations = orientations_by_asset[asset_name] velocities = velocities_by_asset[asset_name] - asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids) - asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) + asset.write_root_pose_to_sim_index(root_pose=torch.cat([positions, orientations], dim=-1), env_ids=env_ids) + asset.write_root_velocity_to_sim_index(root_velocity=velocities, env_ids=env_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/noise_models.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/noise_models.py index 2d5411e96977..740099a169ee 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/noise_models.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/noise_models.py @@ -7,7 +7,12 @@ from __future__ import annotations -__all__ = ["ResetSampledConstantNoiseModel", "ResetSampledConstantNoiseModelCfg"] +__all__ = [ + "ResetSampledConstantNoiseModel", + "ResetSampledConstantNoiseModelCfg", + "ResetSampledQuaternionNoiseModel", + "ResetSampledQuaternionNoiseModelCfg", +] from collections.abc import Sequence from dataclasses import MISSING @@ -16,7 +21,8 @@ import torch from isaaclab.utils import configclass -from isaaclab.utils.noise import NoiseModel, NoiseModelCfg +from isaaclab.utils.math import quat_from_euler_xyz, quat_mul +from isaaclab.utils.noise import ConstantNoiseCfg, NoiseModel, NoiseModelCfg if TYPE_CHECKING: from isaaclab.utils.noise import NoiseCfg @@ -107,3 +113,77 @@ class ResetSampledConstantNoiseModelCfg(NoiseModelCfg): Based on this configuration, the noise is sampled at every reset of the noise model. """ + + +class ResetSampledQuaternionNoiseModel(NoiseModel): + """Noise model that applies a rotation perturbation to quaternion observations. + + At each episode reset, small Euler angle perturbations (roll, pitch, yaw) are sampled + uniformly from configurable ranges and converted to a perturbation quaternion. This + perturbation is then applied via quaternion multiplication at every step, producing a + geometrically valid rotated quaternion (unlike additive noise on raw components). + + The perturbation is held constant for the entire episode until the next reset. + """ + + def __init__(self, noise_model_cfg: NoiseModelCfg, num_envs: int, device: str): + super().__init__(noise_model_cfg, num_envs, device) + self._roll_range = noise_model_cfg.roll_range + self._pitch_range = noise_model_cfg.pitch_range + self._yaw_range = noise_model_cfg.yaw_range + # Identity quaternion in (x, y, z, w) format + self._perturbation_quat = torch.zeros((num_envs, 4), device=device) + self._perturbation_quat[:, 3] = 1.0 + + def reset(self, env_ids: Sequence[int] | None = None): + """Sample new rotation perturbations for the specified environments. + + Args: + env_ids: The environment ids to reset. Defaults to None (all environments). + """ + if env_ids is None: + env_ids = slice(None) + + num_resets = env_ids.stop - env_ids.start if isinstance(env_ids, slice) else len(env_ids) + + roll = torch.empty(num_resets, device=self._device).uniform_(*self._roll_range) + pitch = torch.empty(num_resets, device=self._device).uniform_(*self._pitch_range) + yaw = torch.empty(num_resets, device=self._device).uniform_(*self._yaw_range) + + self._perturbation_quat[env_ids] = quat_from_euler_xyz(roll, pitch, yaw) + + def __call__(self, data: torch.Tensor) -> torch.Tensor: + """Apply the pre-sampled rotation perturbation to the quaternion data. + + Args: + data: Quaternion observations in (x, y, z, w) format. Shape is (num_envs, 4). + + Returns: + Perturbed quaternions in (x, y, z, w) format. Shape is (num_envs, 4). + """ + return quat_mul(self._perturbation_quat, data) + + +@configclass +class ResetSampledQuaternionNoiseModelCfg(NoiseModelCfg): + """Configuration for a quaternion noise model that samples rotation perturbations at reset. + + The perturbation is specified as independent uniform ranges for roll, pitch, and yaw + (in radians). At each episode reset, Euler angles are sampled and converted to a + perturbation quaternion that is multiplied with the observed quaternion. + """ + + class_type: type = ResetSampledQuaternionNoiseModel + + noise_cfg: ConstantNoiseCfg = ConstantNoiseCfg(bias=0.0) + """Unused placeholder inherited from NoiseModelCfg. Quaternion perturbation is + controlled by roll_range, pitch_range, and yaw_range instead.""" + + roll_range: tuple[float, float] = (-0.01745, 0.01745) + """Uniform range for roll perturbation in radians. Default is ±1 degree.""" + + pitch_range: tuple[float, float] = (-0.01745, 0.01745) + """Uniform range for pitch perturbation in radians. Default is ±1 degree.""" + + yaw_range: tuple[float, float] = (-0.01745, 0.01745) + """Uniform range for yaw perturbation in radians. Default is ±1 degree.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py index cf9ae56ee2da..ac12d8b22f7f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py @@ -11,11 +11,11 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import ManagerTermBase, ObservationTermCfg, SceneEntityCfg from isaaclab.utils.math import combine_frame_transforms if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv from .events import randomize_gear_type @@ -96,7 +96,7 @@ def __init__(self, cfg: ObservationTermCfg, env: ManagerBasedRLEnv): self.offsets_buffer = torch.zeros(env.num_envs, 3, device=env.device, dtype=torch.float32) self.env_indices = torch.arange(env.num_envs, device=env.device) self.identity_quat = ( - torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=env.device, dtype=torch.float32) + torch.tensor([[0.0, 0.0, 0.0, 1.0]], device=env.device, dtype=torch.float32) .repeat(env.num_envs, 1) .contiguous() ) @@ -117,19 +117,18 @@ def __call__( Gear shaft position tensor of shape (num_envs, 3) """ # Check if gear type manager exists + # During initialization (shape checking), the manager may not exist yet if not hasattr(env, "_gear_type_manager"): - raise RuntimeError( - "Gear type manager not initialized. Ensure randomize_gear_type event is configured " - "in your environment's event configuration before this observation term is used." - ) + # Return default shape during initialization + return torch.zeros(env.num_envs, 3, device=env.device) gear_type_manager: randomize_gear_type = env._gear_type_manager # Get gear type indices directly as tensor (no Python loops!) gear_type_indices = gear_type_manager.get_all_gear_type_indices() # Get base gear position and orientation - base_pos = self.asset.data.root_pos_w - base_quat = self.asset.data.root_quat_w + base_pos = self.asset.data.root_pos_w.torch + base_quat = self.asset.data.root_quat_w.torch # Update offsets using vectorized indexing self.offsets_buffer = self.gear_offsets_stacked[gear_type_indices] @@ -182,13 +181,12 @@ def __call__( Gear shaft orientation tensor of shape (num_envs, 4) """ # Get base quaternion - base_quat = self.asset.data.root_quat_w + base_quat = self.asset.data.root_quat_w.torch # Ensure w component is positive (q and -q represent the same rotation) # Pick one canonical form to reduce observation variation seen by the policy - w_negative = base_quat[:, 0] < 0 - positive_quat = base_quat.clone() - positive_quat[w_negative] = -base_quat[w_negative] + w_negative = base_quat[:, 3] < 0 + positive_quat = torch.where(w_negative.unsqueeze(-1), -base_quat, base_quat) return positive_quat @@ -238,11 +236,10 @@ def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: Gear position tensor of shape (num_envs, 3) """ # Check if gear type manager exists + # During initialization (shape checking), the manager may not exist yet if not hasattr(env, "_gear_type_manager"): - raise RuntimeError( - "Gear type manager not initialized. Ensure randomize_gear_type event is configured " - "in your environment's event configuration before this observation term is used." - ) + # Return default shape during initialization + return torch.zeros(env.num_envs, 3, device=env.device) gear_type_manager: randomize_gear_type = env._gear_type_manager # Get gear type indices directly as tensor (no Python loops!) @@ -251,9 +248,9 @@ def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: # Stack all gear positions all_gear_positions = torch.stack( [ - self.gear_assets["gear_small"].data.root_pos_w, - self.gear_assets["gear_medium"].data.root_pos_w, - self.gear_assets["gear_large"].data.root_pos_w, + self.gear_assets["gear_small"].data.root_pos_w.torch, + self.gear_assets["gear_medium"].data.root_pos_w.torch, + self.gear_assets["gear_large"].data.root_pos_w.torch, ], dim=1, ) @@ -310,11 +307,12 @@ def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: Gear orientation tensor of shape (num_envs, 4) """ # Check if gear type manager exists + # During initialization (shape checking), the manager may not exist yet if not hasattr(env, "_gear_type_manager"): - raise RuntimeError( - "Gear type manager not initialized. Ensure randomize_gear_type event is configured " - "in your environment's event configuration before this observation term is used." - ) + # Return default shape during initialization (identity quaternion) + default_quat = torch.zeros(env.num_envs, 4, device=env.device) + default_quat[:, 3] = 1.0 + return default_quat gear_type_manager: randomize_gear_type = env._gear_type_manager # Get gear type indices directly as tensor (no Python loops!) @@ -323,9 +321,9 @@ def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: # Stack all gear quaternions all_gear_quat = torch.stack( [ - self.gear_assets["gear_small"].data.root_quat_w, - self.gear_assets["gear_medium"].data.root_quat_w, - self.gear_assets["gear_large"].data.root_quat_w, + self.gear_assets["gear_small"].data.root_quat_w.torch, + self.gear_assets["gear_medium"].data.root_quat_w.torch, + self.gear_assets["gear_large"].data.root_quat_w.torch, ], dim=1, ) @@ -335,7 +333,7 @@ def __call__(self, env: ManagerBasedRLEnv) -> torch.Tensor: # Ensure w component is positive (q and -q represent the same rotation) # Pick one canonical form to reduce observation variation seen by the policy - w_negative = gear_quat[:, 0] < 0 + w_negative = gear_quat[:, 3] < 0 gear_positive_quat = gear_quat.clone() gear_positive_quat[w_negative] = -gear_quat[w_negative] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/rewards.py index 482cab8f69b8..c776168e5b48 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/rewards.py @@ -12,11 +12,12 @@ import torch from isaaclab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg -from isaaclab.sensors.frame_transformer.frame_transformer import FrameTransformer -from isaaclab.utils.math import combine_frame_transforms +from isaaclab.utils.math import combine_frame_transforms, quat_apply, quat_mul if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors.frame_transformer.frame_transformer import FrameTransformer from .events import randomize_gear_type @@ -72,8 +73,8 @@ def __call__( des_quat_w = command[:, 3:7] # Get current pose from frame transformer - curr_pos_w = asset.data.target_pos_source[:, 0] - curr_quat_w = asset.data.target_quat_source[:, 0] + curr_pos_w = asset.data.target_pos_source.torch[:, 0] + curr_quat_w = asset.data.target_quat_source.torch[:, 0] # Compute keypoint distance keypoint_dist_sep = self.keypoint_computer.compute( @@ -143,8 +144,8 @@ def __call__( des_quat_w = command[:, 3:7] # Get current pose from frame transformer - curr_pos_w = asset.data.target_pos_source[:, 0] - curr_quat_w = asset.data.target_quat_source[:, 0] + curr_pos_w = asset.data.target_pos_source.torch[:, 0] + curr_quat_w = asset.data.target_quat_source.torch[:, 0] # Compute keypoint distance keypoint_dist_sep = self.keypoint_computer.compute( @@ -192,44 +193,29 @@ def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): self.asset_cfg_1: SceneEntityCfg = cfg.params.get("asset_cfg_1", SceneEntityCfg("factory_gear_base")) self.asset_1 = env.scene[self.asset_cfg_1.name] - # Pre-allocate gear type mapping and indices + self._init_gear_selection(env) + + # Create keypoint distance computer + self.keypoint_computer = _compute_keypoint_distance(cfg, env) + + def _init_gear_selection(self, env: ManagerBasedRLEnv) -> None: + """Pre-allocate gear type mapping, index tensors, and cache gear scene assets.""" self.gear_type_map = {"gear_small": 0, "gear_medium": 1, "gear_large": 2} self.gear_type_indices = torch.zeros(env.num_envs, device=env.device, dtype=torch.long) self.env_indices = torch.arange(env.num_envs, device=env.device) - # Cache gear assets self.gear_assets = { "gear_small": env.scene["factory_gear_small"], "gear_medium": env.scene["factory_gear_medium"], "gear_large": env.scene["factory_gear_large"], } - # Create keypoint distance computer - self.keypoint_computer = _compute_keypoint_distance(cfg, env) - - def __call__( - self, - env: ManagerBasedRLEnv, - asset_cfg_1: SceneEntityCfg, - keypoint_scale: float = 1.0, - add_cube_center_kp: bool = True, - ) -> torch.Tensor: - """Compute keypoint distance error. - - Args: - env: Environment instance - asset_cfg_1: Configuration of the first asset (RigidObject) - keypoint_scale: Scale factor for keypoint offsets - add_cube_center_kp: Whether to include center keypoint + def _get_selected_gear_poses(self, env: ManagerBasedRLEnv) -> tuple[torch.Tensor, torch.Tensor]: + """Retrieve world-frame position and quaternion of the active gear per environment. Returns: - Mean keypoint distance tensor of shape (num_envs,) + Tuple of (gear_pos, gear_quat) with shapes (num_envs, 3) and (num_envs, 4). """ - # Get current pose of asset_1 (RigidObject) - curr_pos_1 = self.asset_1.data.body_pos_w[:, 0] - curr_quat_1 = self.asset_1.data.body_quat_w[:, 0] - - # Check if gear type manager exists if not hasattr(env, "_gear_type_manager"): raise RuntimeError( "Gear type manager not initialized. Ensure randomize_gear_type event is configured " @@ -237,31 +223,55 @@ def __call__( ) gear_type_manager: randomize_gear_type = env._gear_type_manager - # Get gear type indices directly as tensor self.gear_type_indices = gear_type_manager.get_all_gear_type_indices() - # Stack all gear positions and quaternions all_gear_pos = torch.stack( [ - self.gear_assets["gear_small"].data.body_pos_w[:, 0], - self.gear_assets["gear_medium"].data.body_pos_w[:, 0], - self.gear_assets["gear_large"].data.body_pos_w[:, 0], + self.gear_assets["gear_small"].data.body_pos_w.torch[:, 0], + self.gear_assets["gear_medium"].data.body_pos_w.torch[:, 0], + self.gear_assets["gear_large"].data.body_pos_w.torch[:, 0], ], dim=1, ) all_gear_quat = torch.stack( [ - self.gear_assets["gear_small"].data.body_quat_w[:, 0], - self.gear_assets["gear_medium"].data.body_quat_w[:, 0], - self.gear_assets["gear_large"].data.body_quat_w[:, 0], + self.gear_assets["gear_small"].data.body_quat_w.torch[:, 0], + self.gear_assets["gear_medium"].data.body_quat_w.torch[:, 0], + self.gear_assets["gear_large"].data.body_quat_w.torch[:, 0], ], dim=1, ) - # Select positions and quaternions using advanced indexing - curr_pos_2 = all_gear_pos[self.env_indices, self.gear_type_indices] - curr_quat_2 = all_gear_quat[self.env_indices, self.gear_type_indices] + gear_pos = all_gear_pos[self.env_indices, self.gear_type_indices] + gear_quat = all_gear_quat[self.env_indices, self.gear_type_indices] + + return gear_pos, gear_quat + + def __call__( + self, + env: ManagerBasedRLEnv, + asset_cfg_1: SceneEntityCfg, + keypoint_scale: float = 1.0, + add_cube_center_kp: bool = True, + ) -> torch.Tensor: + """Compute keypoint distance error. + + Args: + env: Environment instance + asset_cfg_1: Configuration of the first asset (RigidObject) + keypoint_scale: Scale factor for keypoint offsets + add_cube_center_kp: Whether to include center keypoint + + Returns: + Mean keypoint distance tensor of shape (num_envs,) + """ + # Get current pose of asset_1 (RigidObject) + curr_pos_1 = self.asset_1.data.body_pos_w.torch[:, 0] + curr_quat_1 = self.asset_1.data.body_quat_w.torch[:, 0] + + # Get selected gear pose + curr_pos_2, curr_quat_2 = self._get_selected_gear_poses(env) # Compute keypoint distance keypoint_dist_sep = self.keypoint_computer.compute( @@ -275,40 +285,13 @@ def __call__( return keypoint_dist_sep.mean(-1) -class keypoint_entity_error_exp(ManagerTermBase): +class keypoint_entity_error_exp(keypoint_entity_error): """Compute exponential keypoint reward between a RigidObject and the dynamically selected gear. - This class-based term pre-caches gear type mapping and asset references. + Inherits gear selection and initialization from :class:`keypoint_entity_error` + and applies an exponential reward transformation to the keypoint distances. """ - def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): - """Initialize the keypoint entity error exponential term. - - Args: - cfg: Reward term configuration - env: Environment instance - """ - super().__init__(cfg, env) - - # Cache asset configuration - self.asset_cfg_1: SceneEntityCfg = cfg.params.get("asset_cfg_1", SceneEntityCfg("factory_gear_base")) - self.asset_1 = env.scene[self.asset_cfg_1.name] - - # Pre-allocate gear type mapping and indices - self.gear_type_map = {"gear_small": 0, "gear_medium": 1, "gear_large": 2} - self.gear_type_indices = torch.zeros(env.num_envs, device=env.device, dtype=torch.long) - self.env_indices = torch.arange(env.num_envs, device=env.device) - - # Cache gear assets - self.gear_assets = { - "gear_small": env.scene["factory_gear_small"], - "gear_medium": env.scene["factory_gear_medium"], - "gear_large": env.scene["factory_gear_large"], - } - - # Create keypoint distance computer - self.keypoint_computer = _compute_keypoint_distance(cfg, env) - def __call__( self, env: ManagerBasedRLEnv, @@ -332,42 +315,11 @@ def __call__( Exponential keypoint reward tensor of shape (num_envs,) """ # Get current pose of asset_1 (RigidObject) - curr_pos_1 = self.asset_1.data.body_pos_w[:, 0] - curr_quat_1 = self.asset_1.data.body_quat_w[:, 0] - - # Check if gear type manager exists - if not hasattr(env, "_gear_type_manager"): - raise RuntimeError( - "Gear type manager not initialized. Ensure randomize_gear_type event is configured " - "in your environment's event configuration before this reward term is used." - ) - - gear_type_manager: randomize_gear_type = env._gear_type_manager - # Get gear type indices directly as tensor - self.gear_type_indices = gear_type_manager.get_all_gear_type_indices() - - # Stack all gear positions and quaternions - all_gear_pos = torch.stack( - [ - self.gear_assets["gear_small"].data.body_pos_w[:, 0], - self.gear_assets["gear_medium"].data.body_pos_w[:, 0], - self.gear_assets["gear_large"].data.body_pos_w[:, 0], - ], - dim=1, - ) - - all_gear_quat = torch.stack( - [ - self.gear_assets["gear_small"].data.body_quat_w[:, 0], - self.gear_assets["gear_medium"].data.body_quat_w[:, 0], - self.gear_assets["gear_large"].data.body_quat_w[:, 0], - ], - dim=1, - ) + curr_pos_1 = self.asset_1.data.body_pos_w.torch[:, 0] + curr_quat_1 = self.asset_1.data.body_quat_w.torch[:, 0] - # Select positions and quaternions using advanced indexing - curr_pos_2 = all_gear_pos[self.env_indices, self.gear_type_indices] - curr_quat_2 = all_gear_quat[self.env_indices, self.gear_type_indices] + # Get selected gear pose + curr_pos_2, curr_quat_2 = self._get_selected_gear_poses(env) # Compute keypoint distance keypoint_dist_sep = self.keypoint_computer.compute( @@ -396,6 +348,226 @@ def __call__( return keypoint_reward_exp +class keypoint_ee_grasp_error(keypoint_entity_error): + """Compute keypoint distance between the robot end effector and the gear's grasp-corrected pose. + + Transforms the gear's actual world pose into the expected EE position/orientation + using grasp offsets, so that the distance is ~0 when properly holding the gear + and increases when the gripper drifts away. + + The penalty is gated by ``ee_grasp_threshold``: It only activates when the mean + keypoint error exceeds the threshold, i.e., when the EE has drifted away from the + expected grasp pose. With threshold=0.0, the penalty is effectively always active. + + Supports linear weight ramp-up: The returned reward is scaled by a factor that + linearly increases from ``weight_ramp_start`` to 1.0 over ``weight_ramp_steps`` + env steps, allowing the reward to grow in importance as training progresses. + """ + + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + ManagerTermBase.__init__(self, cfg, env) + + self._init_gear_selection(env) + self.keypoint_computer = _compute_keypoint_distance(cfg, env) + + self.robot_asset_cfg: SceneEntityCfg = cfg.params.get("robot_asset_cfg", SceneEntityCfg("robot")) + self.robot_asset: Articulation = env.scene[self.robot_asset_cfg.name] + + self.end_effector_body_name: str = cfg.params["end_effector_body_name"] + grasp_rot_offset = cfg.params["grasp_rot_offset"] + self.grasp_rot_offset_tensor = ( + torch.tensor(grasp_rot_offset, device=env.device, dtype=torch.float32).unsqueeze(0).repeat(env.num_envs, 1) + ) + + gear_offsets_grasp = cfg.params["gear_offsets_grasp"] + self.gear_grasp_offsets_stacked = torch.stack( + [ + torch.tensor(gear_offsets_grasp["gear_small"], device=env.device, dtype=torch.float32), + torch.tensor(gear_offsets_grasp["gear_medium"], device=env.device, dtype=torch.float32), + torch.tensor(gear_offsets_grasp["gear_large"], device=env.device, dtype=torch.float32), + ], + dim=0, + ) + + self.weight_ramp_start: float = cfg.params.get("weight_ramp_start", 0.0) + self.weight_ramp_steps: int = cfg.params.get("weight_ramp_steps", 1) + self.ee_grasp_threshold: float = cfg.params.get("ee_grasp_threshold", 0.0) + + eef_indices, _ = self.robot_asset.find_bodies([self.end_effector_body_name]) + self.eef_idx = eef_indices[0] if len(eef_indices) > 0 else None + self._step_count = 0 + + def _get_weight_scale(self, env: ManagerBasedRLEnv) -> float: + progress = min(env.common_step_counter / max(self.weight_ramp_steps, 1), 1.0) + return self.weight_ramp_start + (1.0 - self.weight_ramp_start) * progress + + def _get_grasp_corrected_target( + self, env: ManagerBasedRLEnv + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute EE pose and grasp-corrected target pose. + + Returns: + Tuple of (eef_pos, eef_quat, gear_grasp_pos, gear_quat_grasp). + """ + eef_pos = self.robot_asset.data.body_link_pos_w.torch[:, self.eef_idx] + eef_quat = self.robot_asset.data.body_link_quat_w.torch[:, self.eef_idx] + + gear_pos, gear_quat = self._get_selected_gear_poses(env) + + gear_quat_grasp = quat_mul(gear_quat, self.grasp_rot_offset_tensor) + grasp_offsets = self.gear_grasp_offsets_stacked[self.gear_type_indices] + gear_grasp_pos = gear_pos + quat_apply(gear_quat_grasp, grasp_offsets) + + return eef_pos, eef_quat, gear_grasp_pos, gear_quat_grasp + + def __call__( + self, + env: ManagerBasedRLEnv, + robot_asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + end_effector_body_name: str = "", + grasp_rot_offset: list | None = None, + gear_offsets_grasp: dict | None = None, + keypoint_scale: float = 1.0, + add_cube_center_kp: bool = True, + weight_ramp_start: float = 0.0, + weight_ramp_steps: int = 1, + ee_grasp_threshold: float = 0.0, + ) -> torch.Tensor: + if self.eef_idx is None: + return torch.zeros(env.num_envs, device=env.device) + + eef_pos, eef_quat, gear_grasp_pos, gear_quat_grasp = self._get_grasp_corrected_target(env) + + keypoint_dist_sep = self.keypoint_computer.compute( + current_pos=eef_pos, + current_quat=eef_quat, + target_pos=gear_grasp_pos, + target_quat=gear_quat_grasp, + keypoint_scale=keypoint_scale, + ) + + mean_kp_error = keypoint_dist_sep.mean(-1) + + is_active = (mean_kp_error > self.ee_grasp_threshold).float() + + weight_scale = self._get_weight_scale(env) + scaled_reward = mean_kp_error * weight_scale * is_active + + mean_error_scalar = mean_kp_error.mean().item() + pct_active = is_active.mean().item() + + if not hasattr(env, "extras"): + env.extras = {} + if "log" not in env.extras: + env.extras["log"] = {} + env.extras["log"]["ee_grasp_kp_error/mean_keypoint_dist"] = mean_error_scalar + env.extras["log"]["ee_grasp_kp_error/pct_envs_active"] = pct_active + env.extras["log"]["ee_grasp_kp_error/weight_scale"] = weight_scale + + self._step_count += 1 + import carb + + carb.log_info( + f"[ee_grasp_kp_error] step={self._step_count}" + f" | mean_kp_error={mean_error_scalar:.5f}" + f" | pct_active={pct_active:.3f}" + f" | weight_scale={weight_scale:.4f}" + ) + + return scaled_reward + + +class keypoint_ee_grasp_error_exp(keypoint_ee_grasp_error): + """Compute exponential keypoint reward between the robot end effector and the gear's grasp-corrected pose. + + Transforms the gear's actual world pose into the expected EE position/orientation + using grasp offsets, so that the reward is high (~1) when properly holding the gear + and drops sharply when the gripper drifts away. + + The reward is gated by ``ee_grasp_threshold``: It only activates when the mean + keypoint error exceeds the threshold, i.e. when the EE has drifted away from the + expected grasp pose. With threshold=0.0 the reward is effectively always active. + + Supports linear weight ramp-up: The returned reward is scaled by a factor that + linearly increases from ``weight_ramp_start`` to 1.0 over ``weight_ramp_steps`` + env steps, allowing the reward to grow in importance as training progresses. + """ + + def __call__( + self, + env: ManagerBasedRLEnv, + robot_asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + end_effector_body_name: str = "", + grasp_rot_offset: list | None = None, + gear_offsets_grasp: dict | None = None, + kp_exp_coeffs: list[tuple[float, float]] = [(1.0, 0.1)], + kp_use_sum_of_exps: bool = True, + keypoint_scale: float = 1.0, + add_cube_center_kp: bool = True, + weight_ramp_start: float = 0.0, + weight_ramp_steps: int = 1, + ee_grasp_threshold: float = 0.0, + ) -> torch.Tensor: + if self.eef_idx is None: + return torch.zeros(env.num_envs, device=env.device) + + eef_pos, eef_quat, gear_grasp_pos, gear_quat_grasp = self._get_grasp_corrected_target(env) + + keypoint_dist_sep = self.keypoint_computer.compute( + current_pos=eef_pos, + current_quat=eef_quat, + target_pos=gear_grasp_pos, + target_quat=gear_quat_grasp, + keypoint_scale=keypoint_scale, + ) + + mean_kp_error = keypoint_dist_sep.mean(-1) + + is_active = (mean_kp_error > self.ee_grasp_threshold).float() + + keypoint_reward_exp = torch.zeros_like(keypoint_dist_sep[:, 0]) + if kp_use_sum_of_exps: + for coeff in kp_exp_coeffs: + a, b = coeff + keypoint_reward_exp += ( + 1.0 / (torch.exp(a * keypoint_dist_sep) + b + torch.exp(-a * keypoint_dist_sep)) + ).mean(-1) + else: + kp_dist_mean = keypoint_dist_sep.mean(-1) + for coeff in kp_exp_coeffs: + a, b = coeff + keypoint_reward_exp += 1.0 / (torch.exp(a * kp_dist_mean) + b + torch.exp(-a * kp_dist_mean)) + + weight_scale = self._get_weight_scale(env) + scaled_reward = keypoint_reward_exp * weight_scale * is_active + + mean_error_scalar = mean_kp_error.mean().item() + mean_reward_scalar = keypoint_reward_exp.mean().item() + pct_active = is_active.mean().item() + + if not hasattr(env, "extras"): + env.extras = {} + if "log" not in env.extras: + env.extras["log"] = {} + env.extras["log"]["ee_grasp_kp_error_exp/mean_keypoint_dist"] = mean_error_scalar + env.extras["log"]["ee_grasp_kp_error_exp/mean_exp_reward"] = mean_reward_scalar + env.extras["log"]["ee_grasp_kp_error_exp/pct_envs_active"] = pct_active + env.extras["log"]["ee_grasp_kp_error_exp/weight_scale"] = weight_scale + + self._step_count += 1 + import carb + + carb.log_info( + f"[ee_grasp_kp_error_exp] step={self._step_count}" + f" | mean_kp_error={mean_error_scalar:.5f}" + f" | pct_active={pct_active:.3f}" + f" | weight_scale={weight_scale:.4f}" + f" | mean_exp_reward={mean_reward_scalar:.5f}" + ) + + return scaled_reward + + ## # Helper functions and classes ## @@ -447,7 +619,7 @@ def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): # Pre-allocate identity quaternion for keypoint transforms self.identity_quat_keypoints = ( - torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=env.device, dtype=torch.float32) + torch.tensor([[0.0, 0.0, 0.0, 1.0]], device=env.device, dtype=torch.float32) .repeat(env.num_envs * self.num_keypoints, 1) .contiguous() ) @@ -508,6 +680,6 @@ def compute( keypoints_target = keypoints_target_flat.reshape(num_envs, self.num_keypoints, 3) # Calculate L2 norm distance - keypoint_dist_sep = torch.norm(keypoints_target - keypoints_current, p=2, dim=-1) + keypoint_dist_sep = torch.linalg.norm(keypoints_target - keypoints_current, ord=2, dim=-1) return keypoint_dist_sep diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/terminations.py index b623148c5b3b..9f217c316372 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/terminations.py @@ -7,17 +7,18 @@ from __future__ import annotations +import logging from typing import TYPE_CHECKING import torch -import carb - import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation from isaaclab.managers import ManagerTermBase, SceneEntityCfg, TerminationTermCfg +logger = logging.getLogger(__name__) + if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedEnv from .events import randomize_gear_type @@ -51,7 +52,7 @@ def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedEnv): if "grasp_rot_offset" not in cfg.params: raise ValueError( "'grasp_rot_offset' parameter is required in reset_when_gear_dropped configuration. " - "It should be a quaternion [w, x, y, z]. Example: [0.0, 0.707, 0.707, 0.0]" + "It should be a quaternion [x, y, z, w]. Example: [0.707, 0.707, 0.0, 0.0]" ) self.end_effector_body_name = cfg.params["end_effector_body_name"] @@ -116,7 +117,7 @@ def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedEnv): # Find end effector index once eef_indices, _ = self.robot_asset.find_bodies([self.end_effector_body_name]) if len(eef_indices) == 0: - carb.log_warn( + logger.warning( f"{self.end_effector_body_name} not found in robot body names. Cannot check gear drop condition." ) self.eef_idx = None @@ -160,16 +161,16 @@ def __call__( self.gear_type_indices = gear_type_manager.get_all_gear_type_indices() # Get end effector position - eef_pos_world = self.robot_asset.data.body_link_pos_w[:, self.eef_idx] + eef_pos_world = self.robot_asset.data.body_link_pos_w.torch[:, self.eef_idx] # Update gear positions and quaternions in buffers - self.all_gear_pos_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_pos_w - self.all_gear_pos_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_pos_w - self.all_gear_pos_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_pos_w + self.all_gear_pos_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_pos_w.torch + self.all_gear_pos_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_pos_w.torch + self.all_gear_pos_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_pos_w.torch - self.all_gear_quat_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_quat_w - self.all_gear_quat_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_quat_w - self.all_gear_quat_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_quat_w + self.all_gear_quat_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_quat_w.torch + self.all_gear_quat_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_quat_w.torch + self.all_gear_quat_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_quat_w.torch # Select gear data using advanced indexing gear_pos_world = self.all_gear_pos_buffer[self.env_indices, self.gear_type_indices] @@ -185,7 +186,7 @@ def __call__( gear_grasp_pos_world = gear_pos_world + math_utils.quat_apply(gear_quat_world, self.gear_grasp_offsets_buffer) # Compute distances - distances = torch.norm(gear_grasp_pos_world - eef_pos_world, dim=-1) + distances = torch.linalg.norm(gear_grasp_pos_world - eef_pos_world, dim=-1) # Check distance threshold self.reset_flags[:] = distances > distance_threshold @@ -221,7 +222,7 @@ def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedEnv): if "grasp_rot_offset" not in cfg.params: raise ValueError( "'grasp_rot_offset' parameter is required in reset_when_gear_orientation_exceeds_threshold" - " configuration. It should be a quaternion [w, x, y, z]. Example: [0.0, 0.707, 0.707, 0.0]" + " configuration. It should be a quaternion [x, y, z, w]. Example: [0.707, 0.707, 0.0, 0.0]" ) self.end_effector_body_name = cfg.params["end_effector_body_name"] @@ -249,7 +250,7 @@ def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedEnv): # Find end effector index once eef_indices, _ = self.robot_asset.find_bodies([self.end_effector_body_name]) if len(eef_indices) == 0: - carb.log_warn( + logger.warning( f"{self.end_effector_body_name} not found in robot body names. Cannot check gear orientation condition." ) self.eef_idx = None @@ -301,12 +302,12 @@ def __call__( yaw_threshold_rad = torch.deg2rad(torch.tensor(yaw_threshold_deg, device=env.device)) # Get end effector orientation - eef_quat_world = self.robot_asset.data.body_link_quat_w[:, self.eef_idx] + eef_quat_world = self.robot_asset.data.body_link_quat_w.torch[:, self.eef_idx] # Update gear quaternions in buffer - self.all_gear_quat_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_quat_w - self.all_gear_quat_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_quat_w - self.all_gear_quat_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_quat_w + self.all_gear_quat_buffer[:, 0, :] = self.gear_assets["gear_small"].data.root_link_quat_w.torch + self.all_gear_quat_buffer[:, 1, :] = self.gear_assets["gear_medium"].data.root_link_quat_w.torch + self.all_gear_quat_buffer[:, 2, :] = self.gear_assets["gear_large"].data.root_link_quat_w.torch # Select gear data using advanced indexing gear_quat_world = self.all_gear_quat_buffer[self.env_indices, self.gear_type_indices] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/__init__.py new file mode 100644 index 000000000000..4409ddee18b6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +from . import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Deploy-Reach-Rizon4s-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:Rizon4sReachEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sReachPPORunnerCfg", + }, +) + +gym.register( + id="Isaac-Deploy-Reach-Rizon4s-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:Rizon4sReachEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sReachPPORunnerCfg", + }, +) + +gym.register( + id="Isaac-Deploy-Reach-Rizon4s-ROS-Inference-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.ros_inference_env_cfg:Rizon4sReachROSInferenceEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:Rizon4sReachPPORunnerCfg", + }, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/__init__.py new file mode 100644 index 000000000000..7acda4b49bdd --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/rsl_rl_ppo_cfg.py new file mode 100644 index 000000000000..cb37cb700181 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/agents/rsl_rl_ppo_cfg.py @@ -0,0 +1,38 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg + + +@configclass +class Rizon4sReachPPORunnerCfg(RslRlOnPolicyRunnerCfg): + num_steps_per_env = 512 + max_iterations = 1500 + save_interval = 50 + experiment_name = "reach_rizon4s" + empirical_normalization = True + obs_groups = {"policy": ["policy"], "critic": ["policy"]} + policy = RslRlPpoActorCriticCfg( + init_noise_std=1.0, + actor_hidden_dims=[256, 128, 64], + critic_hidden_dims=[256, 128, 64], + activation="elu", + ) + algorithm = RslRlPpoAlgorithmCfg( + value_loss_coef=1.0, + use_clipped_value_loss=True, + clip_param=0.2, + entropy_coef=0.0, + num_learning_epochs=8, + num_mini_batches=8, + learning_rate=5.0e-4, + schedule="adaptive", + gamma=0.99, + lam=0.95, + desired_kl=0.008, + max_grad_norm=1.0, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/joint_pos_env_cfg.py new file mode 100644 index 000000000000..d453af36e4eb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/joint_pos_env_cfg.py @@ -0,0 +1,107 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +from isaaclab.managers import SceneEntityCfg +from isaaclab.markers.config import FRAME_MARKER_CFG +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import FrameTransformerCfg, OffsetCfg +from isaaclab.utils import configclass + +import isaaclab_tasks.manager_based.manipulation.deploy.mdp as mdp +from isaaclab_tasks.manager_based.manipulation.deploy.reach.reach_env_cfg import ReachEnvCfg + +## +# Pre-defined configs +## +from isaaclab_assets import FLEXIV_RIZON4S_CFG # isort: skip + + +## +# Environment configuration +## + + +@configclass +class Rizon4sReachEnvCfg(ReachEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.events.robot_joint_stiffness_and_damping.params["asset_cfg"].joint_names = [ + "joint[1-2]", + "joint[3-4]", + "joint[5-7]", + ] + self.events.joint_friction.params["asset_cfg"].joint_names = ["joint[1-2]", "joint[3-4]", "joint[5-7]"] + + # switch robot to Flexiv Rizon 4s + self.scene.robot = FLEXIV_RIZON4S_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Configure the end-effector frame relative to base frame for Rizon 4s + self.rewards.end_effector_keypoint_tracking.params["asset_cfg"] = SceneEntityCfg("ee_frame_wrt_base_frame") + self.rewards.end_effector_keypoint_tracking_exp.params["asset_cfg"] = SceneEntityCfg("ee_frame_wrt_base_frame") + self.scene.ee_frame_wrt_base_frame = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base_link", + visualizer_cfg=FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer"), + source_frame_offset=OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/flange", + name="end_effector", + ), + ], + ) + + self.commands.ee_pose.debug_vis = True + + # Incremental joint position action configuration + self.actions.arm_action = mdp.RelativeJointPositionActionCfg( + asset_name="robot", joint_names=["joint[1-7]"], scale=0.0625, use_zero_offset=True + ) + # override command generator body + # end-effector is along z-direction for Rizon 4s + self.target_pos_centre = (0.4, 0.0, 0.4) + self.target_pos_range = (0.4, 0.4, 0.35) + self.commands.ee_pose.body_name = "flange" + self.commands.ee_pose.ranges.pos_x = ( + self.target_pos_centre[0] - self.target_pos_range[0], + self.target_pos_centre[0] + self.target_pos_range[0], + ) + self.commands.ee_pose.ranges.pos_y = ( + self.target_pos_centre[1] - self.target_pos_range[1], + self.target_pos_centre[1] + self.target_pos_range[1], + ) + self.commands.ee_pose.ranges.pos_z = ( + self.target_pos_centre[2] - self.target_pos_range[2], + self.target_pos_centre[2] + self.target_pos_range[2], + ) + + self.target_rot_centre = (math.pi, 0.0, 0.0) # end-effector facing down + self.target_rot_range = (math.pi / 2, math.pi / 2, math.pi) + self.commands.ee_pose.ranges.roll = ( + self.target_rot_centre[0] - self.target_rot_range[0], + self.target_rot_centre[0] + self.target_rot_range[0], + ) + self.commands.ee_pose.ranges.pitch = ( + self.target_rot_centre[1] - self.target_rot_range[1], + self.target_rot_centre[1] + self.target_rot_range[1], + ) + self.commands.ee_pose.ranges.yaw = ( + self.target_rot_centre[2] - self.target_rot_range[2], + self.target_rot_centre[2] + self.target_rot_range[2], + ) + + +@configclass +class Rizon4sReachEnvCfg_PLAY(Rizon4sReachEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # disable randomization for play + self.observations.policy.enable_corruption = False diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/ros_inference_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/ros_inference_env_cfg.py new file mode 100644 index 000000000000..91de220071bc --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/rizon_4s/ros_inference_env_cfg.py @@ -0,0 +1,111 @@ +# Copyright (c) 2026-2027, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +from isaaclab.utils import configclass + +from .joint_pos_env_cfg import Rizon4sReachEnvCfg + + +@configclass +class Rizon4sReachROSInferenceEnvCfg(Rizon4sReachEnvCfg): + """ROS / Isaac Manipulator inference fields plus deployment alignment for NVIDIA Hubble Lab. + + The Hubble-specific block in this config matches how the Flexiv Rizon 4s is mounted there. + """ + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # --- NVIDIA Hubble Lab: Flexiv Rizon 4s mount and workspace --- + # Remove vertical mount stand since Hubble deployment does not use the sim stand asset + self.scene.table = None + + # Lab home joint pose (radians); aligns sim defaults / reset with the physical stand + self.scene.robot.init_state.joint_pos = { + "joint1": math.radians(-90.0), + "joint2": math.radians(90.0), + "joint3": 0.0, + "joint4": math.radians(90.0), + "joint5": 0.0, + "joint6": 0.0, + "joint7": 0.0, + } + + # Orientation of robot is based on the Flexiv Rizon 4s mount in the Hubble Lab + self.scene.robot.init_state.pos = (0.0, 0.0, 0.0) + self.scene.robot.init_state.rot = (0.5, 0.5, 0.5, 0.5) + + # end-effector is along z-direction for Rizon 4s + # target_pos_centre and target_rot_centre are approximately the end effector pose when + # the robot is in the self.scene.robot.init_state.joint_pos pose + self.target_pos_centre = (0.0, 0.3, 0.9) + self.target_pos_range = (0.4, 0.4, 0.35) + self.commands.ee_pose.body_name = "flange" + self.commands.ee_pose.ranges.pos_x = ( + self.target_pos_centre[0] - self.target_pos_range[0], + self.target_pos_centre[0] + self.target_pos_range[0], + ) + self.commands.ee_pose.ranges.pos_y = ( + self.target_pos_centre[1] - self.target_pos_range[1], + self.target_pos_centre[1] + self.target_pos_range[1], + ) + self.commands.ee_pose.ranges.pos_z = ( + self.target_pos_centre[2] - self.target_pos_range[2], + self.target_pos_centre[2] + self.target_pos_range[2], + ) + + self.target_rot_centre = (math.pi / 2, math.pi / 2, 0.0) # end-effector facing down + self.target_rot_range = (math.pi / 2, math.pi / 2, math.pi) + self.commands.ee_pose.ranges.roll = ( + self.target_rot_centre[0] - self.target_rot_range[0], + self.target_rot_centre[0] + self.target_rot_range[0], + ) + self.commands.ee_pose.ranges.pitch = ( + self.target_rot_centre[1] - self.target_rot_range[1], + self.target_rot_centre[1] + self.target_rot_range[1], + ) + self.commands.ee_pose.ranges.yaw = ( + self.target_rot_centre[2] - self.target_rot_range[2], + self.target_rot_centre[2] + self.target_rot_range[2], + ) + + # Variables used by Isaac Manipulator for on robot inference + # TODO: @ashwinvk: Remove these from env cfg once the generic inference node has been implemented + self.obs_order = ["arm_dof_pos", "arm_dof_vel", "target_pos", "target_quat"] + self.policy_action_space = "joint" + self.arm_joint_names = [ + "joint1", + "joint2", + "joint3", + "joint4", + "joint5", + "joint6", + "joint7", + ] + self.policy_action_space = "joint" + self.action_space = 7 + self.state_space = 21 + self.observation_space = 21 + + # Set joint_action_scale from the existing arm_action.scale + self.joint_action_scale = self.actions.arm_action.scale + + self.action_scale_joint_space = [ + self.joint_action_scale, + self.joint_action_scale, + self.joint_action_scale, + self.joint_action_scale, + self.joint_action_scale, + self.joint_action_scale, + self.joint_action_scale, + ] + + # Extract initial joint positions from robot configuration + self.initial_joint_pos = [ + self.scene.robot.init_state.joint_pos[joint_name] for joint_name in self.arm_joint_names + ] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/ur_10e/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/ur_10e/joint_pos_env_cfg.py index 4abcf4369764..1cdc8fd77661 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/ur_10e/joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/config/ur_10e/joint_pos_env_cfg.py @@ -50,7 +50,7 @@ def __post_init__(self): self.scene.ee_frame_wrt_base_frame = FrameTransformerCfg( prim_path="{ENV_REGEX_NS}/Robot/base_link", visualizer_cfg=FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer"), - source_frame_offset=OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0)), + source_frame_offset=OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 1.0, 0.0)), target_frames=[ FrameTransformerCfg.FrameCfg( prim_path="{ENV_REGEX_NS}/Robot/wrist_3_link", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/reach_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/reach_env_cfg.py index 90b65a0f96c2..018ef3a49c9c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/reach_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/reach/reach_env_cfg.py @@ -18,7 +18,7 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.manipulation.deploy.mdp as mdp diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py index 8c9e9617fce3..4ee4de7552ee 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/__init__.py @@ -22,7 +22,6 @@ disable_env_checker=True, kwargs={ "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroReorientEnvCfg", - "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", }, ) @@ -33,7 +32,6 @@ disable_env_checker=True, kwargs={ "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroReorientEnvCfg_PLAY", - "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", }, ) @@ -45,7 +43,6 @@ disable_env_checker=True, kwargs={ "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroLiftEnvCfg", - "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", }, ) @@ -57,7 +54,6 @@ disable_env_checker=True, kwargs={ "env_cfg_entry_point": f"{__name__}.dexsuite_kuka_allegro_env_cfg:DexsuiteKukaAllegroLiftEnvCfg_PLAY", - "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:DexsuiteKukaAllegroPPORunnerCfg", }, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml deleted file mode 100644 index 3a9e96eaeb05..000000000000 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rl_games_ppo_cfg.yaml +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -params: - seed: 42 - - # environment wrapper clipping - env: - clip_observations: 100.0 - clip_actions: 100.0 - obs_groups: - obs: ["policy", "proprio", "perception"] - states: ["policy", "proprio", "perception"] - concate_obs_groups: True - - algo: - name: a2c_continuous - - model: - name: continuous_a2c_logstd - - network: - name: actor_critic - separate: True - space: - continuous: - mu_activation: None - sigma_activation: None - mu_init: - name: default - sigma_init: - name: const_initializer - val: 0 - fixed_sigma: True - mlp: - units: [512, 256, 128] - activation: elu - d2rl: False - initializer: - name: default - regularizer: - name: None - - load_checkpoint: False # flag which sets whether to load the checkpoint - load_path: '' # path to the checkpoint to load - - config: - name: reorient - env_name: rlgpu - device: 'cuda:0' - device_name: 'cuda:0' - multi_gpu: False - ppo: True - mixed_precision: False - normalize_input: True - normalize_value: True - value_bootstrap: False - num_actors: -1 - reward_shaper: - scale_value: 0.01 - normalize_advantage: True - gamma: 0.99 - tau: 0.95 - learning_rate: 1e-3 - lr_schedule: adaptive - schedule_type: legacy - kl_threshold: 0.01 - score_to_win: 100000000 - max_epochs: 750000 - save_best_after: 100 - save_frequency: 50 - print_stats: True - grad_norm: 1.0 - entropy_coef: 0.001 - truncate_grads: True - e_clip: 0.2 - horizon_length: 36 - minibatch_size: 36864 - mini_epochs: 5 - critic_coef: 4 - clip_value: True - clip_actions: False - seq_len: 4 - bounds_loss_coef: 0.0001 - -pbt: - enabled: False - policy_idx: 0 # policy index in a population - num_policies: 8 # total number of policies in the population - directory: . - workspace: "pbt_workspace" # suffix of the workspace dir name inside train_dir - objective: episode.Curriculum/adr - - # PBT hyperparams - interval_steps: 50000000 - threshold_std: 0.1 - threshold_abs: 0.025 - mutation_rate: 0.25 - change_range: [1.1, 2.0] - mutation: - - agent.params.config.learning_rate: "mutate_float" - agent.params.config.grad_norm: "mutate_float" - agent.params.config.entropy_coef: "mutate_float" - agent.params.config.critic_coef: "mutate_float" - agent.params.config.bounds_loss_coef: "mutate_float" - agent.params.config.kl_threshold: "mutate_float" - agent.params.config.gamma: "mutate_discount" - agent.params.config.tau: "mutate_discount" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py index 9bc92bd8f69b..907777f9614c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/agents/rsl_rl_ppo_cfg.py @@ -3,37 +3,99 @@ # # SPDX-License-Identifier: BSD-3-Clause +from dataclasses import MISSING + from isaaclab.utils import configclass -from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg +from isaaclab_rl.rsl_rl import ( + RslRlCNNModelCfg, + RslRlMLPModelCfg, + RslRlOnPolicyRunnerCfg, + RslRlPpoAlgorithmCfg, +) + +from isaaclab_tasks.utils import PresetCfg + +STATE_POLICY_CFG = RslRlMLPModelCfg( + distribution_cfg=RslRlMLPModelCfg.GaussianDistributionCfg(init_std=1.0), + obs_normalization=True, + hidden_dims=[512, 256, 128], + activation="elu", +) + +STATE_CRITIC_CFG = RslRlMLPModelCfg( + obs_normalization=True, + hidden_dims=[512, 256, 128], + activation="elu", +) + +CNN_POLICY_CFG = RslRlCNNModelCfg( + obs_normalization=True, + hidden_dims=[512, 256, 128], + distribution_cfg=RslRlCNNModelCfg.GaussianDistributionCfg(init_std=1.0), + cnn_cfg=RslRlCNNModelCfg.CNNCfg( + output_channels=[32, 64, 64], + kernel_size=[8, 4, 3], + stride=[4, 2, 1], + activation="elu", + ), + activation="elu", +) + + +ALGO_CFG = RslRlPpoAlgorithmCfg( + value_loss_coef=1.0, + use_clipped_value_loss=True, + clip_param=0.2, + entropy_coef=0.005, + num_learning_epochs=5, + num_mini_batches=4, + learning_rate=1.0e-3, + schedule="adaptive", + gamma=0.99, + lam=0.95, + desired_kl=0.01, + max_grad_norm=1.0, +) @configclass -class DexsuiteKukaAllegroPPORunnerCfg(RslRlOnPolicyRunnerCfg): +class DexsuiteKukaAllegroPPOBaseRunnerCfg(RslRlOnPolicyRunnerCfg): num_steps_per_env = 32 - obs_groups = {"policy": ["policy", "proprio", "perception"], "critic": ["policy", "proprio", "perception"]} max_iterations = 15000 save_interval = 250 - experiment_name = "dexsuite_kuka_allegro" - policy = RslRlPpoActorCriticCfg( - init_noise_std=1.0, - actor_obs_normalization=True, - critic_obs_normalization=True, - actor_hidden_dims=[512, 256, 128], - critic_hidden_dims=[512, 256, 128], - activation="elu", + experiment_name = (MISSING,) # type: ignore + obs_groups = (MISSING,) # type: ignore + actor = (MISSING,) # type: ignore + critic = (MISSING,) # type: ignore + algorithm = MISSING # type: ignore + + +@configclass +class DexsuiteKukaAllegroPPORunnerCfg(PresetCfg): + default = DexsuiteKukaAllegroPPOBaseRunnerCfg().replace( + experiment_name="dexsuite_kuka_allegro", + obs_groups={"actor": ["policy", "proprio", "perception"], "critic": ["policy", "proprio", "perception"]}, + actor=STATE_POLICY_CFG, + critic=STATE_CRITIC_CFG, + algorithm=ALGO_CFG, ) - algorithm = RslRlPpoAlgorithmCfg( - value_loss_coef=1.0, - use_clipped_value_loss=True, - clip_param=0.2, - entropy_coef=0.005, - num_learning_epochs=5, - num_mini_batches=4, - learning_rate=1.0e-3, - schedule="adaptive", - gamma=0.99, - lam=0.95, - desired_kl=0.01, - max_grad_norm=1.0, + + single_camera = DexsuiteKukaAllegroPPOBaseRunnerCfg().replace( + experiment_name="dexsuite_kuka_allegro_single_camera", + obs_groups={"actor": ["policy", "proprio", "base_image"], "critic": ["policy", "proprio", "perception"]}, + actor=CNN_POLICY_CFG, + critic=STATE_CRITIC_CFG, + algorithm=ALGO_CFG.replace(num_mini_batches=2), + ) + + duo_camera = DexsuiteKukaAllegroPPOBaseRunnerCfg().replace( + experiment_name="dexsuite_kuka_allegro_duo_camera", + obs_groups={ + "actor": ["policy", "proprio", "base_image", "wrist_image"], + "critic": ["policy", "proprio", "perception"], + }, + actor=CNN_POLICY_CFG, + critic=STATE_CRITIC_CFG, + algorithm=ALGO_CFG.replace(num_mini_batches=2), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/camera_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/camera_cfg.py new file mode 100644 index 000000000000..cfaae6bbca4c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/camera_cfg.py @@ -0,0 +1,188 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +import isaaclab.sim as sim_utils +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.sensors import CameraCfg +from isaaclab.utils import configclass +from isaaclab.utils.noise import UniformNoiseCfg as Unoise + +from isaaclab_tasks.utils import PresetCfg +from isaaclab_tasks.utils.presets import MultiBackendRendererCfg + +from ... import dexsuite_env_cfg as dexsuite +from ... import mdp + +FINGERTIP_LIST = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] + + +BASE_CAMERA_CFG = CameraCfg( + prim_path="/World/envs/env_.*/Camera", + offset=CameraCfg.OffsetCfg( + pos=(0.57, -0.8, 0.5), + rot=(0.6124, 0.3536, 0.3536, 0.6124), + convention="opengl", + ), + data_types=MISSING, + spawn=sim_utils.PinholeCameraCfg(clipping_range=(0.01, 2.5)), + width=MISSING, + height=MISSING, + renderer_cfg=MultiBackendRendererCfg(), +) + +WRIST_CAMERA_CFG = CameraCfg( + prim_path="/World/envs/env_.*/Robot/ee_link/palm_link/Camera", + offset=CameraCfg.OffsetCfg( + pos=(0.038, -0.38, -0.18), + rot=(0.641, 0.641, -0.299, 0.299), + convention="opengl", + ), + data_types=MISSING, + spawn=sim_utils.PinholeCameraCfg(clipping_range=(0.01, 2.5)), + width=MISSING, + height=MISSING, + renderer_cfg=MultiBackendRendererCfg(), +) + + +@configclass +class BaseTiledCameraCfg(PresetCfg): + """Tiled camera configurations""" + + rgb64 = BASE_CAMERA_CFG.replace(data_types=["rgb"], width=64, height=64) + rgb128 = BASE_CAMERA_CFG.replace(data_types=["rgb"], width=128, height=128) + rgb256 = BASE_CAMERA_CFG.replace(data_types=["rgb"], width=256, height=256) + depth64 = BASE_CAMERA_CFG.replace(data_types=["depth"], width=64, height=64) + depth128 = BASE_CAMERA_CFG.replace(data_types=["depth"], width=128, height=128) + depth256 = BASE_CAMERA_CFG.replace(data_types=["depth"], width=256, height=256) + albedo64 = BASE_CAMERA_CFG.replace(data_types=["albedo"], width=64, height=64) + albedo128 = BASE_CAMERA_CFG.replace(data_types=["albedo"], width=128, height=128) + albedo256 = BASE_CAMERA_CFG.replace(data_types=["albedo"], width=256, height=256) + simple_shading_constant_diffuse64 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=64, height=64 + ) + simple_shading_constant_diffuse128 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=128, height=128 + ) + simple_shading_constant_diffuse256 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=256, height=256 + ) + simple_shading_diffuse_mdl64 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=64, height=64 + ) + simple_shading_diffuse_mdl128 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=128, height=128 + ) + simple_shading_diffuse_mdl256 = BASE_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=256, height=256 + ) + simple_shading_full_mdl64 = BASE_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=64, height=64) + simple_shading_full_mdl128 = BASE_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=128, height=128) + simple_shading_full_mdl256 = BASE_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=256, height=256) + semantic_segmentation64 = BASE_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=64, height=64) + semantic_segmentation128 = BASE_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=128, height=128) + semantic_segmentation256 = BASE_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=256, height=256) + default = rgb64 + + +@configclass +class WristTiledCameraCfg(PresetCfg): + """Tiled camera configurations""" + + rgb64 = WRIST_CAMERA_CFG.replace(data_types=["rgb"], width=64, height=64) + rgb128 = WRIST_CAMERA_CFG.replace(data_types=["rgb"], width=128, height=128) + rgb256 = WRIST_CAMERA_CFG.replace(data_types=["rgb"], width=256, height=256) + depth64 = WRIST_CAMERA_CFG.replace(data_types=["depth"], width=64, height=64) + depth128 = WRIST_CAMERA_CFG.replace(data_types=["depth"], width=128, height=128) + depth256 = WRIST_CAMERA_CFG.replace(data_types=["depth"], width=256, height=256) + albedo64 = WRIST_CAMERA_CFG.replace(data_types=["albedo"], width=64, height=64) + albedo128 = WRIST_CAMERA_CFG.replace(data_types=["albedo"], width=128, height=128) + albedo256 = WRIST_CAMERA_CFG.replace(data_types=["albedo"], width=256, height=256) + simple_shading_constant_diffuse64 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=64, height=64 + ) + simple_shading_constant_diffuse128 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=128, height=128 + ) + simple_shading_constant_diffuse256 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_constant_diffuse"], width=256, height=256 + ) + simple_shading_diffuse_mdl64 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=64, height=64 + ) + simple_shading_diffuse_mdl128 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=128, height=128 + ) + simple_shading_diffuse_mdl256 = WRIST_CAMERA_CFG.replace( + data_types=["simple_shading_diffuse_mdl"], width=256, height=256 + ) + simple_shading_full_mdl64 = WRIST_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=64, height=64) + simple_shading_full_mdl128 = WRIST_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=128, height=128) + simple_shading_full_mdl256 = WRIST_CAMERA_CFG.replace(data_types=["simple_shading_full_mdl"], width=256, height=256) + semantic_segmentation64 = WRIST_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=64, height=64) + semantic_segmentation128 = WRIST_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=128, height=128) + semantic_segmentation256 = WRIST_CAMERA_CFG.replace(data_types=["semantic_segmentation"], width=256, height=256) + default = rgb64 + + +############################ + + +@configclass +class StateObservationCfg(dexsuite.ObservationsCfg): + """Kuka Allegro participant scene for Dexsuite Lifting/Reorientation""" + + def __post_init__(self: dexsuite.ObservationsCfg): + super().__post_init__() + self.proprio.contact = ObsTerm( + func=mdp.fingers_contact_force_b, + params={"contact_sensor_names": [f"{link}_object_s" for link in FINGERTIP_LIST]}, + clip=(-20.0, 20.0), # contact force in finger tips is under 20N normally + ) + self.proprio.hand_tips_state_b.params["body_asset_cfg"].body_names = ["palm_link", ".*_tip"] + + +@configclass +class SingleCameraObservationsCfg(StateObservationCfg): + """Observation specifications for the MDP.""" + + @configclass + class BaseImageObsCfg(ObsGroup): + """Camera observations for policy group.""" + + object_observation_b = ObsTerm( + func=mdp.vision_camera, + noise=Unoise(n_min=-0.0, n_max=0.0), + clip=(-1.0, 1.0), + params={"sensor_cfg": SceneEntityCfg("base_camera")}, + ) + + base_image: BaseImageObsCfg = BaseImageObsCfg() + + def __post_init__(self): + super().__post_init__() + for group in self.__dataclass_fields__.values(): + obs_group = getattr(self, group.name) + obs_group.history_length = None + + +@configclass +class DuoCameraObservationsCfg(SingleCameraObservationsCfg): + """Observation specifications for the MDP.""" + + @configclass + class WristImageObsCfg(ObsGroup): + wrist_observation = ObsTerm( + func=mdp.vision_camera, + noise=Unoise(n_min=-0.0, n_max=0.0), + clip=(-1.0, 1.0), + params={"sensor_cfg": SceneEntityCfg("wrist_camera")}, + ) + + wrist_image: WristImageObsCfg = WristImageObsCfg() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py index 6b7f82fde06e..4492d197f763 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py @@ -3,16 +3,89 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.assets import ArticulationCfg from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import ContactSensorCfg +from isaaclab.sensors import CameraCfg, ContactSensorCfg from isaaclab.utils import configclass +from isaaclab_tasks.utils import PresetCfg + from isaaclab_assets.robots import KUKA_ALLEGRO_CFG from ... import dexsuite_env_cfg as dexsuite from ... import mdp +from .camera_cfg import ( + BaseTiledCameraCfg, + DuoCameraObservationsCfg, + SingleCameraObservationsCfg, + StateObservationCfg, + WristTiledCameraCfg, +) + +FINGERTIP_LIST = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] +THUMB_SENSOR = "thumb_link_3_object_s" +FINGER_SENSORS = [f"{name}_object_s" for name in FINGERTIP_LIST if name != "thumb_link_3"] + + +@configclass +class KukaAllegroPhysicsCfg(PresetCfg): + default = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_max_rigid_patch_count=4 * 5 * 2**15, + gpu_found_lost_pairs_capacity=2**26, + ) + newton_mjwarp = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + solver="newton", + integrator="implicitfast", + njmax=300, + nconmax=70, + impratio=10.0, + cone="elliptic", + update_data_interval=2, + iterations=100, + ls_iterations=15, + ls_parallel=False, + use_mujoco_contacts=True, + ccd_iterations=5000, + ), + num_substeps=2, + debug_mode=False, + ) + physx = default + + +@configclass +class KukaAllegroSceneCfg(PresetCfg): + @configclass + class KukaAllegroSceneCfg(dexsuite.SceneCfg): + """Kuka Allegro participant scene for Dexsuite Lifting/Reorientation""" + + robot: ArticulationCfg = KUKA_ALLEGRO_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + base_camera: CameraCfg | None = None + + wrist_camera: CameraCfg | None = None + + def __post_init__(self: dexsuite.SceneCfg): + super().__post_init__() + for link_name in FINGERTIP_LIST: + setattr( + self, + f"{link_name}_object_s", + ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/ee_link/" + link_name, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Object"], + ), + ) + + default = KukaAllegroSceneCfg(num_envs=4096, env_spacing=3, replicate_physics=True) + single_camera = default.replace(base_camera=BaseTiledCameraCfg()) + duo_camera = default.replace(base_camera=BaseTiledCameraCfg(), wrist_camera=WristTiledCameraCfg()) @configclass @@ -22,40 +95,65 @@ class KukaAllegroRelJointPosActionCfg: @configclass class KukaAllegroReorientRewardCfg(dexsuite.RewardsCfg): - # bool awarding term if 2 finger tips are in contact with object, one of the contacting fingers has to be thumb. good_finger_contact = RewTerm( func=mdp.contacts, weight=0.5, - params={"threshold": 1.0}, + params={"threshold": 0.1, "thumb_name": THUMB_SENSOR, "finger_names": FINGER_SENSORS}, + ) + + contact_count = RewTerm( + func=mdp.contact_count, + weight=1.0, + params={ + "threshold": 0.01, + "sensor_names": FINGER_SENSORS + [THUMB_SENSOR], + }, ) + def __post_init__(self: dexsuite.RewardsCfg): + super().__post_init__() + self.fingers_to_object.params["asset_cfg"] = SceneEntityCfg("robot", body_names=["palm_link", ".*_tip"]) + self.fingers_to_object.params["thumb_name"] = THUMB_SENSOR + self.fingers_to_object.params["finger_names"] = FINGER_SENSORS + self.position_tracking.params["thumb_name"] = THUMB_SENSOR + self.position_tracking.params["finger_names"] = FINGER_SENSORS + if self.orientation_tracking: + self.orientation_tracking.params["thumb_name"] = THUMB_SENSOR + self.orientation_tracking.params["finger_names"] = FINGER_SENSORS + self.success.params["thumb_name"] = THUMB_SENSOR + self.success.params["finger_names"] = FINGER_SENSORS + + +@configclass +class KukaAllegroObservationCfg(PresetCfg): + state = StateObservationCfg() + single_camera = SingleCameraObservationsCfg() + duo_camera = DuoCameraObservationsCfg() + default = state + + +@configclass +class KukaAllegroEventCfg(PresetCfg): + @configclass + class KukaAllegroPhysxEventCfg(dexsuite.StartupEventCfg, dexsuite.EventCfg): + pass + + default = KukaAllegroPhysxEventCfg() + newton_mjwarp = dexsuite.EventCfg() + physx = default + @configclass class KukaAllegroMixinCfg: + scene: KukaAllegroSceneCfg = KukaAllegroSceneCfg() rewards: KukaAllegroReorientRewardCfg = KukaAllegroReorientRewardCfg() + observations: KukaAllegroObservationCfg = KukaAllegroObservationCfg() + events: KukaAllegroEventCfg = KukaAllegroEventCfg() actions: KukaAllegroRelJointPosActionCfg = KukaAllegroRelJointPosActionCfg() - def __post_init__(self: dexsuite.DexsuiteReorientEnvCfg): + def __post_init__(self): super().__post_init__() - self.commands.object_pose.body_name = "palm_link" - self.scene.robot = KUKA_ALLEGRO_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") - finger_tip_body_list = ["index_link_3", "middle_link_3", "ring_link_3", "thumb_link_3"] - for link_name in finger_tip_body_list: - setattr( - self.scene, - f"{link_name}_object_s", - ContactSensorCfg( - prim_path="{ENV_REGEX_NS}/Robot/ee_link/" + link_name, - filter_prim_paths_expr=["{ENV_REGEX_NS}/Object"], - ), - ) - self.observations.proprio.contact = ObsTerm( - func=mdp.fingers_contact_force_b, - params={"contact_sensor_names": [f"{link}_object_s" for link in finger_tip_body_list]}, - clip=(-20.0, 20.0), # contact force in finger tips is under 20N normally - ) - self.observations.proprio.hand_tips_state_b.params["body_asset_cfg"].body_names = ["palm_link", ".*_tip"] - self.rewards.fingers_to_object.params["asset_cfg"] = SceneEntityCfg("robot", body_names=["palm_link", ".*_tip"]) + self.sim.physics = KukaAllegroPhysicsCfg() @configclass diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py index 9ee00105e57a..449043e3977b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/dexsuite_env_cfg.py @@ -5,6 +5,8 @@ from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedEnvCfg, ViewerCfg @@ -14,15 +16,70 @@ from isaaclab.managers import RewardTermCfg as RewTerm from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.markers import VisualizationMarkersCfg from isaaclab.scene import InteractiveSceneCfg from isaaclab.sim import CapsuleCfg, ConeCfg, CuboidCfg, RigidBodyMaterialCfg, SphereCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise + +from isaaclab_tasks.utils import PresetCfg from . import mdp from .adr_curriculum import CurriculumCfg +TABLE_SPAWN_CFG = sim_utils.CuboidCfg( + size=(0.8, 1.5, 0.04), + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + collision_props=sim_utils.CollisionPropertiesCfg(), + # trick: we let visualizer's color to show the table with success coloring + visible=False, +) + + +@configclass +class ObjectCfg(PresetCfg): + shapes = sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + CuboidCfg(size=(0.05, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CuboidCfg(size=(0.05, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CuboidCfg(size=(0.025, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CuboidCfg(size=(0.025, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CuboidCfg(size=(0.025, 0.025, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CuboidCfg(size=(0.01, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + SphereCfg(radius=0.05, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + SphereCfg(radius=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.04, height=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.04, height=0.01, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.04, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.025, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + CapsuleCfg(radius=0.01, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + ConeCfg(radius=0.05, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + ConeCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), + ], + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=16, + solver_velocity_iteration_count=0, + disable_gravity=False, + ), + collision_props=sim_utils.CollisionPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.2), + ) + cube = sim_utils.CuboidCfg( + size=(0.05, 0.1, 0.1), + physics_material=RigidBodyMaterialCfg(static_friction=0.5), + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=16, + solver_velocity_iteration_count=0, + disable_gravity=False, + ), + collision_props=sim_utils.CollisionPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=0.2), + ) + newton_mjwarp = cube # newton does not support multi-asset spawning yet + default = shapes + @configclass class SceneCfg(InteractiveSceneCfg): @@ -34,54 +91,22 @@ class SceneCfg(InteractiveSceneCfg): # object object: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - spawn=sim_utils.MultiAssetSpawnerCfg( - assets_cfg=[ - CuboidCfg(size=(0.05, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CuboidCfg(size=(0.05, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CuboidCfg(size=(0.025, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CuboidCfg(size=(0.025, 0.05, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CuboidCfg(size=(0.025, 0.025, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CuboidCfg(size=(0.01, 0.1, 0.1), physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - SphereCfg(radius=0.05, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - SphereCfg(radius=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.04, height=0.025, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.04, height=0.01, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.04, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.025, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - CapsuleCfg(radius=0.01, height=0.2, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - ConeCfg(radius=0.05, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - ConeCfg(radius=0.025, height=0.1, physics_material=RigidBodyMaterialCfg(static_friction=0.5)), - ], - rigid_props=sim_utils.RigidBodyPropertiesCfg( - solver_position_iteration_count=16, - solver_velocity_iteration_count=0, - disable_gravity=False, - ), - collision_props=sim_utils.CollisionPropertiesCfg(), - mass_props=sim_utils.MassPropertiesCfg(mass=0.2), - ), + spawn=ObjectCfg(), # type: ignore init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.55, 0.1, 0.35)), ) # table table: RigidObjectCfg = RigidObjectCfg( prim_path="/World/envs/env_.*/table", - spawn=sim_utils.CuboidCfg( - size=(0.8, 1.5, 0.04), - rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), - collision_props=sim_utils.CollisionPropertiesCfg(), - # trick: we let visualizer's color to show the table with success coloring - visible=False, - ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.55, 0.0, 0.235), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=TABLE_SPAWN_CFG, + init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.55, 0.0, 0.235), rot=(0.0, 0.0, 0.0, 1.0)), ) # plane plane = AssetBaseCfg( prim_path="/World/GroundPlane", init_state=AssetBaseCfg.InitialStateCfg(), - spawn=sim_utils.GroundPlaneCfg(), + spawn=sim_utils.GroundPlaneCfg(color=(1.0, 1.0, 1.0)), collision_group=-1, ) @@ -113,6 +138,17 @@ class CommandsCfg: yaw=(0.0, 0.0), ), success_vis_asset_name="table", + success_visualizer_cfg=VisualizationMarkersCfg( + prim_path="/Visuals/SuccessMarkers", + markers={ + "failure": TABLE_SPAWN_CFG.replace( + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.25, 0.15, 0.15)), visible=True + ), + "success": TABLE_SPAWN_CFG.replace( + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.15, 0.25, 0.15)), visible=True + ), + }, + ), ) @@ -182,15 +218,8 @@ def __post_init__(self): @configclass -class EventCfg: - """Configuration for randomization.""" - - # -- pre-startup - randomize_object_scale = EventTerm( - func=mdp.randomize_rigid_body_scale, - mode="prestartup", - params={"scale_range": (0.75, 1.5), "asset_cfg": SceneEntityCfg("object")}, - ) +class StartupEventCfg: + """Startup-mode domain randomization (PhysX only — Newton does not support startup events).""" robot_physics_material = EventTerm( func=mdp.randomize_rigid_body_material, @@ -247,13 +276,20 @@ class EventCfg: }, ) - reset_table = EventTerm( - func=mdp.reset_root_state_uniform, + +@configclass +class EventCfg: + """Reset-mode events (shared by all physics backends).""" + + # Gravity scheduling is a deliberate curriculum trick — starting with no + # gravity (easy) and gradually introducing full gravity (hard) makes learning + # smoother and removes the need for a separate "Lift" reward. + variable_gravity = EventTerm( + func=mdp.randomize_physics_scene_gravity, mode="reset", params={ - "pose_range": {"x": [-0.05, 0.05], "y": [-0.05, 0.05], "z": [0.0, 0.0]}, - "velocity_range": {"x": [-0.0, 0.0], "y": [-0.0, 0.0], "z": [-0.0, 0.0]}, - "asset_cfg": SceneEntityCfg("table"), + "gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]), + "operation": "abs", }, ) @@ -303,20 +339,6 @@ class EventCfg: }, ) - # Note (Octi): This is a deliberate trick in Remake to accelerate learning. - # By scheduling gravity as a curriculum — starting with no gravity (easy) - # and gradually introducing full gravity (hard) — the agent learns more smoothly. - # This removes the need for a special "Lift" reward (often required to push the - # agent to counter gravity), which has bonus effect of simplifying reward composition overall. - variable_gravity = EventTerm( - func=mdp.randomize_physics_scene_gravity, - mode="reset", - params={ - "gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]), - "operation": "abs", - }, - ) - @configclass class ActionsCfg: @@ -393,7 +415,7 @@ class DexsuiteReorientEnvCfg(ManagerBasedEnvCfg): # Scene settings viewer: ViewerCfg = ViewerCfg(eye=(-2.25, 0.0, 0.75), lookat=(0.0, 0.0, 0.45), origin_type="env") - scene: SceneCfg = SceneCfg(num_envs=4096, env_spacing=3, replicate_physics=False) + scene: SceneCfg = SceneCfg(num_envs=4096, env_spacing=3, replicate_physics=True) # Basic settings observations: ObservationsCfg = ObservationsCfg() actions: ActionsCfg = ActionsCfg() @@ -401,37 +423,54 @@ class DexsuiteReorientEnvCfg(ManagerBasedEnvCfg): # MDP settings rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() - events: EventCfg = EventCfg() + events: EventCfg = MISSING # type: ignore curriculum: CurriculumCfg | None = CurriculumCfg() + def validate_config(self): + """Check for invalid preset combinations after resolution.""" + is_newton = not isinstance(self.sim.physics, PhysxCfg) + is_multi_asset = isinstance(self.scene.object.spawn, sim_utils.MultiAssetSpawnerCfg) + + if is_newton and is_multi_asset: + raise ValueError( + "Newton physics does not support multi-asset spawning." + " Use a single-geometry object preset (e.g. presets=cube) instead of 'shapes'." + ) + + warp_supported = {"rgb", "depth", "distance_to_image_plane"} + for cam_attr in ("base_camera", "wrist_camera"): + cam = getattr(self.scene, cam_attr, None) + if cam is None: + continue + renderer_type = getattr(cam.renderer_cfg, "renderer_type", None) + if renderer_type == "newton_warp": + unsupported = set(cam.data_types) - warp_supported + if unsupported: + raise ValueError( + f"Warp renderer only supports data types {sorted(warp_supported)}, " + f"but '{cam_attr}' is configured with unsupported types: {sorted(unsupported)}. " + "Choose a compatible preset, e.g. presets=newton_renderer,rgb128." + ) + def __post_init__(self): """Post initialization.""" # general settings self.decimation = 2 # 50 Hz # *single-goal setup - self.commands.object_pose.resampling_time_range = (10.0, 10.0) + self.commands.object_pose.resampling_time_range = (2.0, 3.0) self.commands.object_pose.position_only = False - self.commands.object_pose.success_visualizer_cfg.markers["failure"] = self.scene.table.spawn.replace( - visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.25, 0.15, 0.15), roughness=0.25), visible=True - ) - self.commands.object_pose.success_visualizer_cfg.markers["success"] = self.scene.table.spawn.replace( - visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.15, 0.25, 0.15), roughness=0.25), visible=True - ) - - self.episode_length_s = 4.0 - self.is_finite_horizon = True + self.episode_length_s = 6.0 + self.is_finite_horizon = False # simulation settings self.sim.dt = 1 / 120 self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_max_rigid_patch_count = 4 * 5 * 2**15 - - if self.curriculum is not None: - self.curriculum.adr.params["pos_tol"] = self.rewards.success.params["pos_std"] / 2 - self.curriculum.adr.params["rot_tol"] = self.rewards.success.params["rot_std"] / 2 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_max_rigid_patch_count=4 * 5 * 2**15, + gpu_found_lost_pairs_capacity=2**26, + ) class DexsuiteLiftEnvCfg(DexsuiteReorientEnvCfg): @@ -443,7 +482,6 @@ def __post_init__(self): self.commands.object_pose.position_only = True if self.curriculum is not None: self.rewards.success.params["rot_std"] = None # make success reward not consider orientation - self.curriculum.adr.params["rot_tol"] = None # make adr not tracking orientation class DexsuiteReorientEnvCfg_PLAY(DexsuiteReorientEnvCfg): @@ -451,7 +489,6 @@ class DexsuiteReorientEnvCfg_PLAY(DexsuiteReorientEnvCfg): def __post_init__(self): super().__post_init__() - self.commands.object_pose.resampling_time_range = (2.0, 3.0) self.commands.object_pose.debug_vis = True self.curriculum.adr.params["init_difficulty"] = self.curriculum.adr.params["max_difficulty"] @@ -461,7 +498,6 @@ class DexsuiteLiftEnvCfg_PLAY(DexsuiteLiftEnvCfg): def __post_init__(self): super().__post_init__() - self.commands.object_pose.resampling_time_range = (2.0, 3.0) self.commands.object_pose.debug_vis = True self.commands.object_pose.position_only = True self.curriculum.adr.params["init_difficulty"] = self.curriculum.adr.params["max_difficulty"] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py index a6537b1a5e19..e14e0f6d52c5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.py @@ -3,10 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .commands import * # noqa: F401, F403 -from .curriculums import * # noqa: F401, F403 -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.pyi new file mode 100644 index 000000000000..4cb94f6e78ae --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/__init__.pyi @@ -0,0 +1,51 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ObjectUniformPoseCommandCfg", + "DifficultyScheduler", + "initial_final_interpolate_fn", + "body_state_b", + "fingers_contact_force_b", + "object_point_cloud_b", + "object_pos_b", + "object_quat_b", + "vision_camera", + "time_left", + "action_l2_clamped", + "action_rate_l2_clamped", + "contacts", + "contact_count", + "object_ee_distance", + "orientation_command_error_tanh", + "position_command_error_tanh", + "success_reward", + "abnormal_robot_state", + "out_of_bound", +] + +from .commands import ObjectUniformPoseCommandCfg +from .curriculums import DifficultyScheduler, initial_final_interpolate_fn +from .observations import ( + body_state_b, + fingers_contact_force_b, + object_point_cloud_b, + object_pos_b, + object_quat_b, + vision_camera, + time_left, +) +from .rewards import ( + action_l2_clamped, + action_rate_l2_clamped, + contacts, + contact_count, + object_ee_distance, + orientation_command_error_tanh, + position_command_error_tanh, + success_reward, +) +from .terminations import abnormal_robot_state, out_of_bound +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.pyi new file mode 100644 index 000000000000..50695b343506 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ObjectUniformPoseCommandCfg", +] + +from .pose_commands_cfg import ObjectUniformPoseCommandCfg diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py index ade464360a07..710fef931ee4 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py @@ -13,12 +13,12 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import CommandTerm -from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.leapp import POSE7_ELEMENT_NAMES from isaaclab.utils.math import combine_frame_transforms, compute_pose_error, quat_from_euler_xyz, quat_unique if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedEnv from . import pose_commands_cfg as dex_cmd_cfgs @@ -30,7 +30,7 @@ class ObjectUniformPoseCommand(CommandTerm): This command term samples target object poses by: • Drawing (x, y, z) uniformly within configured Cartesian bounds, and • Drawing roll-pitch-yaw uniformly within configured ranges, then converting - to a quaternion (w, x, y, z). Optionally makes quaternions unique by enforcing + to a quaternion (x, y, z, w). Optionally makes quaternions unique by enforcing a positive real part. Frames: @@ -38,7 +38,7 @@ class ObjectUniformPoseCommand(CommandTerm): targets are transformed into the *world frame* using the robot root pose. Outputs: - The command buffer has shape (num_envs, 7): `(x, y, z, qw, qx, qy, qz)`. + The command buffer has shape (num_envs, 7): ``(x, y, z, qx, qy, qz, qw)``. Metrics: `position_error` and `orientation_error` are computed between the commanded @@ -65,20 +65,29 @@ def __init__(self, cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg, env: ManagerBa # extract the robot and body index for which the command is generated self.robot: Articulation = env.scene[cfg.asset_name] self.object: RigidObject = env.scene[cfg.object_name] - self.success_vis_asset: RigidObject = env.scene[cfg.success_vis_asset_name] + if cfg.success_vis_asset_name in env.scene.keys(): + self.success_vis_asset: RigidObject = env.scene[cfg.success_vis_asset_name] + else: + self.success_vis_asset = None # create buffers - # -- commands: (x, y, z, qw, qx, qy, qz) in root frame + # -- commands: (x, y, z, qx, qy, qz, qw) in root frame self.pose_command_b = torch.zeros(self.num_envs, 7, device=self.device) self.pose_command_b[:, 3] = 1.0 self.pose_command_w = torch.zeros_like(self.pose_command_b) # -- metrics self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) + from isaaclab.markers import VisualizationMarkers self.success_visualizer = VisualizationMarkers(self.cfg.success_visualizer_cfg) self.success_visualizer.set_visibility(True) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or POSE7_ELEMENT_NAMES + def __str__(self) -> str: msg = "UniformPoseCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" @@ -93,7 +102,7 @@ def __str__(self) -> str: def command(self) -> torch.Tensor: """The desired pose command. Shape is (num_envs, 7). - The first three elements correspond to the position, followed by the quaternion orientation in (w, x, y, z). + The first three elements correspond to the position, followed by the quaternion orientation in (x, y, z, w). """ return self.pose_command_b @@ -104,25 +113,29 @@ def command(self) -> torch.Tensor: def _update_metrics(self): # transform command from base frame to simulation world frame self.pose_command_w[:, :3], self.pose_command_w[:, 3:] = combine_frame_transforms( - self.robot.data.root_pos_w, - self.robot.data.root_quat_w, + self.robot.data.root_pos_w.torch, + self.robot.data.root_quat_w.torch, self.pose_command_b[:, :3], self.pose_command_b[:, 3:], ) # compute the error + object_root_pose_w = self.object.data.root_link_pose_w.torch pos_error, rot_error = compute_pose_error( self.pose_command_w[:, :3], self.pose_command_w[:, 3:], - self.object.data.root_state_w[:, :3], - self.object.data.root_state_w[:, 3:7], + object_root_pose_w[:, :3], + object_root_pose_w[:, 3:7], ) - self.metrics["position_error"] = torch.norm(pos_error, dim=-1) - self.metrics["orientation_error"] = torch.norm(rot_error, dim=-1) + self.metrics["position_error"] = torch.linalg.norm(pos_error, dim=-1) + self.metrics["orientation_error"] = torch.linalg.norm(rot_error, dim=-1) success_id = self.metrics["position_error"] < 0.05 if not self.cfg.position_only: success_id &= self.metrics["orientation_error"] < 0.5 - self.success_visualizer.visualize(self.success_vis_asset.data.root_pos_w, marker_indices=success_id.int()) + if self.success_vis_asset is not None: + self.success_visualizer.visualize( + self.success_vis_asset.data.root_pos_w.torch, marker_indices=success_id.int() + ) def _resample_command(self, env_ids: Sequence[int]): # sample new pose targets @@ -144,12 +157,11 @@ def _update_command(self): pass def _set_debug_vis_impl(self, debug_vis: bool): - # create markers if necessary for the first tome if debug_vis: if not hasattr(self, "goal_visualizer"): - # -- goal pose + from isaaclab.markers import VisualizationMarkers + self.goal_visualizer = VisualizationMarkers(self.cfg.goal_pose_visualizer_cfg) - # -- current body pose self.curr_visualizer = VisualizationMarkers(self.cfg.curr_pose_visualizer_cfg) # set their visibility to true self.goal_visualizer.set_visibility(True) @@ -169,12 +181,15 @@ def _debug_vis_callback(self, event): # -- goal pose self.goal_visualizer.visualize(self.pose_command_w[:, :3], self.pose_command_w[:, 3:]) # -- current object pose - self.curr_visualizer.visualize(self.object.data.root_pos_w, self.object.data.root_quat_w) + obj_pos = self.object.data.root_pos_w.torch + obj_quat = self.object.data.root_quat_w.torch + self.curr_visualizer.visualize(obj_pos, obj_quat) else: - distance = torch.norm(self.pose_command_w[:, :3] - self.object.data.root_pos_w[:, :3], dim=1) + obj_pos = self.object.data.root_pos_w.torch + distance = torch.linalg.norm(self.pose_command_w[:, :3] - obj_pos, dim=1) success_id = (distance < 0.05).int() # note: since marker indices for position is 1(far) and 2(near), we can simply shift the success_id by 1. # -- goal position self.goal_visualizer.visualize(self.pose_command_w[:, :3], marker_indices=success_id + 1) # -- current object position - self.curr_visualizer.visualize(self.object.data.root_pos_w, marker_indices=success_id + 1) + self.curr_visualizer.visualize(obj_pos, marker_indices=success_id + 1) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py index e3c83882a3f5..7594f0f636ca 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands_cfg.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause from dataclasses import MISSING +from typing import TYPE_CHECKING import isaaclab.sim as sim_utils from isaaclab.managers import CommandTermCfg @@ -11,7 +12,8 @@ from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from . import pose_commands as dex_cmd +if TYPE_CHECKING: + from .pose_commands import ObjectUniformPoseCommand ALIGN_MARKER_CFG = VisualizationMarkersCfg( markers={ @@ -35,7 +37,7 @@ class ObjectUniformPoseCommandCfg(CommandTermCfg): """Configuration for uniform pose command generator.""" - class_type: type = dex_cmd.ObjectUniformPoseCommand + class_type: type["ObjectUniformPoseCommand"] | str = "{DIR}.pose_commands:ObjectUniformPoseCommand" asset_name: str = MISSING """Name of the coordinate referencing asset in the environment for which the commands are generated respect to.""" @@ -88,5 +90,7 @@ class Ranges: """Name of the asset in the environment for which the success color are indicated.""" # success markers - success_visualizer_cfg = VisualizationMarkersCfg(prim_path="/Visuals/SuccessMarkers", markers={}) + success_visualizer_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( + prim_path="/Visuals/SuccessMarkers", markers={} + ) """The configuration for the success visualization marker. User needs to add the markers""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py index 148046f012c7..61f7c2390d73 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/curriculums.py @@ -10,29 +10,24 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import mdp -from isaaclab.managers import ManagerTermBase, SceneEntityCfg -from isaaclab.utils.math import combine_frame_transforms, compute_pose_error +from isaaclab.managers import ManagerTermBase if TYPE_CHECKING: from isaaclab.envs import ManagerBasedRLEnv def initial_final_interpolate_fn(env: ManagerBasedRLEnv, env_id, data, initial_value, final_value, difficulty_term_str): + """Interpolate between initial and final values scaled by the current difficulty fraction. + + Works on arbitrarily nested structures of lists/tuples; scalars (int/float) + are interpolated at the leaves. """ - Interpolate between initial value iv and final value fv, for any arbitrarily - nested structure of lists/tuples in 'data'. Scalars (int/float) are handled - at the leaves. - """ - # get the fraction scalar on the device difficulty_term: DifficultyScheduler = getattr(env.curriculum_manager.cfg, difficulty_term_str).func frac = difficulty_term.difficulty_frac if frac < 0.1: - # no-op during start, since the difficulty fraction near 0 is wasting of resource. return mdp.modify_env_param.NO_CHANGE - # convert iv/fv to tensors, but we'll peel them apart in recursion initial_value_tensor = torch.tensor(initial_value, device=env.device) final_value_tensor = torch.tensor(final_value, device=env.device) @@ -40,31 +35,24 @@ def initial_final_interpolate_fn(env: ManagerBasedRLEnv, env_id, data, initial_v def _recurse(iv_elem, fv_elem, data_elem, frac): - # If it's a sequence, rebuild the same type with each element recursed if isinstance(data_elem, Sequence) and not isinstance(data_elem, (str, bytes)): - # Note: we assume initial value element and final value element have the same structure as data return type(data_elem)(_recurse(iv_e, fv_e, d_e, frac) for iv_e, fv_e, d_e in zip(iv_elem, fv_elem, data_elem)) - # Otherwise it's a leaf scalar: do the interpolation new_val = frac * (fv_elem - iv_elem) + iv_elem if isinstance(data_elem, int): return int(new_val.item()) - else: - # cast floats or any numeric - return new_val.item() + return new_val.item() class DifficultyScheduler(ManagerTermBase): """Adaptive difficulty scheduler for curriculum learning. - Tracks per-environment difficulty levels and adjusts them based on task performance. Difficulty increases when - position/orientation errors fall below given tolerances, and decreases otherwise (unless `promotion_only` is set). - The normalized average difficulty across environments is exposed as `difficulty_frac` for use in curriculum - interpolation. - - Args: - cfg: Configuration object specifying scheduler parameters. - env: The manager-based RL environment. + Promotes difficulty when the ``success`` reward term's sticky + :attr:`succeeded` flag is ``True`` for an environment at episode end, + meaning the agent achieved the success condition at least once during the + episode. Demotes (unless ``promotion_only`` is set) otherwise. + The normalized average difficulty across environments is exposed as + :attr:`difficulty_frac` for use in curriculum interpolation. """ def __init__(self, cfg, env): @@ -83,30 +71,15 @@ def __call__( self, env: ManagerBasedRLEnv, env_ids: Sequence[int], - asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), - object_cfg: SceneEntityCfg = SceneEntityCfg("object"), - pos_tol: float = 0.1, - rot_tol: float | None = None, init_difficulty: int = 0, min_difficulty: int = 0, max_difficulty: int = 50, promotion_only: bool = False, ): - asset: Articulation = env.scene[asset_cfg.name] - object: RigidObject = env.scene[object_cfg.name] - command = env.command_manager.get_command("object_pose") - des_pos_w, des_quat_w = combine_frame_transforms( - asset.data.root_pos_w[env_ids], asset.data.root_quat_w[env_ids], command[env_ids, :3], command[env_ids, 3:7] - ) - pos_err, rot_err = compute_pose_error( - des_pos_w, des_quat_w, object.data.root_pos_w[env_ids], object.data.root_quat_w[env_ids] - ) - pos_dist = torch.norm(pos_err, dim=1) - rot_dist = torch.norm(rot_err, dim=1) - move_up = (pos_dist < pos_tol) & (rot_dist < rot_tol) if rot_tol else pos_dist < pos_tol + succeeded = env.reward_manager.get_term_cfg("success").func.succeeded[env_ids] demot = self.current_adr_difficulties[env_ids] if promotion_only else self.current_adr_difficulties[env_ids] - 1 self.current_adr_difficulties[env_ids] = torch.where( - move_up, + succeeded, self.current_adr_difficulties[env_ids] + 1, demot, ).clamp(min=min_difficulty, max=max_difficulty) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py index 604c74320b01..fe666a7ecffe 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/observations.py @@ -9,14 +9,13 @@ import torch -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import ManagerTermBase, SceneEntityCfg from isaaclab.utils.math import quat_apply, quat_apply_inverse, quat_inv, quat_mul, subtract_frame_transforms -from .utils import sample_object_point_cloud - if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import Camera def object_pos_b( @@ -36,7 +35,7 @@ def object_pos_b( """ robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] - return quat_apply_inverse(robot.data.root_quat_w, object.data.root_pos_w - robot.data.root_pos_w) + return quat_apply_inverse(robot.data.root_quat_w.torch, object.data.root_pos_w.torch - robot.data.root_pos_w.torch) def object_quat_b( @@ -56,7 +55,7 @@ def object_quat_b( """ robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] - return quat_mul(quat_inv(robot.data.root_quat_w), object.data.root_quat_w) + return quat_mul(quat_inv(robot.data.root_quat_w.torch), object.data.root_quat_w.torch) def body_state_b( @@ -80,14 +79,14 @@ def body_state_b( body_asset: Articulation = env.scene[body_asset_cfg.name] base_asset: Articulation = env.scene[base_asset_cfg.name] # get world pose of bodies - body_pos_w = body_asset.data.body_pos_w[:, body_asset_cfg.body_ids].view(-1, 3) - body_quat_w = body_asset.data.body_quat_w[:, body_asset_cfg.body_ids].view(-1, 4) - body_lin_vel_w = body_asset.data.body_lin_vel_w[:, body_asset_cfg.body_ids].view(-1, 3) - body_ang_vel_w = body_asset.data.body_ang_vel_w[:, body_asset_cfg.body_ids].view(-1, 3) + body_pos_w = body_asset.data.body_pos_w.torch[:, body_asset_cfg.body_ids].view(-1, 3) + body_quat_w = body_asset.data.body_quat_w.torch[:, body_asset_cfg.body_ids].view(-1, 4) + body_lin_vel_w = body_asset.data.body_lin_vel_w.torch[:, body_asset_cfg.body_ids].view(-1, 3) + body_ang_vel_w = body_asset.data.body_ang_vel_w.torch[:, body_asset_cfg.body_ids].view(-1, 3) num_bodies = int(body_pos_w.shape[0] / env.num_envs) # get world pose of base frame - root_pos_w = base_asset.data.root_link_pos_w.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 3) - root_quat_w = base_asset.data.root_link_quat_w.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 4) + root_pos_w = base_asset.data.root_link_pos_w.torch.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 3) + root_quat_w = base_asset.data.root_link_quat_w.torch.unsqueeze(1).repeat_interleave(num_bodies, dim=1).view(-1, 4) # transform from world body pose to local body pose body_pos_b, body_quat_b = subtract_frame_transforms(root_pos_w, root_quat_w, body_pos_w, body_quat_w) body_lin_vel_b = quat_apply_inverse(root_quat_w, body_lin_vel_w) @@ -131,6 +130,8 @@ def __init__(self, cfg, env: ManagerBasedRLEnv): ray_cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/ObservationPointCloud") ray_cfg.markers["hit"].radius = 0.0025 self.visualizer = VisualizationMarkers(ray_cfg) + from .utils import sample_object_point_cloud + self.points_local = sample_object_point_cloud( env.num_envs, num_points, self.object.cfg.prim_path, device=env.device ) @@ -162,11 +163,11 @@ def __call__( Returns: Tensor of shape ``(num_envs, num_points, 3)`` or flattened if requested. """ - ref_pos_w = self.ref_asset.data.root_pos_w.unsqueeze(1).repeat(1, num_points, 1) - ref_quat_w = self.ref_asset.data.root_quat_w.unsqueeze(1).repeat(1, num_points, 1) + ref_pos_w = self.ref_asset.data.root_pos_w.torch.unsqueeze(1).repeat(1, num_points, 1) + ref_quat_w = self.ref_asset.data.root_quat_w.torch.unsqueeze(1).repeat(1, num_points, 1) - object_pos_w = self.object.data.root_pos_w.unsqueeze(1).repeat(1, num_points, 1) - object_quat_w = self.object.data.root_quat_w.unsqueeze(1).repeat(1, num_points, 1) + object_pos_w = self.object.data.root_pos_w.torch.unsqueeze(1).repeat(1, num_points, 1) + object_quat_w = self.object.data.root_quat_w.torch.unsqueeze(1).repeat(1, num_points, 1) # apply rotation + translation self.points_w = quat_apply(object_quat_w, self.points_local) + object_pos_w if visualize: @@ -191,8 +192,71 @@ def fingers_contact_force_b( Tensor of shape ``(num_envs, 3 * num_sensors)`` with forces stacked horizontally as ``[fx, fy, fz]`` per sensor. """ - force_w = [env.scene.sensors[name].data.force_matrix_w.view(env.num_envs, 3) for name in contact_sensor_names] + force_w = [env.scene.sensors[name].data.force_matrix_w.torch.view(env.num_envs, 3) for name in contact_sensor_names] force_w = torch.stack(force_w, dim=1) robot: Articulation = env.scene[asset_cfg.name] - forces_b = quat_apply_inverse(robot.data.root_link_quat_w.unsqueeze(1).repeat(1, force_w.shape[1], 1), force_w) - return forces_b + root_link_quat_w = robot.data.root_link_quat_w.torch + forces_b = quat_apply_inverse(root_link_quat_w.unsqueeze(1).repeat(1, force_w.shape[1], 1), force_w) + return forces_b.view(env.num_envs, -1) + + +class vision_camera(ManagerTermBase): + def __init__(self, cfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + sensor_cfg: SceneEntityCfg = cfg.params.get("sensor_cfg", SceneEntityCfg("tiled_camera")) + self.sensor: Camera = env.scene.sensors[sensor_cfg.name] + self.sensor_type = self.sensor.cfg.data_types[0] + self.norm_fn = ( + self._depth_norm + if self.sensor_type == "distance_to_image_plane" or self.sensor_type == "depth" + else self._rgb_norm + ) + + def __call__( + self, env: ManagerBasedRLEnv, sensor_cfg: SceneEntityCfg, normalize: bool = True + ) -> torch.Tensor: # obtain the input image + images = self.sensor.data.output[self.sensor_type] + torch.nan_to_num_(images, nan=1e6) + if normalize: + images = self.norm_fn(images) + images = images.permute(0, 3, 1, 2).contiguous() + return images + + def _rgb_norm(self, images: torch.Tensor) -> torch.Tensor: + images = images.float() / 255.0 + mean_tensor = torch.mean(images, dim=(1, 2), keepdim=True) + images -= mean_tensor + return images + + def _depth_norm(self, images: torch.Tensor) -> torch.Tensor: + images = torch.tanh(images / 2) * 2 + images -= torch.mean(images, dim=(1, 2), keepdim=True) + return images + + def show_collage(self, images: torch.Tensor, save_path: str = "collage.png"): + import numpy as np + from matplotlib import cm + from PIL import Image + + a = images.detach().cpu().numpy() + n, h, w, c = a.shape + s = int(np.ceil(np.sqrt(n))) + canvas = np.full((s * h, s * w, 3), 255, np.uint8) + turbo = cm.get_cmap("turbo") + for i in range(n): + r, col = divmod(i, s) + img = a[i] + if c == 1: + d = img[..., 0] + d = (d - d.min()) / (np.ptp(d) + 1e-8) + rgb = (turbo(d)[..., :3] * 255).astype(np.uint8) + else: + x = img if img.max() > 1 else img * 255 + rgb = np.clip(x, 0, 255).astype(np.uint8) + canvas[r * h : (r + 1) * h, col * w : (col + 1) * w] = rgb + Image.fromarray(canvas).save(save_path) + + +def time_left(env: ManagerBasedRLEnv): + time_left_frac = 1 - env.episode_length_buf / env.max_episode_length + return time_left_frac.view(env.num_envs, -1) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py index a6ddab0f9081..8a5a013ea846 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/rewards.py @@ -5,18 +5,19 @@ from __future__ import annotations +from collections.abc import Sequence from typing import TYPE_CHECKING import torch -from isaaclab.assets import RigidObject -from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import ContactSensor +from isaaclab.managers import ManagerTermBase, SceneEntityCfg from isaaclab.utils import math as math_utils from isaaclab.utils.math import combine_frame_transforms, compute_pose_error if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import ContactSensor def action_rate_l2_clamped(env: ManagerBasedRLEnv) -> torch.Tensor: @@ -32,96 +33,194 @@ def action_l2_clamped(env: ManagerBasedRLEnv) -> torch.Tensor: def object_ee_distance( env: ManagerBasedRLEnv, std: float, + thumb_name: str, + finger_names: list[str], + contact_threshold: float = 1.0, object_cfg: SceneEntityCfg = SceneEntityCfg("object"), asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ) -> torch.Tensor: - """Reward reaching the object using a tanh-kernel on end-effector distance. - - The reward is close to 1 when the maximum distance between the object and any end-effector body is small. + """Reward reaching the object using a tanh-kernel on end-effector distance with contact bonus. + + The reward is close to 1 when the distance is small. The reward is scaled by contact: + - Full reward (1x) when good contact (thumb + finger) + - Reduced reward (0.1x) when no contact + + Args: + env: The environment instance. + std: Standard deviation for tanh kernel. + thumb_name: Name of the thumb contact sensor. + finger_names: Names of the finger contact sensors. + contact_threshold: Contact force magnitude threshold. + object_cfg: Configuration for the object. + asset_cfg: Configuration for the robot asset. """ asset: RigidObject = env.scene[asset_cfg.name] - object: RigidObject = env.scene[object_cfg.name] - asset_pos = asset.data.body_pos_w[:, asset_cfg.body_ids] - object_pos = object.data.root_pos_w - object_ee_distance = torch.norm(asset_pos - object_pos[:, None, :], dim=-1).max(dim=-1).values - return 1 - torch.tanh(object_ee_distance / std) - - -def contacts(env: ManagerBasedRLEnv, threshold: float) -> torch.Tensor: - """Penalize undesired contacts as the number of violations that are above a threshold.""" - - thumb_contact_sensor: ContactSensor = env.scene.sensors["thumb_link_3_object_s"] - index_contact_sensor: ContactSensor = env.scene.sensors["index_link_3_object_s"] - middle_contact_sensor: ContactSensor = env.scene.sensors["middle_link_3_object_s"] - ring_contact_sensor: ContactSensor = env.scene.sensors["ring_link_3_object_s"] - # check if contact force is above threshold - thumb_contact = thumb_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) - index_contact = index_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) - middle_contact = middle_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) - ring_contact = ring_contact_sensor.data.force_matrix_w.view(env.num_envs, 3) - - thumb_contact_mag = torch.norm(thumb_contact, dim=-1) - index_contact_mag = torch.norm(index_contact, dim=-1) - middle_contact_mag = torch.norm(middle_contact, dim=-1) - ring_contact_mag = torch.norm(ring_contact, dim=-1) - good_contact_cond1 = (thumb_contact_mag > threshold) & ( - (index_contact_mag > threshold) | (middle_contact_mag > threshold) | (ring_contact_mag > threshold) - ) + obj: RigidObject = env.scene[object_cfg.name] + asset_pos = asset.data.body_pos_w.torch[:, asset_cfg.body_ids] + object_pos = obj.data.root_pos_w.torch + distance = torch.linalg.norm(asset_pos - object_pos[:, None, :], dim=-1).max(dim=-1).values + contact_bonus = contacts(env, contact_threshold, thumb_name, finger_names).float().clamp(0.1, 1.0) + return (1 - torch.tanh(distance / std)) * contact_bonus - return good_contact_cond1 +def _contact_force_mag(sensor: ContactSensor, num_envs: int) -> torch.Tensor: + """Extract per-environment contact force magnitude from a sensor's force_matrix_w.""" + force = sensor.data.force_matrix_w.torch.view(num_envs, 3) + return torch.linalg.norm(force, dim=-1) -def success_reward( - env: ManagerBasedRLEnv, - command_name: str, - asset_cfg: SceneEntityCfg, - align_asset_cfg: SceneEntityCfg, - pos_std: float, - rot_std: float | None = None, -) -> torch.Tensor: - """Reward success by comparing commanded pose to the object pose using tanh kernels on error.""" - asset: RigidObject = env.scene[asset_cfg.name] - object: RigidObject = env.scene[align_asset_cfg.name] - command = env.command_manager.get_command(command_name) - des_pos_w, des_quat_w = combine_frame_transforms( - asset.data.root_pos_w, asset.data.root_quat_w, command[:, :3], command[:, 3:7] - ) - pos_err, rot_err = compute_pose_error(des_pos_w, des_quat_w, object.data.root_pos_w, object.data.root_quat_w) - pos_dist = torch.norm(pos_err, dim=1) - if not rot_std: - # square is not necessary but this help to keep the final value between having rot_std or not roughly the same - return (1 - torch.tanh(pos_dist / pos_std)) ** 2 - rot_dist = torch.norm(rot_err, dim=1) - return (1 - torch.tanh(pos_dist / pos_std)) * (1 - torch.tanh(rot_dist / rot_std)) +def contacts(env: ManagerBasedRLEnv, threshold: float, thumb_name: str, finger_names: list[str]) -> torch.Tensor: + """Reward for good contact: thumb + at least one finger above threshold. + + Args: + env: The environment instance. + threshold: Contact force magnitude threshold. + thumb_name: Name of the thumb contact sensor in the scene. + finger_names: Names of the finger contact sensors in the scene. + + Returns: + Boolean tensor indicating good contact condition per environment. + """ + thumb_mag = _contact_force_mag(env.scene.sensors[thumb_name], env.num_envs) + + any_finger_contact = torch.zeros(env.num_envs, dtype=torch.bool, device=env.device) + for finger_name in finger_names: + finger_mag = _contact_force_mag(env.scene.sensors[finger_name], env.num_envs) + any_finger_contact = any_finger_contact | (finger_mag > threshold) + + return (thumb_mag > threshold) & any_finger_contact + + +def contact_count(env: ManagerBasedRLEnv, threshold: float, sensor_names: list[str]) -> torch.Tensor: + """Count the number of contact sensors with force above threshold. + + For each sensor that detects contact above the threshold, add 1 to the total. + This provides a reward proportional to the number of fingers in contact. + + Args: + env: The environment instance. + threshold: Contact force magnitude threshold. + sensor_names: Names of the contact sensors in the scene. + + Returns: + Tensor of shape (num_envs,) with the count of sensors in contact per environment. + """ + count = torch.zeros(env.num_envs, dtype=torch.float32, device=env.device) + + for sensor_name in sensor_names: + mag = _contact_force_mag(env.scene.sensors[sensor_name], env.num_envs) + count += (mag > threshold).float() + return count / len(sensor_names) + + +class success_reward(ManagerTermBase): + """Reward success by comparing commanded pose to the object pose using tanh kernels on error. + + The reward is gated by contact: only given when thumb + at least one finger are in contact. + + Maintains a sticky ``succeeded`` boolean tensor per environment that flips to ``True`` once + the success condition is met during an episode and resets to ``False`` on environment reset. + + Args: + cfg: Configuration object specifying term parameters. + env: The manager-based RL environment. + """ + + def __init__(self, cfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + self.succeeded = torch.zeros(env.num_envs, dtype=torch.bool, device=env.device) + + def reset(self, env_ids: Sequence[int] | None = None): + if env_ids is None: + env_ids = slice(None) + # Episode success = whether the success condition was met at any point this episode. + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = self.succeeded[env_ids].float().mean().item() + self.succeeded[env_ids] = False + + def __call__( + self, + env: ManagerBasedRLEnv, + command_name: str, + asset_cfg: SceneEntityCfg, + align_asset_cfg: SceneEntityCfg, + pos_std: float, + thumb_name: str, + finger_names: list[str], + contact_threshold: float = 0.01, + rot_std: float | None = None, + ) -> torch.Tensor: + asset: RigidObject = env.scene[asset_cfg.name] + obj: RigidObject = env.scene[align_asset_cfg.name] + command = env.command_manager.get_command(command_name) + des_pos_w, des_quat_w = combine_frame_transforms( + asset.data.root_pos_w.torch, + asset.data.root_quat_w.torch, + command[:, :3], + command[:, 3:7], + ) + pos_err, rot_err = compute_pose_error( + des_pos_w, + des_quat_w, + obj.data.root_pos_w.torch, + obj.data.root_quat_w.torch, + ) + pos_dist = torch.linalg.norm(pos_err, dim=1) + contact_mask = contacts(env, contact_threshold, thumb_name, finger_names) + + if rot_std: + rot_dist = torch.linalg.norm(rot_err, dim=1) + reward = (1 - torch.tanh(pos_dist / pos_std)) * (1 - torch.tanh(rot_dist / rot_std)) * contact_mask.float() + self.succeeded |= contact_mask & (pos_dist < pos_std) & (rot_dist < rot_std) + else: + reward = ((1 - torch.tanh(pos_dist / pos_std)) ** 2) * contact_mask.float() + self.succeeded |= contact_mask & (pos_dist < pos_std) + + return reward def position_command_error_tanh( - env: ManagerBasedRLEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg, align_asset_cfg: SceneEntityCfg + env: ManagerBasedRLEnv, + std: float, + command_name: str, + asset_cfg: SceneEntityCfg, + align_asset_cfg: SceneEntityCfg, + thumb_name: str, + finger_names: list[str], + contact_threshold: float = 0.1, ) -> torch.Tensor: """Reward tracking of commanded position using tanh kernel, gated by contact presence.""" asset: RigidObject = env.scene[asset_cfg.name] - object: RigidObject = env.scene[align_asset_cfg.name] + obj: RigidObject = env.scene[align_asset_cfg.name] command = env.command_manager.get_command(command_name) - # obtain the desired and current positions des_pos_b = command[:, :3] - des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w, asset.data.root_quat_w, des_pos_b) - distance = torch.norm(object.data.root_pos_w - des_pos_w, dim=1) - return (1 - torch.tanh(distance / std)) * contacts(env, 1.0).float() + des_pos_w, _ = combine_frame_transforms( + asset.data.root_pos_w.torch, + asset.data.root_quat_w.torch, + des_pos_b, + ) + distance = torch.linalg.norm(obj.data.root_pos_w.torch - des_pos_w, dim=1) + return (1 - torch.tanh(distance / std)) * contacts(env, contact_threshold, thumb_name, finger_names).float() def orientation_command_error_tanh( - env: ManagerBasedRLEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg, align_asset_cfg: SceneEntityCfg + env: ManagerBasedRLEnv, + std: float, + command_name: str, + asset_cfg: SceneEntityCfg, + align_asset_cfg: SceneEntityCfg, + thumb_name: str, + finger_names: list[str], + contact_threshold: float = 0.1, ) -> torch.Tensor: """Reward tracking of commanded orientation using tanh kernel, gated by contact presence.""" asset: RigidObject = env.scene[asset_cfg.name] - object: RigidObject = env.scene[align_asset_cfg.name] + obj: RigidObject = env.scene[align_asset_cfg.name] command = env.command_manager.get_command(command_name) - # obtain the desired and current orientations des_quat_b = command[:, 3:7] - des_quat_w = math_utils.quat_mul(asset.data.root_state_w[:, 3:7], des_quat_b) - quat_distance = math_utils.quat_error_magnitude(object.data.root_quat_w, des_quat_w) + root_state = asset.data.root_state_w.torch + des_quat_w = math_utils.quat_mul(root_state[:, 3:7], des_quat_b) + quat_distance = math_utils.quat_error_magnitude(obj.data.root_quat_w.torch, des_quat_w) - return (1 - torch.tanh(quat_distance / std)) * contacts(env, 1.0).float() + return (1 - torch.tanh(quat_distance / std)) * contacts(env, contact_threshold, thumb_name, finger_names).float() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py index 91bf2d0e3aaf..13fd1d9d3666 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/terminations.py @@ -15,36 +15,49 @@ import torch -from isaaclab.assets import Articulation, RigidObject -from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import ManagerTermBase, SceneEntityCfg, TerminationTermCfg if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv -def out_of_bound( - env: ManagerBasedRLEnv, - asset_cfg: SceneEntityCfg = SceneEntityCfg("object"), - in_bound_range: dict[str, tuple[float, float]] = {}, -) -> torch.Tensor: - """Termination condition for the object falls out of bound. +class out_of_bound(ManagerTermBase): + """Termination condition for when the object falls out of bound. - Args: - env: The environment. - asset_cfg: The object configuration. Defaults to SceneEntityCfg("object"). - in_bound_range: The range in x, y, z such that the object is considered in range + This class-based implementation caches the asset reference, ranges tensor, and env_origins + to avoid recomputing them on every call. """ - object: RigidObject = env.scene[asset_cfg.name] - range_list = [in_bound_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] - ranges = torch.tensor(range_list, device=env.device) - object_pos_local = object.data.root_pos_w - env.scene.env_origins - outside_bounds = ((object_pos_local < ranges[:, 0]) | (object_pos_local > ranges[:, 1])).any(dim=1) - return outside_bounds + def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + + asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg", SceneEntityCfg("object")) + self._object: RigidObject = env.scene[asset_cfg.name] + + in_bound_range: dict[str, tuple[float, float]] = cfg.params.get("in_bound_range", {}) + range_list = [in_bound_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] + ranges = torch.tensor(range_list, device=env.device, dtype=torch.float32) + + # Pre-apply env_origins so we can compare directly against world-space positions. + origins = env.scene.env_origins # (N, 3) + self._lower = origins + ranges[:, 0] # (N, 3) + self._upper = origins + ranges[:, 1] # (N, 3) + + def __call__( + self, + env: ManagerBasedRLEnv, + asset_cfg: SceneEntityCfg = SceneEntityCfg("object"), + in_bound_range: dict[str, tuple[float, float]] = {}, + ) -> torch.Tensor: + pos_w = self._object.data.root_pos_w.torch + return ((pos_w < self._lower) | (pos_w > self._upper)).any(dim=1) def abnormal_robot_state(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor: """Terminating environment when violation of velocity limits detects, this usually indicates unstable physics caused by very bad, or aggressive action""" robot: Articulation = env.scene[asset_cfg.name] - return (robot.data.joint_vel.abs() > (robot.data.joint_vel_limits * 2)).any(dim=1) + joint_vel = robot.data.joint_vel.torch + joint_vel_limits = robot.data.joint_vel_limits.torch + return (joint_vel.abs() > (joint_vel_limits * 2)).any(dim=1) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py index 409abc5931d8..367155ae7ad5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/utils.py @@ -183,7 +183,7 @@ def _triangulate_faces(prim) -> np.ndarray: return np.asarray(faces, dtype=np.int64) -def create_primitive_mesh(prim) -> trimesh.Trimesh: +def create_primitive_mesh(prim): """Create a trimesh mesh from a USD primitive (Cube, Sphere, Cylinder, etc.).""" prim_type = prim.GetTypeName() if prim_type == "Cube": @@ -243,7 +243,7 @@ def farthest_point_sampling( farthest = torch.randint(0, N, (1,), device=device) for j in range(n_samples): sampled_idx[j] = farthest - dist = torch.norm(points - points[farthest], dim=1) + dist = torch.linalg.norm(points - points[farthest], dim=1) distances = torch.minimum(distances, dist) farthest = torch.argmax(distances) return sampled_idx diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py index 71594ae210d8..891d166c8e1a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py @@ -7,6 +7,8 @@ from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -17,11 +19,11 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg -from isaaclab.sim.simulation_cfg import PhysxCfg, SimulationCfg -from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg +from isaaclab.sim.simulation_cfg import SimulationCfg +from isaaclab.sim.spawners.materials import RigidBodyMaterialCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveGaussianNoiseCfg as Gnoise +from isaaclab.utils.noise import GaussianNoiseCfg as Gnoise import isaaclab_tasks.manager_based.manipulation.inhand.mdp as mdp @@ -54,7 +56,7 @@ class InHandObjectSceneCfg(InteractiveSceneCfg): ), mass_props=sim_utils.MassPropertiesCfg(density=400.0), ), - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.19, 0.56), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.19, 0.56), rot=(0.0, 0.0, 0.0, 1.0)), ) # lights @@ -319,7 +321,7 @@ class InHandObjectEnvCfg(ManagerBasedRLEnvCfg): static_friction=1.0, dynamic_friction=1.0, ), - physx=PhysxCfg( + physics=PhysxCfg( bounce_threshold_velocity=0.2, gpu_max_rigid_contact_count=2**20, gpu_max_rigid_patch_count=2**23, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.py index 0fafe4500367..a69b931a8cfb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.py @@ -5,10 +5,6 @@ """This sub-module contains the functions that are specific to the in-hand manipulation environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .commands import * # noqa: F401, F403 -from .events import * # noqa: F401, F403 -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.pyi new file mode 100644 index 000000000000..fac53547437a --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "InHandReOrientationCommand", + "InHandReOrientationCommandCfg", + "reset_joints_within_limits_range", + "goal_quat_diff", + "success_bonus", + "track_orientation_inv_l2", + "track_pos_l2", + "max_consecutive_success", + "object_away_from_goal", + "object_away_from_robot", +] + +from .commands import InHandReOrientationCommand, InHandReOrientationCommandCfg +from .events import reset_joints_within_limits_range +from .observations import goal_quat_diff +from .rewards import success_bonus, track_orientation_inv_l2, track_pos_l2 +from .terminations import max_consecutive_success, object_away_from_goal, object_away_from_robot +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.py index ab3a9cf3e11c..66099570dc57 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.py @@ -5,5 +5,6 @@ """Sub-module containing command terms for 3D orientation goals.""" -from .commands_cfg import InHandReOrientationCommandCfg # noqa: F401 -from .orientation_command import InHandReOrientationCommand # noqa: F401 +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.pyi new file mode 100644 index 000000000000..d9531f073a20 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/__init__.pyi @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "InHandReOrientationCommandCfg", + "InHandReOrientationCommand", +] + +from .commands_cfg import InHandReOrientationCommandCfg +from .orientation_command import InHandReOrientationCommand diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py index 8c020137c0c9..6fe3ad8235ba 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py @@ -11,8 +11,6 @@ from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from .orientation_command import InHandReOrientationCommand - @configclass class InHandReOrientationCommandCfg(CommandTermCfg): @@ -21,7 +19,8 @@ class InHandReOrientationCommandCfg(CommandTermCfg): Please refer to the :class:`InHandReOrientationCommand` class for more details. """ - class_type: type = InHandReOrientationCommand + class_type: type | str = "{DIR}.orientation_command:InHandReOrientationCommand" + resampling_time_range: tuple[float, float] = (1e6, 1e6) # no resampling based on time asset_name: str = MISSING diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py index 3f116a48c497..7c6d250f2147 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py @@ -13,11 +13,12 @@ import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import RigidObject from isaaclab.managers import CommandTerm -from isaaclab.markers.visualization_markers import VisualizationMarkers +from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.leapp import POSE7_ELEMENT_NAMES if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv from .commands_cfg import InHandReOrientationCommandCfg @@ -58,11 +59,11 @@ def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): # create buffers to store the command # -- command: (x, y, z) init_pos_offset = torch.tensor(cfg.init_pos_offset, dtype=torch.float, device=self.device) - self.pos_command_e = self.object.data.default_root_state[:, :3] + init_pos_offset + self.pos_command_e = self.object.data.default_root_pose.torch[:, :3] + init_pos_offset self.pos_command_w = self.pos_command_e + self._env.scene.env_origins - # -- orientation: (w, x, y, z) + # -- orientation: (x, y, z, w) self.quat_command_w = torch.zeros(self.num_envs, 4, device=self.device) - self.quat_command_w[:, 0] = 1.0 # set the scalar component to 1.0 + self.quat_command_w[:, 3] = 1.0 # set the scalar component to 1.0 # -- unit vectors self._X_UNIT_VEC = torch.tensor([1.0, 0, 0], device=self.device).repeat((self.num_envs, 1)) @@ -73,6 +74,15 @@ def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device) + self.metrics["success_rate"] = torch.zeros(self.num_envs, device=self.device) + # -- per-attempt success accounting: each success-driven resample completes one attempt; + # the trailing attempt at episode end counts as one unsuccessful attempt. + self._completed_attempts = torch.zeros(self.num_envs, device=self.device) + + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or POSE7_ELEMENT_NAMES def __str__(self) -> str: msg = "InHandManipulationCommandGenerator:\n" @@ -96,15 +106,37 @@ def _update_metrics(self): # logs data # -- compute the orientation error self.metrics["orientation_error"] = math_utils.quat_error_magnitude( - self.object.data.root_quat_w, self.quat_command_w + self.object.data.root_quat_w.torch, self.quat_command_w ) # -- compute the position error - self.metrics["position_error"] = torch.norm(self.object.data.root_pos_w - self.pos_command_w, dim=1) + self.metrics["position_error"] = torch.linalg.norm( + self.object.data.root_pos_w.torch - self.pos_command_w, dim=1 + ) # -- compute the number of consecutive successes successes = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold self.metrics["consecutive_success"] += successes.float() + def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]: + # Snapshot per-attempt success rate BEFORE the base class logs and zeros metrics. + # success_rate = completed_attempts / (completed_attempts + 1 trailing in-progress). + if env_ids is None: + env_ids = slice(None) + completed = self._completed_attempts[env_ids] + self.metrics["success_rate"][env_ids] = completed / (completed + 1.0) + extras = super().reset(env_ids) + # super().reset() invoked _resample_command for the new initial goal, which + # incremented _completed_attempts; zero it back out so the new episode starts clean. + self._completed_attempts[env_ids] = 0.0 + # Route success_rate to the unified ``Metrics/success_rate`` path (shared TensorBoard + # card across tasks); pop it from the returned dict so CommandManager does not + # additionally log it under ``Metrics//success_rate``. + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = extras.pop("success_rate") + return extras + def _resample_command(self, env_ids: Sequence[int]): + # Each call corresponds to a success-driven (or initial) resample; count it as a + # completed attempt. The post-reset increment is cleared by ``reset()`` afterwards. + self._completed_attempts[env_ids] += 1.0 # sample new orientation targets rand_floats = 2.0 * torch.rand((len(env_ids), 2), device=self.device) - 1.0 # rotate randomly about x-axis and then y-axis diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/events.py index dad2e88107e8..c118cd16a506 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/events.py @@ -11,11 +11,11 @@ import torch -from isaaclab.assets import Articulation from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.utils.math import sample_uniform if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedEnv @@ -77,11 +77,11 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self._asset: Articulation = env.scene[asset_cfg.name] - default_joint_pos = self._asset.data.default_joint_pos[0] - default_joint_vel = self._asset.data.default_joint_vel[0] + default_joint_pos = self._asset.data.default_joint_pos.torch[0] + default_joint_vel = self._asset.data.default_joint_vel.torch[0] # create buffers to store the joint position range - self._pos_ranges = self._asset.data.soft_joint_pos_limits[0].clone() + self._pos_ranges = self._asset.data.soft_joint_pos_limits.torch[0].clone() # parse joint position ranges pos_joint_ids = [] for joint_name, joint_range in cfg.params["position_range"].items(): @@ -113,9 +113,8 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): self._pos_ranges = self._pos_ranges[self._pos_joint_ids] # create buffers to store the joint velocity range - self._vel_ranges = torch.stack( - [-self._asset.data.soft_joint_vel_limits[0], self._asset.data.soft_joint_vel_limits[0]], dim=1 - ) + soft_joint_vel_limits_torch = self._asset.data.soft_joint_vel_limits.torch[0] + self._vel_ranges = torch.stack([-soft_joint_vel_limits_torch, soft_joint_vel_limits_torch], dim=1) # parse joint velocity ranges vel_joint_ids = [] for joint_name, joint_range in cfg.params["velocity_range"].items(): @@ -157,8 +156,8 @@ def __call__( operation: Literal["abs", "scale"] = "abs", ): # get default joint state - joint_pos = self._asset.data.default_joint_pos[env_ids].clone() - joint_vel = self._asset.data.default_joint_vel[env_ids].clone() + joint_pos = self._asset.data.default_joint_pos.torch[env_ids].clone() + joint_vel = self._asset.data.default_joint_vel.torch[env_ids].clone() # sample random joint positions for each joint if len(self._pos_joint_ids) > 0: @@ -167,7 +166,7 @@ def __call__( self._pos_ranges[:, 0], self._pos_ranges[:, 1], joint_pos_shape, device=joint_pos.device ) # clip the joint positions to the joint limits - joint_pos_limits = self._asset.data.soft_joint_pos_limits[0, self._pos_joint_ids] + joint_pos_limits = self._asset.data.soft_joint_pos_limits.torch[0, self._pos_joint_ids] joint_pos = joint_pos.clamp(joint_pos_limits[:, 0], joint_pos_limits[:, 1]) # sample random joint velocities for each joint @@ -177,8 +176,9 @@ def __call__( self._vel_ranges[:, 0], self._vel_ranges[:, 1], joint_vel_shape, device=joint_vel.device ) # clip the joint velocities to the joint limits - joint_vel_limits = self._asset.data.soft_joint_vel_limits[0, self._vel_joint_ids] + joint_vel_limits = self._asset.data.soft_joint_vel_limits.torch[0, self._vel_joint_ids] joint_vel = joint_vel.clamp(-joint_vel_limits, joint_vel_limits) # set into the physics simulation - self._asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + self._asset.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._asset.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/observations.py index e90597925634..57620602b6fd 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/observations.py @@ -5,16 +5,19 @@ """Functions specific to the in-hand dexterous manipulation environments.""" +from __future__ import annotations + from typing import TYPE_CHECKING import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import RigidObject -from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import RigidObject + from isaaclab.envs import ManagerBasedRLEnv + from .commands import InHandReOrientationCommand @@ -31,7 +34,7 @@ def goal_quat_diff( # obtain the orientations goal_quat_w = command_term.command[:, 3:7] - asset_quat_w = asset.data.root_quat_w + asset_quat_w = asset.data.root_quat_w.torch # compute quaternion difference quat = math_utils.quat_mul(asset_quat_w, math_utils.quat_conjugate(goal_quat_w)) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/rewards.py index f928b92fb367..f89b2f613b51 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/rewards.py @@ -5,16 +5,19 @@ """Functions specific to the in-hand dexterous manipulation environments.""" +from __future__ import annotations + from typing import TYPE_CHECKING import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import RigidObject -from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import RigidObject + from isaaclab.envs import ManagerBasedRLEnv + from .commands import InHandReOrientationCommand @@ -40,7 +43,7 @@ def success_bonus( # obtain the threshold for the orientation error threshold = command_term.cfg.orientation_success_threshold # calculate the orientation error - dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w) + dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w.torch, goal_quat_w) return dtheta <= threshold @@ -64,9 +67,9 @@ def track_pos_l2( # obtain the goal position goal_pos_e = command_term.command[:, 0:3] # obtain the object position in the environment frame - object_pos_e = asset.data.root_pos_w - env.scene.env_origins + object_pos_e = asset.data.root_pos_w.torch - env.scene.env_origins - return torch.norm(goal_pos_e - object_pos_e, p=2, dim=-1) + return torch.linalg.norm(goal_pos_e - object_pos_e, ord=2, dim=-1) def track_orientation_inv_l2( @@ -92,6 +95,6 @@ def track_orientation_inv_l2( # obtain the goal orientation goal_quat_w = command_term.command[:, 3:7] # calculate the orientation error - dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w) + dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w.torch, goal_quat_w) return 1.0 / (dtheta + rot_eps) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/terminations.py index 1d4f36f1e62b..c2c7a02f6797 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/terminations.py @@ -5,14 +5,17 @@ """Functions specific to the in-hand dexterous manipulation environments.""" +from __future__ import annotations + from typing import TYPE_CHECKING import torch -from isaaclab.envs import ManagerBasedRLEnv from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + from .commands import InHandReOrientationCommand @@ -51,10 +54,10 @@ def object_away_from_goal( asset = env.scene[object_cfg.name] # object pos - asset_pos_e = asset.data.root_pos_w - env.scene.env_origins + asset_pos_e = asset.data.root_pos_w.torch - env.scene.env_origins goal_pos_e = command_term.command[:, :3] - return torch.norm(asset_pos_e - goal_pos_e, p=2, dim=1) > threshold + return torch.linalg.norm(asset_pos_e - goal_pos_e, ord=2, dim=1) > threshold def object_away_from_robot( @@ -79,6 +82,6 @@ def object_away_from_robot( object = env.scene[object_cfg.name] # compute distance - dist = torch.norm(robot.data.root_pos_w - object.data.root_pos_w, dim=1) + dist = torch.linalg.norm(robot.data.root_pos_w.torch - object.data.root_pos_w.torch, dim=1) return dist > threshold diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml index 593de544a83a..8790f4980af2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/agents/sb3_ppo_cfg.yaml @@ -3,32 +3,43 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Reference: https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/hyperparams/ppo.yml#L32 +# Stable SB3 configuration - prevents 40M crash with normalization seed: 42 -# epoch * n_steps * nenvs: 500×512*8*8 -n_timesteps: 16384000 +# 1000 iterations * 32 steps * 4096 envs = 131072000 +n_timesteps: 131072000 policy: 'MlpPolicy' -n_steps: 64 -# mini batch size: num_envs * nsteps / nminibatches 2048×512÷2048 -batch_size: 192 + +# Balanced rollout settings +n_steps: 32 +# batch_size: 4096 envs * 32 steps / 8 minibatches = 16384 +batch_size: 16384 + gae_lambda: 0.95 gamma: 0.99 -n_epochs: 8 -ent_coef: 0.00 -vf_coef: 0.0001 -learning_rate: !!float 3e-4 + +n_epochs: 5 + +# Small entropy for some exploration but smooth motion +ent_coef: 0.002 + +# Strong value function +vf_coef: 1.0 + +learning_rate: !!float 1e-4 + clip_range: 0.2 +target_kl: 0.01 +max_grad_norm: 1.0 + +# Network architecture policy_kwargs: activation_fn: 'nn.ELU' net_arch: pi: [256, 128, 64] vf: [256, 128, 64] -target_kl: 0.01 -max_grad_norm: 1.0 -# # Uses VecNormalize class to normalize obs -# normalize_input: True -# # Uses VecNormalize class to normalize rew -# normalize_value: True -# clip_obs: 5 +# CRITICAL: Enable BOTH normalizations to prevent reward collapse +normalize_input: True +normalize_value: True +clip_obs: 10.0 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py index 9b9441a22341..4492b487375b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py @@ -3,7 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import DeformableObjectCfg +from isaaclab_physx.assets import DeformableObjectCfg + from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -72,7 +73,7 @@ def __post_init__(self): self.scene.object = DeformableObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.5, 0, 0.05), rot=(0.707, 0, 0, 0.707)), + init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.5, 0, 0.05), rot=(0, 0, 0.707, 0.707)), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Objects/Teddy_Bear/teddy_bear.usd", scale=(0.01, 0.01, 0.01), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py index 5c5754c53e43..58c161aab182 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py @@ -46,7 +46,7 @@ def __post_init__(self): # Set Cube as object self.scene.object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.5, 0, 0.055], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", scale=(0.8, 0.8, 0.8), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/joint_pos_env_cfg.py index b5f29f1fc32f..40f6e6552599 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/joint_pos_env_cfg.py @@ -57,7 +57,7 @@ def __post_init__(self): # Set Cube as object self.scene.object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0, 0.055], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0, 0.055], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", scale=(0.8, 0.8, 0.8), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py index 491b713c14f9..42d6555b4163 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py @@ -10,8 +10,11 @@ from dataclasses import MISSING +from isaaclab_physx.assets import DeformableObjectCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg, AssetBaseCfg, DeformableObjectCfg, RigidObjectCfg +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm @@ -50,7 +53,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0, 0, 0.707, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) @@ -234,7 +237,9 @@ def __post_init__(self): self.sim.dt = 0.01 # 100Hz self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 - self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_found_lost_aggregate_pairs_capacity=1024 * 1024 * 4, + gpu_total_aggregate_pairs_capacity=16 * 1024, + friction_correlation_distance=0.00625, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py index 272661bda61d..e347a0840a33 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py @@ -5,8 +5,11 @@ from dataclasses import MISSING +from isaaclab_physx.assets import DeformableObjectCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg, AssetBaseCfg, DeformableObjectCfg, RigidObjectCfg +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm @@ -45,7 +48,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0, 0, 0.707, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) @@ -141,7 +144,7 @@ class RewardsCfg: object_goal_tracking = RewTerm( func=mdp.object_goal_distance, - params={"std": 0.3, "minimal_height": 0.04, "command_name": "object_pose"}, + params={"std": 0.3, "minimal_height": 0.04, "command_name": "object_pose", "success_threshold": 0.05}, weight=16.0, ) @@ -215,8 +218,9 @@ def __post_init__(self): self.sim.dt = 0.01 # 100Hz self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 - self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_found_lost_aggregate_pairs_capacity=1024 * 1024 * 4, + gpu_total_aggregate_pairs_capacity=16 * 1024, + friction_correlation_distance=0.00625, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.py index f3dd0fecdf8e..1c3431f2637c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.py @@ -5,8 +5,6 @@ """This sub-module contains the functions that are specific to the lift environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.pyi new file mode 100644 index 000000000000..a3487daa9a59 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/__init__.pyi @@ -0,0 +1,17 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "object_position_in_robot_root_frame", + "object_ee_distance", + "object_goal_distance", + "object_is_lifted", + "object_reached_goal", +] + +from .observations import object_position_in_robot_root_frame +from .rewards import object_ee_distance, object_goal_distance, object_is_lifted +from .terminations import object_reached_goal +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/observations.py index 8654933a9aae..06659683d770 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/observations.py @@ -9,11 +9,11 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import SceneEntityCfg from isaaclab.utils.math import subtract_frame_transforms if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -25,6 +25,6 @@ def object_position_in_robot_root_frame( """The position of the object in the robot's root frame.""" robot: RigidObject = env.scene[robot_cfg.name] object: RigidObject = env.scene[object_cfg.name] - object_pos_w = object.data.root_pos_w[:, :3] - object_pos_b, _ = subtract_frame_transforms(robot.data.root_pos_w, robot.data.root_quat_w, object_pos_w) + object_pos_w = object.data.root_pos_w.torch[:, :3] + object_pos_b, _ = subtract_frame_transforms(robot.data.root_pos_w.torch, robot.data.root_quat_w.torch, object_pos_w) return object_pos_b diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/rewards.py index 34e60773a068..77517708c053 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/rewards.py @@ -9,13 +9,13 @@ import torch -from isaaclab.assets import RigidObject -from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import FrameTransformer +from isaaclab.managers import ManagerTermBase, RewardTermCfg, SceneEntityCfg from isaaclab.utils.math import combine_frame_transforms if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import FrameTransformer def object_is_lifted( @@ -23,7 +23,7 @@ def object_is_lifted( ) -> torch.Tensor: """Reward the agent for lifting the object above the minimal height.""" object: RigidObject = env.scene[object_cfg.name] - return torch.where(object.data.root_pos_w[:, 2] > minimal_height, 1.0, 0.0) + return torch.where(object.data.root_pos_w.torch[:, 2] > minimal_height, 1.0, 0.0) def object_ee_distance( @@ -37,32 +37,56 @@ def object_ee_distance( object: RigidObject = env.scene[object_cfg.name] ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] # Target object position: (num_envs, 3) - cube_pos_w = object.data.root_pos_w + cube_pos_w = object.data.root_pos_w.torch # End-effector position: (num_envs, 3) - ee_w = ee_frame.data.target_pos_w[..., 0, :] + ee_w = ee_frame.data.target_pos_w.torch[..., 0, :] # Distance of the end-effector to the object: (num_envs,) - object_ee_distance = torch.norm(cube_pos_w - ee_w, dim=1) + object_ee_distance = torch.linalg.norm(cube_pos_w - ee_w, dim=1) return 1 - torch.tanh(object_ee_distance / std) -def object_goal_distance( - env: ManagerBasedRLEnv, - std: float, - minimal_height: float, - command_name: str, - robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), - object_cfg: SceneEntityCfg = SceneEntityCfg("object"), -) -> torch.Tensor: - """Reward the agent for tracking the goal pose using tanh-kernel.""" - # extract the used quantities (to enable type-hinting) - robot: RigidObject = env.scene[robot_cfg.name] - object: RigidObject = env.scene[object_cfg.name] - command = env.command_manager.get_command(command_name) - # compute the desired position in the world frame - des_pos_b = command[:, :3] - des_pos_w, _ = combine_frame_transforms(robot.data.root_pos_w, robot.data.root_quat_w, des_pos_b) - # distance of the end-effector to the object: (num_envs,) - distance = torch.norm(des_pos_w - object.data.root_pos_w, dim=1) - # rewarded if the object is lifted above the threshold - return (object.data.root_pos_w[:, 2] > minimal_height) * (1 - torch.tanh(distance / std)) +class object_goal_distance(ManagerTermBase): + """Reward the agent for tracking the object-to-goal pose using a tanh kernel. + + If ``success_threshold`` is provided in the term params, this also tracks per-episode + success (sticky binary: object ever within ``success_threshold`` of the commanded goal + while lifted above ``minimal_height``) and logs the mean across environments under + ``Metrics/success_rate`` on reset. + """ + + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + self._track_success = cfg.params.get("success_threshold") is not None + if self._track_success: + self._succeeded = torch.zeros(env.num_envs, dtype=torch.bool, device=env.device) + + def reset(self, env_ids: torch.Tensor): + if self._track_success: + self._env.extras.setdefault("log", {})["Metrics/success_rate"] = ( + self._succeeded[env_ids].float().mean().item() + ) + self._succeeded[env_ids] = False + + def __call__( + self, + env: ManagerBasedRLEnv, + std: float, + minimal_height: float, + command_name: str, + robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + object_cfg: SceneEntityCfg = SceneEntityCfg("object"), + success_threshold: float | None = None, + ) -> torch.Tensor: + robot: RigidObject = env.scene[robot_cfg.name] + obj: RigidObject = env.scene[object_cfg.name] + command = env.command_manager.get_command(command_name) + des_pos_w, _ = combine_frame_transforms( + robot.data.root_pos_w.torch, robot.data.root_quat_w.torch, command[:, :3] + ) + object_pos_w = obj.data.root_pos_w.torch + distance = torch.linalg.norm(des_pos_w - object_pos_w, dim=1) + is_lifted = object_pos_w[:, 2] > minimal_height + if success_threshold is not None: + self._succeeded |= is_lifted & (distance < success_threshold) + return is_lifted.float() * (1 - torch.tanh(distance / std)) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/terminations.py index 68fe0e011b85..5342ab669758 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/mdp/terminations.py @@ -15,11 +15,11 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import SceneEntityCfg from isaaclab.utils.math import combine_frame_transforms if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -46,9 +46,13 @@ def object_reached_goal( command = env.command_manager.get_command(command_name) # compute the desired position in the world frame des_pos_b = command[:, :3] - des_pos_w, _ = combine_frame_transforms(robot.data.root_pos_w, robot.data.root_quat_w, des_pos_b) + # Convert to torch for combine_frame_transforms (robot data may be Warp arrays under Newton) + root_pos_w = robot.data.root_pos_w.torch + root_quat_w = robot.data.root_quat_w.torch + des_pos_w, _ = combine_frame_transforms(root_pos_w, root_quat_w, des_pos_b) # distance of the end-effector to the object: (num_envs,) - distance = torch.norm(des_pos_w - object.data.root_pos_w[:, :3], dim=1) + object_pos_w = object.data.root_pos_w.torch + distance = torch.linalg.norm(des_pos_w - object_pos_w[:, :3], dim=1) # rewarded if the object is lifted above the threshold return distance < threshold diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py index 6e14d2e1fdd2..98c5aca681d8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_base_env_cfg.py @@ -3,15 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause +import logging import tempfile from dataclasses import MISSING -import torch +try: + from isaaclab_teleop import XrCfg + + _TELEOP_AVAILABLE = True +except ImportError: + _TELEOP_AVAILABLE = False + logging.getLogger(__name__).warning("isaaclab_teleop is not installed. XR teleoperation features will be disabled.") import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.devices.openxr import XrCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import ActionTermCfg, SceneEntityCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -41,7 +47,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="/World/envs/env_.*/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/exhaust_pipe_task/exhaust_pipe_assets/table.usd", scale=(1.0, 1.0, 1.3), @@ -51,7 +57,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): blue_exhaust_pipe = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/BlueExhaustPipe", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.04904, 0.31, 1.2590], rot=[0, 0, 1.0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.04904, 0.31, 1.2590], rot=[0, 1.0, 0.0, 0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/exhaust_pipe_task/exhaust_pipe_assets/blue_exhaust_pipe.usd", scale=(0.5, 0.5, 1.5), @@ -61,7 +67,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): blue_sorting_bin = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/BlueSortingBin", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.16605, 0.39, 0.98634], rot=[1.0, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.16605, 0.39, 0.98634], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/exhaust_pipe_task/exhaust_pipe_assets/blue_sorting_bin.usd", scale=(1.0, 1.7, 1.0), @@ -71,7 +77,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): black_sorting_bin = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/BlackSortingBin", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.40132, 0.39, 0.98634], rot=[1.0, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.40132, 0.39, 0.98634], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/exhaust_pipe_task/exhaust_pipe_assets/black_sorting_bin.usd", scale=(1.0, 1.7, 1.0), @@ -84,7 +90,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): prim_path="/World/envs/env_.*/Robot", init_state=ArticulationCfg.InitialStateCfg( pos=(0, 0, 0.93), - rot=(0.7071, 0, 0, 0.7071), + rot=(0.0, 0.0, 0.7071, 0.7071), joint_pos={ # right-arm "right_shoulder_pitch_joint": 0.0, @@ -145,7 +151,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): width=256, data_types=["rgb"], spawn=sim_utils.PinholeCameraCfg(focal_length=18.15, clipping_range=(0.1, 2)), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.12, 1.85418), rot=(-0.17246, 0.98502, 0.0, 0.0), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.12, 1.85418), rot=(0.0, 0.98502, 0.0, -0.17246), convention="ros"), ) # Ground plane @@ -261,63 +267,50 @@ class ExhaustPipeGR1T2BaseEnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, 0.0), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - - # OpenXR hand tracking has 26 joints per hand - NUM_OPENXR_HAND_JOINTS = 26 - # Temporary directory for URDF files temp_urdf_dir = tempfile.gettempdir() # Idle action to hold robot in default pose # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), # right arm quat (4), left/right hand joint pos (22)] - idle_action = torch.tensor( - [ - [ - -0.2909, - 0.2778, - 1.1247, - 0.5253, - 0.5747, - -0.4160, - 0.4699, - 0.22878, - 0.2536, - 1.0953, - 0.5, - 0.5, - -0.5, - 0.5, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ] - ) + idle_action = [ + -0.2909, + 0.2778, + 1.1247, + 0.5747, + -0.4160, + 0.4699, + 0.5253, + 0.22878, + 0.2536, + 1.0953, + 0.5, + -0.5, + 0.5, + 0.5, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] def __post_init__(self): """Post initialization.""" @@ -334,3 +327,9 @@ def __post_init__(self): # List of image observations in policy observations self.image_obs_list = ["robot_pov_cam"] + + if _TELEOP_AVAILABLE: + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, 0.0), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py index 66ebfcad8a18..3dd523de27f6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/exhaustpipe_gr1t2_pink_ik_env_cfg.py @@ -3,21 +3,18 @@ # # SPDX-License-Identifier: BSD-3-Clause -from pink.tasks import DampingTask, FrameTask +from isaaclab_teleop.isaac_teleop_cfg import IsaacTeleopCfg -import carb - -import isaaclab.controllers.utils as ControllerUtils -from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg -from isaaclab.devices import DevicesCfg -from isaaclab.devices.openxr import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters import GR1T2RetargeterCfg +from isaaclab.controllers.pink_ik import DampingTaskCfg, FrameTaskCfg, NullSpacePostureTaskCfg, PinkIKControllerCfg from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg from isaaclab.utils import configclass from isaaclab_tasks.manager_based.manipulation.pick_place.exhaustpipe_gr1t2_base_env_cfg import ( ExhaustPipeGR1T2BaseEnvCfg, ) +from isaaclab_tasks.manager_based.manipulation.pick_place.pickplace_gr1t2_env_cfg import ( + _build_gr1t2_pickplace_pipeline, +) @configclass @@ -84,24 +81,24 @@ def __post_init__(self): # Determines whether Pink IK solver will fail due to a joint limit violation fail_on_joint_limit_violation=False, variable_input_tasks=[ - FrameTask( - "GR1T2_fourier_hand_6dof_left_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_left_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - FrameTask( - "GR1T2_fourier_hand_6dof_right_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_right_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - DampingTask( + DampingTaskCfg( cost=0.5, # [cost] * [s] / [rad] ), - NullSpacePostureTask( + NullSpacePostureTaskCfg( cost=0.2, lm_damping=1, controlled_frames=[ @@ -124,32 +121,15 @@ def __post_init__(self): ), ], fixed_input_tasks=[], - xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), ) - # Convert USD to URDF and change revolute joints to fixed - temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( - self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True - ) - - # Set the URDF and mesh paths for the IK controller - self.actions.gr1_action.controller.urdf_path = temp_urdf_output_path - self.actions.gr1_action.controller.mesh_path = temp_urdf_meshes_output_path + # Defer USD→URDF conversion to controller initialization (requires Isaac Sim at runtime). + self.actions.gr1_action.controller.usd_path = self.scene.robot.spawn.usd_path + self.actions.gr1_action.controller.urdf_output_dir = self.temp_urdf_dir - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - # number of joints in both hands - num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, - sim_device=self.sim.device, - hand_joint_names=self.actions.gr1_action.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline. + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_gr1t2_pickplace_pipeline()[0], + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py index 555bfb7cbe8f..1c3431f2637c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py @@ -5,8 +5,6 @@ """This sub-module contains the functions that are specific to the lift environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * # noqa: F401, F403 -from .pick_place_events import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.pyi new file mode 100644 index 000000000000..1421a52bc546 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.pyi @@ -0,0 +1,27 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "get_all_robot_link_state", + "get_eef_pos", + "get_eef_quat", + "get_robot_joint_state", + "object_obs", + "reset_object_poses_nut_pour", + "task_done_exhaust_pipe", + "task_done_nut_pour", + "task_done_pick_place", +] + +from .observations import ( + get_all_robot_link_state, + get_eef_pos, + get_eef_quat, + get_robot_joint_state, + object_obs, +) +from .pick_place_events import reset_object_poses_nut_pour +from .terminations import task_done_exhaust_pipe, task_done_nut_pour, task_done_pick_place +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py index 01e52e73f242..c3e030b33ec0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py @@ -26,14 +26,14 @@ def object_obs( right_eef_to object, """ - body_pos_w = env.scene["robot"].data.body_pos_w + body_pos_w = env.scene["robot"].data.body_pos_w.torch left_eef_idx = env.scene["robot"].data.body_names.index(left_eef_link_name) right_eef_idx = env.scene["robot"].data.body_names.index(right_eef_link_name) left_eef_pos = body_pos_w[:, left_eef_idx] - env.scene.env_origins right_eef_pos = body_pos_w[:, right_eef_idx] - env.scene.env_origins - object_pos = env.scene["object"].data.root_pos_w - env.scene.env_origins - object_quat = env.scene["object"].data.root_quat_w + object_pos = env.scene["object"].data.root_pos_w.torch - env.scene.env_origins + object_quat = env.scene["object"].data.root_quat_w.torch left_eef_to_object = object_pos - left_eef_pos right_eef_to_object = object_pos - right_eef_pos @@ -50,7 +50,7 @@ def object_obs( def get_eef_pos(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor: - body_pos_w = env.scene["robot"].data.body_pos_w + body_pos_w = env.scene["robot"].data.body_pos_w.torch left_eef_idx = env.scene["robot"].data.body_names.index(link_name) left_eef_pos = body_pos_w[:, left_eef_idx] - env.scene.env_origins @@ -58,7 +58,7 @@ def get_eef_pos(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor: def get_eef_quat(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor: - body_quat_w = env.scene["robot"].data.body_quat_w + body_quat_w = env.scene["robot"].data.body_quat_w.torch left_eef_idx = env.scene["robot"].data.body_names.index(link_name) left_eef_quat = body_quat_w[:, left_eef_idx] @@ -72,7 +72,7 @@ def get_robot_joint_state( # hand_joint_names is a list of regex, use find_joints indexes, _ = env.scene["robot"].find_joints(joint_names) indexes = torch.tensor(indexes, dtype=torch.long) - robot_joint_states = env.scene["robot"].data.joint_pos[:, indexes] + robot_joint_states = env.scene["robot"].data.joint_pos.torch[:, indexes] return robot_joint_states @@ -80,7 +80,7 @@ def get_robot_joint_state( def get_all_robot_link_state( env: ManagerBasedRLEnv, ) -> torch.Tensor: - body_pos_w = env.scene["robot"].data.body_link_state_w[:, :, :] + body_pos_w = env.scene["robot"].data.body_link_state_w.torch[:, :, :] all_robot_link_pos = body_pos_w return all_robot_link_pos diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py index ca1fd940fea8..f1b4bd15de97 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py @@ -44,10 +44,10 @@ def reset_object_poses_nut_pour( sorting_scale = env.scene[sorting_scale_cfg.name] # get default root state - sorting_beaker_root_states = sorting_beaker.data.default_root_state[env_ids].clone() - factory_nut_root_states = factory_nut.data.default_root_state[env_ids].clone() - sorting_bowl_root_states = sorting_bowl.data.default_root_state[env_ids].clone() - sorting_scale_root_states = sorting_scale.data.default_root_state[env_ids].clone() + sorting_beaker_root_poses = sorting_beaker.data.default_root_pose.torch[env_ids].clone() + factory_nut_root_poses = factory_nut.data.default_root_pose.torch[env_ids].clone() + sorting_bowl_root_poses = sorting_bowl.data.default_root_pose.torch[env_ids].clone() + sorting_scale_root_poses = sorting_scale.data.default_root_pose.torch[env_ids].clone() # get pose ranges range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] @@ -58,39 +58,37 @@ def reset_object_poses_nut_pour( ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device ) orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) - positions_sorting_beaker = ( - sorting_beaker_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] - ) - positions_factory_nut = factory_nut_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] - orientations_sorting_beaker = math_utils.quat_mul(sorting_beaker_root_states[:, 3:7], orientations_delta) - orientations_factory_nut = math_utils.quat_mul(factory_nut_root_states[:, 3:7], orientations_delta) + positions_sorting_beaker = sorting_beaker_root_poses[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + positions_factory_nut = factory_nut_root_poses[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + orientations_sorting_beaker = math_utils.quat_mul(sorting_beaker_root_poses[:, 3:7], orientations_delta) + orientations_factory_nut = math_utils.quat_mul(factory_nut_root_poses[:, 3:7], orientations_delta) # randomize sorting bowl rand_samples = math_utils.sample_uniform( ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device ) orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) - positions_sorting_bowl = sorting_bowl_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] - orientations_sorting_bowl = math_utils.quat_mul(sorting_bowl_root_states[:, 3:7], orientations_delta) + positions_sorting_bowl = sorting_bowl_root_poses[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + orientations_sorting_bowl = math_utils.quat_mul(sorting_bowl_root_poses[:, 3:7], orientations_delta) # randomize scorting scale rand_samples = math_utils.sample_uniform( ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device ) orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) - positions_sorting_scale = sorting_scale_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] - orientations_sorting_scale = math_utils.quat_mul(sorting_scale_root_states[:, 3:7], orientations_delta) + positions_sorting_scale = sorting_scale_root_poses[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + orientations_sorting_scale = math_utils.quat_mul(sorting_scale_root_poses[:, 3:7], orientations_delta) # set into the physics simulation - sorting_beaker.write_root_pose_to_sim( - torch.cat([positions_sorting_beaker, orientations_sorting_beaker], dim=-1), env_ids=env_ids + sorting_beaker.write_root_pose_to_sim_index( + root_pose=torch.cat([positions_sorting_beaker, orientations_sorting_beaker], dim=-1), env_ids=env_ids ) - factory_nut.write_root_pose_to_sim( - torch.cat([positions_factory_nut, orientations_factory_nut], dim=-1), env_ids=env_ids + factory_nut.write_root_pose_to_sim_index( + root_pose=torch.cat([positions_factory_nut, orientations_factory_nut], dim=-1), env_ids=env_ids ) - sorting_bowl.write_root_pose_to_sim( - torch.cat([positions_sorting_bowl, orientations_sorting_bowl], dim=-1), env_ids=env_ids + sorting_bowl.write_root_pose_to_sim_index( + root_pose=torch.cat([positions_sorting_bowl, orientations_sorting_bowl], dim=-1), env_ids=env_ids ) - sorting_scale.write_root_pose_to_sim( - torch.cat([positions_sorting_scale, orientations_sorting_scale], dim=-1), env_ids=env_ids + sorting_scale.write_root_pose_to_sim_index( + root_pose=torch.cat([positions_sorting_scale, orientations_sorting_scale], dim=-1), env_ids=env_ids ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py index 6252b9c67a21..7ff4732f2640 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/terminations.py @@ -15,10 +15,10 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -63,13 +63,13 @@ def task_done_pick_place( object: RigidObject = env.scene[object_cfg.name] # Extract wheel position relative to environment origin - object_x = object.data.root_pos_w[:, 0] - env.scene.env_origins[:, 0] - object_y = object.data.root_pos_w[:, 1] - env.scene.env_origins[:, 1] - object_height = object.data.root_pos_w[:, 2] - env.scene.env_origins[:, 2] - object_vel = torch.abs(object.data.root_vel_w) + object_x = object.data.root_pos_w.torch[:, 0] - env.scene.env_origins[:, 0] + object_y = object.data.root_pos_w.torch[:, 1] - env.scene.env_origins[:, 1] + object_height = object.data.root_pos_w.torch[:, 2] - env.scene.env_origins[:, 2] + object_vel = torch.abs(object.data.root_vel_w.torch) # Get right wrist position relative to environment origin - robot_body_pos_w = env.scene["robot"].data.body_pos_w + robot_body_pos_w = env.scene["robot"].data.body_pos_w.torch right_eef_idx = env.scene["robot"].data.body_names.index(task_link_name) right_wrist_x = robot_body_pos_w[:, right_eef_idx, 0] - env.scene.env_origins[:, 0] @@ -139,11 +139,11 @@ def task_done_nut_pour( sorting_bin: RigidObject = env.scene[sorting_bin_cfg.name] # Get positions relative to environment origin - scale_pos = sorting_scale.data.root_pos_w - env.scene.env_origins - bowl_pos = sorting_bowl.data.root_pos_w - env.scene.env_origins - sorting_beaker_pos = sorting_beaker.data.root_pos_w - env.scene.env_origins - nut_pos = factory_nut.data.root_pos_w - env.scene.env_origins - bin_pos = sorting_bin.data.root_pos_w - env.scene.env_origins + scale_pos = sorting_scale.data.root_pos_w.torch - env.scene.env_origins + bowl_pos = sorting_bowl.data.root_pos_w.torch - env.scene.env_origins + sorting_beaker_pos = sorting_beaker.data.root_pos_w.torch - env.scene.env_origins + nut_pos = factory_nut.data.root_pos_w.torch - env.scene.env_origins + bin_pos = sorting_bin.data.root_pos_w.torch - env.scene.env_origins # nut to bowl nut_to_bowl_x = torch.abs(nut_pos[:, 0] - bowl_pos[:, 0]) @@ -206,8 +206,8 @@ def task_done_exhaust_pipe( blue_sorting_bin: RigidObject = env.scene[blue_sorting_bin_cfg.name] # Get positions relative to environment origin - blue_exhaust_pipe_pos = blue_exhaust_pipe.data.root_pos_w - env.scene.env_origins - blue_sorting_bin_pos = blue_sorting_bin.data.root_pos_w - env.scene.env_origins + blue_exhaust_pipe_pos = blue_exhaust_pipe.data.root_pos_w.torch - env.scene.env_origins + blue_sorting_bin_pos = blue_sorting_bin.data.root_pos_w.torch - env.scene.env_origins # blue exhaust to bin blue_exhaust_to_bin_x = torch.abs(blue_exhaust_pipe_pos[:, 0] - blue_sorting_bin_pos[:, 0]) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py index 01caf58a8af2..ea2980d53b0a 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_base_env_cfg.py @@ -3,15 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause +import logging import tempfile from dataclasses import MISSING -import torch +try: + from isaaclab_teleop import XrCfg + + _TELEOP_AVAILABLE = True +except ImportError: + _TELEOP_AVAILABLE = False + logging.getLogger(__name__).warning("isaaclab_teleop is not installed. XR teleoperation features will be disabled.") import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.devices.openxr import XrCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import ActionTermCfg, SceneEntityCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -41,7 +47,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="/World/envs/env_.*/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/table.usd", scale=(1.0, 1.0, 1.3), @@ -51,7 +57,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): sorting_scale = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/SortingScale", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.22236, 0.56, 0.9859], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.22236, 0.56, 0.9859], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_scale.usd", scale=(1.0, 1.0, 1.0), @@ -61,7 +67,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): sorting_bowl = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/SortingBowl", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.02779, 0.43007, 0.9860], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.02779, 0.43007, 0.9860], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_bowl_yellow.usd", scale=(1.0, 1.0, 1.5), @@ -72,7 +78,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): sorting_beaker = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/SortingBeaker", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.13739, 0.45793, 0.9861], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.13739, 0.45793, 0.9861], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_beaker_red.usd", scale=(0.45, 0.45, 1.3), @@ -82,7 +88,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): factory_nut = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/FactoryNut", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.13739, 0.45793, 0.9995], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.13739, 0.45793, 0.9995], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/factory_m16_nut_green.usd", scale=(0.5, 0.5, 0.5), @@ -93,7 +99,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): black_sorting_bin = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/BlackSortingBin", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.32688, 0.46793, 0.98634], rot=[1.0, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.32688, 0.46793, 0.98634], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_bin_blue.usd", scale=(0.75, 1.0, 1.0), @@ -105,7 +111,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): prim_path="/World/envs/env_.*/Robot", init_state=ArticulationCfg.InitialStateCfg( pos=(0, 0, 0.93), - rot=(0.7071, 0, 0, 0.7071), + rot=(0.0, 0.0, 0.7071, 0.7071), joint_pos={ # right-arm "right_shoulder_pitch_joint": 0.0, @@ -166,7 +172,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): width=256, data_types=["rgb"], spawn=sim_utils.PinholeCameraCfg(focal_length=18.15, clipping_range=(0.1, 2)), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.12, 1.67675), rot=(-0.19848, 0.9801, 0.0, 0.0), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.12, 1.67675), rot=(0.9801, 0.0, 0.0, -0.19848), convention="ros"), ) # Ground plane @@ -296,63 +302,50 @@ class NutPourGR1T2BaseEnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, 0.0), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - - # OpenXR hand tracking has 26 joints per hand - NUM_OPENXR_HAND_JOINTS = 26 - # Temporary directory for URDF files temp_urdf_dir = tempfile.gettempdir() # Idle action to hold robot in default pose # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), # right arm quat (4), left/right hand joint pos (22)] - idle_action = torch.tensor( - [ - [ - -0.22878, - 0.2536, - 1.0953, - 0.5, - 0.5, - -0.5, - 0.5, - 0.22878, - 0.2536, - 1.0953, - 0.5, - 0.5, - -0.5, - 0.5, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ] - ) + idle_action = [ + -0.22878, + 0.2536, + 1.0953, + 0.5, + -0.5, + 0.5, + 0.5, + 0.22878, + 0.2536, + 1.0953, + 0.5, + -0.5, + 0.5, + 0.5, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] def __post_init__(self): """Post initialization.""" @@ -369,3 +362,9 @@ def __post_init__(self): # List of image observations in policy observations self.image_obs_list = ["robot_pov_cam"] + + if _TELEOP_AVAILABLE: + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, 0.0), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py index 818fba1fc805..db242eee50a0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/nutpour_gr1t2_pink_ik_env_cfg.py @@ -3,19 +3,16 @@ # # SPDX-License-Identifier: BSD-3-Clause -from pink.tasks import DampingTask, FrameTask +from isaaclab_teleop.isaac_teleop_cfg import IsaacTeleopCfg -import carb - -import isaaclab.controllers.utils as ControllerUtils -from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg -from isaaclab.devices import DevicesCfg -from isaaclab.devices.openxr import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters import GR1T2RetargeterCfg +from isaaclab.controllers.pink_ik import DampingTaskCfg, FrameTaskCfg, NullSpacePostureTaskCfg, PinkIKControllerCfg from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg from isaaclab.utils import configclass from isaaclab_tasks.manager_based.manipulation.pick_place.nutpour_gr1t2_base_env_cfg import NutPourGR1T2BaseEnvCfg +from isaaclab_tasks.manager_based.manipulation.pick_place.pickplace_gr1t2_env_cfg import ( + _build_gr1t2_pickplace_pipeline, +) @configclass @@ -82,24 +79,24 @@ def __post_init__(self): # Determines whether Pink IK solver will fail due to a joint limit violation fail_on_joint_limit_violation=False, variable_input_tasks=[ - FrameTask( - "GR1T2_fourier_hand_6dof_left_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_left_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - FrameTask( - "GR1T2_fourier_hand_6dof_right_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_right_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - DampingTask( + DampingTaskCfg( cost=0.5, # [cost] * [s] / [rad] ), - NullSpacePostureTask( + NullSpacePostureTaskCfg( cost=0.2, lm_damping=1, controlled_frames=[ @@ -122,32 +119,15 @@ def __post_init__(self): ), ], fixed_input_tasks=[], - xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), ) - # Convert USD to URDF and change revolute joints to fixed - temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( - self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True - ) - - # Set the URDF and mesh paths for the IK controller - self.actions.gr1_action.controller.urdf_path = temp_urdf_output_path - self.actions.gr1_action.controller.mesh_path = temp_urdf_meshes_output_path + # Defer USD→URDF conversion to controller initialization (requires Isaac Sim at runtime). + self.actions.gr1_action.controller.usd_path = self.scene.robot.spawn.usd_path + self.actions.gr1_action.controller.urdf_output_dir = self.temp_urdf_dir - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - # number of joints in both hands - num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, - sim_device=self.sim.device, - hand_joint_names=self.actions.gr1_action.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline. + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_gr1t2_pickplace_pipeline()[0], + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py index ba6c5d385135..8c95df1041eb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py @@ -3,21 +3,13 @@ # # SPDX-License-Identifier: BSD-3-Clause +import os import tempfile -import torch -from pink.tasks import DampingTask, FrameTask - -import carb - -import isaaclab.controllers.utils as ControllerUtils import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg -from isaaclab.devices.device_base import DevicesCfg -from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg, XrCfg -from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import GR1T2RetargeterCfg +from isaaclab.controllers.pink_ik import DampingTaskCfg, FrameTaskCfg, NullSpacePostureTaskCfg, PinkIKControllerCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -28,11 +20,244 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from isaaclab.utils import configclass -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path from . import mdp from isaaclab_assets.robots.fourier import GR1T2_HIGH_PD_CFG # isort: skip +from isaaclab_teleop.isaac_teleop_cfg import IsaacTeleopCfg # isort: skip +from isaaclab_teleop.xr_cfg import XrCfg # isort: skip + + +def _build_gr1t2_pickplace_pipeline(): + """Build an IsaacTeleop retargeting pipeline for GR1T2 pick-place teleoperation. + + Creates two Se3AbsRetargeters for left and right wrist pose tracking and + two DexHandRetargeters for left and right dexterous hand finger control + from hand tracking data. All outputs are flattened into a single action + tensor via TensorReorderer. + """ + from isaacteleop.retargeters import ( + DexHandRetargeter, + DexHandRetargeterConfig, + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource, HandsSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + # Create input sources (trackers are auto-discovered from pipeline) + controllers = ControllersSource(name="controllers") + hands = HandsSource(name="hands") + + # External input: world-to-anchor 4x4 transform matrix provided by IsaacTeleopDevice + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + + # Apply the coordinate-frame transform to controller poses so that + # downstream retargeters receive data in the simulation world frame. + _transformed_controllers = controllers.transformed(transform_input.output(ValueInput.VALUE)) + transformed_hands = hands.transformed(transform_input.output(ValueInput.VALUE)) + + # ------------------------------------------------------------------------- + # SE3 Absolute Pose Retargeters (left and right wrists) + # ------------------------------------------------------------------------- + # Left wrist: identity rotation offset (passes through as-is in original retargeter) + left_se3_cfg = Se3RetargeterConfig( + input_device=HandsSource.LEFT, + zero_out_xy_rotation=False, + use_wrist_rotation=True, + use_wrist_position=True, + target_offset_roll=0.0, + target_offset_pitch=0.0, + target_offset_yaw=0.0, + ) + left_se3 = Se3AbsRetargeter(left_se3_cfg, name="left_ee_pose") + connected_left_se3 = left_se3.connect( + { + HandsSource.LEFT: transformed_hands.output(HandsSource.LEFT), + } + ) + + # Right wrist: 180-degree Z rotation offset + # From GR1T2Retargeter._retarget_abs: the USD control frame is 180 degrees + # rotated around the Z axis w.r.t. the OpenXR frame. + right_se3_cfg = Se3RetargeterConfig( + input_device=HandsSource.RIGHT, + zero_out_xy_rotation=False, + use_wrist_rotation=True, + use_wrist_position=True, + target_offset_roll=0.0, + target_offset_pitch=0.0, + target_offset_yaw=180.0, + ) + right_se3 = Se3AbsRetargeter(right_se3_cfg, name="right_ee_pose") + connected_right_se3 = right_se3.connect( + { + HandsSource.RIGHT: transformed_hands.output(HandsSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # DexHand Retargeters (left and right hands) + # ------------------------------------------------------------------------- + # Resolve dex-retargeting YAML config paths from IsaacLab's retargeter data directory + import isaaclab_teleop.isaac_teleop_cfg as _teleop_cfg_mod + + _teleop_cfg_file = _teleop_cfg_mod.__file__ + if _teleop_cfg_file is None: + raise RuntimeError("Could not resolve isaaclab_teleop package path for dex-retargeting configs.") + _teleop_pkg_dir = os.path.dirname(_teleop_cfg_file) + _data_dir = os.path.join( + _teleop_pkg_dir, + "deprecated", + "openxr", + "retargeters", + "humanoid", + "fourier", + "data", + ) + _config_dir = os.path.join(_data_dir, "configs", "dex-retargeting") + left_yaml_path = os.path.join(_config_dir, "fourier_hand_left_dexpilot.yml") + right_yaml_path = os.path.join(_config_dir, "fourier_hand_right_dexpilot.yml") + + # Resolve URDF paths (downloads from Omniverse if needed) + local_left_urdf = retrieve_file_path(f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_left_hand.urdf") + local_right_urdf = retrieve_file_path(f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_right_hand.urdf") + + # Hand-tracking to base-link frame transform (OPERATOR2MANO matrix) + # From gr1_t2_dex_retargeting_utils: [[0,-1,0],[-1,0,0],[0,0,-1]] + operator2mano = (0, -1, 0, -1, 0, 0, 0, 0, -1) + + # Joint names for each hand (11 DOF per hand) + left_hand_joint_names = [ + "L_index_proximal_joint", + "L_index_intermediate_joint", + "L_middle_proximal_joint", + "L_middle_intermediate_joint", + "L_pinky_proximal_joint", + "L_pinky_intermediate_joint", + "L_ring_proximal_joint", + "L_ring_intermediate_joint", + "L_thumb_proximal_yaw_joint", + "L_thumb_proximal_pitch_joint", + "L_thumb_distal_joint", + ] + + right_hand_joint_names = [ + "R_index_proximal_joint", + "R_index_intermediate_joint", + "R_middle_proximal_joint", + "R_middle_intermediate_joint", + "R_pinky_proximal_joint", + "R_pinky_intermediate_joint", + "R_ring_proximal_joint", + "R_ring_intermediate_joint", + "R_thumb_proximal_yaw_joint", + "R_thumb_proximal_pitch_joint", + "R_thumb_distal_joint", + ] + + left_dex_cfg = DexHandRetargeterConfig( + hand_retargeting_config=left_yaml_path, + hand_urdf=local_left_urdf, + hand_joint_names=left_hand_joint_names, + hand_side="left", + handtracking_to_baselink_frame_transform=operator2mano, + ) + left_dex = DexHandRetargeter(left_dex_cfg, name="left_hand") + connected_left_dex = left_dex.connect( + { + HandsSource.LEFT: hands.output(HandsSource.LEFT), + } + ) + + right_dex_cfg = DexHandRetargeterConfig( + hand_retargeting_config=right_yaml_path, + hand_urdf=local_right_urdf, + hand_joint_names=right_hand_joint_names, + hand_side="right", + handtracking_to_baselink_frame_transform=operator2mano, + ) + right_dex = DexHandRetargeter(right_dex_cfg, name="right_hand") + connected_right_dex = right_dex.connect( + { + HandsSource.RIGHT: hands.output(HandsSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TensorReorderer: flatten into a 36D action tensor + # ------------------------------------------------------------------------- + # Se3AbsRetargeter outputs 7D arrays: [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w] + left_ee_elements = ["l_pos_x", "l_pos_y", "l_pos_z", "l_quat_x", "l_quat_y", "l_quat_z", "l_quat_w"] + right_ee_elements = ["r_pos_x", "r_pos_y", "r_pos_z", "r_quat_x", "r_quat_y", "r_quat_z", "r_quat_w"] + + # Output order must match the PinkInverseKinematicsActionCfg expected tensor layout: + # [left_wrist(7), right_wrist(7), hand_joints(22)] + # Hand joints follow hand_joint_names order from ActionsCfg.upper_body_ik. + output_order = ( + left_ee_elements + + right_ee_elements + + [ + # hand_joint_names indices 0-4 (left proximal + thumb yaw) + "L_index_proximal_joint", + "L_middle_proximal_joint", + "L_pinky_proximal_joint", + "L_ring_proximal_joint", + "L_thumb_proximal_yaw_joint", + # hand_joint_names indices 5-9 (right proximal + thumb yaw) + "R_index_proximal_joint", + "R_middle_proximal_joint", + "R_pinky_proximal_joint", + "R_ring_proximal_joint", + "R_thumb_proximal_yaw_joint", + # hand_joint_names indices 10-14 (left intermediate + thumb pitch) + "L_index_intermediate_joint", + "L_middle_intermediate_joint", + "L_pinky_intermediate_joint", + "L_ring_intermediate_joint", + "L_thumb_proximal_pitch_joint", + # hand_joint_names indices 15-19 (right intermediate + thumb pitch) + "R_index_intermediate_joint", + "R_middle_intermediate_joint", + "R_pinky_intermediate_joint", + "R_ring_intermediate_joint", + "R_thumb_proximal_pitch_joint", + # hand_joint_names indices 20-21 (thumb distal) + "L_thumb_distal_joint", + "R_thumb_distal_joint", + ] + ) + + reorderer = TensorReorderer( + input_config={ + "left_ee_pose": left_ee_elements, + "right_ee_pose": right_ee_elements, + "left_hand_joints": left_hand_joint_names, + "right_hand_joints": right_hand_joint_names, + }, + output_order=output_order, + name="action_reorderer", + input_types={ + "left_ee_pose": "array", + "right_ee_pose": "array", + "left_hand_joints": "scalar", + "right_hand_joints": "scalar", + }, + ) + connected_reorderer = reorderer.connect( + { + "left_ee_pose": connected_left_se3.output("ee_pose"), + "right_ee_pose": connected_right_se3.output("ee_pose"), + "left_hand_joints": connected_left_dex.output("hand_joints"), + "right_hand_joints": connected_right_dex.output("hand_joints"), + } + ) + + pipeline = OutputCombiner({"action": connected_reorderer.output("output")}) + return pipeline, [left_dex, right_dex] ## @@ -45,7 +270,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table packing_table = AssetBaseCfg( prim_path="/World/envs/env_.*/PackingTable", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), @@ -54,7 +279,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.45, 0.45, 0.9996], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.45, 0.45, 0.9996], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", scale=(0.75, 0.75, 0.75), @@ -67,7 +292,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): prim_path="/World/envs/env_.*/Robot", init_state=ArticulationCfg.InitialStateCfg( pos=(0, 0, 0.93), - rot=(0.7071, 0, 0, 0.7071), + rot=(0.0, 0.0, 0.7071, 0.7071), joint_pos={ # right-arm "right_shoulder_pitch_joint": 0.0, @@ -176,24 +401,24 @@ class ActionsCfg: # Determines whether Pink IK solver will fail due to a joint limit violation fail_on_joint_limit_violation=False, variable_input_tasks=[ - FrameTask( - "GR1T2_fourier_hand_6dof_left_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_left_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=12, # dampening for solver for step jumps gain=0.5, ), - FrameTask( - "GR1T2_fourier_hand_6dof_right_hand_pitch_link", + FrameTaskCfg( + frame="GR1T2_fourier_hand_6dof_right_hand_pitch_link", position_cost=8.0, # [cost] / [m] orientation_cost=1.0, # [cost] / [rad] lm_damping=12, # dampening for solver for step jumps gain=0.5, ), - DampingTask( + DampingTaskCfg( cost=0.5, # [cost] * [s] / [rad] ), - NullSpacePostureTask( + NullSpacePostureTaskCfg( cost=0.5, lm_damping=1, controlled_frames=[ @@ -216,7 +441,6 @@ class ActionsCfg: ), ], fixed_input_tasks=[], - xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), ) @@ -315,61 +539,50 @@ class PickPlaceGR1T2EnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, 0.0), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - - # OpenXR hand tracking has 26 joints per hand - NUM_OPENXR_HAND_JOINTS = 26 - # Temporary directory for URDF files temp_urdf_dir = tempfile.gettempdir() # Idle action to hold robot in default pose # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), right arm quat (4), # left hand joint pos (11), right hand joint pos (11)] - idle_action = torch.tensor( - [ - -0.22878, - 0.2536, - 1.0953, - 0.5, - 0.5, - -0.5, - 0.5, - 0.22878, - 0.2536, - 1.0953, - 0.5, - 0.5, - -0.5, - 0.5, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ) + idle_action = [ + -0.22878, + 0.2536, + 1.0953, + 0.5, + -0.5, + 0.5, + 0.5, + 0.22878, + 0.2536, + 1.0953, + 0.5, + -0.5, + 0.5, + 0.5, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] def __post_init__(self): """Post initialization.""" @@ -380,41 +593,60 @@ def __post_init__(self): self.sim.dt = 1 / 120 # 120Hz self.sim.render_interval = 2 - # Convert USD to URDF and change revolute joints to fixed - temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( - self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True - ) + # Defer USD→URDF conversion to controller initialization (requires Isaac Sim at runtime). + self.actions.upper_body_ik.controller.usd_path = self.scene.robot.spawn.usd_path + self.actions.upper_body_ik.controller.urdf_output_dir = self.temp_urdf_dir - # Set the URDF and mesh paths for the IK controller - self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path - self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path - - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - # number of joints in both hands - num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - "manusvive": ManusViveCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - num_open_xr_hand_joints=2 * 26, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline. + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, 0.0), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_gr1t2_pickplace_pipeline()[0], + sim_device=self.sim.device, + xr_cfg=self.xr, ) + + # Legacy teleop devices are built lazily via __getattr__ to avoid + # importing runtime-only modules (carb, pxr) at config-load time. + del self.teleop_devices + + def __getattr__(self, name: str): + if name == "teleop_devices": + from isaaclab.devices.device_base import DevicesCfg # noqa: PLC0415 + from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg # noqa: PLC0415 + from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import ( # noqa: PLC0415 + GR1T2RetargeterCfg, + ) + + self.teleop_devices = DevicesCfg( + devices={ + "handtracking": OpenXRDeviceCfg( + retargeters=[ + GR1T2RetargeterCfg( + enable_visualization=True, + num_open_xr_hand_joints=2 * 26, + sim_device=self.sim.device, + hand_joint_names=self.actions.upper_body_ik.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + "manusvive": ManusViveCfg( + retargeters=[ + GR1T2RetargeterCfg( + enable_visualization=True, + num_open_xr_hand_joints=2 * 26, + sim_device=self.sim.device, + hand_joint_names=self.actions.upper_body_ik.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), + } + ) + return self.teleop_devices + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py index 23ed8d984bcd..1c9f19bb3029 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py @@ -5,14 +5,20 @@ import tempfile -import isaaclab.controllers.utils as ControllerUtils -from isaaclab.devices.device_base import DevicesCfg -from isaaclab.devices.openxr import OpenXRDeviceCfg, XrCfg -from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import GR1T2RetargeterCfg +from isaaclab_teleop.isaac_teleop_cfg import IsaacTeleopCfg +from isaaclab_teleop.xr_cfg import XrCfg + from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.utils import configclass -from .pickplace_gr1t2_env_cfg import ActionsCfg, EventCfg, ObjectTableSceneCfg, ObservationsCfg, TerminationsCfg +from .pickplace_gr1t2_env_cfg import ( + ActionsCfg, + EventCfg, + ObjectTableSceneCfg, + ObservationsCfg, + TerminationsCfg, + _build_gr1t2_pickplace_pipeline, +) @configclass @@ -33,15 +39,6 @@ class PickPlaceGR1T2WaistEnabledEnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, 0.0), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - - # OpenXR hand tracking has 26 joints per hand - NUM_OPENXR_HAND_JOINTS = 26 - # Temporary directory for URDF files temp_urdf_dir = tempfile.gettempdir() @@ -59,29 +56,17 @@ def __post_init__(self): for joint_name in waist_joint_names: self.actions.upper_body_ik.pink_controlled_joint_names.append(joint_name) - # Convert USD to URDF and change revolute joints to fixed - temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( - self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True - ) - - # Set the URDF and mesh paths for the IK controller - self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path - self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path + # Defer USD→URDF conversion to controller initialization (requires Isaac Sim at runtime). + self.actions.upper_body_ik.controller.usd_path = self.scene.robot.spawn.usd_path + self.actions.upper_body_ik.controller.urdf_output_dir = self.temp_urdf_dir - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - # number of joints in both hands - num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline. + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, 0.0), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_gr1t2_pickplace_pipeline()[0], + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py index 85af79e7fb19..e223e3649894 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_unitree_g1_inspire_hand_env_cfg.py @@ -2,21 +2,17 @@ # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause +import os import tempfile -import torch -from pink.tasks import FrameTask +from isaaclab_physx.physics import PhysxCfg +from isaaclab_teleop.isaac_teleop_cfg import IsaacTeleopCfg +from isaaclab_teleop.xr_cfg import XrCfg -import carb - -import isaaclab.controllers.utils as ControllerUtils import isaaclab.envs.mdp as base_mdp import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg -from isaaclab.devices.device_base import DevicesCfg -from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg, XrCfg -from isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_upper_body_retargeter import UnitreeG1RetargeterCfg +from isaaclab.controllers.pink_ik import FrameTaskCfg, NullSpacePostureTaskCfg, PinkIKControllerCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm @@ -28,13 +24,239 @@ from isaaclab.sim.schemas.schemas_cfg import MassPropertiesCfg from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg from isaaclab.utils import configclass -from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR, retrieve_file_path from . import mdp from isaaclab_assets.robots.unitree import G1_INSPIRE_FTP_CFG # isort: skip +def _build_g1_inspire_pickplace_pipeline(): + """Build an IsaacTeleop retargeting pipeline for Unitree G1 Inspire Hand pick-place teleoperation. + + Creates two Se3AbsRetargeters for left and right wrist pose tracking and + two DexHandRetargeters for left and right dexterous hand finger control + from hand tracking data. All outputs are flattened into a single action + tensor via TensorReorderer. + """ + from isaacteleop.retargeters import ( + DexHandRetargeter, + DexHandRetargeterConfig, + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import HandsSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + # Create input sources (trackers are auto-discovered from pipeline) + hands = HandsSource(name="hands") + + # External input: world-to-anchor 4x4 transform matrix provided by IsaacTeleopDevice + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + + # Apply the coordinate-frame transform to hand poses so that + # downstream retargeters receive data in the simulation world frame. + transformed_hands = hands.transformed(transform_input.output(ValueInput.VALUE)) + + # ------------------------------------------------------------------------- + # SE3 Absolute Pose Retargeters (left and right wrists) + # ------------------------------------------------------------------------- + # Left wrist: 90-degree Y rotation offset + # From UnitreeG1Retargeter._retarget_abs: the USD control frame requires + # a rotation of (0, 180, 0) in euler angles relative to OpenXR frame. + left_se3_cfg = Se3RetargeterConfig( + input_device=HandsSource.LEFT, + zero_out_xy_rotation=False, + use_wrist_rotation=True, + use_wrist_position=True, + target_offset_roll=0.0, + target_offset_pitch=90.0, + target_offset_yaw=0.0, + ) + left_se3 = Se3AbsRetargeter(left_se3_cfg, name="left_ee_pose") + connected_left_se3 = left_se3.connect( + { + HandsSource.LEFT: transformed_hands.output(HandsSource.LEFT), + } + ) + + # Right wrist: rotation offset for USD control frame + # From UnitreeG1Retargeter._retarget_abs: rotation of (180, 0, 0) in euler angles. + right_se3_cfg = Se3RetargeterConfig( + input_device=HandsSource.RIGHT, + zero_out_xy_rotation=False, + use_wrist_rotation=True, + use_wrist_position=True, + target_offset_roll=180.0, + target_offset_pitch=-90.0, + target_offset_yaw=0.0, + ) + right_se3 = Se3AbsRetargeter(right_se3_cfg, name="right_ee_pose") + connected_right_se3 = right_se3.connect( + { + HandsSource.RIGHT: transformed_hands.output(HandsSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # DexHand Retargeters (left and right hands) + # ------------------------------------------------------------------------- + # Resolve dex-retargeting YAML config paths from the Unitree inspire retargeter data directory + import isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_dex_retargeting_utils as _dex_utils + + _data_dir = os.path.abspath(os.path.join(os.path.dirname(_dex_utils.__file__), "data")) + _config_dir = os.path.join(_data_dir, "configs", "dex-retargeting") + left_yaml_path = os.path.join(_config_dir, "unitree_hand_left_dexpilot.yml") + right_yaml_path = os.path.join(_config_dir, "unitree_hand_right_dexpilot.yml") + + # Resolve URDF paths (downloads from Omniverse if needed) + local_left_urdf = retrieve_file_path( + f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_left_hand.urdf" + ) + local_right_urdf = retrieve_file_path( + f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_right_hand.urdf" + ) + + # Hand-tracking to base-link frame transform (OPERATOR2MANO matrix) + # From g1_dex_retargeting_utils: [[0,-1,0],[-1,0,0],[0,0,-1]] + operator2mano = (0, -1, 0, -1, 0, 0, 0, 0, -1) + + # Joint names for each hand (12 DOF per hand) + left_hand_joint_names = [ + "L_thumb_proximal_yaw_joint", + "L_thumb_proximal_pitch_joint", + "L_thumb_intermediate_joint", + "L_thumb_distal_joint", + "L_index_proximal_joint", + "L_index_intermediate_joint", + "L_middle_proximal_joint", + "L_middle_intermediate_joint", + "L_ring_proximal_joint", + "L_ring_intermediate_joint", + "L_pinky_proximal_joint", + "L_pinky_intermediate_joint", + ] + + right_hand_joint_names = [ + "R_thumb_proximal_yaw_joint", + "R_thumb_proximal_pitch_joint", + "R_thumb_intermediate_joint", + "R_thumb_distal_joint", + "R_index_proximal_joint", + "R_index_intermediate_joint", + "R_middle_proximal_joint", + "R_middle_intermediate_joint", + "R_ring_proximal_joint", + "R_ring_intermediate_joint", + "R_pinky_proximal_joint", + "R_pinky_intermediate_joint", + ] + + left_dex_cfg = DexHandRetargeterConfig( + hand_retargeting_config=left_yaml_path, + hand_urdf=local_left_urdf, + hand_joint_names=left_hand_joint_names, + hand_side="left", + handtracking_to_baselink_frame_transform=operator2mano, + ) + left_dex = DexHandRetargeter(left_dex_cfg, name="left_hand") + connected_left_dex = left_dex.connect( + { + HandsSource.LEFT: hands.output(HandsSource.LEFT), + } + ) + + right_dex_cfg = DexHandRetargeterConfig( + hand_retargeting_config=right_yaml_path, + hand_urdf=local_right_urdf, + hand_joint_names=right_hand_joint_names, + hand_side="right", + handtracking_to_baselink_frame_transform=operator2mano, + ) + right_dex = DexHandRetargeter(right_dex_cfg, name="right_hand") + connected_right_dex = right_dex.connect( + { + HandsSource.RIGHT: hands.output(HandsSource.RIGHT), + } + ) + + # ------------------------------------------------------------------------- + # TensorReorderer: flatten into a 38D action tensor + # ------------------------------------------------------------------------- + # Se3AbsRetargeter outputs 7D arrays: [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w] + left_ee_elements = ["l_pos_x", "l_pos_y", "l_pos_z", "l_quat_x", "l_quat_y", "l_quat_z", "l_quat_w"] + right_ee_elements = ["r_pos_x", "r_pos_y", "r_pos_z", "r_quat_x", "r_quat_y", "r_quat_z", "r_quat_w"] + + # Output order must match the PinkInverseKinematicsActionCfg expected tensor layout: + # [left_wrist(7), right_wrist(7), hand_joints(24)] + # Hand joints follow hand_joint_names order from ActionsCfg.pink_ik_cfg. + output_order = ( + left_ee_elements + + right_ee_elements + + [ + # hand_joint_names indices 0-4 (left proximal + thumb yaw) + "L_index_proximal_joint", + "L_middle_proximal_joint", + "L_pinky_proximal_joint", + "L_ring_proximal_joint", + "L_thumb_proximal_yaw_joint", + # hand_joint_names indices 5-9 (right proximal + thumb yaw) + "R_index_proximal_joint", + "R_middle_proximal_joint", + "R_pinky_proximal_joint", + "R_ring_proximal_joint", + "R_thumb_proximal_yaw_joint", + # hand_joint_names indices 10-14 (left intermediate + thumb pitch) + "L_index_intermediate_joint", + "L_middle_intermediate_joint", + "L_pinky_intermediate_joint", + "L_ring_intermediate_joint", + "L_thumb_proximal_pitch_joint", + # hand_joint_names indices 15-19 (right intermediate + thumb pitch) + "R_index_intermediate_joint", + "R_middle_intermediate_joint", + "R_pinky_intermediate_joint", + "R_ring_intermediate_joint", + "R_thumb_proximal_pitch_joint", + # hand_joint_names indices 20-23 (thumb intermediate + distal) + "L_thumb_intermediate_joint", + "R_thumb_intermediate_joint", + "L_thumb_distal_joint", + "R_thumb_distal_joint", + ] + ) + + reorderer = TensorReorderer( + input_config={ + "left_ee_pose": left_ee_elements, + "right_ee_pose": right_ee_elements, + "left_hand_joints": left_hand_joint_names, + "right_hand_joints": right_hand_joint_names, + }, + output_order=output_order, + name="action_reorderer", + input_types={ + "left_ee_pose": "array", + "right_ee_pose": "array", + "left_hand_joints": "scalar", + "right_hand_joints": "scalar", + }, + ) + connected_reorderer = reorderer.connect( + { + "left_ee_pose": connected_left_se3.output("ee_pose"), + "right_ee_pose": connected_right_se3.output("ee_pose"), + "left_hand_joints": connected_left_dex.output("hand_joints"), + "right_hand_joints": connected_right_dex.output("hand_joints"), + } + ) + + pipeline = OutputCombiner({"action": connected_reorderer.output("output")}) + return pipeline + + ## # Scene definition ## @@ -45,7 +267,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table packing_table = AssetBaseCfg( prim_path="/World/envs/env_.*/PackingTable", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), @@ -54,7 +276,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): object = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Object", - init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.9996], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.9996], rot=[0.0, 0.0, 0.0, 1.0]), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", scale=(0.75, 0.75, 0.75), @@ -70,7 +292,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): prim_path="/World/envs/env_.*/Robot", init_state=ArticulationCfg.InitialStateCfg( pos=(0, 0, 1.0), - rot=(0.7071, 0, 0, 0.7071), + rot=(0.0, 0.0, 0.7071, 0.7071), joint_pos={ # right-arm "right_shoulder_pitch_joint": 0.0, @@ -174,21 +396,21 @@ class ActionsCfg: show_ik_warnings=False, fail_on_joint_limit_violation=False, variable_input_tasks=[ - FrameTask( - "g1_29dof_rev_1_0_left_wrist_yaw_link", + FrameTaskCfg( + frame="g1_29dof_rev_1_0_left_wrist_yaw_link", position_cost=8.0, # [cost] / [m] orientation_cost=2.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - FrameTask( - "g1_29dof_rev_1_0_right_wrist_yaw_link", + FrameTaskCfg( + frame="g1_29dof_rev_1_0_right_wrist_yaw_link", position_cost=8.0, # [cost] / [m] orientation_cost=2.0, # [cost] / [rad] lm_damping=10, # dampening for solver for step jumps gain=0.5, ), - NullSpacePostureTask( + NullSpacePostureTaskCfg( cost=0.5, lm_damping=1, controlled_frames=[ @@ -210,7 +432,6 @@ class ActionsCfg: ), ], fixed_input_tasks=[], - xr_enabled=bool(carb.settings.get_settings().get("/app/xr/enabled")), ), enable_gravity_compensation=False, ) @@ -306,61 +527,53 @@ class PickPlaceG1InspireFTPEnvCfg(ManagerBasedRLEnvCfg): rewards = None curriculum = None - # Position of the XR anchor in the world frame - xr: XrCfg = XrCfg( - anchor_pos=(0.0, 0.0, 0.0), - anchor_rot=(1.0, 0.0, 0.0, 0.0), - ) - # Temporary directory for URDF files temp_urdf_dir = tempfile.gettempdir() # Idle action to hold robot in default pose # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), right arm quat (4), # left hand joint pos (12), right hand joint pos (12)] - idle_action = torch.tensor( - [ - # 14 hand joints for EEF control - -0.1487, - 0.2038, - 1.0952, - 0.707, - 0.0, - 0.0, - 0.707, - 0.1487, - 0.2038, - 1.0952, - 0.707, - 0.0, - 0.0, - 0.707, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ) + idle_action = [ + # 14 hand joints for EEF control + -0.1487, + 0.2038, + 1.0952, + 0.0, + 0.0, + 0.707, + 0.707, + 0.1487, + 0.2038, + 1.0952, + 0.0, + 0.0, + 0.707, + 0.707, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] def __post_init__(self): """Post initialization.""" @@ -371,45 +584,22 @@ def __post_init__(self): self.sim.dt = 1 / 120 # 120Hz self.sim.render_interval = 2 - # Convert USD to URDF and change revolute joints to fixed - temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf( - self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True + self.sim.physics = PhysxCfg( + gpu_found_lost_pairs_capacity=2**26, + gpu_found_lost_aggregate_pairs_capacity=2**25, ) - # Set the URDF and mesh paths for the IK controller - self.actions.pink_ik_cfg.controller.urdf_path = temp_urdf_output_path - self.actions.pink_ik_cfg.controller.mesh_path = temp_urdf_meshes_output_path - - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - UnitreeG1RetargeterCfg( - enable_visualization=True, - # number of joints in both hands - num_open_xr_hand_joints=2 * 26, - sim_device=self.sim.device, - # Please confirm that self.actions.pink_ik_cfg.hand_joint_names is - # consistent with robot.joint_names[-24:] - # The order of the joints does matter as it will be used for - # converting pink_ik actions to final control actions in IsaacLab. - hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - "manusvive": ManusViveCfg( - retargeters=[ - UnitreeG1RetargeterCfg( - enable_visualization=True, - num_open_xr_hand_joints=2 * 26, - sim_device=self.sim.device, - hand_joint_names=self.actions.pink_ik_cfg.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - }, + # Defer USD→URDF conversion to controller initialization (requires Isaac Sim at runtime). + self.actions.pink_ik_cfg.controller.usd_path = self.scene.robot.spawn.usd_path + self.actions.pink_ik_cfg.controller.urdf_output_dir = self.temp_urdf_dir + + # IsaacTeleop-based teleoperation pipeline (resolved lazily at runtime). + self.xr = XrCfg( + anchor_pos=(0.0, 0.0, 0.0), + anchor_rot=(0.0, 0.0, 0.0, 1.0), + ) + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=_build_g1_inspire_pickplace_pipeline, + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py index ffe842b0202a..4d70b1571a5f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_toy2box_rmp_rel_env_cfg.py @@ -6,6 +6,8 @@ import os from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + from isaaclab.assets import AssetBaseCfg, RigidObjectCfg from isaaclab.devices.device_base import DevicesCfg from isaaclab.devices.keyboard import Se3KeyboardCfg @@ -192,11 +194,12 @@ def __post_init__(self): self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 - self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_found_lost_aggregate_pairs_capacity=1024 * 1024 * 4, + gpu_total_aggregate_pairs_capacity=16 * 1024, + friction_correlation_distance=0.00625, + ) # set viewer to see the whole scene self.viewer.eye = [1.5, -1.0, 1.5] @@ -223,7 +226,7 @@ def __post_init__(self): # add table self.scene.table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0.0, -0.7], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0.0, -0.7], rot=[0.0, 0.0, 0.707, 0.707]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", scale=(1.8, 1.0, 0.30), @@ -241,7 +244,6 @@ def __post_init__(self): controller=AGIBOT_RIGHT_ARM_RMPFLOW_CFG, scale=1.0, body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]), - articulation_prim_expr="/World/envs/env_.*/Robot", use_relative_mode=self.use_relative_mode, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py index 14841036d66e..ad9b4d9292ec 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/config/agibot/place_upright_mug_rmp_rel_env_cfg.py @@ -170,7 +170,7 @@ def __post_init__(self): # Table self.scene.table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.50, 0.0, 0.60], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.50, 0.0, 0.60], rot=[0.0, 0.0, 0.707, 0.707]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", scale=(1.0, 1.0, 0.60), @@ -196,8 +196,7 @@ def __post_init__(self): body_name="gripper_center", controller=AGIBOT_LEFT_ARM_RMPFLOW_CFG, scale=1.0, - body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0], rot=[0.7071, 0.0, -0.7071, 0.0]), - articulation_prim_expr="/World/envs/env_.*/Robot", + body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0], rot=[0.0, -0.7071, 0.0, 0.7071]), use_relative_mode=self.use_relative_mode, ) @@ -250,10 +249,10 @@ def __post_init__(self): offset=OffsetCfg( pos=[0.0, 0.0, 0.0], rot=[ - 0.7071, 0.0, -0.7071, 0.0, + 0.7071, ], ), ), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py index 41f76fcdb1bb..5446e2c14ced 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.py @@ -5,7 +5,6 @@ """This sub-module contains the functions that are specific to the pick and place environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.pyi new file mode 100644 index 000000000000..dcdd348b8bd6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/__init__.pyi @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "object_grasped", + "object_poses_in_base_frame", + "object_a_is_into_b", + "object_placed_upright", +] + +from .observations import object_grasped, object_poses_in_base_frame +from .terminations import object_a_is_into_b, object_placed_upright +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py index b0a9107beca1..0c144db467eb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/observations.py @@ -8,14 +8,15 @@ from typing import TYPE_CHECKING, Literal import torch +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import FrameTransformer if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import FrameTransformer def object_poses_in_base_frame( @@ -27,13 +28,13 @@ def object_poses_in_base_frame( """The pose of the object in the robot base frame.""" object: RigidObject = env.scene[object_cfg.name] - pos_object_world = object.data.root_pos_w - quat_object_world = object.data.root_quat_w + pos_object_world = object.data.root_pos_w.torch + quat_object_world = object.data.root_quat_w.torch """The position of the robot in the world frame.""" robot: Articulation = env.scene[robot_cfg.name] - root_pos_w = robot.data.root_pos_w - root_quat_w = robot.data.root_quat_w + root_pos_w = robot.data.root_pos_w.torch + root_quat_w = robot.data.root_quat_w.torch pos_object_base, quat_object_base = math_utils.subtract_frame_transforms( root_pos_w, root_quat_w, pos_object_world, quat_object_world @@ -64,12 +65,12 @@ def object_grasped( ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] object: RigidObject = env.scene[object_cfg.name] - object_pos = object.data.root_pos_w - end_effector_pos = ee_frame.data.target_pos_w[:, 0, :] + object_pos = object.data.root_pos_w.torch + end_effector_pos = ee_frame.data.target_pos_w.torch[:, 0, :] pose_diff = torch.linalg.vector_norm(object_pos - end_effector_pos, dim=1) if "contact_grasp" in env.scene.keys() and env.scene["contact_grasp"] is not None: - contact_force_grasp = env.scene["contact_grasp"].data.net_forces_w # shape:(N, 2, 3) for two fingers + contact_force_grasp = env.scene["contact_grasp"].data.net_forces_w.torch # shape:(N, 2, 3) for two fingers contact_force_norm = torch.linalg.vector_norm( contact_force_grasp, dim=2 ) # shape:(N, 2) - force magnitude per finger @@ -83,7 +84,7 @@ def object_grasped( ): contact_force_object = env.scene[ f"contact_grasp_{object_cfg.name}" - ].data.net_forces_w # shape:(N, 2, 3) for two fingers + ].data.net_forces_w.torch # shape:(N, 2, 3) for two fingers contact_force_norm = torch.linalg.vector_norm( contact_force_object, dim=2 ) # shape:(N, 2) - force magnitude per finger @@ -96,7 +97,7 @@ def object_grasped( if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1, 1) # 1: closed, 0: closing, -1: open suction_cup_is_closed = (suction_cup_status == 1).to(torch.float32) grasped = torch.logical_and(suction_cup_is_closed, pose_diff < diff_threshold) @@ -105,12 +106,12 @@ def object_grasped( gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) grasped = torch.logical_and( grasped, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) > env.cfg.gripper_threshold, ) grasped = torch.logical_and( grasped, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) > env.cfg.gripper_threshold, ) else: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py index 5361e4504480..11368e935181 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/place/mdp/terminations.py @@ -14,12 +14,13 @@ from typing import TYPE_CHECKING import torch +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -36,7 +37,7 @@ def object_placed_upright( object: RigidObject = env.scene[object_cfg.name] # Compute mug euler angles of X, Y axis, to check if it is placed upright - object_euler_x, object_euler_y, _ = math_utils.euler_xyz_from_quat(object.data.root_quat_w) # (N,4) [0, 2*pi] + object_euler_x, object_euler_y, _ = math_utils.euler_xyz_from_quat(object.data.root_quat_w.torch) # (N,4) [0, 2*pi] object_euler_x_err = torch.abs(math_utils.wrap_to_pi(object_euler_x)) # (N,) object_euler_y_err = torch.abs(math_utils.wrap_to_pi(object_euler_y)) # (N,) @@ -44,13 +45,13 @@ def object_placed_upright( success = torch.logical_and(object_euler_x_err < euler_xy_threshold, object_euler_y_err < euler_xy_threshold) # Check if current mug height is greater than target height - height_success = object.data.root_pos_w[:, 2] > target_height + height_success = object.data.root_pos_w.torch[:, 2] > target_height success = torch.logical_and(height_success, success) if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1) # 1: closed, 0: closing, -1: open suction_cup_is_open = (suction_cup_status == -1).to(torch.float32) success = torch.logical_and(suction_cup_is_open, success) @@ -59,12 +60,12 @@ def object_placed_upright( gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) success = torch.logical_and( success, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) < env.cfg.gripper_threshold, ) success = torch.logical_and( success, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) < env.cfg.gripper_threshold, ) else: @@ -89,7 +90,7 @@ def object_a_is_into_b( object_b: RigidObject = env.scene[object_b_cfg.name] # check object a is into object b - pos_diff = object_a.data.root_pos_w - object_b.data.root_pos_w + pos_diff = object_a.data.root_pos_w.torch - object_b.data.root_pos_w.torch height_dist = torch.linalg.vector_norm(pos_diff[:, 2:], dim=1) xy_dist = torch.linalg.vector_norm(pos_diff[:, :2], dim=1) @@ -98,7 +99,7 @@ def object_a_is_into_b( # Check gripper positions if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1) # 1: closed, 0: closing, -1: open suction_cup_is_open = (suction_cup_status == -1).to(torch.float32) success = torch.logical_and(suction_cup_is_open, success) @@ -109,12 +110,12 @@ def object_a_is_into_b( success = torch.logical_and( success, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[0]]) - env.cfg.gripper_open_val) < env.cfg.gripper_threshold, ) success = torch.logical_and( success, - torch.abs(torch.abs(robot.data.joint_pos[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) + torch.abs(torch.abs(robot.data.joint_pos.torch[:, gripper_joint_ids[1]]) - env.cfg.gripper_open_val) < env.cfg.gripper_threshold, ) else: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_abs_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_abs_env_cfg.py index b090e568965e..e8e955718559 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_abs_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_abs_env_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.utils import configclass @@ -34,6 +36,9 @@ def __post_init__(self): body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) + # IK control is not supported with Newton physics; use PhysX only. + self.sim.physics = PhysxCfg(bounce_threshold_velocity=0.2) + @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_rel_env_cfg.py index 024a42270d85..488e92493289 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/ik_rel_env_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.utils import configclass @@ -35,6 +37,9 @@ def __post_init__(self): body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) + # IK control is not supported with Newton physics; use PhysX only. + self.sim.physics = PhysxCfg(bounce_threshold_velocity=0.2) + @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py index a848ddb87667..1d480cfb3cb0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py @@ -44,6 +44,11 @@ def __post_init__(self): self.commands.ee_pose.ranges.pitch = (math.pi, math.pi) +## +# Play configuration +## + + @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): def __post_init__(self): diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/osc_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/osc_env_cfg.py index e612439fda70..cca92aa019bb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/osc_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/franka/osc_env_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_physx.physics import PhysxCfg + from isaaclab.controllers.operational_space_cfg import OperationalSpaceControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import OperationalSpaceControllerActionCfg from isaaclab.utils import configclass @@ -60,6 +62,9 @@ def __post_init__(self): self.observations.policy.joint_pos = None self.observations.policy.joint_vel = None + # OSC control is not supported with Newton physics; use PhysX only. + self.sim.physics = PhysxCfg(bounce_threshold_velocity=0.2) + @configclass class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/bimanual/reach_openarm_bi_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/bimanual/reach_openarm_bi_env_cfg.py index 7ccdfa0f851b..f111cb41cd20 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/bimanual/reach_openarm_bi_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/bimanual/reach_openarm_bi_env_cfg.py @@ -23,7 +23,7 @@ from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg from isaaclab.utils import configclass -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.manipulation.reach.mdp as mdp diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/unimanual/reach_openarm_uni_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/unimanual/reach_openarm_uni_env_cfg.py index ed9bcbfc08be..efde276b7dc6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/unimanual/reach_openarm_uni_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/config/openarm/unimanual/reach_openarm_uni_env_cfg.py @@ -24,7 +24,7 @@ from isaaclab.scene import InteractiveSceneCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.manipulation.reach.mdp as mdp @@ -49,7 +49,7 @@ class ReachSceneCfg(InteractiveSceneCfg): spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", ), - init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.70711, 0.0, 0.0, 0.70711)), + init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.0, 0.0, 0.70711, 0.70711)), ) # robots diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.py index 3fec83fe70af..8f9a146abdc8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.py @@ -5,6 +5,6 @@ """This sub-module contains the functions that are specific to the locomotion environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .rewards import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.pyi new file mode 100644 index 000000000000..90535ff4c7e0 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "orientation_command_error", + "position_command_error", + "position_command_error_tanh", +] + +from .rewards import orientation_command_error, position_command_error, position_command_error_tanh +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/rewards.py index 76f2fe36db41..3238523134d5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/mdp/rewards.py @@ -9,11 +9,11 @@ import torch -from isaaclab.assets import RigidObject from isaaclab.managers import SceneEntityCfg from isaaclab.utils.math import combine_frame_transforms, quat_error_magnitude, quat_mul if TYPE_CHECKING: + from isaaclab.assets import RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -29,9 +29,9 @@ def position_command_error(env: ManagerBasedRLEnv, command_name: str, asset_cfg: command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] - des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w, asset.data.root_quat_w, des_pos_b) - curr_pos_w = asset.data.body_pos_w[:, asset_cfg.body_ids[0]] # type: ignore - return torch.norm(curr_pos_w - des_pos_w, dim=1) + des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w.torch, asset.data.root_quat_w.torch, des_pos_b) + curr_pos_w = asset.data.body_pos_w.torch[:, asset_cfg.body_ids[0]] # type: ignore + return torch.linalg.norm(curr_pos_w - des_pos_w, dim=1) def position_command_error_tanh( @@ -47,9 +47,9 @@ def position_command_error_tanh( command = env.command_manager.get_command(command_name) # obtain the desired and current positions des_pos_b = command[:, :3] - des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w, asset.data.root_quat_w, des_pos_b) - curr_pos_w = asset.data.body_pos_w[:, asset_cfg.body_ids[0]] # type: ignore - distance = torch.norm(curr_pos_w - des_pos_w, dim=1) + des_pos_w, _ = combine_frame_transforms(asset.data.root_pos_w.torch, asset.data.root_quat_w.torch, des_pos_b) + curr_pos_w = asset.data.body_pos_w.torch[:, asset_cfg.body_ids[0]] # type: ignore + distance = torch.linalg.norm(curr_pos_w - des_pos_w, dim=1) return 1 - torch.tanh(distance / std) @@ -65,6 +65,6 @@ def orientation_command_error(env: ManagerBasedRLEnv, command_name: str, asset_c command = env.command_manager.get_command(command_name) # obtain the desired and current orientations des_quat_b = command[:, 3:7] - des_quat_w = quat_mul(asset.data.root_quat_w, des_quat_b) - curr_quat_w = asset.data.body_quat_w[:, asset_cfg.body_ids[0]] # type: ignore + des_quat_w = quat_mul(asset.data.root_quat_w.torch, des_quat_b) + curr_quat_w = asset.data.body_quat_w.torch[:, asset_cfg.body_ids[0]] # type: ignore return quat_error_magnitude(curr_quat_w, des_quat_w) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/reach_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/reach_env_cfg.py index bad88b401c7c..d859e786cf61 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/reach_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/reach/reach_env_cfg.py @@ -5,6 +5,9 @@ from dataclasses import MISSING +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.devices import DevicesCfg @@ -21,17 +24,65 @@ from isaaclab.managers import SceneEntityCfg from isaaclab.managers import TerminationTermCfg as DoneTerm from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import CollisionPropertiesCfg, RigidBodyPropertiesCfg, UsdFileCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise +from isaaclab.utils.noise import UniformNoiseCfg as Unoise import isaaclab_tasks.manager_based.manipulation.reach.mdp as mdp +from isaaclab_tasks.utils import PresetCfg + + +@configclass +class ReachPhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + physx: PhysxCfg = PhysxCfg(bounce_threshold_velocity=0.2) + + newton_mjwarp: NewtonCfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=50, + nconmax=20, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ), + num_substeps=1, + debug_mode=False, + ) + ## # Scene definition ## +@configclass +class TableCfg(PresetCfg): + physx = AssetBaseCfg( + prim_path="/World/envs/env_.*/Table", + init_state=AssetBaseCfg.InitialStateCfg(pos=(0.5, 0, 0), rot=(0, 0, 0.707, 0.707)), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", + ), + ) + + newton_mjwarp: ArticulationCfg = ArticulationCfg( + prim_path="/World/envs/env_.*/Table", + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.5, 0.15, -0.5), rot=(0, 0, 0.707, 0.707), joint_pos={}, joint_vel={} + ), + spawn=sim_utils.CuboidCfg( + size=(0.9, 1.3, 1.00), + collision_props=CollisionPropertiesCfg(), + rigid_props=RigidBodyPropertiesCfg(rigid_body_enabled=True), + ), + actuators={}, + articulation_root_prim_path="", + ) + + default = physx + + @configclass class ReachSceneCfg(InteractiveSceneCfg): """Configuration for the scene with a robotic arm.""" @@ -43,13 +94,7 @@ class ReachSceneCfg(InteractiveSceneCfg): init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)), ) - table = AssetBaseCfg( - prim_path="{ENV_REGEX_NS}/Table", - spawn=sim_utils.UsdFileCfg( - usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd", - ), - init_state=AssetBaseCfg.InitialStateCfg(pos=(0.55, 0.0, 0.0), rot=(0.70711, 0.0, 0.0, 0.70711)), - ) + table = TableCfg() # robots robot: ArticulationCfg = MISSING @@ -75,6 +120,7 @@ class CommandsCfg: body_name=MISSING, resampling_time_range=(4.0, 4.0), debug_vis=True, + position_success_threshold=0.05, ranges=mdp.UniformPoseCommandCfg.Ranges( pos_x=(0.35, 0.65), pos_y=(-0.2, 0.2), @@ -210,6 +256,7 @@ def __post_init__(self): self.viewer.eye = (3.5, 3.5, 3.5) # simulation settings self.sim.dt = 1.0 / 60.0 + self.sim.physics = ReachPhysicsCfg() self.teleop_devices = DevicesCfg( devices={ diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py index 0c93d83ff1f1..11d519ffdebb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py @@ -71,6 +71,47 @@ disable_env_checker=True, ) +gym.register( + id="Isaac-Stack-Cube-RedGreen-Franka-IK-Rel-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:FrankaCubeStackRedGreenEnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-Stack-Cube-RedGreenBlue-Franka-IK-Rel-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:FrankaCubeStackRedGreenBlueEnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + + +gym.register( + id="Isaac-Stack-Cube-BlueGreen-Franka-IK-Rel-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:FrankaCubeStackBlueGreenEnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-Stack-Cube-BlueGreenRed-Franka-IK-Rel-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.stack_ik_rel_env_cfg:FrankaCubeStackBlueGreenRedEnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + gym.register( id="Isaac-Stack-Cube-Franka-IK-Abs-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py index 4121c1bb2e20..08f651e954a5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py @@ -30,6 +30,7 @@ class EventCfg: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, # mode="startup", @@ -131,7 +132,7 @@ def __post_init__(self): # Blue sorting bin positioned at table center self.scene.blue_sorting_bin = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/BlueSortingBin", - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.0203), rot=(0.0, 0.0, 0.0, 0.1)), spawn=UsdFileCfg( usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_bin_blue.usd", scale=(1.1, 1.6, 3.3), @@ -143,7 +144,7 @@ def __post_init__(self): # The bin is at (0.4, 0.0, 0.0203), so cube_1 should be slightly above it self.scene.cube_1 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1", - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.025), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.025), rot=(0.0, 0.0, 0.0, 1.0)), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=(1.0, 1.0, 1.0), @@ -154,7 +155,7 @@ def __post_init__(self): # Cube 2 positioned outside the bin (to the right) self.scene.cube_2 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2", - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, 0.25, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, 0.25, 0.0203), rot=(0.0, 0.0, 0.0, 1.0)), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), @@ -165,7 +166,7 @@ def __post_init__(self): # Cube 3 positioned outside the bin (to the left) self.scene.cube_3 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_3", - init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, -0.25, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, -0.25, 0.0203), rot=(0.0, 0.0, 0.0, 1.0)), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=(1.0, 1.0, 1.0), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_abs_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_abs_env_cfg.py index 8d9b18bcd95f..bedcc4a7730d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_abs_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_abs_env_cfg.py @@ -3,14 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause +import logging + from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg -from isaaclab.devices.device_base import DeviceBase, DevicesCfg -from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters.manipulator.gripper_retargeter import GripperRetargeterCfg -from isaaclab.devices.openxr.retargeters.manipulator.se3_abs_retargeter import Se3AbsRetargeterCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.utils import configclass +try: + import isaacteleop # noqa: F401 -- pipeline builders need isaacteleop at runtime + from isaaclab_teleop import IsaacTeleopCfg + + _TELEOP_AVAILABLE = True +except ImportError: + _TELEOP_AVAILABLE = False + logging.getLogger(__name__).warning("isaaclab_teleop is not installed. XR teleoperation features will be disabled.") + from . import stack_joint_pos_env_cfg ## @@ -19,6 +26,91 @@ from isaaclab_assets.robots.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip +def _build_franka_stack_pipeline(): + """Build a IsaacTeleop retargeting pipeline for Franka cube stacking. + + Creates an Se3AbsRetargeter for right-hand pose tracking and a GripperRetargeter + for right-hand gripper control, flattened into a single action tensor via + TensorReorderer. + + Returns: + OutputCombiner with a single "action" output containing the flattened + 8D action tensor: [pos_x, pos_y, pos_z, quat_w, quat_x, quat_y, quat_z, gripper]. + """ + from isaacteleop.retargeters import ( + GripperRetargeter, + GripperRetargeterConfig, + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource, HandsSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + # Create input sources (trackers are auto-discovered from pipeline) + controllers = ControllersSource(name="controllers") + hands = HandsSource(name="hands") + + # External input: world-to-anchor 4x4 transform matrix provided by IsaacTeleopDevice + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + + # Apply the coordinate-frame transform to controller poses so that + # downstream retargeters receive data in the simulation world frame. + transformed_controllers = controllers.transformed(transform_input.output(ValueInput.VALUE)) + + # SE3 Absolute Pose Retargeter (right hand) + se3_cfg = Se3RetargeterConfig( + input_device=ControllersSource.RIGHT, + zero_out_xy_rotation=False, + use_wrist_rotation=False, + use_wrist_position=False, + target_offset_roll=90.0, + target_offset_pitch=0.0, + target_offset_yaw=0.0, + ) + se3 = Se3AbsRetargeter(se3_cfg, name="ee_pose") + connected_se3 = se3.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + } + ) + + # Gripper Retargeter (right hand) + gripper_cfg = GripperRetargeterConfig(hand_side="right") + gripper = GripperRetargeter(gripper_cfg, name="gripper") + connected_gripper = gripper.connect( + { + ControllersSource.RIGHT: transformed_controllers.output(ControllersSource.RIGHT), + HandsSource.RIGHT: hands.output(HandsSource.RIGHT), + } + ) + + # TensorReorderer to flatten into a single action vector + # Se3AbsRetargeter outputs a 7D NDArray (pos xyz + quat xyzw) + # GripperRetargeter outputs a single float (gripper command) + ee_pose_elements = ["pos_x", "pos_y", "pos_z", "quat_x", "quat_y", "quat_z", "quat_w"] + gripper_elements = ["gripper_value"] + + reorderer = TensorReorderer( + input_config={ + "ee_pose": ee_pose_elements, + "gripper_command": gripper_elements, + }, + output_order=ee_pose_elements + gripper_elements, + name="action_reorderer", + input_types={"ee_pose": "array", "gripper_command": "scalar"}, + ) + connected_reorderer = reorderer.connect( + { + "ee_pose": connected_se3.output("ee_pose"), + "gripper_command": connected_gripper.output("gripper_command"), + } + ) + + return OutputCombiner({"action": connected_reorderer.output("output")}) + + @configclass class FrankaCubeStackEnvCfg(stack_joint_pos_env_cfg.FrankaCubeStackEnvCfg): def __post_init__(self): @@ -37,23 +129,10 @@ def __post_init__(self): controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls"), ) - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3AbsRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } - ) + # IsaacTeleop-based teleoperation pipeline + if _TELEOP_AVAILABLE: + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=_build_franka_stack_pipeline, + sim_device=self.sim.device, + xr_cfg=self.xr, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_blueprint_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_blueprint_env_cfg.py index 3586508df2dc..e123a25604bb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_blueprint_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_blueprint_env_cfg.py @@ -3,7 +3,10 @@ # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import os +from typing import TYPE_CHECKING import torch from torchvision.utils import save_image @@ -11,17 +14,19 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg -from isaaclab.envs import ManagerBasedEnv from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import Camera, CameraCfg, RayCasterCamera, TiledCamera +from isaaclab.sensors import CameraCfg from isaaclab.utils import configclass from ... import mdp from . import stack_joint_pos_env_cfg +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + from isaaclab.sensors import Camera, RayCasterCamera ## # Pre-defined configs ## @@ -58,7 +63,7 @@ def image( The images produced at the last time-step """ # extract the used quantities (to enable type-hinting) - sensor: TiledCamera | Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] + sensor: Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] # obtain the input image images = sensor.data.output[data_type] @@ -255,7 +260,7 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(1.0, 0.0, 0.33), rot=(-0.3799, 0.5963, 0.5963, -0.3799), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(1.0, 0.0, 0.33), rot=(0.5963, 0.5963, -0.3799, -0.3799), convention="ros"), ) # Set table view camera @@ -270,5 +275,5 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(1.5, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(1.4, 1.8, 1.2), rot=(-0.1393, 0.2025, 0.8185, -0.5192), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(1.4, 1.8, 1.2), rot=(0.2025, 0.8185, -0.5192, -0.1393), convention="ros"), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg.py index 16eb9b9f087e..1d32adc3426e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg.py @@ -3,32 +3,91 @@ # # SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg -from isaaclab.devices.device_base import DeviceBase, DevicesCfg -from isaaclab.devices.keyboard import Se3KeyboardCfg -from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters.manipulator.gripper_retargeter import GripperRetargeterCfg -from isaaclab.devices.openxr.retargeters.manipulator.se3_rel_retargeter import Se3RelRetargeterCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.sensors import FrameTransformerCfg +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg +from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR -from . import stack_joint_pos_env_cfg +from isaaclab_tasks.manager_based.manipulation.stack.mdp import franka_stack_events +from isaaclab_tasks.manager_based.manipulation.stack.stack_env_cfg import StackEnvCfg, mdp ## # Pre-defined configs ## +from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip from isaaclab_assets.robots.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip +# Default arm + gripper joint pose +_FRANKA_STACK_IK_REL_INIT_JOINT_POS: dict[str, float] = { + "panda_joint1": 0.0444, + "panda_joint2": -0.1894, + "panda_joint3": -0.1107, + "panda_joint4": -2.5148, + "panda_joint5": 0.0044, + "panda_joint6": 2.3775, + "panda_joint7": 0.6952, + "panda_finger_joint.*": 0.0400, +} + + +@configclass +class EventCfg: + """Configuration for events.""" + + randomize_franka_joint_state = EventTerm( + func=franka_stack_events.randomize_joint_by_gaussian_offset, + mode="reset", + params={ + "mean": 0.0, + "std": 0.02, + "asset_cfg": SceneEntityCfg("robot"), + }, + ) + + randomize_cube_positions = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + "pose_range": {"x": (0.4, 0.6), "y": (-0.10, 0.10), "z": (0.0203, 0.0203), "yaw": (-1.0, 1, 0)}, + "min_separation": 0.1, + "asset_cfgs": [SceneEntityCfg("cube_1"), SceneEntityCfg("cube_2"), SceneEntityCfg("cube_3")], + }, + ) + @configclass -class FrankaCubeStackEnvCfg(stack_joint_pos_env_cfg.FrankaCubeStackEnvCfg): +class FrankaCubeStackEnvCfg(StackEnvCfg): def __post_init__(self): # post init of parent super().__post_init__() + # Set events + self.events = EventCfg() + # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. - self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace( + prim_path="{ENV_REGEX_NS}/Robot", + init_state=ArticulationCfg.InitialStateCfg(joint_pos=_FRANKA_STACK_IK_REL_INIT_JOINT_POS), + ) + + # Add semantics to table + self.scene.table.spawn.semantic_tags = [("class", "table")] + + # Add semantics to ground + self.scene.plane.semantic_tags = [("class", "ground")] + + # Add semantics to robot + self.scene.robot.spawn.semantic_tags = [("class", "robot")] # Set actions for the specific robot type (franka) self.actions.arm_action = DifferentialInverseKinematicsActionCfg( @@ -39,31 +98,144 @@ def __post_init__(self): scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) + self.actions.gripper_action = mdp.BinaryJointPositionActionCfg( + asset_name="robot", + joint_names=["panda_finger.*"], + open_command_expr={"panda_finger_.*": 0.04}, + close_command_expr={"panda_finger_.*": 0.0}, + ) + # utilities for gripper status check + self.gripper_joint_names = ["panda_finger_.*"] + self.gripper_open_val = 0.04 + self.gripper_threshold = 0.005 - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3RelRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - delta_pos_scale_factor=10.0, - delta_rot_scale_factor=10.0, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, + # Rigid body properties of each cube + cube_properties = RigidBodyPropertiesCfg( + solver_position_iteration_count=16, + solver_velocity_iteration_count=1, + max_angular_velocity=1000.0, + max_linear_velocity=1000.0, + max_depenetration_velocity=5.0, + disable_gravity=False, + ) + + # Set each stacking cube deterministically + self.scene.cube_1 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_1", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_1")], + ), + ) + self.scene.cube_2 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_2", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_2")], + ), + ) + self.scene.cube_3 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_3", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_3")], + ), + ) + + # Listens to the required transforms + marker_cfg = FRAME_MARKER_CFG.copy() + marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + marker_cfg.prim_path = "/Visuals/FrameTransformer" + self.scene.ee_frame = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_link0", + debug_vis=False, + visualizer_cfg=marker_cfg, + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_hand", + name="end_effector", + offset=OffsetCfg( + pos=[0.0, 0.0, 0.1034], + ), + ), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger", + name="tool_rightfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), ), - "keyboard": Se3KeyboardCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger", + name="tool_leftfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), ), - } + ], + ) + + +@configclass +class FrankaCubeStackRedGreenEnvCfg(FrankaCubeStackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.terminations.success = DoneTerm( + func=mdp.cubes_stacked, + params={"cube_1_cfg": SceneEntityCfg("cube_2"), "cube_2_cfg": SceneEntityCfg("cube_3"), "cube_3_cfg": None}, + ) + + +@configclass +class FrankaCubeStackRedGreenBlueEnvCfg(FrankaCubeStackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.terminations.success = DoneTerm( + func=mdp.cubes_stacked, + params={ + "cube_1_cfg": SceneEntityCfg("cube_2"), + "cube_2_cfg": SceneEntityCfg("cube_3"), + "cube_3_cfg": SceneEntityCfg("cube_1"), + }, + ) + + +@configclass +class FrankaCubeStackBlueGreenEnvCfg(FrankaCubeStackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.terminations.success = DoneTerm( + func=mdp.cubes_stacked, + params={"cube_1_cfg": SceneEntityCfg("cube_1"), "cube_2_cfg": SceneEntityCfg("cube_3"), "cube_3_cfg": None}, + ) + + +@configclass +class FrankaCubeStackBlueGreenRedEnvCfg(FrankaCubeStackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.terminations.success = DoneTerm( + func=mdp.cubes_stacked, + params={ + "cube_1_cfg": SceneEntityCfg("cube_1"), + "cube_2_cfg": SceneEntityCfg("cube_3"), + "cube_3_cfg": SceneEntityCfg("cube_2"), + }, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg_skillgen.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg_skillgen.py index d2a2bd62100a..a8ed078d07d1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg_skillgen.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_env_cfg_skillgen.py @@ -4,11 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg -from isaaclab.devices.device_base import DeviceBase, DevicesCfg -from isaaclab.devices.keyboard import Se3KeyboardCfg -from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters.manipulator.gripper_retargeter import GripperRetargeterCfg -from isaaclab.devices.openxr.retargeters.manipulator.se3_rel_retargeter import Se3RelRetargeterCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm @@ -124,34 +119,6 @@ def __post_init__(self): body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]), ) - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3RelRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - delta_pos_scale_factor=10.0, - delta_rot_scale_factor=10.0, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - "keyboard": Se3KeyboardCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, - ), - } - ) - # Apply skillgen-specific cube position randomization self.events.randomize_cube_positions.params["pose_range"] = { "x": (0.45, 0.6), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py index 2cdd8ef39ee4..ab199c4dcc10 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_cosmos_env_cfg.py @@ -131,7 +131,7 @@ def __post_init__(self): focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 2) ), offset=CameraCfg.OffsetCfg( - pos=(0.13, 0.0, -0.15), rot=(-0.70614, 0.03701, 0.03701, -0.70614), convention="ros" + pos=(0.13, 0.0, -0.15), rot=(0.03701, 0.03701, -0.70614, -0.70614), convention="ros" ), ) @@ -148,7 +148,7 @@ def __post_init__(self): focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 2) ), offset=CameraCfg.OffsetCfg( - pos=(1.0, 0.0, 0.4), rot=(0.35355, -0.61237, -0.61237, 0.35355), convention="ros" + pos=(1.0, 0.0, 0.4), rot=(-0.61237, -0.61237, 0.35355, 0.35355), convention="ros" ), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_env_cfg.py index 13040620b788..624df7a8dc1d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_ik_rel_visuomotor_env_cfg.py @@ -4,31 +4,67 @@ # SPDX-License-Identifier: BSD-3-Clause import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import CameraCfg +from isaaclab.sensors import CameraCfg, FrameTransformerCfg +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg +from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg from isaaclab.utils import configclass from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, NVIDIA_NUCLEUS_DIR from isaaclab_tasks.manager_based.manipulation.stack import mdp from isaaclab_tasks.manager_based.manipulation.stack.mdp import franka_stack_events - -from . import stack_joint_pos_env_cfg +from isaaclab_tasks.manager_based.manipulation.stack.stack_env_cfg import StackEnvCfg ## # Pre-defined configs ## +from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip from isaaclab_assets.robots.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip +# Default arm + gripper joint pose +_FRANKA_STACK_IK_REL_INIT_JOINT_POS: dict[str, float] = { + "panda_joint1": 0.0444, + "panda_joint2": -0.1894, + "panda_joint3": -0.1107, + "panda_joint4": -2.5148, + "panda_joint5": 0.0044, + "panda_joint6": 2.3775, + "panda_joint7": 0.6952, + "panda_finger_joint.*": 0.0400, +} + @configclass -class EventCfg(stack_joint_pos_env_cfg.EventCfg): +class EventCfg: """Configuration for events.""" + randomize_franka_joint_state = EventTerm( + func=franka_stack_events.randomize_joint_by_gaussian_offset, + mode="reset", + params={ + "mean": 0.0, + "std": 0.02, + "asset_cfg": SceneEntityCfg("robot"), + }, + ) + + randomize_cube_positions = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + "pose_range": {"x": (0.4, 0.6), "y": (-0.10, 0.10), "z": (0.0203, 0.0203), "yaw": (-1.0, 1, 0)}, + "min_separation": 0.1, + "asset_cfgs": [SceneEntityCfg("cube_1"), SceneEntityCfg("cube_2"), SceneEntityCfg("cube_3")], + }, + ) + randomize_light = EventTerm( func=franka_stack_events.randomize_scene_lighting_domelight, mode="reset", @@ -170,7 +206,7 @@ def __post_init__(self): @configclass -class FrankaCubeStackVisuomotorEnvCfg(stack_joint_pos_env_cfg.FrankaCubeStackEnvCfg): +class FrankaCubeStackVisuomotorEnvCfg(StackEnvCfg): observations: ObservationsCfg = ObservationsCfg() # Evaluation settings @@ -186,7 +222,18 @@ def __post_init__(self): # Set Franka as robot # We switch here to a stiffer PD controller for IK tracking to be better. - self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace( + prim_path="{ENV_REGEX_NS}/Robot", + init_state=ArticulationCfg.InitialStateCfg(joint_pos=_FRANKA_STACK_IK_REL_INIT_JOINT_POS), + ) + + # Add semantics to table + self.scene.table.spawn.semantic_tags = [("class", "table")] + + # Add semantics to ground + self.scene.plane.semantic_tags = [("class", "ground")] + + # Add semantics to robot self.scene.robot.spawn.semantic_tags = [("class", "robot")] # Set actions for the specific robot type (franka) @@ -198,6 +245,91 @@ def __post_init__(self): scale=0.5, body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.107]), ) + self.actions.gripper_action = mdp.BinaryJointPositionActionCfg( + asset_name="robot", + joint_names=["panda_finger.*"], + open_command_expr={"panda_finger_.*": 0.04}, + close_command_expr={"panda_finger_.*": 0.0}, + ) + # utilities for gripper status check + self.gripper_joint_names = ["panda_finger_.*"] + self.gripper_open_val = 0.04 + self.gripper_threshold = 0.005 + + # Rigid body properties of each cube + cube_properties = RigidBodyPropertiesCfg( + solver_position_iteration_count=16, + solver_velocity_iteration_count=1, + max_angular_velocity=1000.0, + max_linear_velocity=1000.0, + max_depenetration_velocity=5.0, + disable_gravity=False, + ) + + # Set each stacking cube deterministically + self.scene.cube_1 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_1", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_1")], + ), + ) + self.scene.cube_2 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_2", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_2")], + ), + ) + self.scene.cube_3 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_3", + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + semantic_tags=[("class", "cube_3")], + ), + ) + + # Listens to the required transforms + marker_cfg = FRAME_MARKER_CFG.copy() + marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + marker_cfg.prim_path = "/Visuals/FrameTransformer" + self.scene.ee_frame = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_link0", + debug_vis=False, + visualizer_cfg=marker_cfg, + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_hand", + name="end_effector", + offset=OffsetCfg( + pos=[0.0, 0.0, 0.1034], + ), + ), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger", + name="tool_rightfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), + ), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger", + name="tool_leftfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), + ), + ], + ) # Set cameras # Set wrist camera @@ -211,7 +343,7 @@ def __post_init__(self): focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 2) ), offset=CameraCfg.OffsetCfg( - pos=(0.13, 0.0, -0.15), rot=(-0.70614, 0.03701, 0.03701, -0.70614), convention="ros" + pos=(0.13, 0.0, -0.15), rot=(0.03701, 0.03701, -0.70614, -0.70614), convention="ros" ), ) @@ -226,7 +358,7 @@ def __post_init__(self): focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 2) ), offset=CameraCfg.OffsetCfg( - pos=(1.0, 0.0, 0.4), rot=(0.35355, -0.61237, -0.61237, 0.35355), convention="ros" + pos=(1.0, 0.0, 0.4), rot=(-0.61237, -0.61237, 0.35355, 0.35355), convention="ros" ), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py index e344f0021db2..83f05e0497df 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg +from isaaclab.assets import ArticulationCfg, RigidObjectCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import SceneEntityCfg from isaaclab.sensors import FrameTransformerCfg @@ -23,19 +23,23 @@ from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip from isaaclab_assets.robots.franka import FRANKA_PANDA_CFG # isort: skip +# Default arm + gripper joint pose +_FRANKA_STACK_IK_REL_INIT_JOINT_POS: dict[str, float] = { + "panda_joint1": 0.0444, + "panda_joint2": -0.1894, + "panda_joint3": -0.1107, + "panda_joint4": -2.5148, + "panda_joint5": 0.0044, + "panda_joint6": 2.3775, + "panda_joint7": 0.6952, + "panda_finger_joint.*": 0.0400, +} + @configclass class EventCfg: """Configuration for events.""" - init_franka_arm_pose = EventTerm( - func=franka_stack_events.set_default_joint_pose, - mode="reset", - params={ - "default_pose": [0.0444, -0.1894, -0.1107, -2.5148, 0.0044, 2.3775, 0.6952, 0.0400, 0.0400], - }, - ) - randomize_franka_joint_state = EventTerm( func=franka_stack_events.randomize_joint_by_gaussian_offset, mode="reset", @@ -69,8 +73,10 @@ def __post_init__(self): self.events = EventCfg() # Set Franka as robot - self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") - self.scene.robot.spawn.semantic_tags = [("class", "robot")] + self.scene.robot = FRANKA_PANDA_CFG.replace( + prim_path="{ENV_REGEX_NS}/Robot", + init_state=ArticulationCfg.InitialStateCfg(joint_pos=_FRANKA_STACK_IK_REL_INIT_JOINT_POS), + ) # Add semantics to table self.scene.table.spawn.semantic_tags = [("class", "table")] @@ -78,6 +84,9 @@ def __post_init__(self): # Add semantics to ground self.scene.plane.semantic_tags = [("class", "ground")] + # Add semantics to robot + self.scene.robot.spawn.semantic_tags = [("class", "robot")] + # Set actions for the specific robot type (franka) self.actions.arm_action = mdp.JointPositionActionCfg( asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True @@ -106,7 +115,7 @@ def __post_init__(self): # Set each stacking cube deterministically self.scene.cube_1 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=(1.0, 1.0, 1.0), @@ -116,7 +125,7 @@ def __post_init__(self): ) self.scene.cube_2 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), @@ -126,7 +135,7 @@ def __post_init__(self): ) self.scene.cube_3 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_3", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=(1.0, 1.0, 1.0), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py index 3e14199a2630..a607f0c78c13 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py @@ -32,6 +32,7 @@ class EventCfg: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, mode="startup", @@ -106,7 +107,7 @@ def __post_init__(self): cube_1_config_dict = { "blue_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1_Blue", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=(1.0, 1.0, 1.0), @@ -115,7 +116,7 @@ def __post_init__(self): ), "red_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1_Red", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0403], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0403], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), @@ -127,7 +128,7 @@ def __post_init__(self): cube_2_config_dict = { "red_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2_Red", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), @@ -136,7 +137,7 @@ def __post_init__(self): ), "yellow_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2_Yellow", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0403], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0403], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/yellow_block.usd", scale=(1.0, 1.0, 1.0), @@ -148,7 +149,7 @@ def __post_init__(self): cube_3_config_dict = { "yellow_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_3_Yellow", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/yellow_block.usd", scale=(1.0, 1.0, 1.0), @@ -157,7 +158,7 @@ def __post_init__(self): ), "green_cube": RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2_Green", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0403], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0403], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=(1.0, 1.0, 1.0), diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py index 4506a95eaba9..8c8490ad1c7e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py @@ -4,11 +4,10 @@ # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg, SurfaceGripperCfg -from isaaclab.devices import DevicesCfg -from isaaclab.devices.device_base import DeviceBase -from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters import GripperRetargeterCfg, Se3AbsRetargeterCfg +from isaaclab_physx.assets import SurfaceGripperCfg +from isaaclab_teleop import IsaacTeleopCfg + +from isaaclab.assets import RigidObjectCfg from isaaclab.envs.mdp.actions.actions_cfg import SurfaceGripperBinaryActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup @@ -32,6 +31,82 @@ from isaaclab_assets.robots.galbot import GALBOT_ONE_CHARLIE_CFG # isort: skip +def _build_se3_abs_gripper_pipeline(hand_side="left"): + """Build an IsaacTeleop Se3Abs + Gripper pipeline for single-arm manipulator teleoperation. + + Creates a Se3AbsRetargeter for end-effector absolute pose tracking and + a GripperRetargeter for pinch-based gripper control from hand tracking data. + All outputs are flattened into a single 8D action tensor via TensorReorderer. + """ + from isaacteleop.retargeters import ( + GripperRetargeter, + GripperRetargeterConfig, + Se3AbsRetargeter, + Se3RetargeterConfig, + TensorReorderer, + ) + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource, HandsSource + from isaacteleop.retargeting_engine.interface import OutputCombiner, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + controllers = ControllersSource(name="controllers") + hands = HandsSource(name="hands") + transform_input = ValueInput("world_T_anchor", TransformMatrix()) + transformed_hands = hands.transformed(transform_input.output(ValueInput.VALUE)) + + hand_key = HandsSource.LEFT if hand_side == "left" else HandsSource.RIGHT + + # SE3 Absolute Pose Retargeter + se3_cfg = Se3RetargeterConfig( + input_device=hand_key, + zero_out_xy_rotation=True, + use_wrist_rotation=False, + use_wrist_position=True, + target_offset_roll=0.0, + target_offset_pitch=0.0, + target_offset_yaw=0.0, + ) + se3 = Se3AbsRetargeter(se3_cfg, name="ee_pose") + connected_se3 = se3.connect({hand_key: transformed_hands.output(hand_key)}) + + # Gripper Retargeter (pinch-based) + gripper_cfg = GripperRetargeterConfig(hand_side=hand_side) + gripper = GripperRetargeter(gripper_cfg, name="gripper") + controller_key = ControllersSource.LEFT if hand_side == "left" else ControllersSource.RIGHT + connected_gripper = gripper.connect( + { + f"hand_{hand_side}": hands.output(hand_key), + f"controller_{hand_side}": controllers.output(controller_key), + } + ) + + # TensorReorderer: flatten into an 8D action tensor [ee_pose(7), gripper(1)] + ee_elements = ["pos_x", "pos_y", "pos_z", "quat_x", "quat_y", "quat_z", "quat_w"] + gripper_elements = ["gripper_cmd"] + + reorderer = TensorReorderer( + input_config={ + "ee_pose": ee_elements, + "gripper": gripper_elements, + }, + output_order=ee_elements + gripper_elements, + name="action_reorderer", + input_types={ + "ee_pose": "array", + "gripper": "scalar", + }, + ) + connected_reorderer = reorderer.connect( + { + "ee_pose": connected_se3.output("ee_pose"), + "gripper": connected_gripper.output("gripper_command"), + } + ) + + pipeline = OutputCombiner({"action": connected_reorderer.output("output")}) + return pipeline + + @configclass class EventCfg: """Configuration for events.""" @@ -199,7 +274,7 @@ def __post_init__(self): # Set each stacking cube deterministically self.scene.cube_1 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=(1.0, 1.0, 1.0), @@ -209,7 +284,7 @@ def __post_init__(self): ) self.scene.cube_2 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=(1.0, 1.0, 1.0), @@ -219,7 +294,7 @@ def __post_init__(self): ) self.scene.cube_3 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_3", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=(1.0, 1.0, 1.0), @@ -248,25 +323,11 @@ def __post_init__(self): ], ) - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3AbsRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_LEFT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_LEFT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline (left hand) + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_se3_abs_gripper_pipeline(hand_side="left"), + sim_device=self.sim.device, + xr_cfg=self.xr, ) @@ -303,23 +364,9 @@ def __post_init__(self): self.scene.ee_frame.target_frames[0].prim_path = "{ENV_REGEX_NS}/Robot/right_suction_cup_tcp_link" - self.teleop_devices = DevicesCfg( - devices={ - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3AbsRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } + # IsaacTeleop-based teleoperation pipeline (right hand) + self.isaac_teleop = IsaacTeleopCfg( + pipeline_builder=lambda: _build_se3_abs_gripper_pipeline(hand_side="right"), + sim_device=self.sim.device, + xr_cfg=self.xr, ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py index eebb79e1d315..0a9e66310325 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_rmp_rel_env_cfg.py @@ -6,11 +6,11 @@ import os +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils -from isaaclab.devices.device_base import DeviceBase, DevicesCfg +from isaaclab.devices.device_base import DevicesCfg from isaaclab.devices.keyboard import Se3KeyboardCfg -from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg -from isaaclab.devices.openxr.retargeters import GripperRetargeterCfg, Se3RelRetargeterCfg from isaaclab.devices.spacemouse import Se3SpaceMouseCfg from isaaclab.envs.mdp.actions.rmpflow_actions_cfg import RMPFlowActionCfg from isaaclab.sensors import CameraCfg, FrameTransformerCfg @@ -55,10 +55,26 @@ def __post_init__(self): controller=GALBOT_LEFT_ARM_RMPFLOW_CFG, scale=1.0, body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]), - articulation_prim_expr="/World/envs/env_.*/Robot", use_relative_mode=self.use_relative_mode, ) + # Relative mode uses legacy teleop (keyboard/spacemouse) instead of XR; + # absolute mode keeps the inherited XR isaac_teleop pipeline. + if self.use_relative_mode: + self.isaac_teleop = None + self.teleop_devices = DevicesCfg( + devices={ + "keyboard": Se3KeyboardCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + ), + "spacemouse": Se3SpaceMouseCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + ), + } + ) + # Set the simulation parameters self.sim.dt = 1 / 60 self.sim.render_interval = 6 @@ -66,39 +82,6 @@ def __post_init__(self): self.decimation = 3 self.episode_length_s = 30.0 - self.teleop_devices = DevicesCfg( - devices={ - "keyboard": Se3KeyboardCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, - ), - "spacemouse": Se3SpaceMouseCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, - ), - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3RelRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_LEFT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - delta_pos_scale_factor=10.0, - delta_rot_scale_factor=10.0, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_LEFT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } - ) - ## # RmpFlow Controller for Galbot Right Arm Cube Stack Task (with Surface Gripper) @@ -122,9 +105,26 @@ def __post_init__(self): controller=GALBOT_RIGHT_ARM_RMPFLOW_CFG, scale=1.0, body_offset=RMPFlowActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]), - articulation_prim_expr="/World/envs/env_.*/Robot", use_relative_mode=self.use_relative_mode, ) + + # Relative mode uses legacy teleop (keyboard/spacemouse) instead of XR; + # absolute mode keeps the inherited XR isaac_teleop pipeline. + if self.use_relative_mode: + self.isaac_teleop = None + self.teleop_devices = DevicesCfg( + devices={ + "keyboard": Se3KeyboardCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + ), + "spacemouse": Se3SpaceMouseCfg( + pos_sensitivity=0.05, + rot_sensitivity=0.05, + ), + } + ) + # Set the simulation parameters self.sim.dt = 1 / 120 self.sim.render_interval = 6 @@ -133,40 +133,7 @@ def __post_init__(self): self.episode_length_s = 30.0 # Enable CCD to avoid tunneling - self.sim.physx.enable_ccd = True - - self.teleop_devices = DevicesCfg( - devices={ - "keyboard": Se3KeyboardCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, - ), - "spacemouse": Se3SpaceMouseCfg( - pos_sensitivity=0.05, - rot_sensitivity=0.05, - sim_device=self.sim.device, - ), - "handtracking": OpenXRDeviceCfg( - retargeters=[ - Se3RelRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, - zero_out_xy_rotation=True, - use_wrist_rotation=False, - use_wrist_position=True, - delta_pos_scale_factor=10.0, - delta_rot_scale_factor=10.0, - sim_device=self.sim.device, - ), - GripperRetargeterCfg( - bound_hand=DeviceBase.TrackingTarget.HAND_RIGHT, sim_device=self.sim.device - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), - } - ) + self.sim.physics = PhysxCfg(enable_ccd=True) ## @@ -188,7 +155,7 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=18.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(-0.5, 0.5, -0.5, 0.5), convention="ros"), ) self.scene.left_wrist_cam = CameraCfg( @@ -200,7 +167,7 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=18.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(-0.5, 0.5, -0.5, 0.5), convention="ros"), ) # Set ego view camera @@ -213,7 +180,7 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=18.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(-0.5, 0.5, -0.5, 0.5), convention="ros"), ) # Set front view camera @@ -226,7 +193,7 @@ def __post_init__(self): spawn=sim_utils.PinholeCameraCfg( focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) ), - offset=CameraCfg.OffsetCfg(pos=(1.0, 0.0, 0.6), rot=(-0.3799, 0.5963, 0.5963, -0.3799), convention="ros"), + offset=CameraCfg.OffsetCfg(pos=(1.0, 0.0, 0.6), rot=(0.5963, 0.5963, -0.3799, -0.3799), convention="ros"), ) marker_right_camera_cfg = FRAME_MARKER_CFG.copy() @@ -243,7 +210,7 @@ def __post_init__(self): name="right_camera", offset=OffsetCfg( pos=[0.0, 0.0, 0.0], - rot=(0.5, -0.5, 0.5, -0.5), + rot=(-0.5, 0.5, -0.5, 0.5), ), ), ], @@ -263,7 +230,7 @@ def __post_init__(self): name="left_camera", offset=OffsetCfg( pos=[0.0, 0.0, 0.0], - rot=(0.5, -0.5, 0.5, -0.5), + rot=(-0.5, 0.5, -0.5, 0.5), ), ), ], diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py index c3d73a3fb3c9..296b95e103a5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py @@ -3,7 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg, SurfaceGripperCfg +from isaaclab_physx.assets import SurfaceGripperCfg + +from isaaclab.assets import RigidObjectCfg from isaaclab.envs.mdp.actions.actions_cfg import SurfaceGripperBinaryActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import SceneEntityCfg @@ -33,6 +35,7 @@ class EventCfgLongSuction: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, mode="reset", @@ -100,7 +103,7 @@ def __post_init__(self): # Set each stacking cube deterministically self.scene.cube_1 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_1", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.4, 0.0, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", scale=self.cube_scale, @@ -109,7 +112,7 @@ def __post_init__(self): ) self.scene.cube_2 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_2", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.55, 0.05, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", scale=self.cube_scale, @@ -118,7 +121,7 @@ def __post_init__(self): ) self.scene.cube_3 = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/Cube_3", - init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[1, 0, 0, 0]), + init_state=RigidObjectCfg.InitialStateCfg(pos=[0.60, -0.1, 0.0203], rot=[0, 0, 0, 1]), spawn=UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", scale=self.cube_scale, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.py index ea04fcc468e9..1c3431f2637c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.py @@ -5,7 +5,6 @@ """This sub-module contains the functions that are specific to the lift environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .observations import * # noqa: F401, F403 -from .terminations import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.pyi new file mode 100644 index 000000000000..c7184d285121 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/__init__.pyi @@ -0,0 +1,41 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "cube_orientations_in_world_frame", + "cube_poses_in_base_frame", + "cube_positions_in_world_frame", + "ee_frame_pos", + "ee_frame_pose_in_base_frame", + "ee_frame_quat", + "gripper_pos", + "instance_randomize_cube_orientations_in_world_frame", + "instance_randomize_cube_positions_in_world_frame", + "instance_randomize_object_obs", + "object_abs_obs_in_base_frame", + "object_grasped", + "object_obs", + "object_stacked", + "cubes_stacked", +] + +from .observations import ( + cube_orientations_in_world_frame, + cube_poses_in_base_frame, + cube_positions_in_world_frame, + ee_frame_pos, + ee_frame_pose_in_base_frame, + ee_frame_quat, + gripper_pos, + instance_randomize_cube_orientations_in_world_frame, + instance_randomize_cube_positions_in_world_frame, + instance_randomize_object_obs, + object_abs_obs_in_base_frame, + object_grasped, + object_obs, + object_stacked, +) +from .terminations import cubes_stacked +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py index 3d9e1db48629..4d148d6ba216 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py @@ -11,14 +11,13 @@ from typing import TYPE_CHECKING import torch - -from isaacsim.core.utils.extensions import enable_extension +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, AssetBase from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import Articulation, AssetBase from isaaclab.envs import ManagerBasedEnv @@ -30,7 +29,23 @@ def set_default_joint_pose( ): # Set the default pose for robots in all envs asset = env.scene[asset_cfg.name] - asset.data.default_joint_pos = torch.tensor(default_pose, device=env.device).repeat(env.num_envs, 1) + # Convert default_pose to 1D array and create joint indices + default_pose_1d = torch.tensor(default_pose, device=env.device).repeat(env.num_envs, 1).flatten() + num_joints = len(default_pose) + joint_ids = torch.arange(num_joints, device=env.device, dtype=torch.int32) + # Use update_default_joint_values kernel to update all joints for all environments + from isaaclab_physx.assets.articulation.kernels import update_default_joint_values + + wp.launch( + update_default_joint_values, + dim=(env.num_envs, num_joints), + inputs=[ + default_pose_1d, + joint_ids, + ], + outputs=[asset.data.default_joint_pos], + device=env.device, + ) def randomize_joint_by_gaussian_offset( @@ -43,21 +58,22 @@ def randomize_joint_by_gaussian_offset( asset: Articulation = env.scene[asset_cfg.name] # Add gaussian noise to joint states - joint_pos = asset.data.default_joint_pos[env_ids].clone() - joint_vel = asset.data.default_joint_vel[env_ids].clone() + joint_pos = asset.data.default_joint_pos.torch[env_ids].clone() + joint_vel = asset.data.default_joint_vel.torch[env_ids].clone() joint_pos += math_utils.sample_gaussian(mean, std, joint_pos.shape, joint_pos.device) # Clamp joint pos to limits - joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids] + joint_pos_limits = asset.data.soft_joint_pos_limits.torch[env_ids] joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1]) # Don't noise the gripper poses - joint_pos[:, -2:] = asset.data.default_joint_pos[env_ids, -2:] + joint_pos[:, -2:] = asset.data.default_joint_pos.torch[env_ids, -2:] # Set into the physics simulation - asset.set_joint_position_target(joint_pos, env_ids=env_ids) - asset.set_joint_velocity_target(joint_vel, env_ids=env_ids) - asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids) + asset.set_joint_position_target_index(target=joint_pos, env_ids=env_ids) + asset.set_joint_velocity_target_index(target=joint_vel, env_ids=env_ids) + asset.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + asset.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) def sample_random_color(base=(0.75, 0.75, 0.75), variation=0.1): @@ -187,11 +203,12 @@ def randomize_object_pose( pose_tensor = torch.tensor([pose_list[i]], device=env.device) positions = pose_tensor[:, 0:3] + env.scene.env_origins[cur_env, 0:3] orientations = math_utils.quat_from_euler_xyz(pose_tensor[:, 3], pose_tensor[:, 4], pose_tensor[:, 5]) - asset.write_root_pose_to_sim( - torch.cat([positions, orientations], dim=-1), env_ids=torch.tensor([cur_env], device=env.device) + asset.write_root_pose_to_sim_index( + root_pose=torch.cat([positions, orientations], dim=-1), + env_ids=torch.tensor([cur_env], device=env.device), ) - asset.write_root_velocity_to_sim( - torch.zeros(1, 6, device=env.device), env_ids=torch.tensor([cur_env], device=env.device) + asset.write_root_velocity_to_sim_index( + root_velocity=torch.zeros(1, 6, device=env.device), env_ids=torch.tensor([cur_env], device=env.device) ) @@ -225,19 +242,23 @@ def randomize_rigid_objects_in_focus( asset = env.scene[asset_cfg.name] # Randomly select an object to bring into focus - object_id = random.randint(0, asset.num_objects - 1) + object_id = random.randint(0, asset.num_bodies - 1) selected_ids.append(object_id) - # Create object state tensor - object_states = torch.stack([out_focus_state] * asset.num_objects).to(device=env.device) + # Create object state tensor with shape (num_envs, num_objects, state_dim) + # Since we're updating one environment, we need shape (1, num_objects, state_dim) + object_states = torch.stack([out_focus_state] * asset.num_bodies).to(device=env.device).unsqueeze(0) pose_tensor = torch.tensor([pose_list[asset_idx]], device=env.device) positions = pose_tensor[:, 0:3] + env.scene.env_origins[cur_env, 0:3] orientations = math_utils.quat_from_euler_xyz(pose_tensor[:, 3], pose_tensor[:, 4], pose_tensor[:, 5]) - object_states[object_id, 0:3] = positions - object_states[object_id, 3:7] = orientations + object_states[0, object_id, 0:3] = positions.squeeze(0) + object_states[0, object_id, 3:7] = orientations.squeeze(0) - asset.write_object_state_to_sim( - object_state=object_states, env_ids=torch.tensor([cur_env], device=env.device) + asset.write_body_pose_to_sim_index( + body_poses=object_states[:, :, :7], env_ids=torch.tensor([cur_env], device=env.device) + ) + asset.write_body_link_velocity_to_sim_index( + body_velocities=object_states[:, :, 7:], env_ids=torch.tensor([cur_env], device=env.device) ) env.rigid_objects_in_focus.append(selected_ids) @@ -275,6 +296,8 @@ def randomize_visual_texture_material( # textures = [default_texture] # enable replicator extension if not already enabled + from isaacsim.core.experimental.utils.app import enable_extension + enable_extension("omni.replicator.core") # we import the module here since we may not always need the replicator import omni.replicator.core as rep diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/observations.py index 31123e71a308..1269ef260ff5 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/observations.py @@ -8,14 +8,15 @@ from typing import TYPE_CHECKING, Literal import torch +import warp as wp import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection from isaaclab.managers import SceneEntityCfg -from isaaclab.sensors import FrameTransformer if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection from isaaclab.envs import ManagerBasedRLEnv + from isaaclab.sensors import FrameTransformer def cube_positions_in_world_frame( @@ -29,7 +30,10 @@ def cube_positions_in_world_frame( cube_2: RigidObject = env.scene[cube_2_cfg.name] cube_3: RigidObject = env.scene[cube_3_cfg.name] - return torch.cat((cube_1.data.root_pos_w, cube_2.data.root_pos_w, cube_3.data.root_pos_w), dim=1) + return torch.cat( + (cube_1.data.root_pos_w.torch, cube_2.data.root_pos_w.torch, cube_3.data.root_pos_w.torch), + dim=1, + ) def instance_randomize_cube_positions_in_world_frame( @@ -50,9 +54,9 @@ def instance_randomize_cube_positions_in_world_frame( cube_2_pos_w = [] cube_3_pos_w = [] for env_id in range(env.num_envs): - cube_1_pos_w.append(cube_1.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][0], :3]) - cube_2_pos_w.append(cube_2.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][1], :3]) - cube_3_pos_w.append(cube_3.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][2], :3]) + cube_1_pos_w.append(cube_1.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][0], :3]) + cube_2_pos_w.append(cube_2.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][1], :3]) + cube_3_pos_w.append(cube_3.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][2], :3]) cube_1_pos_w = torch.stack(cube_1_pos_w) cube_2_pos_w = torch.stack(cube_2_pos_w) cube_3_pos_w = torch.stack(cube_3_pos_w) @@ -71,7 +75,14 @@ def cube_orientations_in_world_frame( cube_2: RigidObject = env.scene[cube_2_cfg.name] cube_3: RigidObject = env.scene[cube_3_cfg.name] - return torch.cat((cube_1.data.root_quat_w, cube_2.data.root_quat_w, cube_3.data.root_quat_w), dim=1) + return torch.cat( + ( + cube_1.data.root_quat_w.torch, + cube_2.data.root_quat_w.torch, + cube_3.data.root_quat_w.torch, + ), + dim=1, + ) def instance_randomize_cube_orientations_in_world_frame( @@ -92,9 +103,9 @@ def instance_randomize_cube_orientations_in_world_frame( cube_2_quat_w = [] cube_3_quat_w = [] for env_id in range(env.num_envs): - cube_1_quat_w.append(cube_1.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][0], :4]) - cube_2_quat_w.append(cube_2.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][1], :4]) - cube_3_quat_w.append(cube_3.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][2], :4]) + cube_1_quat_w.append(cube_1.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][0], :4]) + cube_2_quat_w.append(cube_2.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][1], :4]) + cube_3_quat_w.append(cube_3.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][2], :4]) cube_1_quat_w = torch.stack(cube_1_quat_w) cube_2_quat_w = torch.stack(cube_2_quat_w) cube_3_quat_w = torch.stack(cube_3_quat_w) @@ -129,16 +140,16 @@ def object_obs( cube_3: RigidObject = env.scene[cube_3_cfg.name] ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] - cube_1_pos_w = cube_1.data.root_pos_w - cube_1_quat_w = cube_1.data.root_quat_w + cube_1_pos_w = cube_1.data.root_pos_w.torch + cube_1_quat_w = cube_1.data.root_quat_w.torch - cube_2_pos_w = cube_2.data.root_pos_w - cube_2_quat_w = cube_2.data.root_quat_w + cube_2_pos_w = cube_2.data.root_pos_w.torch + cube_2_quat_w = cube_2.data.root_quat_w.torch - cube_3_pos_w = cube_3.data.root_pos_w - cube_3_quat_w = cube_3.data.root_quat_w + cube_3_pos_w = cube_3.data.root_pos_w.torch + cube_3_quat_w = cube_3.data.root_quat_w.torch - ee_pos_w = ee_frame.data.target_pos_w[:, 0, :] + ee_pos_w = ee_frame.data.target_pos_w.torch[:, 0, :] gripper_to_cube_1 = cube_1_pos_w - ee_pos_w gripper_to_cube_2 = cube_2_pos_w - ee_pos_w gripper_to_cube_3 = cube_3_pos_w - ee_pos_w @@ -203,12 +214,12 @@ def instance_randomize_object_obs( cube_2_quat_w = [] cube_3_quat_w = [] for env_id in range(env.num_envs): - cube_1_pos_w.append(cube_1.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][0], :3]) - cube_2_pos_w.append(cube_2.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][1], :3]) - cube_3_pos_w.append(cube_3.data.object_pos_w[env_id, env.rigid_objects_in_focus[env_id][2], :3]) - cube_1_quat_w.append(cube_1.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][0], :4]) - cube_2_quat_w.append(cube_2.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][1], :4]) - cube_3_quat_w.append(cube_3.data.object_quat_w[env_id, env.rigid_objects_in_focus[env_id][2], :4]) + cube_1_pos_w.append(cube_1.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][0], :3]) + cube_2_pos_w.append(cube_2.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][1], :3]) + cube_3_pos_w.append(cube_3.data.body_link_pos_w.torch[env_id, env.rigid_objects_in_focus[env_id][2], :3]) + cube_1_quat_w.append(cube_1.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][0], :4]) + cube_2_quat_w.append(cube_2.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][1], :4]) + cube_3_quat_w.append(cube_3.data.body_link_quat_w.torch[env_id, env.rigid_objects_in_focus[env_id][2], :4]) cube_1_pos_w = torch.stack(cube_1_pos_w) cube_2_pos_w = torch.stack(cube_2_pos_w) cube_3_pos_w = torch.stack(cube_3_pos_w) @@ -216,7 +227,7 @@ def instance_randomize_object_obs( cube_2_quat_w = torch.stack(cube_2_quat_w) cube_3_quat_w = torch.stack(cube_3_quat_w) - ee_pos_w = ee_frame.data.target_pos_w[:, 0, :] + ee_pos_w = ee_frame.data.target_pos_w.torch[:, 0, :] gripper_to_cube_1 = cube_1_pos_w - ee_pos_w gripper_to_cube_2 = cube_2_pos_w - ee_pos_w gripper_to_cube_3 = cube_3_pos_w - ee_pos_w @@ -246,14 +257,14 @@ def instance_randomize_object_obs( def ee_frame_pos(env: ManagerBasedRLEnv, ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame")) -> torch.Tensor: ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] - ee_frame_pos = ee_frame.data.target_pos_w[:, 0, :] - env.scene.env_origins[:, 0:3] + ee_frame_pos = ee_frame.data.target_pos_w.torch[:, 0, :] - env.scene.env_origins[:, 0:3] return ee_frame_pos def ee_frame_quat(env: ManagerBasedRLEnv, ee_frame_cfg: SceneEntityCfg = SceneEntityCfg("ee_frame")) -> torch.Tensor: ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] - ee_frame_quat = ee_frame.data.target_quat_w[:, 0, :] + ee_frame_quat = ee_frame.data.target_quat_w.torch[:, 0, :] return ee_frame_quat @@ -271,7 +282,7 @@ def gripper_pos( # Handle multiple surface grippers by concatenating their states gripper_states = [] for gripper_name, surface_gripper in env.scene.surface_grippers.items(): - gripper_states.append(surface_gripper.state.view(-1, 1)) + gripper_states.append(wp.to_torch(surface_gripper.state).view(-1, 1)) if len(gripper_states) == 1: return gripper_states[0] @@ -282,8 +293,8 @@ def gripper_pos( if hasattr(env.cfg, "gripper_joint_names"): gripper_joint_ids, _ = robot.find_joints(env.cfg.gripper_joint_names) assert len(gripper_joint_ids) == 2, "Observation gripper_pos only support parallel gripper for now" - finger_joint_1 = robot.data.joint_pos[:, gripper_joint_ids[0]].clone().unsqueeze(1) - finger_joint_2 = -1 * robot.data.joint_pos[:, gripper_joint_ids[1]].clone().unsqueeze(1) + finger_joint_1 = robot.data.joint_pos.torch[:, gripper_joint_ids[0]].clone().unsqueeze(1) + finger_joint_2 = -1 * robot.data.joint_pos.torch[:, gripper_joint_ids[1]].clone().unsqueeze(1) return torch.cat((finger_joint_1, finger_joint_2), dim=1) else: raise NotImplementedError("[Error] Cannot find gripper_joint_names in the environment config") @@ -302,13 +313,13 @@ def object_grasped( ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] object: RigidObject = env.scene[object_cfg.name] - object_pos = object.data.root_pos_w - end_effector_pos = ee_frame.data.target_pos_w[:, 0, :] + object_pos = object.data.root_pos_w.torch + end_effector_pos = ee_frame.data.target_pos_w.torch[:, 0, :] pose_diff = torch.linalg.vector_norm(object_pos - end_effector_pos, dim=1) if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1, 1) # 1: closed, 0: closing, -1: open suction_cup_is_closed = (suction_cup_status == 1).to(torch.float32) grasped = torch.logical_and(suction_cup_is_closed, pose_diff < diff_threshold) @@ -320,7 +331,7 @@ def object_grasped( grasped = torch.logical_and( pose_diff < diff_threshold, torch.abs( - robot.data.joint_pos[:, gripper_joint_ids[0]] + robot.data.joint_pos.torch[:, gripper_joint_ids[0]] - torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device) ) > env.cfg.gripper_threshold, @@ -328,7 +339,7 @@ def object_grasped( grasped = torch.logical_and( grasped, torch.abs( - robot.data.joint_pos[:, gripper_joint_ids[1]] + robot.data.joint_pos.torch[:, gripper_joint_ids[1]] - torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device) ) > env.cfg.gripper_threshold, @@ -352,7 +363,7 @@ def object_stacked( upper_object: RigidObject = env.scene[upper_object_cfg.name] lower_object: RigidObject = env.scene[lower_object_cfg.name] - pos_diff = upper_object.data.root_pos_w - lower_object.data.root_pos_w + pos_diff = upper_object.data.root_pos_w.torch - lower_object.data.root_pos_w.torch height_dist = torch.linalg.vector_norm(pos_diff[:, 2:], dim=1) xy_dist = torch.linalg.vector_norm(pos_diff[:, :2], dim=1) @@ -360,7 +371,7 @@ def object_stacked( if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1, 1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1, 1) # 1: closed, 0: closing, -1: open suction_cup_is_open = (suction_cup_status == -1).to(torch.float32) stacked = torch.logical_and(suction_cup_is_open, stacked) @@ -370,7 +381,7 @@ def object_stacked( assert len(gripper_joint_ids) == 2, "Observations only support parallel gripper for now" stacked = torch.logical_and( torch.isclose( - robot.data.joint_pos[:, gripper_joint_ids[0]], + robot.data.joint_pos.torch[:, gripper_joint_ids[0]], torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device), atol=1e-4, rtol=1e-4, @@ -379,7 +390,7 @@ def object_stacked( ) stacked = torch.logical_and( torch.isclose( - robot.data.joint_pos[:, gripper_joint_ids[1]], + robot.data.joint_pos.torch[:, gripper_joint_ids[1]], torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device), atol=1e-4, rtol=1e-4, @@ -406,17 +417,17 @@ def cube_poses_in_base_frame( cube_2: RigidObject = env.scene[cube_2_cfg.name] cube_3: RigidObject = env.scene[cube_3_cfg.name] - pos_cube_1_world = cube_1.data.root_pos_w - pos_cube_2_world = cube_2.data.root_pos_w - pos_cube_3_world = cube_3.data.root_pos_w + pos_cube_1_world = cube_1.data.root_pos_w.torch + pos_cube_2_world = cube_2.data.root_pos_w.torch + pos_cube_3_world = cube_3.data.root_pos_w.torch - quat_cube_1_world = cube_1.data.root_quat_w - quat_cube_2_world = cube_2.data.root_quat_w - quat_cube_3_world = cube_3.data.root_quat_w + quat_cube_1_world = cube_1.data.root_quat_w.torch + quat_cube_2_world = cube_2.data.root_quat_w.torch + quat_cube_3_world = cube_3.data.root_quat_w.torch robot: Articulation = env.scene[robot_cfg.name] - root_pos_w = robot.data.root_pos_w - root_quat_w = robot.data.root_quat_w + root_pos_w = robot.data.root_pos_w.torch + root_quat_w = robot.data.root_quat_w.torch pos_cube_1_base, quat_cube_1_base = math_utils.subtract_frame_transforms( root_pos_w, root_quat_w, pos_cube_1_world, quat_cube_1_world @@ -465,17 +476,17 @@ def object_abs_obs_in_base_frame( ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] robot: Articulation = env.scene[robot_cfg.name] - root_pos_w = robot.data.root_pos_w - root_quat_w = robot.data.root_quat_w + root_pos_w = robot.data.root_pos_w.torch + root_quat_w = robot.data.root_quat_w.torch - cube_1_pos_w = cube_1.data.root_pos_w - cube_1_quat_w = cube_1.data.root_quat_w + cube_1_pos_w = cube_1.data.root_pos_w.torch + cube_1_quat_w = cube_1.data.root_quat_w.torch - cube_2_pos_w = cube_2.data.root_pos_w - cube_2_quat_w = cube_2.data.root_quat_w + cube_2_pos_w = cube_2.data.root_pos_w.torch + cube_2_quat_w = cube_2.data.root_quat_w.torch - cube_3_pos_w = cube_3.data.root_pos_w - cube_3_quat_w = cube_3.data.root_quat_w + cube_3_pos_w = cube_3.data.root_pos_w.torch + cube_3_quat_w = cube_3.data.root_quat_w.torch pos_cube_1_base, quat_cube_1_base = math_utils.subtract_frame_transforms( root_pos_w, root_quat_w, cube_1_pos_w, cube_1_quat_w @@ -487,8 +498,8 @@ def object_abs_obs_in_base_frame( root_pos_w, root_quat_w, cube_3_pos_w, cube_3_quat_w ) - ee_pos_w = ee_frame.data.target_pos_w[:, 0, :] - ee_quat_w = ee_frame.data.target_quat_w[:, 0, :] + ee_pos_w = ee_frame.data.target_pos_w.torch[:, 0, :] + ee_quat_w = ee_frame.data.target_quat_w.torch[:, 0, :] ee_pos_base, ee_quat_base = math_utils.subtract_frame_transforms(root_pos_w, root_quat_w, ee_pos_w, ee_quat_w) return torch.cat( @@ -516,12 +527,12 @@ def ee_frame_pose_in_base_frame( The end effector pose in the robot base frame. """ ee_frame: FrameTransformer = env.scene[ee_frame_cfg.name] - ee_frame_pos_w = ee_frame.data.target_pos_w[:, 0, :] - ee_frame_quat_w = ee_frame.data.target_quat_w[:, 0, :] + ee_frame_pos_w = ee_frame.data.target_pos_w.torch[:, 0, :] + ee_frame_quat_w = ee_frame.data.target_quat_w.torch[:, 0, :] robot: Articulation = env.scene[robot_cfg.name] - root_pos_w = robot.data.root_pos_w - root_quat_w = robot.data.root_quat_w + root_pos_w = robot.data.root_pos_w.torch + root_quat_w = robot.data.root_quat_w.torch ee_pos_in_base, ee_quat_in_base = math_utils.subtract_frame_transforms( root_pos_w, root_quat_w, ee_frame_pos_w, ee_frame_quat_w ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/terminations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/terminations.py index d48680329999..e709821972cf 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/terminations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/terminations.py @@ -14,11 +14,12 @@ from typing import TYPE_CHECKING import torch +import warp as wp -from isaaclab.assets import Articulation, RigidObject from isaaclab.managers import SceneEntityCfg if TYPE_CHECKING: + from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedRLEnv @@ -27,40 +28,49 @@ def cubes_stacked( robot_cfg: SceneEntityCfg = SceneEntityCfg("robot"), cube_1_cfg: SceneEntityCfg = SceneEntityCfg("cube_1"), cube_2_cfg: SceneEntityCfg = SceneEntityCfg("cube_2"), - cube_3_cfg: SceneEntityCfg = SceneEntityCfg("cube_3"), + cube_3_cfg: SceneEntityCfg | None = SceneEntityCfg("cube_3"), xy_threshold: float = 0.04, height_threshold: float = 0.005, height_diff: float = 0.0468, - atol=0.0001, - rtol=0.0001, -): + atol: float = 0.0001, + rtol: float = 0.0001, +) -> torch.Tensor: robot: Articulation = env.scene[robot_cfg.name] cube_1: RigidObject = env.scene[cube_1_cfg.name] cube_2: RigidObject = env.scene[cube_2_cfg.name] - cube_3: RigidObject = env.scene[cube_3_cfg.name] - pos_diff_c12 = cube_1.data.root_pos_w - cube_2.data.root_pos_w - pos_diff_c23 = cube_2.data.root_pos_w - cube_3.data.root_pos_w + pos_diff_c12 = cube_1.data.root_pos_w.torch - cube_2.data.root_pos_w.torch # Compute cube position difference in x-y plane - xy_dist_c12 = torch.norm(pos_diff_c12[:, :2], dim=1) - xy_dist_c23 = torch.norm(pos_diff_c23[:, :2], dim=1) + xy_dist_c12 = torch.linalg.norm(pos_diff_c12[:, :2], dim=1) # Compute cube height difference - h_dist_c12 = torch.norm(pos_diff_c12[:, 2:], dim=1) - h_dist_c23 = torch.norm(pos_diff_c23[:, 2:], dim=1) + h_dist_c12 = torch.linalg.norm(pos_diff_c12[:, 2:], dim=1) # Check cube positions - stacked = torch.logical_and(xy_dist_c12 < xy_threshold, xy_dist_c23 < xy_threshold) + stacked = xy_dist_c12 < xy_threshold stacked = torch.logical_and(h_dist_c12 - height_diff < height_threshold, stacked) stacked = torch.logical_and(pos_diff_c12[:, 2] < 0.0, stacked) - stacked = torch.logical_and(h_dist_c23 - height_diff < height_threshold, stacked) - stacked = torch.logical_and(pos_diff_c23[:, 2] < 0.0, stacked) + + if cube_3_cfg is not None: + cube_3: RigidObject = env.scene[cube_3_cfg.name] + pos_diff_c23 = cube_2.data.root_pos_w.torch - cube_3.data.root_pos_w.torch + + # Compute cube position difference in x-y plane + xy_dist_c23 = torch.linalg.norm(pos_diff_c23[:, :2], dim=1) + + # Compute cube height difference + h_dist_c23 = torch.linalg.norm(pos_diff_c23[:, 2:], dim=1) + + # Check cube positions + stacked = torch.logical_and(xy_dist_c23 < xy_threshold, stacked) + stacked = torch.logical_and(h_dist_c23 - height_diff < height_threshold, stacked) + stacked = torch.logical_and(pos_diff_c23[:, 2] < 0.0, stacked) # Check gripper positions if hasattr(env.scene, "surface_grippers") and len(env.scene.surface_grippers) > 0: surface_gripper = env.scene.surface_grippers["surface_gripper"] - suction_cup_status = surface_gripper.state.view(-1) # 1: closed, 0: closing, -1: open + suction_cup_status = wp.to_torch(surface_gripper.state).view(-1) # 1: closed, 0: closing, -1: open suction_cup_is_open = (suction_cup_status == -1).to(torch.float32) stacked = torch.logical_and(suction_cup_is_open, stacked) @@ -71,7 +81,7 @@ def cubes_stacked( stacked = torch.logical_and( torch.isclose( - robot.data.joint_pos[:, gripper_joint_ids[0]], + robot.data.joint_pos.torch[:, gripper_joint_ids[0]], torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device), atol=atol, rtol=rtol, @@ -80,7 +90,7 @@ def cubes_stacked( ) stacked = torch.logical_and( torch.isclose( - robot.data.joint_pos[:, gripper_joint_ids[1]], + robot.data.joint_pos.torch[:, gripper_joint_ids[1]], torch.tensor(env.cfg.gripper_open_val, dtype=torch.float32).to(env.device), atol=atol, rtol=rtol, diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_env_cfg.py index 5c772a11760e..7706fa3923e3 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_env_cfg.py @@ -5,6 +5,8 @@ from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.devices.openxr import XrCfg @@ -27,7 +29,7 @@ ## @configclass class ObjectTableSceneCfg(InteractiveSceneCfg): - """Configuration for the lift scene with a robot and a object. + """Configuration for the stacking scene with a robot and objects. This is the abstract base implementation, the exact scene is defined in the derived classes which need to set the target object, robot and end-effector frames """ @@ -40,7 +42,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0, 0, 0.707, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) @@ -180,7 +182,7 @@ class StackEnvCfg(ManagerBasedRLEnvCfg): xr: XrCfg = XrCfg( anchor_pos=(-0.1, -0.5, -1.05), - anchor_rot=(0.866, 0, 0, -0.5), + anchor_rot=(0, 0, -0.5, 0.866), ) def __post_init__(self): @@ -192,8 +194,9 @@ def __post_init__(self): self.sim.dt = 0.01 # 100Hz self.sim.render_interval = 2 - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 - self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_found_lost_aggregate_pairs_capacity=1024 * 1024 * 4, + gpu_total_aggregate_pairs_capacity=2**21, + friction_correlation_distance=0.00625, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_instance_randomize_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_instance_randomize_env_cfg.py index 526297b95617..29c280ab98ef 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_instance_randomize_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/stack_instance_randomize_env_cfg.py @@ -5,6 +5,8 @@ from dataclasses import MISSING +from isaaclab_physx.physics import PhysxCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg from isaaclab.envs import ManagerBasedRLEnvCfg @@ -38,7 +40,7 @@ class ObjectTableSceneCfg(InteractiveSceneCfg): # Table table = AssetBaseCfg( prim_path="{ENV_REGEX_NS}/Table", - init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0.707, 0, 0, 0.707]), + init_state=AssetBaseCfg.InitialStateCfg(pos=[0.5, 0, 0], rot=[0, 0, 0.707, 0.707]), spawn=UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd"), ) @@ -128,8 +130,9 @@ def __post_init__(self): self.sim.dt = 0.01 # 100Hz self.sim.render_interval = self.decimation - self.sim.physx.bounce_threshold_velocity = 0.2 - self.sim.physx.bounce_threshold_velocity = 0.01 - self.sim.physx.gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 4 - self.sim.physx.gpu_total_aggregate_pairs_capacity = 16 * 1024 - self.sim.physx.friction_correlation_distance = 0.00625 + self.sim.physics = PhysxCfg( + bounce_threshold_velocity=0.01, + gpu_found_lost_aggregate_pairs_capacity=1024 * 1024 * 4, + gpu_total_aggregate_pairs_capacity=16 * 1024, + friction_correlation_distance=0.00625, + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.py index f611f9e8eb4d..fc4816bd7ca8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.py @@ -5,4 +5,6 @@ """Navigation environments.""" -from .config import anymal_c +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.pyi new file mode 100644 index 000000000000..a4da2f326f79 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/__init__.pyi @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "anymal_c", +] + +from .config import anymal_c diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py index 96b60705bb50..4d4ad67e9305 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py @@ -103,6 +103,7 @@ class CommandsCfg: simple_heading=False, resampling_time_range=(8.0, 8.0), debug_vis=True, + position_success_threshold=0.5, ranges=mdp.UniformPose2dCommandCfg.Ranges(pos_x=(-3.0, 3.0), pos_y=(-3.0, 3.0), heading=(-math.pi, math.pi)), ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.py index 213391d362b1..8f9a146abdc8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.py @@ -5,7 +5,6 @@ """This sub-module contains the functions that are specific to the locomotion environments.""" -from isaaclab.envs.mdp import * # noqa: F401, F403 +from isaaclab.utils.module import lazy_export -from .pre_trained_policy_action import * # noqa: F401, F403 -from .rewards import * # noqa: F401, F403 +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.pyi new file mode 100644 index 000000000000..af793c2c205a --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/__init__.pyi @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "PreTrainedPolicyActionCfg", + "heading_command_error_abs", + "position_command_error_tanh", +] + +from .pre_trained_policy_action_cfg import PreTrainedPolicyActionCfg +from .rewards import heading_command_error_abs, position_command_error_tanh +from isaaclab.envs.mdp import * diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py index c25558c78846..4857d63711e1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py @@ -5,22 +5,24 @@ from __future__ import annotations -from dataclasses import MISSING from typing import TYPE_CHECKING import torch import isaaclab.utils.math as math_utils -from isaaclab.assets import Articulation -from isaaclab.managers import ActionTerm, ActionTermCfg, ObservationGroupCfg, ObservationManager +from isaaclab.managers import ActionTerm, ObservationManager from isaaclab.markers import VisualizationMarkers from isaaclab.markers.config import BLUE_ARROW_X_MARKER_CFG, GREEN_ARROW_X_MARKER_CFG -from isaaclab.utils import configclass from isaaclab.utils.assets import check_file_path, read_file +from .pre_trained_policy_action_cfg import PreTrainedPolicyActionCfg # noqa: F401 + if TYPE_CHECKING: + from isaaclab.assets import Articulation from isaaclab.envs import ManagerBasedRLEnv + from .pre_trained_policy_action_cfg import PreTrainedPolicyActionCfg + class PreTrainedPolicyAction(ActionTerm): r"""Pre-trained policy action term. @@ -135,11 +137,13 @@ def _debug_vis_callback(self, event): return # get marker location # -- base state - base_pos_w = self.robot.data.root_pos_w.clone() + base_pos_w = self.robot.data.root_pos_w.torch.clone() base_pos_w[:, 2] += 0.5 # -- resolve the scales and quaternions vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.raw_actions[:, :2]) - vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2]) + vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow( + self.robot.data.root_lin_vel_b.torch[:, :2] + ) # display markers self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale) self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale) @@ -160,30 +164,7 @@ def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torc zeros = torch.zeros_like(heading_angle) arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle) # convert everything back from base to world frame - base_quat_w = self.robot.data.root_quat_w + base_quat_w = self.robot.data.root_quat_w.torch arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat) return arrow_scale, arrow_quat - - -@configclass -class PreTrainedPolicyActionCfg(ActionTermCfg): - """Configuration for pre-trained policy action term. - - See :class:`PreTrainedPolicyAction` for more details. - """ - - class_type: type[ActionTerm] = PreTrainedPolicyAction - """ Class of the action term.""" - asset_name: str = MISSING - """Name of the asset in the environment for which the commands are generated.""" - policy_path: str = MISSING - """Path to the low level policy (.pt files).""" - low_level_decimation: int = 4 - """Decimation factor for the low level action term.""" - low_level_actions: ActionTermCfg = MISSING - """Low level action configuration.""" - low_level_observations: ObservationGroupCfg = MISSING - """Low level observation configuration.""" - debug_vis: bool = True - """Whether to visualize debug information. Defaults to False.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action_cfg.py new file mode 100644 index 000000000000..0c7b54152dfb --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/pre_trained_policy_action_cfg.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.managers import ActionTermCfg, ObservationGroupCfg +from isaaclab.utils import configclass + + +@configclass +class PreTrainedPolicyActionCfg(ActionTermCfg): + """Configuration for pre-trained policy action term. + + See :class:`PreTrainedPolicyAction` for more details. + """ + + class_type: type | str = "{DIR}.pre_trained_policy_action:PreTrainedPolicyAction" + """Class of the action term.""" + + asset_name: str = MISSING + """Name of the asset in the environment for which the commands are generated.""" + + policy_path: str = MISSING + """Path to the low level policy (.pt files).""" + + low_level_decimation: int = 4 + """Decimation factor for the low level action term.""" + + low_level_actions: ActionTermCfg = MISSING + """Low level action configuration.""" + + low_level_observations: ObservationGroupCfg = MISSING + """Low level observation configuration.""" + + debug_vis: bool = True + """Whether to visualize debug information. Defaults to False.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/rewards.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/rewards.py index ccaad755d087..8490bbb527bb 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/rewards.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/navigation/mdp/rewards.py @@ -17,7 +17,7 @@ def position_command_error_tanh(env: ManagerBasedRLEnv, std: float, command_name """Reward position tracking with tanh kernel.""" command = env.command_manager.get_command(command_name) des_pos_b = command[:, :3] - distance = torch.norm(des_pos_b, dim=1) + distance = torch.linalg.norm(des_pos_b, dim=1) return 1 - torch.tanh(distance / std) diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.py index 495b207c3194..6a10cd07807d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.py @@ -5,5 +5,6 @@ """Sub-package with utilities, data collectors and environment wrappers.""" -from .importer import import_packages -from .parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.pyi b/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.pyi new file mode 100644 index 000000000000..6a0ba23b9429 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/__init__.pyi @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "import_packages", + "get_checkpoint_path", + "load_cfg_from_registry", + "parse_env_cfg", + "PresetCfg", + "preset", + "resolve_task_config", + "hydra_task_config", + "resolve_presets", + "add_launcher_args", + "launch_simulation", + "compute_kit_requirements", +] + +from .hydra import PresetCfg, preset, hydra_task_config, resolve_task_config, resolve_presets +from .importer import import_packages +from .parse_cfg import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg +from .sim_launcher import add_launcher_args, launch_simulation, compute_kit_requirements diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/hydra.py b/source/isaaclab_tasks/isaaclab_tasks/utils/hydra.py index 525b425917fa..320632705852 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/utils/hydra.py +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/hydra.py @@ -3,105 +3,664 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Sub-module with utilities for the hydra configuration system.""" +"""Hydra utilities with REPLACE-only preset system. + +This module bypasses Hydra's default MERGE behavior for config groups. +Instead, when a preset is selected, the entire config section is REPLACED +with the preset -- no field merging. + +Presets are declared by subclassing :class:`PresetCfg` (or using the +:func:`preset` factory for scalars). The system recursively discovers all +presets and their paths automatically, including inside dict-valued fields. + +Override categories (applied in order): + 1. Global presets: ``presets=inference,newton_mjwarp`` -- apply everywhere matching + 2. Path presets: ``env.backend=newton_mjwarp`` -- REPLACE specific section + 3. Preset-path scalars: ``env.backend.dt=0.001`` -- handled by us + 4. Global scalars: ``env.decimation=10`` -- handled by Hydra + +Example usage:: + + presets=newton_mjwarp env.backend.dt=0.001 env.decimation=10 +""" import functools -from collections.abc import Callable +import sys +import warnings +from collections.abc import Callable, Mapping -try: - import hydra - from hydra.core.config_store import ConfigStore - from omegaconf import DictConfig, OmegaConf -except ImportError: - raise ImportError("Hydra is not installed. Please install it by running 'pip install hydra-core'.") +import hydra +from hydra.core.config_store import ConfigStore +from omegaconf import OmegaConf -from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg from isaaclab.envs.utils.spaces import replace_env_cfg_spaces_with_strings, replace_strings_with_env_cfg_spaces -from isaaclab.utils import replace_slices_with_strings, replace_strings_with_slices +from isaaclab.utils import configclass, replace_slices_with_strings, replace_strings_with_slices + +_LITERAL_MAP = {"true": True, "false": False, "none": None, "null": None} + +# Map of deprecated preset name -> current name. Newton-backend solver presets are +# now prefixed with ``newton_`` so they group together in autocomplete and read +# distinctly from backend/package/visualizer names that also use the word +# ``newton``. Aliases keep legacy CLI invocations and ``PresetCfg`` field accesses +# working with a :class:`FutureWarning`; they will be removed in a future release. +_LEGACY_PRESET_ALIASES = {"newton": "newton_mjwarp", "kamino": "newton_kamino"} + + +def _user_stacklevel() -> int: + """Compute a ``warnings.warn`` stacklevel that lands on the first frame + outside this module, so deprecation messages cite user code rather than + internal hydra-utility frames. + + Walks at most a small bounded number of frames; if no non-hydra frame is + found within the bound (frozen modules, exec'd contexts, or oddly named + ``__file__`` globals), falls back to ``stacklevel=2`` so the warning at + least jumps out of the helper that called it. + """ + max_walk = 16 + level = 1 + frame = sys._getframe(1) + while frame is not None and frame.f_globals.get("__file__") == __file__: + level += 1 + frame = frame.f_back + if level > max_walk: + return 2 + return level + + +def _known_preset_names(presets: dict) -> set[str]: + """Return all preset names declared in a collected preset dictionary.""" + return {name for section in presets.values() for fields in section.values() for name in fields} + + +def _normalize_preset_name(name: str, known_names: set[str]) -> str: + """Map a deprecated preset name to its replacement and emit a warning. + + Returns ``name`` unchanged when: + * ``name`` is not a deprecated alias, or + * the replacement is not declared in ``known_names`` (so the user-supplied + value can flow into the standard "unknown preset" error path, where + :func:`_format_unknown_presets_error` will surface the rename), or + * ``name`` is itself a real field in ``known_names`` (a user-defined preset + legitimately reusing the deprecated spelling shadows the alias). + """ + replacement = _LEGACY_PRESET_ALIASES.get(name) + if replacement is None or replacement not in known_names or name in known_names: + return name + warnings.warn( + f"Preset '{name}' is deprecated. Use '{replacement}' instead.", + FutureWarning, + stacklevel=_user_stacklevel(), + ) + return replacement -from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry +@configclass +class PresetCfg: + """Base class for declarative preset definitions. -def register_task_to_hydra( - task_name: str, agent_cfg_entry_point: str -) -> tuple[ManagerBasedRLEnvCfg | DirectRLEnvCfg, dict]: - """Register the task configuration to the Hydra configuration store. + Subclass this and define fields as preset options. + The field named ``default`` holds the config instance used + when no CLI override is given. All other fields are named + alternative presets. - This function resolves the configuration file for the environment and agent based on the task's name. - It then registers the configurations to the Hydra configuration store. + Example:: + + @configclass + class PhysicsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg() + + The preset *name* (``newton_mjwarp``) is decoupled from the config class + (``NewtonCfg``): the class describes the Newton backend, while the field + name labels which solver variant this entry selects. + """ + + def __getattr__(self, name: str): + """Alias a deprecated preset name to its replacement field. + + Raises ``AttributeError`` for any other missing attribute so that + ``hasattr`` and standard introspection keep working unchanged. The + replacement is only returned when the deprecated name is *not* itself a + real field on the subclass, so a user redefining the deprecated name + shadows the alias. + """ + replacement = _LEGACY_PRESET_ALIASES.get(name) + fields = getattr(type(self), "__dataclass_fields__", {}) + if replacement is not None and replacement in fields and name not in fields: + warnings.warn( + f"Preset '{name}' is deprecated. Use '{replacement}' instead.", + FutureWarning, + stacklevel=_user_stacklevel(), + ) + return getattr(self, replacement) + raise AttributeError(f"{type(self).__name__!s} object has no attribute {name!r}") + + +def preset(**options) -> PresetCfg: + """Create a :class:`PresetCfg` instance from keyword arguments. + + A convenience factory that dynamically builds a ``PresetCfg`` subclass + with one field per keyword argument, then returns an instance of it. + The caller **must** supply a ``default`` key. + + Example:: + + armature = preset(default=0.0, newton_mjwarp=0.01) + # Equivalent to: + # @configclass + # class _Preset(PresetCfg): + # default: float = 0.0 + # newton_mjwarp: float = 0.01 + # armature = _Preset() Args: - task_name: The name of the task. - agent_cfg_entry_point: The entry point key to resolve the agent's configuration file. + **options: Preset alternatives keyed by name. Must include ``default``. Returns: - A tuple containing the parsed environment and agent configuration objects. + A ``PresetCfg`` instance whose fields are the supplied options. + + Raises: + ValueError: If ``default`` is not provided. """ - # load the configurations - env_cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") - agent_cfg = None - if agent_cfg_entry_point: - agent_cfg = load_cfg_from_registry(task_name, agent_cfg_entry_point) - # replace gymnasium spaces with strings because OmegaConf does not support them. - # this must be done before converting the env configs to dictionary to avoid internal reinterpretations - env_cfg = replace_env_cfg_spaces_with_strings(env_cfg) - # convert the configs to dictionary - env_cfg_dict = env_cfg.to_dict() - if isinstance(agent_cfg, dict) or agent_cfg is None: - agent_cfg_dict = agent_cfg - else: - agent_cfg_dict = agent_cfg.to_dict() - cfg_dict = {"env": env_cfg_dict, "agent": agent_cfg_dict} - # replace slices with strings because OmegaConf does not support slices - cfg_dict = replace_slices_with_strings(cfg_dict) - # store the configuration to Hydra - ConfigStore.instance().store(name=task_name, node=cfg_dict) - return env_cfg, agent_cfg + if "default" not in options: + raise ValueError("preset() requires a 'default' keyword argument.") + annotations = {k: type(v) if v is not None else object for k, v in options.items()} + ns = {"__annotations__": annotations, **options} + cls = configclass(type("_Preset", (PresetCfg,), ns)) + return cls() -def hydra_task_config(task_name: str, agent_cfg_entry_point: str) -> Callable: - """Decorator to handle the Hydra configuration for a task. +def _preset_fields(preset_obj) -> dict: + """Extract all alternatives from a :class:`PresetCfg`, class attrs over instance. + + Class-level values take priority because robot-specific modules + (e.g. ``joint_pos_env_cfg.py``) reassign fields on the class after + instances are already created. + """ + cls = type(preset_obj) + d = {} + for fn in preset_obj.__dataclass_fields__: + cls_val = getattr(cls, fn, None) + d[fn] = cls_val if cls_val is not None else getattr(preset_obj, fn) + for attr in vars(cls): + if attr.startswith("_") or attr in d or callable(getattr(cls, attr)): + continue + d[attr] = getattr(cls, attr) + return d + + +def _walk_cfg(cfg, path: str, on_preset: Callable) -> None: + """Depth-first walk of a config tree, calling *on_preset(parent, key, obj, path)* + for every :class:`PresetCfg` node. Recurses through dataclass attrs, dicts, and + nested dicts transparently.""" + items = ( + cfg.items() + if isinstance(cfg, dict) + else ((n, v) for n in dir(cfg) if not n.startswith("_") for v in [getattr(cfg, n, None)] if v is not None) + ) + for key, val in items: + child_path = f"{path}.{key}" if path else key + if isinstance(val, PresetCfg): + on_preset(cfg, key, val, child_path) + elif hasattr(val, "__dataclass_fields__") or isinstance(val, dict): + _walk_cfg(val, child_path, on_preset) + + +def collect_presets(cfg, path: str = "") -> dict: + """Recursively discover :class:`PresetCfg` nodes in the config tree. - This decorator registers the task to Hydra and updates the environment and agent configurations from Hydra parsed - command line arguments. + Walks dataclass fields and dict values at any nesting depth. Args: - task_name: The name of the task. - agent_cfg_entry_point: The entry point key to resolve the agent's configuration file. + cfg: A configclass instance to walk. + path: Current path prefix (used during recursion). Returns: - The decorated function with the envrionment's and agent's configurations updated from command line arguments. + Dict mapping dotted paths to preset dicts, e.g.: + ``{"backend": {"default": PhysxCfg(), "newton_mjwarp": NewtonCfg()}}`` + """ + result = {} + + def _record(preset_obj, preset_path): + fields = _preset_fields(preset_obj) + result[preset_path] = fields + for alt in fields.values(): + if hasattr(alt, "__dataclass_fields__"): + result.update(collect_presets(alt, preset_path)) + elif isinstance(alt, dict): + for v in alt.values(): + if hasattr(v, "__dataclass_fields__"): + result.update(collect_presets(v, preset_path)) + + if isinstance(cfg, PresetCfg): + _record(cfg, path) + return result + + _walk_cfg(cfg, path, lambda _p, _k, obj, cp: _record(obj, cp)) + return result + + +# ============================================================================ +# Preset resolution +# ============================================================================ + + +def _pick_alternative(preset_obj: PresetCfg, selected: set[str], path: str = ""): + """Choose the best alternative from a PresetCfg. + + Priority: first match in ``selected``, then ``default`` (preferring + class-level over instance-level). + + Raises: + ValueError: If no matching name and no ``default`` field exists. + """ + fields = _preset_fields(preset_obj) + field_names = set(fields) + for name in selected: + name = _normalize_preset_name(name, field_names) + if name in fields: + return fields[name] + if "default" in fields: + return fields["default"] + raise ValueError( + f"PresetCfg {type(preset_obj).__name__} at '{path}' has no 'default' field " + f"and none of the selected presets {selected} match its fields {set(fields.keys())}." + ) + + +def resolve_presets(cfg, selected: set[str] = frozenset()): + """Replace every :class:`PresetCfg` in the tree with the best alternative. + + For each ``PresetCfg`` found during a depth-first walk: + + 1. Pick the first name from *selected* that exists as a field on the + preset, otherwise fall back to ``default``. + 2. Replace the preset in its parent (dict key or dataclass attr). + 3. Continue walking the replacement (which may contain more presets). + + Args: + cfg: A configclass, dict, or PresetCfg to resolve in-place. + selected: Set of preset names chosen by the user (e.g. from CLI + ``presets=peg_insert_4mm,eval``). + + Returns: + The resolved ``cfg`` (possibly a different object if the root itself + was a PresetCfg). + """ + if isinstance(cfg, PresetCfg): + seen: set[int] = {id(cfg)} + replacement = _pick_alternative(cfg, selected, path="") + while isinstance(replacement, PresetCfg): + if id(replacement) in seen: + raise ValueError( + f"Cyclic PresetCfg chain detected at '': {type(replacement).__name__} was already visited." + ) + seen.add(id(replacement)) + replacement = _pick_alternative(replacement, selected, path="") + return resolve_presets(replacement, selected) + + def _resolve(parent, key, preset_obj, _path): + seen: set[int] = {id(preset_obj)} + val = _pick_alternative(preset_obj, selected, path=_path) + while isinstance(val, PresetCfg): + if id(val) in seen: + raise ValueError( + f"Cyclic PresetCfg chain detected at '{_path}': {type(val).__name__} was already visited." + ) + seen.add(id(val)) + val = _pick_alternative(val, selected, path=_path) + if isinstance(parent, dict): + parent[key] = val + else: + setattr(parent, key, val) + if hasattr(val, "__dataclass_fields__") or isinstance(val, dict): + _walk_cfg(val, _path, _resolve) + + _walk_cfg(cfg, "", _resolve) + return cfg + + +# ============================================================================ +# CLI / Hydra integration +# ============================================================================ + + +def _run_hydra(task, env_cfg, agent_cfg, presets, callback): + """Shared Hydra entry point for :func:`resolve_task_config` and :func:`hydra_task_config`.""" + global_presets, preset_sel, preset_scalar, global_scalar = parse_overrides(sys.argv[1:], presets) + original_argv, sys.argv = sys.argv, [sys.argv[0]] + global_scalar + + @hydra.main(config_path=None, config_name=task, version_base="1.3") + def hydra_main(hydra_cfg, env_cfg=env_cfg, agent_cfg=agent_cfg): + hydra_cfg = replace_strings_with_slices(OmegaConf.to_container(hydra_cfg, resolve=True)) + env_cfg, agent_cfg = apply_overrides( + env_cfg, agent_cfg, hydra_cfg, global_presets, preset_sel, preset_scalar, presets + ) + env_cfg.from_dict(hydra_cfg["env"]) + env_cfg = replace_strings_with_env_cfg_spaces(env_cfg) + if isinstance(agent_cfg, dict) or agent_cfg is None: + agent_cfg = hydra_cfg["agent"] + else: + agent_cfg.from_dict(hydra_cfg["agent"]) + callback(env_cfg, agent_cfg) + + try: + hydra_main() + finally: + sys.argv = original_argv + + +def resolve_task_config(task_name: str, agent_cfg_entry_point: str): + """Resolve env and agent configs with Hydra overrides, presets, and scalars fully applied. + + Safe to call before Kit is launched -- callable config values are stored as + :class:`~isaaclab.utils.string.ResolvableString` and resolved lazily on + first use, so no implementation modules are imported eagerly. + + Args: + task_name: Task name (e.g., "Isaac-Velocity-Flat-Anymal-C-v0"). + agent_cfg_entry_point: Agent config entry point key (e.g., "rsl_rl_cfg_entry_point"). + + Returns: + Tuple of (env_cfg, agent_cfg) fully resolved. + """ + task = task_name.split(":")[-1] + env_cfg, agent_cfg, presets = register_task(task, agent_cfg_entry_point) + resolved = {} + _run_hydra(task, env_cfg, agent_cfg, presets, lambda e, a: resolved.update(env_cfg=e, agent_cfg=a)) + return resolved["env_cfg"], resolved["agent_cfg"] + + +def hydra_task_config(task_name: str, agent_cfg_entry_point: str) -> Callable: + """Decorator for Hydra config with REPLACE-only preset semantics. + + Args: + task_name: Task name (e.g., "Isaac-Reach-Franka-v0") + agent_cfg_entry_point: Agent config entry point key + + Returns: + Decorated function receiving ``(env_cfg, agent_cfg, *args, **kwargs)`` """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - # register the task to Hydra - env_cfg, agent_cfg = register_task_to_hydra(task_name.split(":")[-1], agent_cfg_entry_point) - - # define the new Hydra main function - @hydra.main(config_path=None, config_name=task_name.split(":")[-1], version_base="1.3") - def hydra_main(hydra_env_cfg: DictConfig, env_cfg=env_cfg, agent_cfg=agent_cfg): - # convert to a native dictionary - hydra_env_cfg = OmegaConf.to_container(hydra_env_cfg, resolve=True) - # replace string with slices because OmegaConf does not support slices - hydra_env_cfg = replace_strings_with_slices(hydra_env_cfg) - # update the configs with the Hydra command line arguments - env_cfg.from_dict(hydra_env_cfg["env"]) - # replace strings that represent gymnasium spaces because OmegaConf does not support them. - # this must be done after converting the env configs from dictionary to avoid internal reinterpretations - env_cfg = replace_strings_with_env_cfg_spaces(env_cfg) - # get agent configs - if isinstance(agent_cfg, dict) or agent_cfg is None: - agent_cfg = hydra_env_cfg["agent"] - else: - agent_cfg.from_dict(hydra_env_cfg["agent"]) - # call the original function - func(env_cfg, agent_cfg, *args, **kwargs) - - # call the new Hydra main function - hydra_main() + task = task_name.split(":")[-1] + env_cfg, agent_cfg, presets = register_task(task, agent_cfg_entry_point) + _run_hydra(task, env_cfg, agent_cfg, presets, lambda e, a: func(e, a, *args, **kwargs)) return wrapper return decorator + + +def _format_unknown_presets_error(unknown: set[str], name_to_paths: dict[str, list[str]], max_paths: int = 5) -> str: + """Build a readable error message grouping presets by identical path fingerprints. + + When an unknown name matches a deprecated alias (e.g. ``newton``), the + message explicitly calls out the rename so users updating from older + tutorials or scripts get an actionable hint instead of a bare "unknown". + """ + fingerprint_to_names: dict[tuple[str, ...], list[str]] = {} + for name, paths in name_to_paths.items(): + key = tuple(sorted(paths)) + fingerprint_to_names.setdefault(key, []).append(name) + + lines = [f"Unknown preset(s): {', '.join(sorted(unknown))}"] + deprecated_hits = sorted(name for name in unknown if name in _LEGACY_PRESET_ALIASES) + for legacy in deprecated_hits: + replacement = _LEGACY_PRESET_ALIASES[legacy] + lines.append(f" '{legacy}' was renamed to '{replacement}'; this task does not declare '{replacement}' either.") + lines += [ + "", + "Available presets (grouped by affected paths):", + "", + ] + for paths_tuple in sorted(fingerprint_to_names, key=lambda k: fingerprint_to_names[k][0]): + names = sorted(fingerprint_to_names[paths_tuple]) + if len(names) <= 30: + lines.append(f" {', '.join(names)}") + else: + lines.append(f" {', '.join(names[:25])}, ... ({len(names)} total)") + shown = list(paths_tuple[:max_paths]) + for p in shown: + lines.append(f" -> {p}") + remaining = len(paths_tuple) - len(shown) + if remaining > 0: + lines.append(f" ... ({remaining} more)") + lines.append("") + return "\n".join(lines) + + +def register_task(task_name: str, agent_entry: str) -> tuple: + """Load configs, collect presets recursively, register base config to Hydra. + + Presets are collected from nested configclasses and stored separately - + NOT registered as Hydra groups to avoid Hydra's merge behavior. + + Returns: + (env_cfg, agent_cfg, presets) where presets = + {"env": {"path": {"name": cfg}}, "agent": {...}} + """ + from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry + + env_cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") + agent_cfg = load_cfg_from_registry(task_name, agent_entry) if agent_entry else None + + # Collect presets before resolution (needed for path-based overrides) + presets = { + "env": collect_presets(env_cfg), + "agent": collect_presets(agent_cfg) if agent_cfg else {}, + } + + known_names = _known_preset_names(presets) + selected = { + _normalize_preset_name(v.strip(), known_names) + for arg in sys.argv[1:] + if "=" in arg + for key, val in [arg.split("=", 1)] + if key.lstrip("-") == "presets" + for v in val.split(",") + if v.strip() + } + + if selected: + name_to_paths: dict[str, list[str]] = {} + for sec, sec_presets in presets.items(): + for path, fields in sec_presets.items(): + full = f"{sec}.{path}" if path else sec + for name in fields: + name_to_paths.setdefault(name, []).append(full) + unknown = selected - set(name_to_paths) + if unknown: + display = {n: p for n, p in name_to_paths.items() if n != "default"} + raise ValueError(_format_unknown_presets_error(unknown, display)) + + env_cfg = resolve_presets(env_cfg, selected) + if agent_cfg is not None: + agent_cfg = resolve_presets(agent_cfg, selected) + + # Also resolve presets inside collected alternatives so that apply_overrides + # never re-introduces unresolved PresetCfg objects when applying a selection. + for section_presets in presets.values(): + for path_presets in section_presets.values(): + for name, alt in path_presets.items(): + resolve_presets(alt, selected) + + # Convert to dict for Hydra (handle gym spaces and slices) + env_cfg = replace_env_cfg_spaces_with_strings(env_cfg) + agent_dict = agent_cfg.to_dict() if agent_cfg is not None and hasattr(agent_cfg, "to_dict") else agent_cfg + env_dict = env_cfg.to_dict() # type: ignore[union-attr] + cfg_dict = replace_slices_with_strings({"env": env_dict, "agent": agent_dict}) + + # Register plain config (no groups) - Hydra only handles global scalars + ConfigStore.instance().store(name=task_name, node=OmegaConf.create(cfg_dict)) + return env_cfg, agent_cfg, presets + + +def parse_overrides(args: list[str], presets: dict) -> tuple: + """Categorize command line args by type. + + Args: + args: Command line args (without script name) + presets: {"env": {"path": {"name": cfg}}, "agent": {...}} + + Returns: + (global_presets, preset_sel, preset_scalar, global_scalar) where: + - global_presets: [name, ...] - apply to all matching configs + - preset_sel: [(section, path, name), ...] - REPLACE selections + - preset_scalar: [(full_path, value), ...] - scalars in preset paths + - global_scalar: [arg, ...] - pass to Hydra + """ + preset_paths = {f"{s}.{p}" if p else s for s, v in presets.items() for p in v} + global_presets, preset_sel, preset_scalar, global_scalar = [], [], [], [] + + for arg in args: + if "=" not in arg: + global_scalar.append(arg) + continue + key, val = arg.split("=", 1) + if key == "presets": + known_names = _known_preset_names(presets) + global_presets.extend(_normalize_preset_name(v.strip(), known_names) for v in val.split(",") if v.strip()) + elif key in preset_paths: + sec, path = key.split(".", 1) if "." in key else (key, "") + known_names = set(presets[sec][path]) + preset_sel.append((sec, path, _normalize_preset_name(val, known_names))) + elif any(key.startswith(pp + ".") for pp in preset_paths): + preset_scalar.append((key, val)) + else: + global_scalar.append(arg) + + preset_sel.sort(key=lambda x: x[1].count(".")) + return global_presets, preset_sel, preset_scalar, global_scalar + + +def apply_overrides( + env_cfg, + agent_cfg, + hydra_cfg: dict, + global_presets: list, + preset_sel: list, + preset_scalar: list, + presets: dict, +): + """Apply preset selections and scalar overrides with REPLACE semantics. + + Global presets are already applied by :func:`resolve_presets` in + :func:`register_task`. This function handles: + + 1. Path-based selections (``env.backend=newton_mjwarp``) + 2. Scalar overrides within preset paths (``env.backend.dt=0.001``) + + Returns: + (env_cfg, agent_cfg) -- possibly replaced if root-level PresetCfg was resolved. + + Raises: + ValueError: If multiple global presets conflict on the same path. + """ + cfgs = {"env": env_cfg, "agent": agent_cfg} + + def _path_reachable(sec: str, path: str) -> bool: + if not path: + return cfgs[sec] is not None + obj = cfgs[sec] + for part in path.split("."): + try: + obj = obj[part] if isinstance(obj, dict) else getattr(obj, part) + except (AttributeError, TypeError, KeyError): + return False + if obj is None: + return False + return True + + # --- Phase 1: path-based selections + global broadcast for reachable paths + resolved: dict[str, tuple[str, str, str]] = {} + for sec, path, name in preset_sel: + if path not in presets.get(sec, {}): + raise ValueError(f"Unknown preset group: {sec}.{path}") + name = _normalize_preset_name(name, set(presets[sec][path])) + if name not in presets[sec][path]: + avail = list(presets[sec][path].keys()) + hint = "" + if name in _LEGACY_PRESET_ALIASES: + replacement = _LEGACY_PRESET_ALIASES[name] + hint = f" '{name}' was renamed to '{replacement}'; this path does not declare '{replacement}' either." + raise ValueError(f"Unknown preset '{name}' for {sec}.{path}. Available: {avail}.{hint}") + full_path = f"{sec}.{path}" if path else sec + resolved[full_path] = (sec, path, name) + + applied_by: dict[str, str] = {} + known_names = _known_preset_names(presets) + for name in global_presets: + name = _normalize_preset_name(name, known_names) + for sec in ("env", "agent"): + for path, path_presets in presets.get(sec, {}).items(): + if name in path_presets: + full_path = f"{sec}.{path}" if path else sec + if full_path in applied_by: + prev_name = applied_by[full_path] + prev_val = path_presets[prev_name] + cur_val = path_presets[name] + if prev_val is not cur_val and prev_val != cur_val: + raise ValueError( + f"Conflicting global presets: '{prev_name}' and '{name}' " + f"both define preset for '{full_path}'" + ) + else: + applied_by[full_path] = name + resolved.setdefault(full_path, (sec, path, name)) + + for sec in ("env", "agent"): + for path, path_presets in presets.get(sec, {}).items(): + if "default" in path_presets: + full_path = f"{sec}.{path}" if path else sec + resolved.setdefault(full_path, (sec, path, "default")) + + # --- Phase 2: apply in depth order, pruning unreachable children + for full_path in sorted(resolved, key=lambda fp: fp.count(".")): + sec, path, name = resolved[full_path] + if cfgs[sec] is not None and _path_reachable(sec, path): + node = presets[sec][path][name] + node_dict = ( + node.to_dict() if hasattr(node, "to_dict") else dict(node) if isinstance(node, Mapping) else node + ) + if not path: + cfgs[sec], hydra_cfg[sec] = node, node_dict + else: + _setattr(cfgs[sec], path, node) + _setattr(hydra_cfg, f"{sec}.{path}", node_dict) + + # --- Phase 3: scalar overrides within preset paths + for full_path, val_str in preset_scalar: + sec = full_path.split(".", 1)[0] + if sec not in cfgs: + continue + path = full_path[len(sec) + 1 :] + if cfgs[sec] is not None: + val = _parse_val(val_str) + _setattr(cfgs[sec], path, val) + _setattr(hydra_cfg, full_path, val) + + return cfgs["env"], cfgs["agent"] + + +def _setattr(obj, path: str, val): + """Set nested attribute/key (e.g., "actions.arm_action.scale").""" + *parts, leaf = path.split(".") + for p in parts: + obj = obj[p] if isinstance(obj, Mapping) else getattr(obj, p) + if isinstance(obj, dict): + obj[leaf] = val + else: + setattr(obj, leaf, val) + + +def _parse_val(s: str): + """Parse string to Python value (bool, None, int, float, or str).""" + if s.lower() in _LITERAL_MAP: + return _LITERAL_MAP[s.lower()] + try: + return float(s) if "." in s else int(s) + except ValueError: + return s[1:-1] if len(s) >= 2 and s[0] in "\"'" and s[-1] in "\"'" else s diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/importer.py b/source/isaaclab_tasks/isaaclab_tasks/utils/importer.py index ddbab7ede412..421f33ee2863 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/utils/importer.py +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/importer.py @@ -15,16 +15,11 @@ def import_packages(package_name: str, blacklist_pkgs: list[str] | None = None): """Import all sub-packages in a package recursively. - It is easier to use this function to import all sub-packages in a package recursively - than to manually import each sub-package. - - It replaces the need of the following code snippet on the top of each package's ``__init__.py`` file: - - .. code-block:: python - - import .locomotion.velocity - import .manipulation.reach - import .manipulation.lift + Only **packages** (directories with ``__init__.py``) are imported — plain + ``.py`` modules (e.g. ``env_cfg.py``, ``env.py``) are skipped. This is + sufficient because ``gym.register()`` calls live exclusively in + ``__init__.py`` files, and avoids eagerly importing every config module + at startup. Args: package_name: The package name. @@ -76,21 +71,26 @@ def seen(p: str, m: dict[str, bool] = {}) -> bool: if any([black_pkg_name in info.name for black_pkg_name in blacklist_pkgs]): continue - # yield the module info + # Only import packages (directories with __init__.py), not plain .py + # modules. The walk exists to trigger gym.register() calls which live + # exclusively in __init__.py files. Skipping bare modules avoids + # eagerly importing every env_cfg / env / agent config at startup. + if not info.ispkg: + continue + yield info - if info.ispkg: - try: - __import__(info.name) - except Exception: - if onerror is not None: - onerror(info.name) - else: - raise + try: + __import__(info.name) + except Exception: + if onerror is not None: + onerror(info.name) else: - path: list = getattr(sys.modules[info.name], "__path__", []) + raise + else: + path: list = getattr(sys.modules[info.name], "__path__", []) - # don't traverse path items we've seen before - path = [p for p in path if not seen(p)] + # don't traverse path items we've seen before + path = [p for p in path if not seen(p)] - yield from _walk_packages(path, info.name + ".", onerror, blacklist_pkgs) + yield from _walk_packages(path, info.name + ".", onerror, blacklist_pkgs) diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/parse_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/utils/parse_cfg.py index 0002c5d58d9a..e5e71fa06f8c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/utils/parse_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/parse_cfg.py @@ -5,16 +5,22 @@ """Sub-module with utilities for parsing and loading configurations.""" +from __future__ import annotations + import collections import importlib import inspect import os import re +from typing import TYPE_CHECKING import gymnasium as gym import yaml -from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg +from isaaclab_tasks.utils.hydra import resolve_presets + +if TYPE_CHECKING: + from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg def load_cfg_from_registry(task_name: str, entry_point_key: str) -> dict | object: @@ -145,6 +151,13 @@ def parse_env_cfg( if isinstance(cfg, dict): raise RuntimeError(f"Configuration for the task: '{task_name}' is not a class. Please provide a class.") + # Resolve any PresetCfg wrappers to their default preset so the config + # is usable without a Hydra CLI override (e.g. in tests). + # Must happen BEFORE attribute overrides, otherwise overrides on PresetCfg wrapper + # fields (e.g. cfg.scene when scene is a PresetCfg) get discarded when the wrapper + # is replaced by its .default. + cfg = resolve_presets(cfg) + # simulation device cfg.sim.device = device # disable fabric to read/write through USD diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py b/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py new file mode 100644 index 000000000000..f6fac9cb9aa6 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py @@ -0,0 +1,20 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.renderers import NewtonWarpRendererCfg +from isaaclab_ov.renderers import OVRTXRendererCfg +from isaaclab_physx.renderers import IsaacRtxRendererCfg + +from isaaclab.utils import configclass + +from isaaclab_tasks.utils import PresetCfg + + +@configclass +class MultiBackendRendererCfg(PresetCfg): + default: IsaacRtxRendererCfg = IsaacRtxRendererCfg() + newton_renderer: NewtonWarpRendererCfg = NewtonWarpRendererCfg() + ovrtx_renderer: OVRTXRendererCfg = OVRTXRendererCfg() + isaacsim_rtx_renderer = default diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/sim_launcher.py b/source/isaaclab_tasks/isaaclab_tasks/utils/sim_launcher.py new file mode 100644 index 000000000000..3030bda1c4b7 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/sim_launcher.py @@ -0,0 +1,359 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for detecting and launching the appropriate simulation backend.""" + +from __future__ import annotations + +import argparse +import logging +from collections.abc import Callable, Generator +from contextlib import contextmanager +from typing import Any + +from isaaclab.physics.physics_manager_cfg import PhysicsCfg +from isaaclab.renderers.renderer_cfg import RendererCfg +from isaaclab.sensors.camera.camera_cfg import CameraCfg + +logger = logging.getLogger(__name__) + + +def add_launcher_args(parser: argparse.ArgumentParser) -> None: + """Add simulation-launcher CLI arguments (``--headless``, ``--device``, etc.) to *parser*. + + Delegates to :meth:`AppLauncher.add_app_launcher_args` so that user scripts + do not need to import ``AppLauncher`` directly. + """ + from isaaclab.app import AppLauncher + + AppLauncher.add_app_launcher_args(parser) + + +def _scan_config(cfg, predicates: list[Callable[[Any], bool]]) -> list[bool]: + """Recursively walk *cfg* and evaluate each predicate on every node. + + Returns a list of booleans, one per predicate, where ``True`` means at + least one node in the config tree satisfied that predicate. Once a + predicate is satisfied it is no longer evaluated (short-circuit). + """ + results = [False] * len(predicates) + visited: set[int] = set() + + def _visit(node): + if all(results): + return + node_id = id(node) + if node_id in visited: + return + visited.add(node_id) + + for i, pred in enumerate(predicates): + if not results[i] and pred(node): + results[i] = True + + try: + children = vars(node) + except TypeError: + return + for child in children.values(): + if child is None or isinstance(child, (int, float, str, bool)): + continue + _visit(child) + + _visit(cfg) + return results + + +def _is_kitless_physics(node) -> bool: + """True when the node is a kitless physics config (Newton or OvPhysX).""" + return isinstance(node, PhysicsCfg) and type(node).__name__ in ("NewtonCfg", "OvPhysxCfg") + + +def _get_visualizer_types(launcher_args: argparse.Namespace | dict | None) -> set[str]: + """Extract requested visualizer type names from launcher args.""" + if isinstance(launcher_args, argparse.Namespace): + visualizers = getattr(launcher_args, "visualizer", None) + elif isinstance(launcher_args, dict): + visualizers = launcher_args.get("visualizer") + else: + return set() + if not visualizers: + return set() + if isinstance(visualizers, str): + # CLI now uses comma-delimited syntax: --visualizer kit,newton,rerun + visualizers = [token.strip() for token in visualizers.split(",")] + return {str(v).strip().lower() for v in visualizers if str(v).strip()} + + +def _compute_visualizer_intent(env_cfg) -> dict[str, bool]: + """Compute upstream visualizer intent from ``env_cfg.sim.visualizer_cfgs``.""" + sim_cfg = getattr(env_cfg, "sim", None) + visualizer_cfgs = getattr(sim_cfg, "visualizer_cfgs", None) + if visualizer_cfgs is None: + return {"has_any_visualizers": False, "has_kit_visualizer": False} + + cfg_list = visualizer_cfgs if isinstance(visualizer_cfgs, list) else [visualizer_cfgs] + cfg_list = [cfg for cfg in cfg_list if cfg is not None] + has_any = len(cfg_list) > 0 + has_kit = any(getattr(cfg, "visualizer_type", None) == "kit" for cfg in cfg_list) + return {"has_any_visualizers": has_any, "has_kit_visualizer": has_kit} + + +def _set_visualizer_intent_on_launcher_args( + launcher_args: argparse.Namespace | dict | None, visualizer_intent: dict[str, bool] +) -> None: + """Attach visualizer intent to launcher args when possible.""" + if launcher_args is None: + return + if isinstance(launcher_args, argparse.Namespace): + setattr(launcher_args, "visualizer_intent", visualizer_intent) + elif isinstance(launcher_args, dict): + launcher_args["visualizer_intent"] = visualizer_intent + + +def _is_kit_camera(node) -> bool: + """True for a CameraCfg whose renderer requires Kit (not Newton).""" + if not isinstance(node, CameraCfg): + return False + renderer_cfg = getattr(node, "renderer_cfg", None) + if renderer_cfg is None: + return True + if isinstance(renderer_cfg, RendererCfg): + return renderer_cfg.renderer_type in ("default", "isaac_rtx") + # PresetCfg renderers (e.g. MultiBackendRendererCfg) are resolved during + # environment construction when the physics backend is known (see + # resolve_task_config and preset resolution in presets.py). At this + # stage we assume they will match the physics backend, so not + # necessarily Kit. + from isaaclab_tasks.utils import PresetCfg + + if isinstance(renderer_cfg, PresetCfg): + return False + return True + + +def compute_kit_requirements( + env_cfg, + launcher_args: argparse.Namespace | dict | None = None, +) -> tuple[bool, bool, set[str]]: + """Compute whether Kit is needed and related flags. + + Uses the same logic as :func:`launch_simulation` to decide whether Isaac Sim + Kit must be launched. + + Args: + env_cfg: Resolved environment config (e.g. from :func:`resolve_task_config`). + launcher_args: Optional CLI args; if ``--visualizer`` includes ``kit``, needs_kit is True. + + Returns: + (needs_kit, has_kit_cameras, visualizer_types) + """ + is_kitless, has_kit_cameras = _scan_config(env_cfg, [_is_kitless_physics, _is_kit_camera]) + needs_kit = has_kit_cameras or not is_kitless + visualizer_types = _get_visualizer_types(launcher_args) + if "kit" in visualizer_types: + needs_kit = True + return needs_kit, has_kit_cameras, visualizer_types + + +def _resolve_distributed_device( + env_cfg, + launcher_args: argparse.Namespace | dict | None, +) -> None: + """Set ``env_cfg.sim.device`` for distributed training. + + When ``--distributed`` is active and CUDA_VISIBLE_DEVICES restricts each + process to a single GPU, ``local_rank`` may exceed the visible device count. + This helper applies the same fallback logic used by :class:`AppLauncher` so + that **training scripts do not need their own device-resolution code**. + + For the Kit path, :func:`launch_simulation` additionally propagates + ``AppLauncher.device`` after creation; this function handles the early + (pre-AppLauncher) and kitless cases. + """ + distributed = False + if isinstance(launcher_args, argparse.Namespace): + distributed = getattr(launcher_args, "distributed", False) + elif isinstance(launcher_args, dict): + distributed = launcher_args.get("distributed", False) + + if not distributed: + return + + import os + + import torch + + local_rank = int(os.getenv("LOCAL_RANK", "0")) + int(os.getenv("JAX_LOCAL_RANK", "0")) + num_visible_gpus = torch.cuda.device_count() + + # Compare local_rank against device_count (not WORLD_SIZE) so that + # multi-node setups work correctly: WORLD_SIZE is global across all + # nodes, but device_count is local. + if local_rank < num_visible_gpus: + device_str = f"cuda:{local_rank}" + else: + device_str = "cuda:0" + + sim_cfg = getattr(env_cfg, "sim", None) + if sim_cfg is not None: + sim_cfg.device = device_str + + # Set CUDA device early so physics backends that allocate on the + # "current" device during init get the correct GPU. For the Kit path, + # AppLauncher._resolve_device_settings will call set_device again with + # the same value, which is harmless. For the kitless Newton path, this + # is the only place it gets set. + torch.cuda.set_device(device_str) + + logger.info( + "Distributed device resolved to %s (local_rank=%d, visible_gpus=%d)", + device_str, + local_rank, + num_visible_gpus, + ) + + +@contextmanager +def launch_simulation( + env_cfg, + launcher_args: argparse.Namespace | dict | None = None, +) -> Generator[None, None, None]: + """Context manager that launches the appropriate simulation runtime for *env_cfg*. + + * Recursively scans the config tree to decide whether Isaac Sim Kit is needed. + * Auto-enables ``enable_cameras`` when the scene contains camera sensors + that use a Kit renderer (not Newton). + * For Kit-based backends, launches ``AppLauncher`` and calls ``app.close()`` on exit. + * For kitless backends (e.g. Newton with Newton Warp renderer only), this is a no-op. + * For Newton Physics + RTX Renderer (with Kit cameras): Kit is launched + so that RTX can run; Newton syncs its state to the USD stage each step for rendering. + + Example:: + + with launch_simulation(env_cfg, args_cli): + main() + """ + # When --visualizer kit is explicitly requested alongside an ovrtx preset, fail early. + # ovrtx and Kit ship the same RTX hydra libraries under conflicting USD namespaces; + # loading both in the same process causes a dynamic-linker crash. Use + # --visualizer newton instead, which is compatible with ovrtx presets. + early_visualizer_types = _get_visualizer_types(launcher_args) + if "kit" in early_visualizer_types: + has_ovrtx = _scan_config( + env_cfg, [lambda node: isinstance(node, RendererCfg) and getattr(node, "renderer_type", None) == "ovrtx"] + )[0] + if has_ovrtx: + raise ValueError( + "[launch_simulation] '--visualizer kit' is incompatible with 'ovrtx_renderer'. " + "Both Kit (Isaac Sim) and ovrtx ship conflicting RTX hydra libraries " + "(librtx.hydra.so, liblegacy.hydra.so) compiled against different USD namespaces, " + "which causes a dynamic-linker crash when loaded into the same process. " + "Use '--visualizer newton' instead, which is fully compatible with ovrtx presets." + ) + + needs_kit, has_kit_cameras, visualizer_types = compute_kit_requirements(env_cfg, launcher_args) + visualizer_intent = _compute_visualizer_intent(env_cfg) + _set_visualizer_intent_on_launcher_args(launcher_args, visualizer_intent) + + if needs_kit and has_kit_cameras: + if isinstance(launcher_args, argparse.Namespace): + if not getattr(launcher_args, "enable_cameras", False): + logger.info("Auto-enabling cameras: scene contains camera sensors with a Kit renderer.") + launcher_args.enable_cameras = True + elif isinstance(launcher_args, dict): + if not launcher_args.get("enable_cameras", False): + logger.info("Auto-enabling cameras: scene contains camera sensors with a Kit renderer.") + launcher_args["enable_cameras"] = True + + close_fn: Any = None + + # Resolve distributed device early, before AppLauncher or physics init. + _resolve_distributed_device(env_cfg, launcher_args) + + if needs_kit: + # check if Isaac Sim is installed + import importlib.util + + if importlib.util.find_spec("omni.kit") is None: + # Print a more obvious hint when a local _isaac_sim symlink + # exists but its env wasn't sourced (typical on Win11 + conda + # when activate.d hooks didn't fire, e.g. under `conda run`). + import os + import sys + + isaaclab_path = os.environ.get("ISAACLAB_PATH") + local_sim = os.path.join(isaaclab_path, "_isaac_sim") if isaaclab_path else None + extra_hint = "" + if local_sim and os.path.isdir(local_sim): + if sys.platform == "win32": + extra_hint = ( + f" Found a local Isaac Sim at {local_sim} but its environment is not active.\n" + f" Either run via `isaaclab.bat ...` (which now sources setup_conda_env.bat\n" + f" automatically), or in your current shell run:\n" + f' call "{local_sim}\\setup_conda_env.bat"\n' + ) + else: + extra_hint = ( + f" Found a local Isaac Sim at {local_sim} but its environment is not active.\n" + f" Either run via `./isaaclab.sh ...` (which now sources setup_conda_env.sh\n" + f" automatically), or in your current shell run:\n" + f' source "{local_sim}/setup_conda_env.sh"\n' + ) + + logger.error( + "\n[ERROR] Isaac Sim is not installed or not found on PYTHONPATH.\n" + "\n" + " This environment requires Isaac Sim and Omniverse Kit.\n" + " PhysX backend and Kit visualizer currently requires Isaac Sim.\n" + "\n" + f"{extra_hint}" + " To fix this, ensure Isaac Sim is installed and available in the current environment.\n" + "\n" + " See https://isaac-sim.github.io/IsaacLab/main/source/setup/installation for details.\n" + ) + raise SystemExit(1) + + # If the simulation app is not launched, we launch it. + from isaaclab.utils import has_kit + + if not has_kit(): + from isaaclab.app import AppLauncher + + app_launcher = AppLauncher(launcher_args) + # AppLauncher may refine the device choice (e.g. Kit-specific + # overrides), so propagate its final value to env_cfg. This + # intentionally overwrites the earlier value set by + # _resolve_distributed_device. + sim_cfg = getattr(env_cfg, "sim", None) + if sim_cfg is not None and hasattr(app_launcher, "device"): + sim_cfg.device = app_launcher.device + close_fn = app_launcher.app.close + elif visualizer_types: + # Newton path without Kit: AppLauncher is skipped, so manually store the visualizer + # selection in SettingsManager (works in standalone mode via plain dict) so that + # SimulationContext._get_cli_visualizer_types() can find it. + from isaaclab.app import AppLauncher + + disable_all = "none" in visualizer_types + if isinstance(launcher_args, argparse.Namespace): + AppLauncher.sync_visualizer_cli_settings_to_carb( + {**vars(launcher_args), "visualizer_explicit": True, "visualizer_disable_all": disable_all} + ) + elif isinstance(launcher_args, dict): + AppLauncher.sync_visualizer_cli_settings_to_carb( + {**launcher_args, "visualizer_explicit": True, "visualizer_disable_all": disable_all} + ) + + try: + yield + except Exception: + import traceback + + traceback.print_exc() + raise + finally: + if close_fn is not None: + close_fn() diff --git a/source/isaaclab_tasks/setup.py b/source/isaaclab_tasks/setup.py index 455c56689ce0..a719231ca986 100644 --- a/source/isaaclab_tasks/setup.py +++ b/source/isaaclab_tasks/setup.py @@ -18,13 +18,13 @@ # Minimum dependencies required prior to installation INSTALL_REQUIRES = [ # generic - "numpy<2", - "torch>=2.7", - "torchvision>=0.14.1", # ensure compatibility with torch 1.13.1 + "numpy>=2", + "torch>=2.10", + "torchvision>=0.25.0", # ensure compatibility with torch 2.10.0 "protobuf>=4.25.8,!=5.26.0", # basic logger "tensorboard", - "numba", + "numba>=0.63.1", ] PYTORCH_INDEX_URL = ["https://download.pytorch.org/whl/cu128"] @@ -39,17 +39,15 @@ description=EXTENSION_TOML_DATA["package"]["description"], keywords=EXTENSION_TOML_DATA["package"]["keywords"], include_package_data=True, - python_requires=">=3.10", + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", install_requires=INSTALL_REQUIRES, dependency_links=PYTORCH_INDEX_URL, packages=["isaaclab_tasks"], classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/source/isaaclab_tasks/test/benchmarking/configs.yaml b/source/isaaclab_tasks/test/benchmarking/configs.yaml index d5df21551b73..7a7a0aa49945 100644 --- a/source/isaaclab_tasks/test/benchmarking/configs.yaml +++ b/source/isaaclab_tasks/test/benchmarking/configs.yaml @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause + # mode for very simple functional testing without checking thresholds fast_test: rl_games:Isaac-Ant-v0: @@ -16,7 +17,7 @@ fast_test: # mode for capturing KPIs across all environments without checking thresholds full_test: Isaac-*: - max_iterations: 500 + max_iterations: 10 lower_thresholds: reward: -99999 episode_length: -99999 @@ -31,7 +32,7 @@ fast: reward: 45 episode_length: 900 upper_thresholds: - duration: 750 + duration: 1200 #750 skrl:Isaac-Cartpole-RGB-Camera-Direct-v0: max_iterations: 50 lower_thresholds: @@ -43,7 +44,7 @@ fast: max_iterations: 200 lower_thresholds: reward: 50 - episode_length: 750 + episode_length: 600 #700 upper_thresholds: duration: 500 rl_games:Isaac-Quadcopter-Direct-v0: @@ -52,7 +53,7 @@ fast: reward: 100 episode_length: 400 upper_thresholds: - duration: 250 + duration: 300 #250 skrl:Isaac-Shadow-Hand-Over-Direct-v0: max_iterations: 300 lower_thresholds: @@ -64,12 +65,34 @@ fast: max_iterations: 300 lower_thresholds: reward: 7 - episode_length: 900 + episode_length: 875 upper_thresholds: duration: 1800 + sb3:Isaac-Lift-Cube-Franka-v0: + max_iterations: 150 + lower_thresholds: + reward: 1.5 + episode_length: 150 + upper_thresholds: + duration: 2000 + rsl_rl:Isaac-Dexsuite-Kuka-Allegro-Lift-v0: + max_iterations: 300 # 200 + lower_thresholds: + reward: 0.4 # 0.7 + episode_length: 150 + upper_thresholds: + duration: 1400 #1200 + rsl_rl:Isaac-Reach-OpenArm-v0: + max_iterations: 200 + lower_thresholds: + reward: 0.15 + episode_length: 100 + upper_thresholds: + duration: 600 -# mode for weekly CI +# mode for nightly CI full: + # Locomotion tasks - basic Isaac-Ant-Direct-v0: max_iterations: 300 lower_thresholds: @@ -84,6 +107,21 @@ full: episode_length: 700 upper_thresholds: duration: 800 + Isaac-Humanoid-Direct-v0: + max_iterations: 300 + lower_thresholds: + reward: 2000 + episode_length: 600 + upper_thresholds: + duration: 1000 + Isaac-Humanoid-v0: + max_iterations: 1000 + lower_thresholds: + reward: 100 + episode_length: 600 + upper_thresholds: + duration: 2500 + # Classic control tasks Isaac-Cart-Double-Pendulum-Direct-v0: max_iterations: 300 lower_thresholds: @@ -91,27 +129,35 @@ full: episode_length: 150 upper_thresholds: duration: 500 - Isaac-Cartpole-Depth-Camera-Direct-v0: + Isaac-Cartpole-Direct-v0: max_iterations: 300 lower_thresholds: reward: 200 episode_length: 150 upper_thresholds: - duration: 3000 - Isaac-Cartpole-Depth-v0: + duration: 500 + Isaac-Cartpole-v0: + max_iterations: 1000 + lower_thresholds: + reward: 3 + episode_length: 150 + upper_thresholds: + duration: 1500 + # Vision-based cartpole tasks + Isaac-Cartpole-Depth-Camera-Direct-v0: max_iterations: 300 lower_thresholds: - reward: 1 + reward: 200 episode_length: 150 upper_thresholds: duration: 3000 - Isaac-Cartpole-Direct-v0: + Isaac-Cartpole-Depth-v0: max_iterations: 300 lower_thresholds: - reward: 200 + reward: 1 episode_length: 150 upper_thresholds: - duration: 500 + duration: 3000 Isaac-Cartpole-RGB-Camera-Direct-v0: max_iterations: 300 lower_thresholds: @@ -140,34 +186,7 @@ full: episode_length: 150 upper_thresholds: duration: 4000 - Isaac-Cartpole-v0: - max_iterations: 1000 - lower_thresholds: - reward: 3 - episode_length: 150 - upper_thresholds: - duration: 1500 - Isaac-Factory-GearMesh-Direct-v0: - max_iterations: 100 - lower_thresholds: - reward: 200 - episode_length: 250 - upper_thresholds: - duration: 6000 - Isaac-Factory-NutThread-Direct-v0: - max_iterations: 100 - lower_thresholds: - reward: 400 - episode_length: 400 - upper_thresholds: - duration: 5000 - Isaac-Factory-PegInsert-Direct-v0: - max_iterations: 100 - lower_thresholds: - reward: 125 - episode_length: 130 - upper_thresholds: - duration: 4000 + # Manipulation tasks - Franka Isaac-Franka-Cabinet-Direct-v0: max_iterations: 300 lower_thresholds: @@ -175,55 +194,64 @@ full: episode_length: 400 upper_thresholds: duration: 1000 - Isaac-Humanoid-Direct-v0: + Isaac-Lift-Cube-Franka-v0: max_iterations: 300 lower_thresholds: - reward: 2000 - episode_length: 600 + reward: 90 + episode_length: 100 upper_thresholds: duration: 1000 - Isaac-Humanoid-v0: + Isaac-Open-Drawer-Franka-v0: + max_iterations: 200 + lower_thresholds: + reward: 60 + episode_length: 150 + upper_thresholds: + duration: 3000 + Isaac-Reach-Franka-*: max_iterations: 1000 lower_thresholds: - reward: 100 - episode_length: 600 + reward: 0.25 + episode_length: 150 upper_thresholds: - duration: 2500 - Isaac-Lift-Cube-Franka-v0: - max_iterations: 300 + duration: 1500 + Isaac-Reach-Franka-OSC-v0: + max_iterations: 1000 lower_thresholds: - reward: 90 - episode_length: 100 + reward: 0.25 + episode_length: 150 upper_thresholds: - duration: 1000 - Isaac-Navigation-Flat-Anymal-C-v0: + duration: 1500 + # Manipulation tasks - OpenArm + Isaac-Lift-Cube-OpenArm-v0: max_iterations: 300 lower_thresholds: - reward: 0.5 - episode_length: 20 + reward: 80 + episode_length: 100 upper_thresholds: - duration: 2000 - Isaac-Open-Drawer-Franka-v0: + duration: 1200 + Isaac-Open-Drawer-OpenArm-v0: max_iterations: 200 lower_thresholds: - reward: 60 + reward: 50 episode_length: 150 upper_thresholds: duration: 3000 - Isaac-Quadcopter-Direct-v0: - max_iterations: 500 + Isaac-Reach-OpenArm-v0: + max_iterations: 1000 lower_thresholds: - reward: 90 - episode_length: 300 + reward: 0.25 + episode_length: 150 upper_thresholds: - duration: 500 - Isaac-Reach-Franka-*: + duration: 1500 + Isaac-Reach-OpenArm-Bi-v0: max_iterations: 1000 lower_thresholds: reward: 0.25 episode_length: 150 upper_thresholds: - duration: 1500 + duration: 1800 + # Manipulation tasks - UR10 Isaac-Reach-UR10-v0: max_iterations: 1000 lower_thresholds: @@ -231,6 +259,7 @@ full: episode_length: 150 upper_thresholds: duration: 1500 + # Dexterous manipulation - Allegro hand Isaac-Repose-Cube-Allegro-Direct-v0: max_iterations: 500 lower_thresholds: @@ -245,6 +274,7 @@ full: episode_length: 300 upper_thresholds: duration: 1500 + # Dexterous manipulation - Shadow hand Isaac-Repose-Cube-Shadow-Direct-v0: max_iterations: 3000 lower_thresholds: @@ -280,6 +310,87 @@ full: episode_length: 150 upper_thresholds: duration: 10000 + # Dexterous manipulation - Kuka-Allegro + Isaac-Dexsuite-Kuka-Allegro-Lift-v0: + max_iterations: 500 + lower_thresholds: + reward: 50 + episode_length: 200 + upper_thresholds: + duration: 2000 + Isaac-Dexsuite-Kuka-Allegro-Reorient-v0: + max_iterations: 500 + lower_thresholds: + reward: 50 + episode_length: 200 + upper_thresholds: + duration: 2000 + # Factory and forge tasks + Isaac-AutoMate-Assembly-Direct-v0: + max_iterations: 300 + lower_thresholds: + reward: 100 + episode_length: 200 + upper_thresholds: + duration: 1500 + Isaac-AutoMate-Disassembly-Direct-v0: + max_iterations: 300 + lower_thresholds: + reward: 100 + episode_length: 200 + upper_thresholds: + duration: 1500 + Isaac-Factory-GearMesh-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 200 + episode_length: 250 + upper_thresholds: + duration: 6000 + Isaac-Factory-NutThread-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 400 + episode_length: 400 + upper_thresholds: + duration: 5000 + Isaac-Factory-PegInsert-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 125 + episode_length: 130 + upper_thresholds: + duration: 4000 + Isaac-Forge-GearMesh-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 200 + episode_length: 250 + upper_thresholds: + duration: 6000 + Isaac-Forge-NutThread-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 400 + episode_length: 400 + upper_thresholds: + duration: 5000 + Isaac-Forge-PegInsert-Direct-v0: + max_iterations: 100 + lower_thresholds: + reward: 125 + episode_length: 130 + upper_thresholds: + duration: 4000 + # Aerial tasks + Isaac-Quadcopter-Direct-v0: + max_iterations: 500 + lower_thresholds: + reward: 90 + episode_length: 300 + upper_thresholds: + duration: 500 + # Quadruped and humanoid locomotion - flat terrain Isaac-Velocity-Flat-*: max_iterations: 1000 lower_thresholds: @@ -294,6 +405,35 @@ full: episode_length: 700 upper_thresholds: duration: 6000 + Isaac-Velocity-Flat-Digit-v0: + max_iterations: 1000 + lower_thresholds: + reward: 10 + episode_length: 700 + upper_thresholds: + duration: 3000 + Isaac-Velocity-Flat-G1-v0: + max_iterations: 1000 + lower_thresholds: + reward: 10 + episode_length: 700 + upper_thresholds: + duration: 3500 + Isaac-Velocity-Flat-Unitree-Go1-v0: + max_iterations: 1000 + lower_thresholds: + reward: 15 + episode_length: 700 + upper_thresholds: + duration: 3000 + Isaac-Velocity-Flat-Unitree-Go2-v0: + max_iterations: 1000 + lower_thresholds: + reward: 15 + episode_length: 700 + upper_thresholds: + duration: 3000 + # Quadruped and humanoid locomotion - rough terrain Isaac-Velocity-Rough-*: max_iterations: 1000 lower_thresholds: @@ -301,3 +441,39 @@ full: episode_length: 700 upper_thresholds: duration: 6000 + Isaac-Velocity-Rough-Digit-v0: + max_iterations: 1000 + lower_thresholds: + reward: 5 + episode_length: 700 + upper_thresholds: + duration: 6000 + Isaac-Velocity-Rough-G1-v0: + max_iterations: 1000 + lower_thresholds: + reward: 5 + episode_length: 700 + upper_thresholds: + duration: 6000 + Isaac-Velocity-Rough-Unitree-Go1-v0: + max_iterations: 1000 + lower_thresholds: + reward: 7 + episode_length: 700 + upper_thresholds: + duration: 6000 + Isaac-Velocity-Rough-Unitree-Go2-v0: + max_iterations: 1000 + lower_thresholds: + reward: 7 + episode_length: 700 + upper_thresholds: + duration: 6000 + # Locomotion-manipulation + Isaac-Tracking-LocoManip-Digit-v0: + max_iterations: 500 + lower_thresholds: + reward: 5 + episode_length: 500 + upper_thresholds: + duration: 3000 diff --git a/source/isaaclab_tasks/test/benchmarking/conftest.py b/source/isaaclab_tasks/test/benchmarking/conftest.py index 6a13b1898a52..51096c6de144 100644 --- a/source/isaaclab_tasks/test/benchmarking/conftest.py +++ b/source/isaaclab_tasks/test/benchmarking/conftest.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause import json +import os +from datetime import datetime import pytest @@ -12,6 +14,8 @@ # Global variable for storing KPI data GLOBAL_KPI_STORE = {} +# Global variable for storing the start timestamp +START_TIMESTAMP = None def pytest_addoption(parser): @@ -84,6 +88,21 @@ def kpi_store(): return GLOBAL_KPI_STORE # Using global variable for storing KPI data +# Shard parametrized test items across parallel CI jobs. +# Reads the same TEST_SHARD_INDEX / TEST_SHARD_COUNT env vars used by tools/conftest.py +# for file-level sharding, but applies them at the test-item level so a single +# parametrized file can be split across multiple runners. +# This is a pytest hook — pytest calls it automatically during test collection. +def pytest_collection_modifyitems(config, items): + shard_index = os.environ.get("TEST_SHARD_INDEX", "") + shard_count = os.environ.get("TEST_SHARD_COUNT", "") + if shard_index and shard_count: + shard_index = int(shard_index) + shard_count = int(shard_count) + items[:] = [item for i, item in enumerate(items) if i % shard_count == shard_index] + print(f"Shard {shard_index}/{shard_count}: selected {len(items)} test items") + + # This hook dynamically generates test cases based on the --workflows option. # For any test that includes a 'workflow' fixture, this will parametrize it # with all values passed via the command line option --workflows. @@ -93,11 +112,17 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("workflow", workflows) +# The pytest session start hook to capture the start timestamp +def pytest_sessionstart(session): + global START_TIMESTAMP + START_TIMESTAMP = datetime.now().isoformat() + + # The pytest session finish hook def pytest_sessionfinish(session, exitstatus): # Access global variable instead of fixture tag = session.config.getoption("--tag") - utils.process_kpi_data(GLOBAL_KPI_STORE, tag=tag) + utils.process_kpi_data(GLOBAL_KPI_STORE, tag=tag, timestamp=START_TIMESTAMP) print(json.dumps(GLOBAL_KPI_STORE, indent=2)) save_kpi_payload = session.config.getoption("--save_kpi_payload") if save_kpi_payload: diff --git a/source/isaaclab_tasks/test/benchmarking/env_benchmark_test_utils.py b/source/isaaclab_tasks/test/benchmarking/env_benchmark_test_utils.py index 52dbeadda3a0..89d32f844c54 100644 --- a/source/isaaclab_tasks/test/benchmarking/env_benchmark_test_utils.py +++ b/source/isaaclab_tasks/test/benchmarking/env_benchmark_test_utils.py @@ -8,13 +8,27 @@ import math import os import re -from datetime import datetime import numpy as np import yaml -from tensorboard.backend.event_processing import event_accumulator -import carb + +def _get_repo_path(): + """Get the repository root by searching for marker files. + + Searches upward from the current file for IsaacLab repository markers + (isaaclab.sh or setup.py) to robustly find the repo root. + """ + current = os.path.abspath(__file__) + # Look for isaaclab.sh or setup.py as markers (max 10 levels up) + for _ in range(10): + current = os.path.dirname(current) + if os.path.exists(os.path.join(current, "isaaclab.sh")): + return current + # Fallback marker + if os.path.exists(os.path.join(current, "setup.py")) and os.path.exists(os.path.join(current, "source")): + return current + raise RuntimeError("Could not find IsaacLab repository root. Expected to find 'isaaclab.sh' in parent directories.") def get_env_configs(configs_path): @@ -77,14 +91,17 @@ def evaluate_job(workflow, task, env_config, duration): if val is None or not isinstance(val, (int, float)) or (isinstance(val, float) and math.isnan(val)): continue val = round(val, 4) + threshold_val_rounded = round(threshold_val, 4) if uses_lower_threshold: - # print(f"{threshold_name}: {val} > {round(threshold_val, 4)}") if val < threshold_val: kpi_payload["success"] = False + if not kpi_payload["msg"]: + kpi_payload["msg"] = f"{threshold_name} below threshold: {val} < {threshold_val_rounded}" else: - # print(f"{threshold_name}: {val} < {round(threshold_val, 4)}") if val > threshold_val: kpi_payload["success"] = False + if not kpi_payload["msg"]: + kpi_payload["msg"] = f"{threshold_name} above threshold: {val} > {threshold_val_rounded}" kpi_payload[threshold_name] = val if threshold_name == "reward": normalized_reward = val / threshold_val @@ -99,8 +116,14 @@ def evaluate_job(workflow, task, env_config, duration): return kpi_payload -def process_kpi_data(kpi_payloads, tag=""): - """Combine and augment the KPI payloads.""" +def process_kpi_data(kpi_payloads, tag, timestamp): + """Combine and augment the KPI payloads. + + Args: + kpi_payloads: Dictionary of KPI payloads for each job. + tag: Tag for the KPI payload. + timestamp: Timestamp to use (ISO format). + """ # accumulate workflow outcomes totals = {} successes = {} @@ -127,7 +150,7 @@ def process_kpi_data(kpi_payloads, tag=""): "successes": successes, "failures_did_not_finish": failures_did_not_finish, "failures_did_not_pass_thresholds": failures_did_not_pass_thresholds, - "timestamp": datetime.now().isoformat(), + "timestamp": timestamp, "tag": tag, } @@ -137,7 +160,7 @@ def process_kpi_data(kpi_payloads, tag=""): def output_payloads(payloads): """Output the KPI payloads to a json file.""" # first grab all log files - repo_path = os.path.join(carb.tokens.get_tokens_interface().resolve("${app}"), "..") + repo_path = _get_repo_path() output_path = os.path.join(repo_path, "logs/kpi.json") # create directory if it doesn't exist if not os.path.exists(os.path.dirname(output_path)): @@ -150,13 +173,17 @@ def output_payloads(payloads): def _retrieve_logs(workflow, task): """Retrieve training logs.""" # first grab all log files - repo_path = os.path.join(carb.tokens.get_tokens_interface().resolve("${app}"), "..") + repo_path = _get_repo_path() + + # Defer Isaac Sim version import to avoid preloading USD before SimulationApp starts. from isaaclab.utils.version import get_isaac_sim_version if get_isaac_sim_version().major < 5: repo_path = os.path.join(repo_path, "..") if workflow == "rl_games": log_files_path = os.path.join(repo_path, f"logs/{workflow}/{task}/*/summaries/*") + elif workflow == "sb3": + log_files_path = os.path.join(repo_path, f"logs/{workflow}/{task}/*/*/*.tfevents.*") else: log_files_path = os.path.join(repo_path, f"logs/{workflow}/{task}/*/*.tfevents.*") log_files = glob.glob(log_files_path) @@ -167,11 +194,20 @@ def _retrieve_logs(workflow, task): latest_log_file = max(log_files, key=os.path.getctime) # parse tf file into a dictionary log_data = _parse_tf_logs(latest_log_file) + + # validate that log data contains entries + if not log_data: + print(f"Warning: Log file {latest_log_file} parsed but contains no data") + return None + return log_data def _parse_tf_logs(log): """Parse the tensorflow filepath into a dictionary.""" + # Defer tensorboard import to avoid side effects during pytest collection. + from tensorboard.backend.event_processing import event_accumulator + log_data = {} ea = event_accumulator.EventAccumulator(log) ea.Reload() @@ -190,7 +226,7 @@ def _extract_log_val(name, log_data, uses_lower_threshold, workflow): reward_tags = { "rl_games": "rewards/iter", "rsl_rl": "Train/mean_reward", - "sb3": None, # TODO: complete when sb3 is fixed + "sb3": "rollout/ep_rew_mean", "skrl": "Reward / Total reward (mean)", } tag = reward_tags.get(workflow) @@ -201,18 +237,17 @@ def _extract_log_val(name, log_data, uses_lower_threshold, workflow): episode_tags = { "rl_games": "episode_lengths/iter", "rsl_rl": "Train/mean_episode_length", - "sb3": None, # TODO: complete when sb3 is fixed + "sb3": "rollout/ep_len_mean", "skrl": "Episode / Total timesteps (mean)", } tag = episode_tags.get(workflow) if tag: return _extract_feature(log_data, tag, uses_lower_threshold) - - elif name == "training_time": - return {"rl_games": log_data["rewards/time"][-1][0], "rsl_rl": None, "sb3": None, "skrl": None}.get( - workflow - ) - except Exception: + except KeyError as e: + print(f"Warning: Metric '{name}' not found in logs for workflow '{workflow}': {e}") + return None + except Exception as e: + print(f"Error extracting '{name}' for workflow '{workflow}': {e}") return None raise ValueError(f"Env Config name {name} is not supported.") diff --git a/source/isaaclab_tasks/test/benchmarking/test_environments_training.py b/source/isaaclab_tasks/test/benchmarking/test_environments_training.py index 70fd562089a1..988cc0336789 100644 --- a/source/isaaclab_tasks/test/benchmarking/test_environments_training.py +++ b/source/isaaclab_tasks/test/benchmarking/test_environments_training.py @@ -20,8 +20,6 @@ import gymnasium as gym import pytest -import carb - from isaaclab_rl.utils.pretrained_checkpoint import WORKFLOW_EXPERIMENT_NAME_VARIABLE, WORKFLOW_TRAINER @@ -36,11 +34,6 @@ def setup_environment(): # Sort environments by name registered_task_specs.sort(key=lambda x: x.id) - # This flag is necessary to prevent a bug where the simulation gets stuck randomly when running the - # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) - return registered_task_specs @@ -65,17 +58,25 @@ def train_job(workflow, task, env_config, num_gpus): cmd.append("--distributed") # Add experiment name variable - cmd.append(f"{WORKFLOW_EXPERIMENT_NAME_VARIABLE[workflow]}={task}") + workflow_experiment_name_variable = WORKFLOW_EXPERIMENT_NAME_VARIABLE.get(workflow) + if workflow_experiment_name_variable: + cmd.append(f"{workflow_experiment_name_variable}={task}") print("Running : " + " ".join(cmd)) start_time = time.time() - subprocess.run(cmd) + result = subprocess.run(cmd, capture_output=True, text=True) duration = time.time() - start_time + if result.returncode != 0: + print(f"Training failed with exit code {result.returncode}") + print(f"STDERR: {result.stderr}") + # Still return duration so evaluate_job can report failure via logs + return duration +@pytest.mark.skip(reason="Temporarily disabled due to long running time.") @pytest.mark.parametrize("task_spec", setup_environment()) def test_train_environments(workflow, task_spec, config_path, mode, num_gpus, kpi_store): """Train environments provided in the config file, save KPIs, and evaluate against thresholds""" diff --git a/source/isaaclab_tasks/test/env_test_utils.py b/source/isaaclab_tasks/test/env_test_utils.py index b6f0383abee1..cde4db9a3693 100644 --- a/source/isaaclab_tasks/test/env_test_utils.py +++ b/source/isaaclab_tasks/test/env_test_utils.py @@ -5,26 +5,96 @@ """Shared test utilities for Isaac Lab environments.""" +import importlib import inspect import os +import sys import gymnasium as gym import pytest import torch -import carb -import omni.usd - +import isaaclab.sim as sim_utils +from isaaclab.app.settings_manager import get_settings_manager from isaaclab.envs.utils.spaces import sample_space +from isaaclab.sim import SimulationContext from isaaclab.utils.version import get_isaac_sim_version -from isaaclab_tasks.utils.parse_cfg import parse_env_cfg +from isaaclab_tasks.utils.hydra import apply_overrides, collect_presets +from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry, parse_env_cfg + +# Map of task IDs to the reason for marking the corresponding parametrized +# test cases as expected failures. Tests that consume :func:`setup_environment` +# automatically pick up these marks via :class:`pytest.param`. +XFAIL_TASKS: dict[str, str] = {} + + +def _is_teleop_env(task_spec) -> bool: + """Check if a task's environment config has teleop dependencies. + + Inspects the class hierarchy of the env config to check if any base + class module defines ``_TELEOP_AVAILABLE``, indicating the environment + uses isaacteleop / isaaclab_teleop. + """ + env_cfg_entry_point = task_spec.kwargs.get("env_cfg_entry_point") + if not isinstance(env_cfg_entry_point, str) or ":" not in env_cfg_entry_point: + return False + try: + mod_name, attr_name = env_cfg_entry_point.split(":") + mod = importlib.import_module(mod_name) + cfg_cls = getattr(mod, attr_name, None) + if cfg_cls is None: + return False + for cls in cfg_cls.__mro__: + cls_module = sys.modules.get(cls.__module__) + if cls_module is not None and hasattr(cls_module, "_TELEOP_AVAILABLE"): + return True + except (ImportError, AttributeError): + pass + return False + + +def _is_pickplace_stack_env(task_id: str) -> bool: + """Check if a task is a PickPlace or Stack environment based on its ID.""" + return any(keyword in task_id for keyword in ("Place", "Stack", "NutPour", "ExhaustPipe")) + + +def _has_physics_preset(raw_cfg, preset_name: str) -> bool: + """Check if a raw (unresolved) env config has a named physics preset. + + Must be called with the result of :func:`load_cfg_from_registry`, not + :func:`parse_env_cfg`, because the latter resolves all PresetCfg wrappers + to their default before returning. + + Args: + raw_cfg: Raw env config from :func:`load_cfg_from_registry`. + preset_name: Name of the preset to check for (e.g., 'newton_mjwarp'). + + Returns: + True if ``raw_cfg.sim.physics`` is a PresetCfg with the given preset field. + """ + if isinstance(raw_cfg, dict): + return False + # If the top-level cfg is itself a PresetCfg wrapper, unwrap to its default. + env_cfg = raw_cfg + if ( + hasattr(env_cfg, "__dataclass_fields__") + and hasattr(env_cfg, "default") + and not hasattr(type(env_cfg), "class_type") + ): + env_cfg = env_cfg.default + physics = getattr(getattr(env_cfg, "sim", None), "physics", None) + return physics is not None and hasattr(physics, preset_name) def setup_environment( include_play: bool = False, factory_envs: bool | None = None, multi_agent: bool | None = None, + teleop_envs: bool | None = None, + cartpole_showcase_envs: bool | None = None, + pickplace_stack_envs: bool | None = None, + newton_mjwarp_envs: bool | None = None, ) -> list[str]: """ Acquire all registered Isaac environment task IDs with optional filters. @@ -39,6 +109,22 @@ def setup_environment( - True: include only multi-agent environments - False: include only single-agent environments - None: include all environments regardless of agent type + teleop_envs: + - True: include only teleop environments (those requiring isaacteleop) + - False: exclude teleop environments + - None: include all environments regardless of teleop dependency + cartpole_showcase_envs: + - True: include only Cartpole Showcase environments + - False: exclude Cartpole Showcase environments + - None: include all environments regardless of showcase type + pickplace_stack_envs: + - True: include only PickPlace/Stack environments + - False: exclude PickPlace/Stack environments + - None: include all environments regardless of pick-place/stack type + newton_mjwarp_envs: + - True: include only environments that have an MJWarp physics preset. + - False: exclude environments that have an MJWarp physics preset. + - None: include all environments regardless of MJWarp preset availability. Returns: A sorted list of task IDs matching the selected filters. @@ -66,6 +152,29 @@ def setup_environment( continue # if None: no filter + # apply cartpole showcase filter + if (cartpole_showcase_envs is True and "Showcase" not in task_spec.id) or ( + cartpole_showcase_envs is False and "Showcase" in task_spec.id + ): + continue + # if None: no filter + + # apply pickplace/stack filter + if pickplace_stack_envs is not None: + is_pickplace_stack = _is_pickplace_stack_env(task_spec.id) + if (pickplace_stack_envs is True and not is_pickplace_stack) or ( + pickplace_stack_envs is False and is_pickplace_stack + ): + continue + # if None: no filter + + # apply teleop filter + if teleop_envs is not None: + is_teleop = _is_teleop_env(task_spec) + if (teleop_envs is True and not is_teleop) or (teleop_envs is False and is_teleop): + continue + # if None: no filter + # apply multi agent filter if multi_agent is not None: # parse config @@ -76,17 +185,36 @@ def setup_environment( continue # if None: no filter + # apply MJWarp preset filter + if newton_mjwarp_envs is not None: + # Use load_cfg_from_registry (not parse_env_cfg) so that the PresetCfg + # wrapper on sim.physics is not yet resolved to its default. + raw_cfg = load_cfg_from_registry(task_spec.id, "env_cfg_entry_point") + has_newton_mjwarp = _has_physics_preset(raw_cfg, "newton_mjwarp") + if (newton_mjwarp_envs is True and not has_newton_mjwarp) or ( + newton_mjwarp_envs is False and has_newton_mjwarp + ): + continue + # if None: no filter + registered_tasks.append(task_spec.id) # sort environments alphabetically registered_tasks.sort() - # this flag is necessary to prevent a bug where the simulation gets stuck randomy when running many environments - carb.settings.get_settings().set_bool("/physics/cooking/ujitsoCollisionCooking", False) + # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running many environments + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) print(">>> All registered environments:", registered_tasks) - return registered_tasks + # Wrap tasks listed in XFAIL_TASKS in pytest.param so the corresponding + # parametrized test cases are reported as xfailed instead of failed. + return [ + pytest.param(task_id, marks=pytest.mark.xfail(reason=XFAIL_TASKS[task_id], strict=False)) + if task_id in XFAIL_TASKS + else task_id + for task_id in registered_tasks + ] def _run_environments( @@ -97,6 +225,7 @@ def _run_environments( multi_agent=False, create_stage_in_memory=False, disable_clone_in_fabric=False, + physics_preset_name: str | None = None, ): """Run all environments and check environments return valid signals. @@ -108,6 +237,8 @@ def _run_environments( multi_agent: Whether the environment is multi-agent. create_stage_in_memory: Whether to create stage in memory. disable_clone_in_fabric: Whether to disable fabric cloning. + physics_preset_name: Name of the physics preset to apply (e.g., 'newton_mjwarp'). + If None, uses the environment's default physics. """ # skip test if stage in memory is not supported @@ -123,9 +254,14 @@ def _run_environments( "Isaac-Stack-Cube-Franka-IK-Rel-Blueprint-v0", "Isaac-Stack-Cube-Instance-Randomize-Franka-IK-Rel-v0", "Isaac-Stack-Cube-Instance-Randomize-Franka-v0", + "Isaac-PickPlace-G1-InspireFTP-Abs-v0", ]: return + # these environments are using SingleArticulation class, which need to be updated + if "RmpFlow" in task_name or "Isaac-Stack-Cube-Galbot-Left-Arm-Gripper-Visuomotor" in task_name: + return + # skip these environments as they cannot be run with 32 environments within reasonable VRAM if "Visuomotor" in task_name and num_envs == 32: return @@ -134,6 +270,11 @@ def _run_environments( if task_name in ["Isaac-AutoMate-Assembly-Direct-v0", "Isaac-AutoMate-Disassembly-Direct-v0"]: return + # skip skillgen environments as they require cuRobo installation; + # tested separately via test_environments_skillgen.py + if "Skillgen" in task_name: + return + # Check if this is the teddy bear environment and if it's being called from the right test file if task_name == "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0": # Get the calling frame to check which test file is calling this function @@ -158,6 +299,7 @@ def _run_environments( multi_agent=multi_agent, create_stage_in_memory=create_stage_in_memory, disable_clone_in_fabric=disable_clone_in_fabric, + physics_preset_name=physics_preset_name, ) print(f""">>> Closing environment: {task_name}""") print("-" * 80) @@ -171,6 +313,7 @@ def _check_random_actions( multi_agent: bool = False, create_stage_in_memory: bool = False, disable_clone_in_fabric: bool = False, + physics_preset_name: str | None = None, ): """Run random actions and check environments return valid signals. @@ -182,16 +325,31 @@ def _check_random_actions( multi_agent: Whether the environment is multi-agent. create_stage_in_memory: Whether to create stage in memory. disable_clone_in_fabric: Whether to disable fabric cloning. + physics_preset_name: Name of the physics preset to apply (e.g., 'newton_mjwarp'). + If None, uses the environment's default physics. """ # create a new context stage, if stage in memory is not enabled if not create_stage_in_memory: - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() - # reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + # reset the rtx sensors setting to False + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) + env = None try: # parse config env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) + # apply physics preset override before creating the environment + if physics_preset_name is not None: + # parse_env_cfg already resolved PresetCfg wrappers to their default, + # so we load the raw config to retrieve preset alternatives. + raw_cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") + presets = {"env": collect_presets(raw_cfg), "agent": {}} + hydra_cfg = {"env": env_cfg.to_dict(), "agent": None} + apply_overrides(env_cfg, None, hydra_cfg, [physics_preset_name], [], [], presets) + # Re-apply num_envs since apply_overrides may have replaced + # the scene config with the preset's default num_envs. + if num_envs is not None: + env_cfg.scene.num_envs = num_envs # set config args env_cfg.sim.create_stage_in_memory = create_stage_in_memory if disable_clone_in_fabric: @@ -202,66 +360,67 @@ def _check_random_actions( if not hasattr(env_cfg, "possible_agents"): print(f"[INFO]: Skipping {task_name} as it is not a multi-agent task") return - env = gym.make(task_name, cfg=env_cfg) else: if hasattr(env_cfg, "possible_agents"): print(f"[INFO]: Skipping {task_name} as it is a multi-agent task") return - env = gym.make(task_name, cfg=env_cfg) - - except Exception as e: - # try to close environment on exception - if "env" in locals() and hasattr(env, "_is_closed"): - env.close() - else: - if hasattr(e, "obj") and hasattr(e.obj, "_is_closed"): - e.obj.close() - pytest.fail(f"Failed to set-up the environment for task {task_name}. Error: {e}") - # disable control on stop - env.unwrapped.sim._app_control_on_stop_handle = None # type: ignore - - # override action space if set to inf for `Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0` - if task_name == "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0": - for i in range(env.unwrapped.single_action_space.shape[0]): - if env.unwrapped.single_action_space.low[i] == float("-inf"): - env.unwrapped.single_action_space.low[i] = -1.0 - if env.unwrapped.single_action_space.high[i] == float("inf"): - env.unwrapped.single_action_space.low[i] = 1.0 - - # reset environment - obs, _ = env.reset() - - # check signal - assert _check_valid_tensor(obs) - - # simulate environment for num_steps - with torch.inference_mode(): - for _ in range(num_steps): - # sample actions according to the defined space - if multi_agent: - actions = { - agent: sample_space( - env.unwrapped.action_spaces[agent], device=env.unwrapped.device, batch_size=num_envs - ) - for agent in env.unwrapped.possible_agents - } - else: - actions = sample_space( - env.unwrapped.single_action_space, device=env.unwrapped.device, batch_size=num_envs - ) - # apply actions - transition = env.step(actions) - # check signals - for data in transition[:-1]: # exclude info + # TODO: Selecting the MJWarp preset routes through the Newton backend, which does not yet + # support multi-asset spawning; some combinations fail config validation here with a + # ValueError. Consider filtering invalid combinations in setup_environment() rather than + # forgiving them at runtime. See PR #5097 commit fb2c74a3862 for a workaround that caught + # the error and called pytest.skip(). + env = gym.make(task_name, cfg=env_cfg) + + # disable control on stop + env.unwrapped.sim._app_control_on_stop_handle = None # type: ignore + + # override action space if set to inf for `Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0` + if task_name == "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0": + for i in range(env.unwrapped.single_action_space.shape[0]): + if env.unwrapped.single_action_space.low[i] == float("-inf"): + env.unwrapped.single_action_space.low[i] = -1.0 + if env.unwrapped.single_action_space.high[i] == float("inf"): + env.unwrapped.single_action_space.low[i] = 1.0 + + # reset environment + obs, _ = env.reset() + + # check signal + assert _check_valid_tensor(obs) + + # simulate environment for num_steps + with torch.inference_mode(): + for _ in range(num_steps): + # sample actions according to the defined space if multi_agent: - for agent, agent_data in data.items(): - assert _check_valid_tensor(agent_data), f"Invalid data ('{agent}'): {agent_data}" + actions = { + agent: sample_space( + env.unwrapped.action_spaces[agent], device=env.unwrapped.device, batch_size=num_envs + ) + for agent in env.unwrapped.possible_agents + } else: - assert _check_valid_tensor(data), f"Invalid data: {data}" + actions = sample_space( + env.unwrapped.single_action_space, device=env.unwrapped.device, batch_size=num_envs + ) + # apply actions + transition = env.step(actions) + # check signals + for data in transition[:-1]: # exclude info + if multi_agent: + for agent, agent_data in data.items(): + assert _check_valid_tensor(agent_data), f"Invalid data ('{agent}'): {agent_data}" + else: + assert _check_valid_tensor(data), f"Invalid data: {data}" + + finally: + # Always ensure cleanup happens, regardless of success or failure + if env is not None: + env.close() - # close environment - env.close() + # Clear the simulation context singleton (also closes the USD context stage) + SimulationContext.clear_instance() def _check_valid_tensor(data: torch.Tensor | dict) -> bool: diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..d44cd9a9f6a8 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88da57e08497b308d47c3e606d40142291adde3c0fe2df46ab0a924d39b6847b +size 435 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..0387686a7a78 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea4e6ba2251666e4df0c937fbd123489f2a06939bda9e674d5f84eb5c8831f7d +size 422 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..4c01a0c9d17c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98bc2b8d88abe0a989610f1354eac5b4b73c89bcf51140ff68f28879e90805a1 +size 3513 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..7eae5eb40345 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3d0a29a288f6b987796c527e839618467042741e54f88df810f1ac76dc33950 +size 3927 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..21926715b7f6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fbe8833cefa8b037cb80538ff73ec9c503253cfe410282f3b9acdea12e7a017 +size 427 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..276f96e72e30 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5fd099acf5a7e1b462206284d961a1eda60345033ab96dcff9811c8af6e088b +size 397 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..cd3b20dfd5fe --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0b1e78700de638e5c9b38465d09e4b83875849bac49219f7e343e0235a5e2d0 +size 460 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..dbf8793d8e8e --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74ea7191aa328bbd26a6b029aa7c960db1d901c16222d67eed9e62f8f1ef93cd +size 626 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-depth.png new file mode 100644 index 000000000000..0a26486d7f85 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8929af0b155ac73e4dd74dd70dbc502fb78bd962248e5c46f88972a0a7e3e80e +size 538 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgb.png new file mode 100644 index 000000000000..ff94c25572dc --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43e4e5e1b33b0f33b80cd03c267deb4a6097676aa70f21f7abebeae073909fc4 +size 801 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgba.png new file mode 100644 index 000000000000..aa6a47e561fb --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe5c40ef94b011927a506d244aa42fc85e65b1da2b3789ec9dd569c157afe154 +size 864 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-albedo.png new file mode 100644 index 000000000000..d44cd9a9f6a8 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88da57e08497b308d47c3e606d40142291adde3c0fe2df46ab0a924d39b6847b +size 435 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-depth.png new file mode 100644 index 000000000000..0387686a7a78 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea4e6ba2251666e4df0c937fbd123489f2a06939bda9e674d5f84eb5c8831f7d +size 422 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgb.png new file mode 100644 index 000000000000..e47c06e2ca7c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4029eb71d2361c9fa12d255415bb9edcf1caaeb9d230ca2e6c4e67596c037dd1 +size 2580 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgba.png new file mode 100644 index 000000000000..791497af827c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e1fed2c618875f9f9b4520c52308b0831cf637835e7a62e7a84e96f914c1e83 +size 2882 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..21926715b7f6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fbe8833cefa8b037cb80538ff73ec9c503253cfe410282f3b9acdea12e7a017 +size 427 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..87104cb87161 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47b5b15d79d0b61d00c0538caa0012172753a481ad6efb45df2888402be2f407 +size 391 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..7d05e4a7adbd --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2ba382c0804ea49b55fc5216c9f1e28c34d5cae95b33d9982fd55763df178cc +size 435 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..6b4f8389da06 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/newton-ovrtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7af4ef2afca01d0bf4f9c069c5c3778fa07050bcd8091486541ab11f14e8227 +size 742 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..d44cd9a9f6a8 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88da57e08497b308d47c3e606d40142291adde3c0fe2df46ab0a924d39b6847b +size 435 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..0387686a7a78 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea4e6ba2251666e4df0c937fbd123489f2a06939bda9e674d5f84eb5c8831f7d +size 422 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..0397969c1af1 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0618f881648cc0039729df724f9776379378660bf1858223bf9ed65bb4204691 +size 3584 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..74a8d4f725c8 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e1564f0b96e09346f33be82cbbd181d1e5e589d51c929e95a8884707be2c10e +size 3994 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..21926715b7f6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fbe8833cefa8b037cb80538ff73ec9c503253cfe410282f3b9acdea12e7a017 +size 427 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..276f96e72e30 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5fd099acf5a7e1b462206284d961a1eda60345033ab96dcff9811c8af6e088b +size 397 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..cd3b20dfd5fe --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0b1e78700de638e5c9b38465d09e4b83875849bac49219f7e343e0235a5e2d0 +size 460 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..dbf8793d8e8e --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74ea7191aa328bbd26a6b029aa7c960db1d901c16222d67eed9e62f8f1ef93cd +size 626 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-depth.png new file mode 100644 index 000000000000..0a26486d7f85 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8929af0b155ac73e4dd74dd70dbc502fb78bd962248e5c46f88972a0a7e3e80e +size 538 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgb.png new file mode 100644 index 000000000000..ff94c25572dc --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43e4e5e1b33b0f33b80cd03c267deb4a6097676aa70f21f7abebeae073909fc4 +size 801 diff --git a/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgba.png new file mode 100644 index 000000000000..aa6a47e561fb --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/cartpole/physx-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe5c40ef94b011927a506d244aa42fc85e65b1da2b3789ec9dd569c157afe154 +size 864 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..a49e526d4eee --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec727622d2e85c742051daac4c8ad9ce56af4b2d4d10766810a696b60cef82da +size 2579 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..bf4b0e0290dd --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c003b10810539f538464992860c74ee3bf531b8b4e9b6e0ebe84041d42dba643 +size 532 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..6d21415e1650 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e199c01ed54ce1f9d3430017b73e5793c09340054288efddde04ea031ff2c19 +size 17594 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..525aa5b2776e --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63a63059efc82372e87f8c117e94cd2c5690dd982c65bae0002845a704741182 +size 19970 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..762a23184316 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b8729d722d1780272b24f0804e517ea024ec2b93f4a0fa3e26c82f222f5c9eb +size 700 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..46ce5933fb8b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e1d94f0c6ae2e40a1b0ff9cf27a0f2f9b756ebcebd9ddf27b0da31c89e3a57f +size 1485 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..e591d31c5df9 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:058466b74a49061695567d84edeffb21ad9e88d8c83125d6b2b59cb70bdd5a70 +size 3701 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..2cdfe790a7df --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf8a42b1b3c652ea9f8d79d89a02865ea2c1b347bf61dee810e01890aa00563 +size 4248 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-depth.png new file mode 100644 index 000000000000..39ea590f59aa --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e12023f4f5314fe7214e4cc23368c7e24aef6997d2bfdcdf5a8654e03ea80c3c +size 1064 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgb.png new file mode 100644 index 000000000000..04f5ba9e9614 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fdf17d8f0ea63a15d178dfdff8537625e4a31796a75427fd87132ffc9fd83c9 +size 1625 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgba.png new file mode 100644 index 000000000000..9a7493e9cdc8 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6868ef6f9fa4f00113e56e475f6b5bb4d55c8861f10629eb8b5ebe5a56acceee +size 2680 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-albedo.png new file mode 100644 index 000000000000..5199099a7587 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cf76622f5f5cc7e7889fe6032ba4cda22516248fa7bb5e957f181c92c86b42b +size 3054 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-depth.png new file mode 100644 index 000000000000..bf4b0e0290dd --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c003b10810539f538464992860c74ee3bf531b8b4e9b6e0ebe84041d42dba643 +size 532 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgb.png new file mode 100644 index 000000000000..544e2ffd450b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3445142682c88dc5ce11c5d749c7754bf2d54bf0f1aab6420513a733bf3cf645 +size 14919 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgba.png new file mode 100644 index 000000000000..c3b229d34871 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:179a5acba0a763fcc2317cd784f8c347ef48f0d81f8028a1a64996ade67f8706 +size 17836 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..762a23184316 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b8729d722d1780272b24f0804e517ea024ec2b93f4a0fa3e26c82f222f5c9eb +size 700 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..46ce5933fb8b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e1d94f0c6ae2e40a1b0ff9cf27a0f2f9b756ebcebd9ddf27b0da31c89e3a57f +size 1485 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..2e2f6cb257a7 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83ca3d8f55f971d473409c73e77582175906670f3962b56723cccd28d062a868 +size 3513 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..16b6b73ec7f4 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/newton-ovrtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3afc4f214e51a1ccc1b0341448f64b82773bcc4bba3d913ad4f3052dcf497032 +size 4513 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..266c129cbf86 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fddc33b267fe9810973419babf140faf39b964466bfb6f50193abc8f278cfce4 +size 3052 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..56fa75793780 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66a908918302e713a2cc82a091c4e82db3da46a06cf08311b3818e93714b9132 +size 532 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..d36460128aae --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d132e5a1d1dbfbeb250345f19e1389019ce79a388d51e94a8e954abc2825029a +size 17348 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..c666fa628df2 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:164ec6a8afffc5706573dfcad5747f818f059cd476b65ce9bbfed97c53ed0a83 +size 19729 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..fcc9576e7e95 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77680ebb1f5c9dedc3419d526b65feb82e38013ce402d7805c75b0ad3be8cd6 +size 696 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..38f0a4d9d7e3 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70cc200aa9d558e309d0e69c618b8235de0af11d6cf8b524bb7e7777f5d950cc +size 1492 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..1b833d0f65d6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14f277009292a7cc2e21fdb4f2b5e77a0fb1025435badcb0ccc96871bd8c322d +size 3698 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..28122c3bc3c2 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e9155c484a7ab6131add9adce634f37d6c8205cb1696108f83acc96f6d19ea6 +size 4241 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-depth.png new file mode 100644 index 000000000000..2b296ef399b7 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2eb315a195ddddaabb31529efe051c5543d27f9b603bf1eb910b2ad426df22fa +size 1061 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgb.png new file mode 100644 index 000000000000..a8365cfc8297 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f280f00e8cace33fe5ccf03ca0767954690285ee1908395bff9e8dbf92994e3 +size 1618 diff --git a/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgba.png new file mode 100644 index 000000000000..c6d72a99eba2 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/dexsuite_kuka/physx-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da6cf857f7248d6c46d9c7b9a742609ac9c81eaeb99c9eeec36714bc34aacd17 +size 2670 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Albedo-Camera-Direct-v0/default_physics-default_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Albedo-Camera-Direct-v0/default_physics-default_renderer-albedo.png new file mode 100644 index 000000000000..fc6cb99a4aab --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Albedo-Camera-Direct-v0/default_physics-default_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85dff471f312ae5d0a0856fcce81a71f162e662d41e8abaae72752f719c30bfe +size 437 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgb.png new file mode 100644 index 000000000000..fbfa3e28851a --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1388be146f03b4330570c77e7a809dabb8ba56ed45bb7fe606e40bd6e6027c40 +size 3582 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgba.png new file mode 100644 index 000000000000..0791c1063cae --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Camera-Presets-Direct-v0/default_physics-default_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de65124177ae23872f1ad52d84a75b1c527c8e302df77df045006f7ac0ccf509 +size 3992 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Depth-Camera-Direct-v0/default_physics-default_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Depth-Camera-Direct-v0/default_physics-default_renderer-depth.png new file mode 100644 index 000000000000..0387686a7a78 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-Depth-Camera-Direct-v0/default_physics-default_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea4e6ba2251666e4df0c937fbd123489f2a06939bda9e674d5f84eb5c8831f7d +size 422 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgb.png new file mode 100644 index 000000000000..fbfa3e28851a --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1388be146f03b4330570c77e7a809dabb8ba56ed45bb7fe606e40bd6e6027c40 +size 3582 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgba.png new file mode 100644 index 000000000000..0791c1063cae --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-RGB-Camera-Direct-v0/default_physics-default_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de65124177ae23872f1ad52d84a75b1c527c8e302df77df045006f7ac0ccf509 +size 3992 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Constant-Camera-Direct-v0/default_physics-default_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Constant-Camera-Direct-v0/default_physics-default_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..72930cd6f398 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Constant-Camera-Direct-v0/default_physics-default_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e99cb214df51fd07425d7b312468122a5c151e40e9be65b8b1d204664052963b +size 393 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Diffuse-Camera-Direct-v0/default_physics-default_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Diffuse-Camera-Direct-v0/default_physics-default_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..1ec3dad3f087 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Diffuse-Camera-Direct-v0/default_physics-default_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bc7ec2fb3083c8aad8aca658b8230b5252707f2a6a51d2dac7b70732dd9cfd0 +size 462 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Full-Camera-Direct-v0/default_physics-default_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Full-Camera-Direct-v0/default_physics-default_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..4399230afd8a --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Cartpole-SimpleShading-Full-Camera-Direct-v0/default_physics-default_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10724024d0768e95ae1304d988f477f9bf8bc3d80eabf7b0bb120bebde87dcd3 +size 623 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-depth.png new file mode 100644 index 000000000000..df186c373970 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95d738c3d959f788f8e1b74a37dda30f19de9f7875f25ed8dd77961edc01c704 +size 3660 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgb.png new file mode 100644 index 000000000000..1197faa87f8d --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f906dc6e5107e6b768baf25f31f79f573cc352bcf0c5865134177adceca6d801 +size 20714 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgba.png new file mode 100644 index 000000000000..96989870a197 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eacd38c075e656cce6638ded4a386c01a1190042b7021c08bd8421bc0b31f90e +size 23058 diff --git a/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..4bad29d72ce6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/registered_tasks/Isaac-Repose-Cube-Shadow-Vision-Direct-v0/default_physics-default_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75576f31118081f96b0cec2151ada7016794a933a035ea35665a752e8552b503 +size 1474 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..34d8b8867cb1 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db7624c63ad16d4364af50b8886d03b9caa96256b8eaf008cd873d122be89a78 +size 1758 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..5cad962b3ac0 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc51bc91ac202857ec3648475f25e58e921aa927ec1d0814f63862462877e3f +size 3661 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..4d7c5c0081fb --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc557d5379dec7af05ce5f9e75a9df69ae081011468f439f15e6c1882cf1578 +size 20771 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..108b5c2bbad3 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a508b4275c9211c534650689740c3b2ed541e8a9ad6ed61adcc0caad6b4cd71b +size 23150 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..7aa704e6243b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81cc1b489503d889d93650ca5d5bd760bbbfe2b7d9d0b505003df68de6eb80f3 +size 1476 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..d5e537ddc82d --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0df59cbc94cfc4695842f60884960f8efbbfbc7dc98295fd369747a858df2bea +size 6832 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100755 index 000000000000..4c4dc0b7383c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e1c06a3327dd0ccbab5ace856e483b6059d5ef10fb1c721f909d923509fe209 +size 7134 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..bb230deaf25d --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f021ea83f1638b23c4bb9c8f8feb6dbf6cdfb79d686199a9f10d82514b66a70e +size 9084 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-depth.png new file mode 100644 index 000000000000..f13af97768c5 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cc500faa0f57eb58e445fac0c8df788cde05bf48f2d17e4b048de8bc559c429 +size 4047 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgb.png new file mode 100644 index 000000000000..7844802a4698 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35fe13ad391de7b7c967ac87bbefb3a1d20805dd010ab9052807be235348e5d4 +size 9313 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgba.png new file mode 100644 index 000000000000..33e87b8be324 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1f928096c7fe098f2c303076657dda01859ab5e381e858521f2a880b18367e6 +size 10177 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-albedo.png new file mode 100644 index 000000000000..b0a81304d1b4 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6ea478eb0b63ac6e9b19c66fabc9944a9cdd1d3131aa0d480ba645151765f41 +size 2150 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-depth.png new file mode 100644 index 000000000000..5cad962b3ac0 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc51bc91ac202857ec3648475f25e58e921aa927ec1d0814f63862462877e3f +size 3661 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgb.png new file mode 100644 index 000000000000..d6e87b16a116 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87b927816b29714d92113b2fcab01569c60372f017119b37ced9a12f72b01cd7 +size 19717 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgba.png new file mode 100644 index 000000000000..ddffaebf0722 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fb6696c895cb07a86897002e434be6c8c67a9d50f15615c2fd16f5038eee209 +size 21761 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..c1d11cc65f5c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8edbd9b2c8131d659d36ed3f6f07040eba5428d0e6e888bb8d97bb67d9001917 +size 1477 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..a25340e96b0f --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3122340f40be0b24e9d7f2d262bc7285607536c9cf82151750d2691f05d8950d +size 6840 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png new file mode 100644 index 000000000000..572a1759a30c --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f0ececab1b4b385c54352d02c9b07d6572f8a4f4069eea1afe2089343a164d6 +size 7429 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..687917f13e3b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/newton-ovrtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88ca19937bbf54a87c40f476c395f678a39d443df3899eb3c74b4e3e854866fc +size 9192 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-albedo.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-albedo.png new file mode 100644 index 000000000000..4f1b5fda96b4 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-albedo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255ee6595de19467b0fcef1c6ddc3d5f37f227fa09095f27f455a58e22c84152 +size 1907 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-depth.png new file mode 100644 index 000000000000..df186c373970 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95d738c3d959f788f8e1b74a37dda30f19de9f7875f25ed8dd77961edc01c704 +size 3660 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgb.png new file mode 100644 index 000000000000..6e64fac3cb3e --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7378e928931d9685aa0fc19f03743776e103dcb089705c4fb0353ae1f1acddc +size 20621 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgba.png new file mode 100644 index 000000000000..b05baaca49c9 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44227c68ffdd1663808afc8a01252a522798e0811692f69ef6b59e2162041be2 +size 22843 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-semantic_segmentation.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-semantic_segmentation.png new file mode 100644 index 000000000000..4bad29d72ce6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-semantic_segmentation.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75576f31118081f96b0cec2151ada7016794a933a035ea35665a752e8552b503 +size 1474 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png new file mode 100644 index 000000000000..830b7ab5464b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_constant_diffuse.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63cdc26f60934e57c465286c1ad976ad76826cb03e48657e54578285cd025af1 +size 6418 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png new file mode 100755 index 000000000000..357f1231be34 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_diffuse_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66597ccc03c1c29558f9bcb93f8cdfb98ff9bdb3ff2e0c5600f01cbc8ad2f965 +size 6893 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png new file mode 100644 index 000000000000..bc0da8969fff --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-isaacsim_rtx_renderer-simple_shading_full_mdl.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27b5f0d1d423fa75183d3cff01664328cfa06257a47a1e7e5d433b9e8bb01897 +size 8588 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-depth.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-depth.png new file mode 100644 index 000000000000..1da91b1c4b0b --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-depth.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e4a1b0845691e5032894d45529a00ec652c8cb1d694f698d6e6cc22e30e4067 +size 4075 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgb.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgb.png new file mode 100644 index 000000000000..05fa5d11e83d --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ded42ebd1735db54f759f90868406ea6e4fb1d50e915d50c143c592752dfd7 +size 8424 diff --git a/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgba.png b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgba.png new file mode 100644 index 000000000000..59c5d30efda6 --- /dev/null +++ b/source/isaaclab_tasks/test/golden_images/shadow_hand/physx-newton_renderer-rgba.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65d5b3399e638da995a29eb667fb67906a7eebb41d8f70ba33fb2ee898d307c9 +size 9239 diff --git a/source/isaaclab_tasks/test/rendering_test_utils.py b/source/isaaclab_tasks/test/rendering_test_utils.py new file mode 100644 index 000000000000..f79f55c553a5 --- /dev/null +++ b/source/isaaclab_tasks/test/rendering_test_utils.py @@ -0,0 +1,816 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared helpers for rendering correctness tests.""" + +import os +from datetime import datetime +from typing import Any + +import numpy as np +import pytest +import torch +from PIL import Image, ImageChops + +# Directory containing golden images. +_GOLDEN_IMAGES_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), "golden_images") + +# Pixel L2 norm difference threshold. L2 norm difference is the Euclidean distance between two pixels: +# +# d = sqrt((R1 - R2)^2 + (G1 - G2)^2 + (B1 - B2)^2) +# +# If the difference between two pixels is less than this threshold, consider them "equal" (i.e. within the tolerance). +# +_PIXEL_L2_NORM_DIFFERENCE_THRESHOLD = 10.0 + +# The max percentage of pixels allowed to differ. If the percentage exceeds this value, the test will fail. +# The value is set case by case based on the screen space taken up by the env in camera output images. It +# needs to be large enough to tolerate minor rendering noise while small enough to catch unexpected changes. +MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME = { + "cartpole": 1.0, + # Shadow-hand renderings (incl. ``Isaac-Repose-Cube-Shadow-Vision-Direct-v0``) show up to + # ~3.28 % per-pixel diff from anti-aliasing noise along the many finger/cube edges. 5.0 gives + # headroom above that without masking real regressions, which the SSIM gate still catches. + "shadow_hand": 5.0, + # Texture aliasing artifacts on the ground (NVBUG#6116767) + "dexsuite_kuka": 8.0, +} + +# Minimum SSIM score below which two images are considered structurally different. SSIM is a perceptual metric +# robust to uniform per-pixel noise that penalises structural changes (geometry shifts, swapped colours, missing +# materials, etc.), so it complements the per-pixel L2 gate by catching regressions that survive a loosened pixel +# threshold. +_SSIM_THRESHOLD = 0.985 + +# Per-env SSIM overrides. Envs not listed fall back to ``_SSIM_THRESHOLD``. Loosened individually +# (not globally) to keep the strict gate active everywhere it already passes. +_SSIM_THRESHOLD_BY_ENV_NAME = { + # Texture aliasing artifacts on the ground (NVBUG#6116767) + "dexsuite_kuka": 0.95, +} + +# Data types for which the SSIM gate is not enforced. SSIM assumes natural-image statistics and is unreliable on +# outputs where the per-pixel value distribution is highly non-uniform after normalisation (e.g. depth, where we +# divide by the max value so tiny absolute differences near the far plane dominate windowed variance). For these +# data types we still compute SSIM for reporting, but only the per-pixel L2 gate is used to decide pass/fail. +_SSIM_DISABLED_DATA_TYPES: set[str] = {"depth", "distance_to_camera", "distance_to_image_plane"} + +# Directory for comparison images saved during the test session. +# Located under the pytest output root so it gets copied alongside test reports. +_COMPARISON_IMAGES_DIR = os.path.join(os.getcwd(), "tests", "comparison-images") +_COMPARISON_IMAGE_SUBDIR = "images" + +# --------------------------------------------------------------------------- +# Parametrization: (physics_backend, renderer, data_type) +# --------------------------------------------------------------------------- + +# OVRTX kitless paths can segfault on CI runners; keep warp/Kit paths in CI. +_SKIP_ON_CI = any(os.environ.get(name) == "true" for name in ("CI", "GITHUB_ACTIONS", "GITLAB_CI")) +_SKIP_ON_CI_MARK = pytest.mark.skipif( + _SKIP_ON_CI, + reason="Skipped on CI runners until the test can run on CI runners.", +) + +# Let's just accept the fact that low-resolution camera outputs from RTX renderers are not deterministic enough to pass +# golden image testing on every CI run. +_FLAKY_MARK = pytest.mark.flaky(max_runs=3, min_passes=1) + +PHYSICS_RENDERER_AOV_COMBINATIONS = [ + # physx + isaacsim_rtx_renderer + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "rgb", + id="physx-isaacsim_rtx-rgb", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "albedo", + id="physx-isaacsim_rtx-albedo", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "depth", + id="physx-isaacsim_rtx-depth", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "simple_shading_constant_diffuse", + id="physx-isaacsim_rtx-simple_shading_constant_diffuse", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "simple_shading_diffuse_mdl", + id="physx-isaacsim_rtx-simple_shading_diffuse_mdl", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "simple_shading_full_mdl", + id="physx-isaacsim_rtx-simple_shading_full_mdl", + marks=_FLAKY_MARK, + ), + pytest.param( + "physx", + "isaacsim_rtx_renderer", + "semantic_segmentation", + id="physx-isaacsim_rtx-semantic_segmentation", + marks=_FLAKY_MARK, + ), + # newton + isaacsim_rtx_renderer + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "rgb", + id="newton-isaacsim_rtx-rgb", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "albedo", + id="newton-isaacsim_rtx-albedo", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "depth", + id="newton-isaacsim_rtx-depth", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "simple_shading_constant_diffuse", + id="newton-isaacsim_rtx-simple_shading_constant_diffuse", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "simple_shading_diffuse_mdl", + id="newton-isaacsim_rtx-simple_shading_diffuse_mdl", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "simple_shading_full_mdl", + id="newton-isaacsim_rtx-simple_shading_full_mdl", + marks=_FLAKY_MARK, + ), + pytest.param( + "newton", + "isaacsim_rtx_renderer", + "semantic_segmentation", + id="newton-isaacsim_rtx-semantic_segmentation", + marks=_FLAKY_MARK, + ), + # physx + newton_renderer (warp) + pytest.param( + "physx", + "newton_renderer", + "rgb", + id="physx-newton_warp-rgb", + ), + pytest.param( + "physx", + "newton_renderer", + "depth", + id="physx-newton_warp-depth", + ), +] + +KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS = [ + # newton + ovrtx_renderer + pytest.param( + "newton", + "ovrtx_renderer", + "rgb", + id="newton-ovrtx-rgb", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "albedo", + id="newton-ovrtx-albedo", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "depth", + id="newton-ovrtx-depth", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "simple_shading_constant_diffuse", + id="newton-ovrtx-simple_shading_constant_diffuse", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "simple_shading_diffuse_mdl", + id="newton-ovrtx-simple_shading_diffuse_mdl", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "simple_shading_full_mdl", + id="newton-ovrtx-simple_shading_full_mdl", + marks=_SKIP_ON_CI_MARK, + ), + pytest.param( + "newton", + "ovrtx_renderer", + "semantic_segmentation", + id="newton-ovrtx-semantic_segmentation", + marks=_SKIP_ON_CI_MARK, + ), + # newton + newton_renderer (warp) + pytest.param( + "newton", + "newton_renderer", + "rgb", + id="newton-newton_warp-rgb", + ), + pytest.param( + "newton", + "newton_renderer", + "depth", + id="newton-newton_warp-depth", + ), +] + + +def maybe_save_stage(test_name: str, physics_backend: str, renderer: str, data_type: str) -> None: + """If ``ISAAC_LAB_SAVE_STAGES`` is set, dump the current USD stage to that directory.""" + out_dir = os.environ.get("ISAAC_LAB_SAVE_STAGES") + if not out_dir: + return + + import isaaclab.sim as sim_utils + + os.makedirs(out_dir, exist_ok=True) + safe_test_name = test_name.replace("/", "_") + stage_path = os.path.join(out_dir, f"{safe_test_name}-{physics_backend}-{renderer}-{data_type}.usda") + sim_utils.save_stage(stage_path, save_and_reload_in_place=False) + print(f"[ISAAC_LAB_SAVE_STAGES] wrote {stage_path}") + + +def _apply_overrides_to_env_cfg(env_cfg: Any, override_args: list[str]) -> Any: + """Apply override args to env_cfg using parse_overrides and apply_overrides.""" + from isaaclab_tasks.utils.hydra import apply_overrides, collect_presets, parse_overrides + + presets = {"env": collect_presets(env_cfg)} + global_presets, preset_sel, preset_scalar, _ = parse_overrides(override_args, presets) + hydra_cfg = {"env": env_cfg.to_dict()} + env_cfg, _ = apply_overrides(env_cfg, None, hydra_cfg, global_presets, preset_sel, preset_scalar, presets) + return env_cfg + + +def _physics_preset_name(physics_backend: str) -> str: + """Translate the historical ``"newton"`` backend label (still used by golden-image + filenames and ``pytest.param`` IDs) to the renamed Hydra preset + ``"newton_mjwarp"``. Other labels (``"physx"`` etc.) pass through unchanged. + """ + return "newton_mjwarp" if physics_backend == "newton" else physics_backend + + +def _normalize_tensor(tensor: torch.Tensor, data_type: str) -> torch.Tensor: + """Convert camera output tensor to [0, 1] float32 for conversion to image.""" + normalized = tensor.float() + + if data_type in ["depth", "distance_to_camera", "distance_to_image_plane"]: + max_val = normalized.max() + if max_val > 0: + normalized = normalized / max_val + elif data_type in {"albedo"}: + normalized = normalized[..., :3] / 255.0 + else: + normalized = normalized / 255.0 + + return normalized + + +def _save_comparison_image(img: Image.Image, filename: str) -> str: + """Save a PIL image under the comparison images directory.""" + path = os.path.join(_COMPARISON_IMAGES_DIR, _COMPARISON_IMAGE_SUBDIR, filename) + os.makedirs(os.path.dirname(path), exist_ok=True) + img.save(path, format="PNG") + return path + + +def generate_html_report(comparison_scores: list[dict], report_filename: str) -> None: + """Generate and save an HTML report of comparison scores.""" + if not comparison_scores: + return + + os.makedirs(_COMPARISON_IMAGES_DIR, exist_ok=True) + report_path = os.path.join(_COMPARISON_IMAGES_DIR, report_filename) + sorted_scores = sorted(comparison_scores, key=lambda e: -e["diff_pct"]) + + rows = [] + for entry in sorted_scores: + status_class = "pass" if entry["passed"] else "fail" + status_text = status_class.upper() + + actual_img_html = "" + golden_img_html = "" + if entry.get("img_result_path"): + actual_fname = os.path.relpath(entry["img_result_path"], _COMPARISON_IMAGES_DIR) + golden_fname = os.path.relpath(entry["img_golden_path"], _COMPARISON_IMAGES_DIR) + actual_img_html = f'' + golden_img_html = f'' + + ssim_checked = entry.get("ssim_checked", True) + ssim_cell_class = "" if ssim_checked else ' class="ssim-disabled"' + entry_ssim_threshold = entry.get("ssim_threshold", _SSIM_THRESHOLD) + ssim_threshold_cell = f"{entry_ssim_threshold:.4f}" if ssim_checked else "N/A" + ssim_title = "" if ssim_checked else ' title="SSIM gate disabled for this data type; score is informational."' + + rows.append( + f'' + f"{entry['test']}" + f"{entry['backend']}" + f"{entry['renderer']}" + f"{entry['aov']}" + f"{entry['diff_pct']:.2f}" + f"{entry['threshold']:.1f}" + f"{entry['ssim']:.4f}" + f"{ssim_threshold_cell}" + f'{status_text}' + f"{actual_img_html}" + f"{golden_img_html}" + "" + ) + + html = ( + "\n" + "\n" + "\n" + '\n' + "Rendering Correctness - Image Comparison Report\n" + "\n" + "\n" + "\n" + "

Rendering Correctness - Image Comparison Report

\n" + f"

Sorted by PixelDiff % (desc) - {len(sorted_scores)}  total.

\n" + "\n" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "\n" + "\n" + "\n".join(rows) + "\n\n
TestBackendRendererAOVPixelDiff %PixelDiff Threshold %SSIMSSIM ThresholdStatusACTUALGOLDEN
\n" + f"

Generated: {datetime.now().astimezone().isoformat(timespec='seconds')}.

\n" + "\n" + "\n" + ) + + with open(report_path, "w", encoding="utf-8") as file: + file.write(html) + + +def attach_comparison_properties( + request: pytest.FixtureRequest, comparison_scores: list[dict], initial_count: int +) -> None: + """Attach pixel-diff, SSIM scores, and failure images as JUnit XML properties.""" + for entry in comparison_scores[initial_count:]: + label = f"{entry['backend']}-{entry['renderer']}-{entry['aov']}" + request.node.user_properties.append((f"diff_pct:{label}", f"{entry['diff_pct']:.2f}")) + ssim_value = f"{entry['ssim']:.4f}" if entry.get("ssim_checked", True) else f"{entry['ssim']:.4f} (N/A)" + request.node.user_properties.append((f"ssim:{label}", ssim_value)) + request.node.user_properties.append((f"threshold:{label}", f"{entry['threshold']:.1f}")) + if entry.get("img_result_path"): + request.node.user_properties.append((f"img_result:{label}", entry["img_result_path"])) + request.node.user_properties.append((f"img_golden:{label}", entry["img_golden_path"])) + + +def make_determinism_fixture(): + """Create an autouse fixture that enables determinism for each test.""" + + @pytest.fixture(autouse=True) + def _determinism_fixture(): + """Enable determinism for each test.""" + from isaaclab.utils.seed import configure_seed + + configure_seed(42, torch_deterministic=True) + + yield + + from isaaclab.sim import SimulationContext + + SimulationContext.clear_instance() + + return _determinism_fixture + + +def make_generate_html_report_fixture(comparison_scores: list[dict], report_filename: str): + """Create a session fixture that writes the HTML report for one module. + + Args: + comparison_scores: Module-local comparison score storage. + report_filename: Output report filename. + """ + + @pytest.fixture(scope="session", autouse=True) + def _generate_html_report(): + """Generate an HTML comparison report after all tests in the session complete.""" + yield + generate_html_report(comparison_scores, report_filename) + + return _generate_html_report + + +def make_attach_comparison_properties_fixture(comparison_scores: list[dict]): + """Create an autouse fixture that attaches JUnit properties for one module. + + Args: + comparison_scores: Module-local comparison score storage. + """ + + @pytest.fixture(autouse=True) + def _attach_comparison_properties(request): + """Attach pixel-diff, SSIM scores, and failure images as JUnit XML properties.""" + initial_count = len(comparison_scores) + yield + attach_comparison_properties(request, comparison_scores, initial_count) + + return _attach_comparison_properties + + +def make_require_ovrtx_install_fixture(): + """Create an autouse fixture that fails fast when OVRTX is required but not installed. + + Only parametrized cases with ``renderer == "ovrtx_renderer"`` are checked (Newton + Warp kitless cases do not need ``ov[ovrtx]``). Install with + ``./isaaclab.sh -i 'ov[ovrtx]'`` (or the equivalent in your environment). + """ + + @pytest.fixture(autouse=True) + def _require_ovrtx_install(request): + callspec = getattr(request.node, "callspec", None) + if callspec is None: + return + + if callspec.params.get("renderer") != "ovrtx_renderer": + return + + try: + import ovrtx + + print(f"ovrtx version: {ovrtx.__version__}") + except ImportError as exc: + pytest.fail( + "Kitless OVRTX rendering tests require the optional dependency ov[ovrtx]. " + "Install with: ./isaaclab.sh -i 'ov[ovrtx]'\n" + f"ImportError: {exc}" + ) + + return _require_ovrtx_install + + +def _make_grid(images: torch.Tensor) -> torch.Tensor: + """Make a grid of images from a tensor of shape (B, H, W, C).""" + from torchvision.utils import make_grid + + return make_grid(torch.swapaxes(images.unsqueeze(1), 1, -1).squeeze(-1), nrow=round(images.shape[0] ** 0.5)) + + +def _ssim(img1: torch.Tensor, img2: torch.Tensor, window_size: int = 11) -> float: + """Compute mean SSIM between two (1, C, H, W) float tensors in [0, 1].""" + c1 = 0.01**2 + c2 = 0.03**2 + channels = img1.shape[1] + pad = window_size // 2 + + kernel = torch.ones(channels, 1, window_size, window_size, device=img1.device, dtype=img1.dtype) / ( + window_size * window_size + ) + + mu1 = torch.nn.functional.conv2d(img1, kernel, padding=pad, groups=channels) + mu2 = torch.nn.functional.conv2d(img2, kernel, padding=pad, groups=channels) + + mu1_sq = mu1 * mu1 + mu2_sq = mu2 * mu2 + mu1_mu2 = mu1 * mu2 + + sigma1_sq = torch.nn.functional.conv2d(img1 * img1, kernel, padding=pad, groups=channels) - mu1_sq + sigma2_sq = torch.nn.functional.conv2d(img2 * img2, kernel, padding=pad, groups=channels) - mu2_sq + sigma12 = torch.nn.functional.conv2d(img1 * img2, kernel, padding=pad, groups=channels) - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / ((mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2)) + return ssim_map.mean().item() + + +def _pixel_diff_percentage( + result_image: Image.Image, + golden_image: Image.Image, + pixel_diff_threshold: float = _PIXEL_L2_NORM_DIFFERENCE_THRESHOLD, +) -> float: + """Compute the percentage of pixels whose L2 norm difference exceeds a threshold.""" + diff_array = np.array(ImageChops.difference(result_image, golden_image)) + l2_norm_array = np.linalg.norm(diff_array, axis=2) + num_different_pixels = np.sum(l2_norm_array > pixel_diff_threshold) + return 100.0 * num_different_pixels / l2_norm_array.size + + +def _compare_images( + result_image: Image.Image, + golden_image: Image.Image, + max_different_pixels_percentage: float, + check_ssim: bool = True, + ssim_threshold: float = _SSIM_THRESHOLD, +) -> tuple[bool, str | None, float, float]: + """Compare result and golden images against pixel and SSIM thresholds.""" + if result_image.size != golden_image.size: + return False, f"Size mismatch: expected {golden_image.size}, got {result_image.size}.", 0.0, 0.0 + + if result_image.mode != golden_image.mode: + return False, f"Mode mismatch: expected {golden_image.mode}, got {result_image.mode}.", 0.0, 0.0 + + diff_pct = _pixel_diff_percentage(result_image, golden_image) + + result_tensor = torch.from_numpy(np.array(result_image, dtype=np.float32) / 255.0).permute(2, 0, 1).unsqueeze(0) + golden_tensor = torch.from_numpy(np.array(golden_image, dtype=np.float32) / 255.0).permute(2, 0, 1).unsqueeze(0) + ssim_score = _ssim(result_tensor, golden_tensor) + + if diff_pct > max_different_pixels_percentage: + return ( + False, + f"The percentage of different pixels ({diff_pct:.2f}%) exceeds the threshold of" + f" {max_different_pixels_percentage:.2f}%. SSIM={ssim_score:.4f}.", + diff_pct, + ssim_score, + ) + + if check_ssim and ssim_score < ssim_threshold: + return ( + False, + f"SSIM ({ssim_score:.4f}) is below the threshold of {ssim_threshold:.4f}." + f" Different pixels: {diff_pct:.2f}%.", + diff_pct, + ssim_score, + ) + + return True, None, diff_pct, ssim_score + + +def validate_camera_outputs( + test_name: str, + physics_backend: str, + renderer: str, + camera_outputs: dict[str, torch.Tensor], + max_different_pixels_percentage: float, + comparison_scores: list[dict], +) -> None: + """Validate correctness and consistency of camera outputs.""" + assert len(camera_outputs) > 0, f"[{test_name}] No camera outputs produced by {physics_backend} + {renderer}." + + golden_image_dir = os.path.join(_GOLDEN_IMAGES_DIRECTORY, test_name) + os.makedirs(golden_image_dir, exist_ok=True) + + ssim_threshold = _SSIM_THRESHOLD_BY_ENV_NAME.get(test_name, _SSIM_THRESHOLD) + failed_data_types = {} + + for data_type, tensor in camera_outputs.items(): + condition = torch.logical_or(torch.isinf(tensor), torch.isnan(tensor)) + corrected = torch.where(condition, torch.zeros_like(tensor), tensor) + max_val = corrected.max() + if max_val <= 0: + failed_data_types[data_type] = f"Camera output '{data_type}' has no non-zero pixels." + continue + + normalized = _normalize_tensor(corrected, data_type) + grid = _make_grid(normalized) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + result_image = Image.fromarray(ndarr) + + golden_path = os.path.join(golden_image_dir, f"{physics_backend}-{renderer}-{data_type}.png") + if not os.path.exists(golden_path): + failed_data_types[data_type] = f"Golden image not found at {golden_path}." + result_image.save(golden_path) + continue + + try: + golden_image = Image.open(golden_path) + except Exception as error: # noqa: BLE001 + failed_data_types[data_type] = f"Error opening golden image: {error}" + continue + + check_ssim = data_type not in _SSIM_DISABLED_DATA_TYPES + succeeded, error_message, diff_pct, ssim_score = _compare_images( + result_image, + golden_image, + max_different_pixels_percentage, + check_ssim=check_ssim, + ssim_threshold=ssim_threshold, + ) + + entry = { + "test": test_name, + "backend": physics_backend, + "renderer": renderer, + "aov": data_type, + "diff_pct": diff_pct, + "ssim": ssim_score, + "ssim_checked": check_ssim, + "threshold": max_different_pixels_percentage, + "ssim_threshold": ssim_threshold, + "passed": succeeded, + "img_result_path": None, + "img_golden_path": None, + } + + if diff_pct > 0: + prefix = f"{test_name}-{physics_backend}-{renderer}-{data_type}" + entry["img_result_path"] = _save_comparison_image(result_image, f"{prefix}-actual.png") + entry["img_golden_path"] = _save_comparison_image(golden_image, f"{prefix}-golden.png") + + comparison_scores.append(entry) + + if not succeeded: + failed_data_types[data_type] = error_message + + if failed_data_types: + reason = f"{test_name} (physics={physics_backend}, renderer={renderer}) failed for the following data types:\n" + for data_type, error_message in failed_data_types.items(): + reason += f"- {data_type}: {error_message}\n" + reason += f"Comparison images were written to {_COMPARISON_IMAGES_DIR}." + pytest.fail(reason) + + +def rendering_test_shadow_hand( + physics_backend: str, + renderer: str, + data_type: str, + comparison_scores: list[dict], +) -> None: + from isaaclab_tasks.direct.shadow_hand.shadow_hand_vision_env import ShadowHandVisionEnv + from isaaclab_tasks.direct.shadow_hand.shadow_hand_vision_env_cfg import ShadowHandVisionEnvCfg + + override_args = [f"presets={_physics_preset_name(physics_backend)},{renderer},{data_type}"] + + env_cfg = ShadowHandVisionEnvCfg() + env_cfg = _apply_overrides_to_env_cfg(env_cfg, override_args) + + env_cfg.scene.num_envs = 4 + + if data_type == "depth": + # Disable CNN forward pass as it cannot be meaningfully trained from depth alone and will raise a ValueError. + env_cfg.feature_extractor.enabled = False + + env = None + + try: + env = ShadowHandVisionEnv(env_cfg) + maybe_save_stage("shadow_hand", physics_backend, renderer, data_type) + + validate_camera_outputs( + "shadow_hand", + physics_backend, + renderer, + env._tiled_camera.data.output, + max_different_pixels_percentage=MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME["shadow_hand"], + comparison_scores=comparison_scores, + ) + finally: + if env is not None: + env.close() + + # This invokes camera sensor and renderer cleanup explicitly before pytest teardown, otherwise OV + # native code could probably complain about leaks and trigger segmentation fault. + env = None + + +def rendering_test_cartpole( + physics_backend: str, + renderer: str, + data_type: str, + comparison_scores: list[dict], +) -> None: + from isaaclab_tasks.direct.cartpole.cartpole_camera_env import CartpoleCameraEnv + from isaaclab_tasks.direct.cartpole.cartpole_camera_presets_env_cfg import CartpoleCameraPresetsEnvCfg + + env_cfg = CartpoleCameraPresetsEnvCfg() + env_cfg = _apply_overrides_to_env_cfg( + env_cfg, [f"presets={_physics_preset_name(physics_backend)},{renderer},{data_type}"] + ) + + env_cfg.scene.num_envs = 4 + + env = None + + try: + env = CartpoleCameraEnv(env_cfg) + maybe_save_stage("cartpole", physics_backend, renderer, data_type) + validate_camera_outputs( + "cartpole", + physics_backend, + renderer, + env._tiled_camera.data.output, + max_different_pixels_percentage=MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME["cartpole"], + comparison_scores=comparison_scores, + ) + finally: + if env is not None: + env.close() + + # This invokes camera sensor and renderer cleanup explicitly before pytest teardown, otherwise OV + # native code could probably complain about leaks and trigger segmentation fault. + env = None + + +def rendering_test_dexsuite_kuka( + physics_backend: str, + renderer: str, + data_type: str, + comparison_scores: list[dict], +) -> None: + from isaaclab.envs import ManagerBasedRLEnv + + from isaaclab_tasks.manager_based.manipulation.dexsuite.config.kuka_allegro.dexsuite_kuka_allegro_env_cfg import ( + DexsuiteKukaAllegroLiftEnvCfg, + ) + + override_args = [f"presets={_physics_preset_name(physics_backend)},{renderer},{data_type}64,single_camera,cube"] + + env_cfg = DexsuiteKukaAllegroLiftEnvCfg() + env_cfg = _apply_overrides_to_env_cfg(env_cfg, override_args) + + env_cfg.scene.num_envs = 4 + + # Disable the observation point-cloud visualisation markers (/Visuals/ObservationPointCloud). + # The underlying point sampling uses the global numpy/torch RNG, so marker positions shift + # across processes and show up as random red dots in the rendered camera output. Since this + # test only cares about rendering correctness of the scene itself, we hide the markers here + # rather than making the sampler deterministic globally. + point_cloud_term = getattr(env_cfg.observations.perception, "object_point_cloud", None) + if point_cloud_term is not None: + point_cloud_term.params["visualize"] = False + + # The success and failure markers are placed exactly at the same location. If both markers are + # visible, the rendering order will determine which one is visible in the camera output. Hide + # both markers to avoid this nondeterministic behavior. + for marker_cfg in env_cfg.commands.object_pose.success_visualizer_cfg.markers.values(): + marker_cfg.visible = False + + env = None + + try: + env = ManagerBasedRLEnv(env_cfg) + maybe_save_stage("dexsuite_kuka", physics_backend, renderer, data_type) + validate_camera_outputs( + "dexsuite_kuka", + physics_backend, + renderer, + env.scene.sensors["base_camera"].data.output, + max_different_pixels_percentage=MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME["dexsuite_kuka"], + comparison_scores=comparison_scores, + ) + finally: + if env is not None: + env.close() + + # This invokes camera sensor and renderer cleanup explicitly before pytest teardown, otherwise OV + # native code could probably complain about leaks and trigger segmentation fault. + env = None diff --git a/source/isaaclab_tasks/test/test_cartpole_showcase_environments.py b/source/isaaclab_tasks/test/test_cartpole_showcase_environments.py new file mode 100644 index 000000000000..38b2ce277a19 --- /dev/null +++ b/source/isaaclab_tasks/test/test_cartpole_showcase_environments.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, factory_envs=False, multi_agent=False, teleop_envs=False, cartpole_showcase_envs=True + ), +) +@pytest.mark.isaacsim_ci +def test_cartpole_showcase_environments(task_name, num_envs, device): + # run cartpole showcase environments without stage in memory + _run_environments(task_name, device, num_envs, create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_cartpole_showcase_environments_with_stage_in_memory.py b/source/isaaclab_tasks/test/test_cartpole_showcase_environments_with_stage_in_memory.py new file mode 100644 index 000000000000..536b209dbd47 --- /dev/null +++ b/source/isaaclab_tasks/test/test_cartpole_showcase_environments_with_stage_in_memory.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from isaaclab.utils.version import get_isaac_sim_version + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda")]) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, factory_envs=False, multi_agent=False, teleop_envs=False, cartpole_showcase_envs=True + ), +) +def test_cartpole_showcase_environments_with_stage_in_memory_and_clone_in_fabric_disabled(task_name, num_envs, device): + # skip test if stage in memory is not supported + if get_isaac_sim_version().major < 5: + pytest.skip("Stage in memory is not supported in this version of Isaac Sim") + + # run cartpole showcase environments with stage in memory + _run_environments(task_name, device, num_envs, create_stage_in_memory=True, disable_clone_in_fabric=True) diff --git a/source/isaaclab_tasks/test/test_distributed_device_resolution.py b/source/isaaclab_tasks/test/test_distributed_device_resolution.py new file mode 100644 index 000000000000..ec592c2b1342 --- /dev/null +++ b/source/isaaclab_tasks/test/test_distributed_device_resolution.py @@ -0,0 +1,479 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for distributed multi-GPU device resolution logic. + +These tests verify that ``_resolve_distributed_device`` (in sim_launcher) +correctly handles: + +- Normal multi-GPU: each rank sees all GPUs (local_rank maps directly) +- CUDA_VISIBLE_DEVICES restricted: each rank sees 1 GPU (fallback to cuda:0) +- Multi-node: WORLD_SIZE > local GPU count (local_rank still maps correctly) +- JAX_LOCAL_RANK: added to local_rank for JAX distributed training +- Non-distributed: no device override applied +- launch_simulation device propagation from AppLauncher + +No actual GPUs required — ``torch.cuda.device_count`` and +``torch.cuda.set_device`` are mocked throughout. +""" + +from __future__ import annotations + +import argparse +import os +import sys +import types +from unittest.mock import patch + +import isaaclab_tasks.utils.sim_launcher as sim_launcher + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +class _DummySimCfg: + """Minimal sim config stub with a mutable ``device`` attribute.""" + + def __init__(self, device: str = "cuda:0"): + self.device = device + + +class _DummyEnvCfg: + """Minimal env config stub wrapping a sim config.""" + + def __init__(self, device: str = "cuda:0"): + self.sim = _DummySimCfg(device) + + +def _make_distributed_args(**overrides) -> argparse.Namespace: + """Create an argparse.Namespace with ``distributed=True`` plus any overrides.""" + defaults = {"distributed": True} + defaults.update(overrides) + return argparse.Namespace(**defaults) + + +def _make_env_vars( + local_rank: int = 0, + world_size: int = 2, + rank: int = 0, + jax_local_rank: int | None = None, + jax_rank: int | None = None, +) -> dict[str, str]: + """Build a dict of environment variables for distributed training.""" + env = { + "LOCAL_RANK": str(local_rank), + "WORLD_SIZE": str(world_size), + "RANK": str(rank), + } + if jax_local_rank is not None: + env["JAX_LOCAL_RANK"] = str(jax_local_rank) + if jax_rank is not None: + env["JAX_RANK"] = str(jax_rank) + return env + + +# --------------------------------------------------------------------------- +# _resolve_distributed_device — Namespace launcher_args +# --------------------------------------------------------------------------- + + +class TestResolveDistributedDeviceNamespace: + """Tests for _resolve_distributed_device with argparse.Namespace args.""" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_normal_multi_gpu_rank0(self, mock_count, mock_set_device): + """4 visible GPUs, world_size=4, rank 0 → cuda:0.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=0, world_size=4) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_normal_multi_gpu_rank3(self, mock_count, mock_set_device): + """4 visible GPUs, world_size=4, rank 3 → cuda:3.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=3, world_size=4) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:3" + mock_set_device.assert_called_once_with("cuda:3") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=1) + def test_cuda_visible_devices_restricted_rank0(self, mock_count, mock_set_device): + """1 visible GPU (CUDA_VISIBLE_DEVICES set), world_size=2, rank 0 → cuda:0.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=0, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=1) + def test_cuda_visible_devices_restricted_rank1(self, mock_count, mock_set_device): + """1 visible GPU, world_size=2, rank 1 → falls back to cuda:0 (not cuda:1).""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=1, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=2) + def test_jax_local_rank_added(self, mock_count, mock_set_device): + """JAX_LOCAL_RANK is added to LOCAL_RANK for correct device mapping.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + # LOCAL_RANK=0, JAX_LOCAL_RANK=1 → effective local_rank=1 + env = _make_env_vars(local_rank=0, world_size=2, jax_local_rank=1) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:1" + mock_set_device.assert_called_once_with("cuda:1") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=1) + def test_jax_local_rank_with_restricted_gpus(self, mock_count, mock_set_device): + """JAX_LOCAL_RANK + restricted GPUs → fallback to cuda:0.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=0, world_size=2, jax_local_rank=1) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + +# --------------------------------------------------------------------------- +# _resolve_distributed_device — dict launcher_args +# --------------------------------------------------------------------------- + + +class TestResolveDistributedDeviceDict: + """Tests for _resolve_distributed_device with dict-style args.""" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_dict_args_distributed(self, mock_count, mock_set_device): + """Dict launcher_args with distributed=True should work identically.""" + env_cfg = _DummyEnvCfg() + args = {"distributed": True} + env = _make_env_vars(local_rank=2, world_size=4) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:2" + mock_set_device.assert_called_once_with("cuda:2") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=1) + def test_dict_args_restricted(self, mock_count, mock_set_device): + """Dict args with restricted GPUs should fall back to cuda:0.""" + env_cfg = _DummyEnvCfg() + args = {"distributed": True} + env = _make_env_vars(local_rank=3, world_size=4) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + +# --------------------------------------------------------------------------- +# _resolve_distributed_device — non-distributed (no-op) +# --------------------------------------------------------------------------- + + +class TestResolveDistributedDeviceNoop: + """Tests that non-distributed runs skip device resolution.""" + + @patch("torch.cuda.set_device") + def test_not_distributed_namespace(self, mock_set_device): + """distributed=False → device unchanged, set_device not called.""" + env_cfg = _DummyEnvCfg(device="cuda:0") + args = argparse.Namespace(distributed=False) + + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_not_called() + + @patch("torch.cuda.set_device") + def test_not_distributed_dict(self, mock_set_device): + """Dict with distributed=False → no-op.""" + env_cfg = _DummyEnvCfg(device="cuda:0") + args = {"distributed": False} + + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_not_called() + + @patch("torch.cuda.set_device") + def test_no_distributed_key(self, mock_set_device): + """Dict without 'distributed' key → no-op.""" + env_cfg = _DummyEnvCfg(device="cuda:0") + args = {} + + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_not_called() + + @patch("torch.cuda.set_device") + def test_none_launcher_args(self, mock_set_device): + """launcher_args=None → no-op.""" + env_cfg = _DummyEnvCfg(device="cuda:0") + + sim_launcher._resolve_distributed_device(env_cfg, None) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_not_called() + + +# --------------------------------------------------------------------------- +# _resolve_distributed_device — edge cases +# --------------------------------------------------------------------------- + + +class TestResolveDistributedDeviceEdgeCases: + """Edge cases for device resolution.""" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=2) + def test_env_cfg_without_sim(self, mock_count, mock_set_device): + """env_cfg with no 'sim' attribute → set_device still called, no crash.""" + + class _BareEnvCfg: + pass + + env_cfg = _BareEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=1, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + # Should still call set_device even without sim_cfg + mock_set_device.assert_called_once_with("cuda:1") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=2) + def test_world_size_equals_visible_gpus(self, mock_count, mock_set_device): + """Exact match: 2 visible GPUs, world_size=2 → use local_rank directly.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=1, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:1" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=8) + def test_more_gpus_than_world_size(self, mock_count, mock_set_device): + """8 visible GPUs but only 2 ranks → use local_rank directly.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=1, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:1" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=0) + def test_zero_visible_gpus(self, mock_count, mock_set_device): + """0 visible GPUs → fallback to cuda:0 (will fail later at CUDA init).""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=0, world_size=2) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_missing_env_vars_default_to_zero(self, mock_count, mock_set_device): + """Missing LOCAL_RANK/WORLD_SIZE → defaults to 0/1.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + + # Remove distributed env vars if they exist + clean_env = { + k: v + for k, v in os.environ.items() + if k not in ("LOCAL_RANK", "WORLD_SIZE", "RANK", "JAX_LOCAL_RANK", "JAX_RANK") + } + + with patch.dict(os.environ, clean_env, clear=True): + sim_launcher._resolve_distributed_device(env_cfg, args) + + # local_rank=0, 0 < 4 → cuda:0 + assert env_cfg.sim.device == "cuda:0" + + +# --------------------------------------------------------------------------- +# _resolve_distributed_device — multi-node scenarios +# --------------------------------------------------------------------------- + + +class TestResolveDistributedDeviceMultiNode: + """Tests for multi-node setups where WORLD_SIZE > local GPU count.""" + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_multi_node_rank3_sees_4_gpus(self, mock_count, mock_set_device): + """2 nodes × 4 GPUs, WORLD_SIZE=8, local_rank=3, 4 visible → cuda:3. + + Previously this would fail because 4 >= 8 is False, falling back to cuda:0. + With the fix (local_rank < num_visible_gpus), 3 < 4 → cuda:3 ✅ + """ + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=3, world_size=8, rank=7) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:3" + mock_set_device.assert_called_once_with("cuda:3") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=4) + def test_multi_node_rank0_sees_4_gpus(self, mock_count, mock_set_device): + """2 nodes × 4 GPUs, WORLD_SIZE=8, local_rank=0 → cuda:0.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=0, world_size=8, rank=4) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + @patch("torch.cuda.set_device") + @patch("torch.cuda.device_count", return_value=1) + def test_multi_node_restricted_gpus(self, mock_count, mock_set_device): + """Multi-node with CUDA_VISIBLE_DEVICES=, local_rank=1 → cuda:0.""" + env_cfg = _DummyEnvCfg() + args = _make_distributed_args() + env = _make_env_vars(local_rank=1, world_size=8, rank=5) + + with patch.dict(os.environ, env, clear=False): + sim_launcher._resolve_distributed_device(env_cfg, args) + + assert env_cfg.sim.device == "cuda:0" + mock_set_device.assert_called_once_with("cuda:0") + + +# --------------------------------------------------------------------------- +# launch_simulation integration — verify device propagation from AppLauncher +# --------------------------------------------------------------------------- + + +class TestLaunchSimulationDevicePropagation: + """Verify that launch_simulation propagates AppLauncher.device to env_cfg.""" + + def test_kit_path_propagates_applauncher_device(self, monkeypatch): + """When Kit is needed, AppLauncher.device should be written to env_cfg.sim.device.""" + + class _FakeAppLauncher: + def __init__(self, launcher_args): + self.device = "cuda:3" # Simulate resolved device + self.app = types.SimpleNamespace(close=lambda: None) + + # Mock has_kit to return False so AppLauncher gets created + mock_isaaclab_utils = types.ModuleType("isaaclab.utils") + mock_isaaclab_utils.has_kit = lambda: False + monkeypatch.setitem(sys.modules, "isaaclab.utils", mock_isaaclab_utils) + + monkeypatch.setitem( + sys.modules, + "isaaclab.app", + types.SimpleNamespace(AppLauncher=_FakeAppLauncher), + ) + monkeypatch.setattr( + "importlib.util.find_spec", + lambda name: object() if name == "omni.kit" else None, + ) + # Force needs_kit=True, no cameras + monkeypatch.setattr( + sim_launcher, + "compute_kit_requirements", + lambda env_cfg, launcher_args: (True, False, set()), + ) + # Mock _resolve_distributed_device to avoid torch.cuda calls + monkeypatch.setattr( + sim_launcher, + "_resolve_distributed_device", + lambda env_cfg, launcher_args: None, + ) + + env_cfg = _DummyEnvCfg(device="cuda:0") + args = argparse.Namespace() + + with sim_launcher.launch_simulation(env_cfg, args): + pass + + assert env_cfg.sim.device == "cuda:3" + + def test_kitless_path_uses_resolve_distributed_device(self, monkeypatch): + """When Kit is NOT needed, _resolve_distributed_device sets the device.""" + resolved_devices = [] + + def _fake_resolve(env_cfg, launcher_args): + env_cfg.sim.device = "cuda:1" + resolved_devices.append("cuda:1") + + monkeypatch.setattr( + sim_launcher, + "compute_kit_requirements", + lambda env_cfg, launcher_args: (False, False, set()), + ) + monkeypatch.setattr( + sim_launcher, + "_resolve_distributed_device", + _fake_resolve, + ) + + env_cfg = _DummyEnvCfg(device="cuda:0") + args = _make_distributed_args() + + with sim_launcher.launch_simulation(env_cfg, args): + pass + + assert env_cfg.sim.device == "cuda:1" + assert len(resolved_devices) == 1 diff --git a/source/isaaclab_tasks/test/test_env_cfg_no_forbidden_imports.py b/source/isaaclab_tasks/test/test_env_cfg_no_forbidden_imports.py new file mode 100644 index 000000000000..c1ab6e78e7b4 --- /dev/null +++ b/source/isaaclab_tasks/test/test_env_cfg_no_forbidden_imports.py @@ -0,0 +1,189 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test that env_cfg construction does not import forbidden backend modules. + +``load_cfg_from_registry`` runs BEFORE SimulationApp is launched to inspect +the physics backend and decide whether Kit is needed at all. Config classes +are pure data — they must be constructable without any runtime dependencies +that conflict with Kit's internal ``fork()`` or that require a running +simulator. + +Forbidden categories +-------------------- +1. **Backend / simulator runtime** (``pxr``, ``omni``, ``carb``, ``isaacsim``) + — require SimulationApp / Kit to be initialized first. +2. **SciPy** — loads OpenBLAS which registers ``atfork`` handlers that crash + Kit's internal ``fork()`` during startup. + +Remediation patterns +-------------------- +* Use ``lazy_loader.attach_stub`` in ``__init__.py`` files with a + corresponding ``.pyi`` stub so that implementation modules are only + imported when first accessed. +* Guard annotation-only imports with ``TYPE_CHECKING``. +* Store ``class_type`` / ``func`` fields as fully-qualified strings + (e.g. ``"isaaclab.assets.articulation:Articulation"``); ``cfg.validate()`` + resolves them to callables after Kit has launched. +* Use local ``# noqa: PLC0415`` imports inside functions for Kit-dependent + symbols that cannot be imported at module level before Kit is running. + +Performance note +---------------- +All task checks are batched into a **single subprocess** so that +``import isaaclab_tasks`` (~1.6 s) is paid only once instead of once per test. +Results are returned as JSON and cached for the parametrized test functions. +""" + +import json +import subprocess +import sys +import textwrap + +import gymnasium +import pytest + +import isaaclab_tasks # noqa: F401 -- triggers task registration + +# Forbidden module prefixes -- these must NOT appear in sys.modules after +# config loading because they require SimulationApp / a specific physics +# backend to be started first, or because they are heavyweight runtime +# libraries that should never be needed to construct pure-data config objects. +_FORBIDDEN_PREFIXES = ( + # Backend / simulator runtime (require SimulationApp / Kit) + "pxr", # USD Python bindings + "omni", # Omniverse runtime + "carb", # Carbonite framework + "isaacsim", # Isaac Sim modules + # SciPy loads OpenBLAS which crashes Kit's fork() + "scipy", +) + +_ALL_ISAAC_TASKS = sorted(name for name in gymnasium.registry if name.startswith("Isaac-")) + +# --------------------------------------------------------------------------- +# Batch subprocess: run all checks in one Python process so we only pay the +# `import isaaclab_tasks` cost once (~1.6 s) instead of once per test. +# --------------------------------------------------------------------------- + + +def _build_batch_script(task_names: list[str]) -> str: + return textwrap.dedent(f"""\ + import sys, traceback, json + + FORBIDDEN = {list(_FORBIDDEN_PREFIXES)!r} + task_names = {task_names!r} + + import isaaclab_tasks # noqa: F401 + from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry + + results = {{}} + + for task_name in task_names: + violations = {{}} + load_error = None + + _orig_import = __builtins__.__import__ + + def _hook(name, *args, **kw): + top = name.split('.')[0] + if top in FORBIDDEN and top not in violations: + violations[top] = ''.join(traceback.format_stack()) + return _orig_import(name, *args, **kw) + + __builtins__.__import__ = _hook + try: + cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") + except Exception as exc: + load_error = str(exc) + finally: + __builtins__.__import__ = _orig_import + + # Note: we intentionally do NOT clean up sys.modules between tasks. + # The import hook intercepts every __import__ call regardless of + # whether the module is already cached, so it reliably catches + # violations even across tasks. Cleaning up sys.modules would force + # re-importing shared modules (e.g. velocity_env_cfg, which is + # shared by 40+ locomotion tasks) and turn a 3 s run into 17 s. + + results[task_name] = {{ + 'load_error': load_error, + 'violations': violations, + }} + + # Use a sentinel so the parser can find the JSON even when + # load_cfg_from_registry prints [INFO] lines to stdout. + print("__RESULTS__" + json.dumps(results)) + """) + + +@pytest.fixture(scope="session") +def all_cfg_check_results() -> dict: + """Run all task cfg checks in a single subprocess and return results dict.""" + script = _build_batch_script(_ALL_ISAAC_TASKS) + result = subprocess.run( + [sys.executable, "-c", script], + capture_output=True, + text=True, + timeout=300, + ) + # Find the sentinel line (load_cfg_from_registry emits [INFO] lines to stdout) + json_line = None + for line in result.stdout.splitlines(): + if line.startswith("__RESULTS__"): + json_line = line[len("__RESULTS__") :] + break + + if json_line is None: + return { + "__subprocess_crash__": ( + f"Batch subprocess did not produce results.\n" + f"--- stdout ---\n{result.stdout}\n" + f"--- stderr ---\n{result.stderr}" + ) + } + try: + return json.loads(json_line) + except json.JSONDecodeError as exc: + return {"__json_error__": str(exc), "__raw__": json_line[:500]} + + +@pytest.mark.parametrize("task_name", _ALL_ISAAC_TASKS) +def test_config_load_does_not_import_backend_modules(task_name: str, all_cfg_check_results: dict): + """Config loading must not import forbidden runtime modules. + + Config classes are pure data. They must not pull in backend modules + (pxr, omni, carb, isaacsim) or heavyweight libraries (scipy). + + Fix: use lazy_loader.attach_stub with .pyi stubs in __init__.py files, + TYPE_CHECKING guards for annotation-only imports, and string references + for class_type/func fields in cfg files. + """ + if "__subprocess_crash__" in all_cfg_check_results: + pytest.fail(f"Batch check subprocess crashed:\n{all_cfg_check_results['__subprocess_crash__']}") + + if task_name not in all_cfg_check_results: + pytest.fail(f"No result for '{task_name}' - batch subprocess may have crashed.") + + info = all_cfg_check_results[task_name] + load_error = info.get("load_error") + violations = info.get("violations", {}) + + messages = [] + if load_error: + messages.append(f"ERROR: config load crashed: {load_error}") + if violations: + messages.append(f"FAIL: {len(violations)} forbidden top-level module(s) imported:") + for mod, stack in sorted(violations.items()): + messages.append(f"\n=== {mod} ===\n{stack}") + + assert not violations and not load_error, ( + f"Config loading for '{task_name}' imported forbidden backend modules.\n" + f"Forbidden prefixes: {_FORBIDDEN_PREFIXES}\n" + + "\n".join(messages) + + "\n\nFix: use lazy_loader.attach_stub with a .pyi stub in the offending " + "__init__.py, or move the import under TYPE_CHECKING and use a string " + "reference for isinstance checks." + ) diff --git a/source/isaaclab_tasks/test/test_environment_determinism.py b/source/isaaclab_tasks/test/test_environment_determinism.py index 52ac1f34980e..1618d62fd3cb 100644 --- a/source/isaaclab_tasks/test/test_environment_determinism.py +++ b/source/isaaclab_tasks/test/test_environment_determinism.py @@ -18,8 +18,7 @@ import pytest import torch -import carb -import omni.usd +import isaaclab.sim as sim_utils import isaaclab_tasks # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg @@ -29,8 +28,9 @@ def setup_environment(): # this flag is necessary to prevent a bug where the simulation gets stuck randomly when running the # test on many environments. - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/cooking/ujitsoCollisionCooking", False) + from isaaclab.app.settings_manager import get_settings_manager + + get_settings_manager().set_bool("/physics/cooking/ujitsoCollisionCooking", False) @pytest.mark.parametrize( @@ -76,7 +76,7 @@ def test_dextrous_env_determinism(task_name, device): def _test_environment_determinism(task_name: str, device: str): """Check deterministic environment creation.""" # fix number of steps - num_envs = 32 + num_envs = 2 num_steps = 100 # call function to create and step the environment obs_1, rew_1 = _obtain_transition_tuples(task_name, num_envs, device, num_steps) @@ -93,7 +93,7 @@ def _test_environment_determinism(task_name: str, device: str): def _obtain_transition_tuples(task_name: str, num_envs: int, device: str, num_steps: int) -> tuple[dict, torch.Tensor]: """Run random actions and obtain transition tuples after fixed number of steps.""" # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() try: # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab_tasks/test/test_environments.py b/source/isaaclab_tasks/test/test_environments.py index 879948f9d9a8..6f9da001b514 100644 --- a/source/isaaclab_tasks/test/test_environments.py +++ b/source/isaaclab_tasks/test/test_environments.py @@ -5,14 +5,6 @@ """Launch Isaac Sim Simulator first.""" -import sys - -# Import pinocchio in the main script to force the use of the dependencies -# installed by IsaacLab and not the one installed by Isaac Sim. -# pinocchio is required by the Pink IK controller -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch the simulator @@ -30,8 +22,18 @@ from env_test_utils import _run_environments, setup_environment # isort: skip -@pytest.mark.parametrize("num_envs, device", [(32, "cuda"), (1, "cuda")]) -@pytest.mark.parametrize("task_name", setup_environment(include_play=False, factory_envs=False, multi_agent=False)) +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, + factory_envs=False, + multi_agent=False, + teleop_envs=False, + cartpole_showcase_envs=False, + pickplace_stack_envs=False, + ), +) @pytest.mark.isaacsim_ci def test_environments(task_name, num_envs, device): # run environments without stage in memory diff --git a/source/isaaclab_tasks/test/test_environments_automate.py b/source/isaaclab_tasks/test/test_environments_automate.py new file mode 100644 index 000000000000..bace1932db57 --- /dev/null +++ b/source/isaaclab_tasks/test/test_environments_automate.py @@ -0,0 +1,48 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import sys + +# Import pinocchio in the main script to force the use of the dependencies +# installed by IsaacLab and not the one installed by Isaac Sim. +# pinocchio is required by the Pink IK controller +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +# Note: _check_random_actions is used directly here (instead of _run_environments) so that the +# skip guards in _run_environments for AutoMate environments are intentionally bypassed. +from env_test_utils import _check_random_actions # isort: skip + +# AutoMate environments require a CUDA installation that is present in the cuRobo Docker image +# but not in the base image. This test is intentionally excluded from the base-image CI jobs via +# CUROBO_TESTS / TESTS_TO_SKIP in test_settings.py and is only executed by the dedicated +# ``test-curobo`` CI job which uses the cuRobo Docker image. +AUTOMATE_ENVS = [ + "Isaac-AutoMate-Assembly-Direct-v0", + "Isaac-AutoMate-Disassembly-Direct-v0", +] + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("task_name", AUTOMATE_ENVS) +@pytest.mark.isaacsim_ci +def test_automate_environments(task_name, num_envs, device): + _check_random_actions(task_name, device, num_envs, create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_environments_newton.py b/source/isaaclab_tasks/test/test_environments_newton.py new file mode 100644 index 000000000000..23456946a689 --- /dev/null +++ b/source/isaaclab_tasks/test/test_environments_newton.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, + factory_envs=False, + multi_agent=False, + teleop_envs=False, + cartpole_showcase_envs=False, + pickplace_stack_envs=False, + newton_mjwarp_envs=True, + ), +) +@pytest.mark.newton_ci +@pytest.mark.xfail( + reason=( + "TODO: Nested PresetCfg resolution for named presets (e.g. 'newton_mjwarp') is not yet supported. " + "The logic in parse_cfg.apply_named_preset should be unified with the deep-nesting " + "fixes in https://github.com/isaac-sim/IsaacLab/pull/5029 (isaaclab_tasks.utils.hydra)." + ), + strict=False, +) +def test_environments_newton(task_name, num_envs, device): + # run environments with MJWarp physics preset + _run_environments(task_name, device, num_envs, physics_preset_name="newton_mjwarp", create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_environments_skillgen.py b/source/isaaclab_tasks/test/test_environments_skillgen.py new file mode 100644 index 000000000000..6dc9b024814d --- /dev/null +++ b/source/isaaclab_tasks/test/test_environments_skillgen.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import sys + +# Import pinocchio in the main script to force the use of the dependencies +# installed by IsaacLab and not the one installed by Isaac Sim. +# pinocchio is required by the Pink IK controller +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +# Note: _check_random_actions is used directly here (instead of _run_environments) so that the +# skip guards in _run_environments for Skillgen environments are intentionally bypassed. +from env_test_utils import _check_random_actions # isort: skip + +# SkillGen environments require cuRobo to be installed. This test is intentionally excluded from +# the base-image CI jobs via CUROBO_TESTS / TESTS_TO_SKIP in test_settings.py and is only executed +# by the dedicated ``test-curobo`` CI job which uses the cuRobo Docker image. +SKILLGEN_ENVS = [ + "Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0", +] + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("task_name", SKILLGEN_ENVS) +@pytest.mark.isaacsim_ci +def test_skillgen_environments(task_name, num_envs, device): + _check_random_actions(task_name, device, num_envs, create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_environments_with_stage_in_memory.py b/source/isaaclab_tasks/test/test_environments_with_stage_in_memory.py index 8a99436b91c7..388c510d629a 100644 --- a/source/isaaclab_tasks/test/test_environments_with_stage_in_memory.py +++ b/source/isaaclab_tasks/test/test_environments_with_stage_in_memory.py @@ -5,14 +5,6 @@ """Launch Isaac Sim Simulator first.""" -import sys - -# Import pinocchio in the main script to force the use of the dependencies -# installed by IsaacLab and not the one installed by Isaac Sim. -# pinocchio is required by the Pink IK controller -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch the simulator @@ -48,7 +40,17 @@ @pytest.mark.parametrize("num_envs, device", [(2, "cuda")]) -@pytest.mark.parametrize("task_name", setup_environment(include_play=False, factory_envs=False, multi_agent=False)) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, + factory_envs=False, + multi_agent=False, + teleop_envs=False, + cartpole_showcase_envs=False, + pickplace_stack_envs=False, + ), +) def test_environments_with_stage_in_memory_and_clone_in_fabric_disabled(task_name, num_envs, device): # skip test if stage in memory is not supported if get_isaac_sim_version().major < 5: diff --git a/source/isaaclab_tasks/test/test_factory_environments.py b/source/isaaclab_tasks/test/test_factory_environments.py index 059080006557..d3119ec27ab1 100644 --- a/source/isaaclab_tasks/test/test_factory_environments.py +++ b/source/isaaclab_tasks/test/test_factory_environments.py @@ -21,7 +21,7 @@ from env_test_utils import _check_random_actions, setup_environment # isort: skip -@pytest.mark.parametrize("num_envs, device", [(32, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) @pytest.mark.parametrize("task_name", setup_environment(factory_envs=True, multi_agent=False)) @pytest.mark.isaacsim_ci def test_factory_environments(task_name, num_envs, device): diff --git a/source/isaaclab_tasks/test/test_hydra.py b/source/isaaclab_tasks/test/test_hydra.py index 5c81cb3e650f..738884874899 100644 --- a/source/isaaclab_tasks/test/test_hydra.py +++ b/source/isaaclab_tasks/test/test_hydra.py @@ -3,103 +3,1397 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Launch Isaac Sim Simulator first.""" +"""Self-contained tests for Hydra configuration utilities. -import sys +These tests verify the REPLACE-only preset system without depending on +external environment configurations. +""" -from isaaclab.app import AppLauncher +import warnings -# launch the simulator -app_launcher = AppLauncher(headless=True) -simulation_app = app_launcher.app +import pytest +from isaaclab.utils import configclass -"""Rest everything follows.""" +from isaaclab_tasks.utils import hydra as hydra_mod +from isaaclab_tasks.utils.hydra import ( + PresetCfg, + _format_unknown_presets_error, + apply_overrides, + collect_presets, + parse_overrides, + preset, + resolve_presets, +) -import functools -from collections.abc import Callable +# ============================================================================= +# Leaf config classes (reused across all test sections) +# ============================================================================= -import hydra -from hydra import compose, initialize -from omegaconf import OmegaConf -from isaaclab.utils import replace_strings_with_slices +@configclass +class PhysxCfg: + backend: str = "physx" + dt: float = 0.005 + substeps: int = 2 -import isaaclab_tasks # noqa: F401 -from isaaclab_tasks.utils.hydra import register_task_to_hydra +@configclass +class NewtonCfg: + backend: str = "newton" + dt: float = 0.002 + substeps: int = 4 + solver_iterations: int = 8 -def hydra_task_config_test(task_name: str, agent_cfg_entry_point: str) -> Callable: - """Copied from hydra.py hydra_task_config, since hydra.main requires a single point of entry, - which will not work with multiple tests. Here, we replace hydra.main with hydra initialize - and compose.""" - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - # register the task to Hydra - env_cfg, agent_cfg = register_task_to_hydra(task_name, agent_cfg_entry_point) +@configclass +class NoiselessObservationsCfg: + enable_corruption: bool = False + concatenate_terms: bool = True + noise_scale: float = 0.0 - # replace hydra.main with initialize and compose - with initialize(config_path=None, version_base="1.3"): - hydra_env_cfg = compose(config_name=task_name, overrides=sys.argv[1:]) - # convert to a native dictionary - hydra_env_cfg = OmegaConf.to_container(hydra_env_cfg, resolve=True) - # replace string with slices because OmegaConf does not support slices - hydra_env_cfg = replace_strings_with_slices(hydra_env_cfg) - # update the configs with the Hydra command line arguments - env_cfg.from_dict(hydra_env_cfg["env"]) - if isinstance(agent_cfg, dict): - agent_cfg = hydra_env_cfg["agent"] - else: - agent_cfg.from_dict(hydra_env_cfg["agent"]) - # call the original function - func(env_cfg, agent_cfg, *args, **kwargs) - return wrapper +@configclass +class FastObservationsCfg: + enable_corruption: bool = False + concatenate_terms: bool = False + noise_scale: float = 0.0 - return decorator +@configclass +class SmallPolicyCfg: + actor_hidden_dims: list = [64, 32] -def test_hydra(): - """Test the hydra configuration system.""" - # set hardcoded command line arguments - sys.argv = [ - sys.argv[0], - "env.decimation=42", # test simple env modification - "env.events.physics_material.params.asset_cfg.joint_ids='slice(0 ,1, 2)'", # test slice setting - "env.scene.robot.init_state.joint_vel={.*: 4.0}", # test regex setting - "env.rewards.feet_air_time=null", # test setting to none - "agent.max_iterations=3", # test simple agent modification +@configclass +class FastPolicyCfg: + actor_hidden_dims: list = [32, 16] + + +# ============================================================================= +# Composite configs using PresetCfg +# ============================================================================= + + +@configclass +class SampleEnvCfg: + decimation: int = 4 + sim_dt: float = 0.005 + + +@configclass +class SampleAgentCfg: + max_iterations: int = 1000 + learning_rate: float = 3e-4 + + +@configclass +class SimBackendCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg() + + +@configclass +class ObsModeCfg(PresetCfg): + default: NoiselessObservationsCfg = NoiselessObservationsCfg() + fast: FastObservationsCfg = FastObservationsCfg() + + +@configclass +class PolicyModeCfg(PresetCfg): + default: SmallPolicyCfg = SmallPolicyCfg() + fast: FastPolicyCfg = FastPolicyCfg() + + +@configclass +class PresetCfgEnvCfg: + decimation: int = 4 + backend: SimBackendCfg = SimBackendCfg() + observations: ObsModeCfg = ObsModeCfg() + + +@configclass +class PresetCfgAgentCfg: + learning_rate: float = 3e-4 + policy: PolicyModeCfg = PolicyModeCfg() + + +@configclass +class RootAgentCfg(PresetCfg): + """Root-level PresetCfg -- the agent config itself is a PresetCfg.""" + + default: SampleAgentCfg = SampleAgentCfg() + fast: SampleAgentCfg = SampleAgentCfg(max_iterations=100, learning_rate=1e-3) + + +# -- Nested PresetCfg-inside-PresetCfg (mirrors scene.base_camera pattern) -- + + +@configclass +class CameraSmallCfg: + width: int = 64 + height: int = 64 + + +@configclass +class CameraLargeCfg: + width: int = 256 + height: int = 256 + + +@configclass +class CameraPresetCfg(PresetCfg): + small: CameraSmallCfg = CameraSmallCfg() + large: CameraLargeCfg = CameraLargeCfg() + default: CameraSmallCfg = CameraSmallCfg() + + +@configclass +class BaseSceneCfg: + num_envs: int = 1024 + camera: CameraPresetCfg | None = None + + +@configclass +class ScenePresetCfg(PresetCfg): + default: BaseSceneCfg = BaseSceneCfg() + with_camera: BaseSceneCfg = BaseSceneCfg(camera=CameraPresetCfg()) + + +@configclass +class NestedPresetEnvCfg: + decimation: int = 4 + scene: ScenePresetCfg = ScenePresetCfg() + + +# -- Scalar PresetCfg and actuator configs (shared by scalar + dict sections) -- + + +@configclass +class ScalarPresetCfg(PresetCfg): + default: float = 0.0 + newton_mjwarp: float = 0.01 + + +@configclass +class ActuatorWithPresetCfg: + joint_names: list = [".*"] + stiffness: float = 40.0 + damping: float = 5.0 + armature: ScalarPresetCfg = ScalarPresetCfg() + + +# -- Deep-nested dict configs (event term params pattern) -- + + +@configclass +class OffsetCfg(PresetCfg): + """Mimics task-specific offset presets (e.g., AssembledOffsetCfg).""" + + task_a: tuple = (0.0, 0.0, 0.01) + task_b: tuple = (0.02, 0.0, 0.005) + default: tuple = task_a + + +@configclass +class FractionCfg(PresetCfg): + task_a: tuple = (0.05, 0.5) + task_b: tuple = (0.3, 1.0) + default: tuple = task_a + + +@configclass +class JointNamesCfg(PresetCfg): + default: list[str] | None = None + robot_a: list[str] = None + robot_b: list[str] = None + + +@configclass +class EntityCfg: + """Mimics SceneEntityCfg with a preset-valued field.""" + + name: str = "robot" + joint_names: list[str] | None = None + + +@configclass +class InnerTermCfg: + """Mimics an EventTermCfg with params containing presets.""" + + func: str = "reset_fn" + params: dict = None + + def __post_init__(self): + if self.params is None: + self.params = { + "offset": OffsetCfg(), + "fraction": FractionCfg(), + "robot_cfg": EntityCfg(name="robot", joint_names=JointNamesCfg()), + } + + +@configclass +class OuterTermCfg: + """Mimics a chained reset term with nested terms dict.""" + + func: str = "chain_fn" + params: dict = None + + def __post_init__(self): + if self.params is None: + self.params = { + "terms": { + "step_one": InnerTermCfg(), + } + } + + +@configclass +class DeepDictEnvCfg: + decimation: int = 4 + events: OuterTermCfg = OuterTermCfg() + + +@configclass +class DictPresetTermCfg: + """Outer term where the terms dict is itself a preset (resolves to a dict).""" + + func: str = "term_choice" + params: dict = None + + def __post_init__(self): + if self.params is None: + self.params = { + "terms": preset( + default={ + "strategy_a": InnerTermCfg(), + "strategy_b": InnerTermCfg(), + }, + alt={ + "strategy_a": InnerTermCfg(), + }, + ), + } + + +@configclass +class PresetResolvesToDictEnvCfg: + decimation: int = 4 + events: DictPresetTermCfg = DictPresetTermCfg() + + +# ============================================================================= +# Helpers +# ============================================================================= + + +def _apply(env_cfg, agent_cfg=None, global_presets=None, preset_sel=None, preset_scalar=None): + """Collect presets, resolve defaults, build hydra dict, and apply overrides.""" + if agent_cfg is None: + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + env_cfg = resolve_presets(env_cfg) + agent_cfg = resolve_presets(agent_cfg) + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + return apply_overrides( + env_cfg, + agent_cfg, + hydra_cfg, + global_presets or [], + preset_sel or [], + preset_scalar or [], + presets, + ) + + +# ============================================================================= +# Fixtures +# ============================================================================= + + +@pytest.fixture +def class_presets(): + """Fresh configs using PresetCfg pattern.""" + env_cfg = PresetCfgEnvCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + return env_cfg, agent_cfg, presets + + +# ============================================================================= +# Tests: collect_presets +# ============================================================================= + + +def test_collect_presets_class_style(): + """PresetCfg fields discovered at correct paths.""" + presets = collect_presets(PresetCfgEnvCfg()) + assert "backend" in presets + assert set(presets["backend"].keys()) == {"default", "newton_mjwarp"} + assert isinstance(presets["backend"]["default"], PhysxCfg) + assert isinstance(presets["backend"]["newton_mjwarp"], NewtonCfg) + + +def test_legacy_newton_attribute_alias_warns(): + """Python access to the legacy ``newton`` preset aliases to ``newton_mjwarp`` during deprecation.""" + cfg = SimBackendCfg() + with pytest.warns(FutureWarning, match="Preset 'newton' is deprecated"): + assert cfg.newton is cfg.newton_mjwarp + + +def test_legacy_kamino_attribute_alias_warns(): + """Python access to the legacy ``kamino`` preset aliases to ``newton_kamino`` during deprecation.""" + + @configclass + class _SolverPresetsCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + newton_kamino: NewtonCfg = NewtonCfg() + + cfg = _SolverPresetsCfg() + with pytest.warns(FutureWarning, match="Preset 'kamino' is deprecated"): + assert cfg.kamino is cfg.newton_kamino + + +def test_legacy_alias_suppressed_when_legacy_name_is_real_field(): + """An env that legitimately defines ``newton`` should not warn or be remapped.""" + + @configclass + class _ShadowingCfg(PresetCfg): + default: PhysxCfg = PhysxCfg() + newton: PhysxCfg = PhysxCfg() + newton_mjwarp: NewtonCfg = NewtonCfg() + + cfg = _ShadowingCfg() + with warnings.catch_warnings(): + warnings.simplefilter("error", FutureWarning) + assert cfg.newton is not cfg.newton_mjwarp + assert isinstance(cfg.newton, PhysxCfg) + + +def test_presetcfg_attribute_error_for_unknown_attribute(): + """Plain missing attributes should raise ``AttributeError`` (not warn or alias).""" + cfg = SimBackendCfg() + assert not hasattr(cfg, "completely_unknown") + with pytest.raises(AttributeError, match="completely_unknown"): + _ = cfg.completely_unknown + + +def test_format_unknown_presets_error_calls_out_legacy_aliases(): + """The unknown-preset error should explicitly mention the rename for legacy aliases.""" + msg = _format_unknown_presets_error({"newton", "typo"}, {"fast": ["env"]}) + assert "newton' was renamed to 'newton_mjwarp'" in msg + assert "typo" in msg + + +def test_user_stacklevel_warning_origin_is_outside_hydra_module(): + """``_normalize_preset_name`` warnings should not be attributed to hydra.py itself.""" + presets_arg = {"env": {"backend": {"default": None, "newton_mjwarp": None}}, "agent": {}} + with warnings.catch_warnings(record=True) as caught: + warnings.simplefilter("always", FutureWarning) + parse_overrides(["presets=newton"], presets_arg) + deprecations = [w for w in caught if issubclass(w.category, FutureWarning)] + assert deprecations, "expected a FutureWarning from the legacy alias" + assert deprecations[0].filename != hydra_mod.__file__, ( + f"warning was attributed to hydra.py ({deprecations[0].filename}); _user_stacklevel should " + f"point outside the module" + ) + + +def test_collect_presets_root_level(): + """Root-level PresetCfg collected at path=''.""" + presets = collect_presets(RootAgentCfg()) + assert "" in presets + assert set(presets[""].keys()) == {"default", "fast"} + assert isinstance(presets[""]["default"], SampleAgentCfg) + assert presets[""]["fast"].max_iterations == 100 + + +# ============================================================================= +# Tests: parse_overrides +# ============================================================================= + + +def test_parse_overrides_mixed(): + """All override types categorized correctly.""" + env_cfg = PresetCfgEnvCfg() + presets = {"env": collect_presets(env_cfg), "agent": {}} + args = [ + "presets=fast", + "env.decimation=10", + "env.backend=newton_mjwarp", + "env.backend.dt=0.001", ] + global_p, sel, scalar, glob = parse_overrides(args, presets) + assert global_p == ["fast"] + assert ("env", "backend", "newton_mjwarp") in sel + assert ("env.backend.dt", "0.001") in scalar + assert "env.decimation=10" in glob + + +def test_parse_overrides_root_preset(): + """Root-level PresetCfg parsed as agent=.""" + presets = {"env": {}, "agent": collect_presets(RootAgentCfg())} + _, sel, _, _ = parse_overrides(["agent=fast"], presets) + assert sel == [("agent", "", "fast")] + + +# ============================================================================= +# Tests: apply_overrides -- PresetCfg (nested + broadcast + root) +# ============================================================================= + + +def test_presetcfg_auto_default(class_presets): + """'default' field auto-applied when no CLI override.""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], [], [], presets) + assert isinstance(env_cfg.backend, PhysxCfg) + assert isinstance(env_cfg.observations, NoiselessObservationsCfg) + assert isinstance(agent_cfg.policy, SmallPolicyCfg) + + +def test_presetcfg_cli_selection(class_presets): + """Path selection replaces with chosen preset.""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], [("env", "backend", "newton_mjwarp")], [], presets) + assert isinstance(env_cfg.backend, NewtonCfg) + assert env_cfg.backend.dt == 0.002 + + +def test_presetcfg_global_broadcast(class_presets): + """Global preset 'fast' broadcasts across env and agent PresetCfg fields.""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides(env_cfg, agent_cfg, hydra_cfg, ["fast"], [], [], presets) + assert isinstance(env_cfg.observations, FastObservationsCfg) + assert isinstance(agent_cfg.policy, FastPolicyCfg) + + +def test_presetcfg_path_selection_others_default(class_presets): + """Path preset on one field, others get auto-default.""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], [("env", "backend", "newton_mjwarp")], [], presets) + assert isinstance(env_cfg.backend, NewtonCfg) + assert isinstance(env_cfg.observations, NoiselessObservationsCfg) + assert isinstance(agent_cfg.policy, SmallPolicyCfg) + + +def test_root_presetcfg_auto_default(): + """Root-level PresetCfg auto-applies 'default'.""" + env_cfg, agent_cfg = _apply(SampleEnvCfg(), RootAgentCfg()) + assert isinstance(agent_cfg, SampleAgentCfg) + assert agent_cfg.max_iterations == 1000 + + +def test_root_presetcfg_cli_selection(): + """Root-level PresetCfg resolved via path selection.""" + env_cfg, agent_cfg = _apply(SampleEnvCfg(), RootAgentCfg(), preset_sel=[("agent", "", "fast")]) + assert isinstance(agent_cfg, SampleAgentCfg) + assert agent_cfg.max_iterations == 100 + assert agent_cfg.learning_rate == 1e-3 + + +def test_root_presetcfg_global_preset(): + """Root-level PresetCfg resolved via global preset.""" + env_cfg, agent_cfg = _apply(SampleEnvCfg(), RootAgentCfg(), global_presets=["fast"]) + assert isinstance(agent_cfg, SampleAgentCfg) + assert agent_cfg.max_iterations == 100 + + +# ============================================================================= +# Tests: nested PresetCfg inside PresetCfg +# ============================================================================= + + +def test_collect_nested_presetcfg(): + """PresetCfg inside another PresetCfg's alternatives is discovered.""" + presets = collect_presets(NestedPresetEnvCfg()) + assert "scene" in presets + assert set(presets["scene"].keys()) == {"default", "with_camera"} + assert "scene.camera" in presets + assert set(presets["scene.camera"].keys()) == {"small", "large", "default"} + assert isinstance(presets["scene.camera"]["small"], CameraSmallCfg) + assert isinstance(presets["scene.camera"]["large"], CameraLargeCfg) + + +def test_nested_presetcfg_pruned_when_parent_has_none(): + """When scene auto-defaults to default (camera=None), nested camera preset is pruned.""" + env_cfg, _ = _apply(NestedPresetEnvCfg()) + assert isinstance(env_cfg.scene, BaseSceneCfg) + assert env_cfg.scene.camera is None + + +def test_nested_presetcfg_auto_default_with_camera(): + """When with_camera scene is selected, camera auto-defaults to small (the default).""" + env_cfg, _ = _apply(NestedPresetEnvCfg(), global_presets=["with_camera"]) + assert isinstance(env_cfg.scene, BaseSceneCfg) + assert isinstance(env_cfg.scene.camera, CameraSmallCfg) + assert env_cfg.scene.camera.width == 64 + + +def test_nested_presetcfg_global_broadcast(): + """Global preset resolves both outer and nested PresetCfg.""" + env_cfg, _ = _apply(NestedPresetEnvCfg(), global_presets=["with_camera", "large"]) + assert isinstance(env_cfg.scene, BaseSceneCfg) + assert isinstance(env_cfg.scene.camera, CameraLargeCfg) + assert env_cfg.scene.camera.width == 256 + + +def test_nested_presetcfg_path_selection(): + """Path selection on nested PresetCfg resolves correctly.""" + sel = [("env", "scene", "with_camera"), ("env", "scene.camera", "large")] + env_cfg, _ = _apply(NestedPresetEnvCfg(), preset_sel=sel) + assert isinstance(env_cfg.scene, BaseSceneCfg) + assert isinstance(env_cfg.scene.camera, CameraLargeCfg) + assert env_cfg.scene.camera.width == 256 + + +# ============================================================================= +# Tests: root-level PresetCfg with nested PresetCfg inside alternatives +# (mirrors CartpoleCameraPresetsEnvCfg structure) +# ============================================================================= + + +@configclass +class RendererACfg: + backend: str = "rtx" + + +@configclass +class RendererBCfg: + backend: str = "warp" + + +@configclass +class RendererPresetCfg(PresetCfg): + default: RendererACfg = RendererACfg() + newton_renderer: RendererBCfg = RendererBCfg() + + +@configclass +class SensorBaseCfg: + data_types: list[str] = [] + width: int = 100 + height: int = 100 + renderer: RendererPresetCfg = RendererPresetCfg() + + +@configclass +class SensorPresetCfg(PresetCfg): + default: SensorBaseCfg = SensorBaseCfg(data_types=["rgb"]) + depth: SensorBaseCfg = SensorBaseCfg(data_types=["depth"]) + + +@configclass +class RootEnvBaseCfg: + decimation: int = 2 + sensor: SensorPresetCfg = SensorPresetCfg() + obs_shape: list[int] = [100, 100, 3] + + +@configclass +class RootPresetEnvCfg(PresetCfg): + default: RootEnvBaseCfg = RootEnvBaseCfg() + depth: RootEnvBaseCfg = RootEnvBaseCfg(obs_shape=[100, 100, 1]) + + +def test_root_presetcfg_with_nested_preset_collect(): + """collect_presets discovers nested PresetCfg inside root PresetCfg alternatives.""" + presets = collect_presets(RootPresetEnvCfg()) + assert "" in presets + assert set(presets[""].keys()) == {"default", "depth"} + assert "sensor" in presets + assert set(presets["sensor"].keys()) == {"default", "depth"} + assert "sensor.renderer" in presets + assert set(presets["sensor.renderer"].keys()) == {"default", "newton_renderer"} + + +def test_root_presetcfg_resolve_defaults(): + """resolve_presets resolves nested PresetCfg inside root.""" + resolved = resolve_presets(RootPresetEnvCfg()) + assert isinstance(resolved, RootEnvBaseCfg) + assert isinstance(resolved.sensor, SensorBaseCfg) + assert resolved.sensor.data_types == ["rgb"] + assert isinstance(resolved.sensor.renderer, RendererACfg) + assert resolved.sensor.renderer.backend == "rtx" + + +@configclass +class OptionalFeatureCfg: + buffer_size: int = 200 + export_path: str = "." + + +@configclass +class OptionalFeaturePresetCfg(PresetCfg): + default = None + enabled: OptionalFeatureCfg = OptionalFeatureCfg() + + +@configclass +class EnvWithOptionalFeatureCfg: + decimation: int = 4 + optional_feature: OptionalFeaturePresetCfg = OptionalFeaturePresetCfg() + + +def test_presetcfg_none_default_auto_applies(): + """PresetCfg with default=None auto-applies None without crashing.""" + env_cfg, _ = _apply(EnvWithOptionalFeatureCfg()) + assert env_cfg.optional_feature is None + + +def test_presetcfg_none_default_cli_selects_enabled(): + """PresetCfg with default=None can be overridden to a real config via CLI.""" + env_cfg = EnvWithOptionalFeatureCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + sel = [("env", "optional_feature", "enabled")] + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], sel, [], presets) + assert isinstance(env_cfg.optional_feature, OptionalFeatureCfg) + assert env_cfg.optional_feature.buffer_size == 200 + + +def test_root_presetcfg_global_depth_resolves_nested(): + """Global preset=depth on root PresetCfg also resolves nested sensor and renderer.""" + env_cfg, _ = _apply(RootPresetEnvCfg(), global_presets=["depth"]) + assert isinstance(env_cfg, RootEnvBaseCfg) + assert env_cfg.obs_shape == [100, 100, 1] + assert isinstance(env_cfg.sensor, SensorBaseCfg), ( + f"sensor should be SensorBaseCfg, got {type(env_cfg.sensor).__name__}" + ) + assert env_cfg.sensor.data_types == ["depth"] + assert isinstance(env_cfg.sensor.renderer, RendererACfg), ( + f"renderer should be RendererACfg (default), got {type(env_cfg.sensor.renderer).__name__}" + ) + + +# ============================================================================= +# Tests: scalar PresetCfg (e.g., armature=PresetCfg(default=0.0, newton_mjwarp=0.01)) +# ============================================================================= + + +@configclass +class ScalarPresetEnvCfg: + decimation: int = 4 + actuator: ActuatorWithPresetCfg = ActuatorWithPresetCfg() + + +def test_scalar_presetcfg_collect(): + """Scalar PresetCfg fields collected with correct values.""" + presets = collect_presets(ScalarPresetEnvCfg()) + assert "actuator.armature" in presets + assert presets["actuator.armature"]["default"] == 0.0 + assert presets["actuator.armature"]["newton_mjwarp"] == 0.01 + + +def test_scalar_presetcfg_resolve_default(): + """resolve_presets replaces scalar PresetCfg with its default value.""" + cfg = ScalarPresetEnvCfg() + resolved = resolve_presets(cfg) + assert resolved.actuator.armature == 0.0 + assert not isinstance(resolved.actuator.armature, PresetCfg) + + +def test_scalar_presetcfg_auto_default(): + """Scalar PresetCfg auto-applies default=0.0 when no CLI override.""" + env_cfg, _ = _apply(ScalarPresetEnvCfg()) + assert env_cfg.actuator.armature == 0.0 + + +def test_scalar_presetcfg_global_newton_mjwarp(): + """Global preset=newton_mjwarp replaces scalar PresetCfg with MJWarp value.""" + env_cfg, _ = _apply(ScalarPresetEnvCfg(), global_presets=["newton_mjwarp"]) + assert env_cfg.actuator.armature == 0.01 + + +def test_scalar_presetcfg_path_selection(): + """Path selection replaces scalar PresetCfg with chosen value.""" + env_cfg, _ = _apply(ScalarPresetEnvCfg(), preset_sel=[("env", "actuator.armature", "newton_mjwarp")]) + assert env_cfg.actuator.armature == 0.01 + assert env_cfg.actuator.stiffness == 40.0 + + +# ============================================================================= +# Tests: PresetCfg inside dict values (e.g., actuators["legs"].armature) +# ============================================================================= + + +@configclass +class RobotCfg: + prim_path: str = "/World/Robot" + actuators: dict = None + + def __post_init__(self): + if self.actuators is None: + self.actuators = {"legs": ActuatorWithPresetCfg()} + + +@configclass +class DictPresetEnvCfg: + decimation: int = 4 + robot: RobotCfg = RobotCfg() + + +def test_collect_presets_traverses_dict_values(): + """collect_presets finds PresetCfg inside dict-held configclass values.""" + cfg = DictPresetEnvCfg() + presets = collect_presets(cfg) + assert "robot.actuators.legs.armature" in presets + assert presets["robot.actuators.legs.armature"]["default"] == 0.0 + assert presets["robot.actuators.legs.armature"]["newton_mjwarp"] == 0.01 + + +def test_resolve_presets_traverses_dict_values(): + """resolve_presets resolves PresetCfg inside dict-held configclass values.""" + cfg = DictPresetEnvCfg() + resolved = resolve_presets(cfg) + assert resolved.robot.actuators["legs"].armature == 0.0 + assert not isinstance(resolved.robot.actuators["legs"].armature, PresetCfg) + + +def test_dict_preset_auto_default(): + """Dict-held PresetCfg auto-applies default when no CLI override.""" + env_cfg, _ = _apply(DictPresetEnvCfg()) + assert env_cfg.robot.actuators["legs"].armature == 0.0 + + +def test_dict_preset_global_newton_mjwarp(): + """Global preset=newton_mjwarp replaces dict-held scalar PresetCfg.""" + env_cfg, _ = _apply(DictPresetEnvCfg(), global_presets=["newton_mjwarp"]) + assert env_cfg.robot.actuators["legs"].armature == 0.01 + + +def test_dict_preset_path_selection(): + """Path selection replaces dict-held scalar PresetCfg.""" + env_cfg, _ = _apply(DictPresetEnvCfg(), preset_sel=[("env", "robot.actuators.legs.armature", "newton_mjwarp")]) + assert env_cfg.robot.actuators["legs"].armature == 0.01 + assert env_cfg.robot.actuators["legs"].stiffness == 40.0 + + +def test_dict_preset_with_factory(): + """preset() factory works inside dict-held configclass values.""" + + @configclass + class ActuatorCfgFactory: + joint_names: list = [".*"] + armature: object = None + + def __post_init__(self): + if self.armature is None: + self.armature = preset(default=0.0, newton_mjwarp=0.01, physx=0.0) + + @configclass + class RobotCfgFactory: + actuators: dict = None + + def __post_init__(self): + if self.actuators is None: + self.actuators = {"legs": ActuatorCfgFactory()} + + @configclass + class EnvCfgFactory: + robot: RobotCfgFactory = RobotCfgFactory() + + cfg = EnvCfgFactory() + presets = collect_presets(cfg) + assert "robot.actuators.legs.armature" in presets + assert presets["robot.actuators.legs.armature"]["default"] == 0.0 + assert presets["robot.actuators.legs.armature"]["newton_mjwarp"] == 0.01 + assert presets["robot.actuators.legs.armature"]["physx"] == 0.0 + + +# ============================================================================= +# Tests: rough terrain config regressions +# ============================================================================= + + +def test_go1_rough_newton_mjwarp_armature_preset(): + """Go1 rough terrain uses higher MJWarp armature without changing PhysX.""" + from isaaclab_tasks.manager_based.locomotion.velocity.config.go1.rough_env_cfg import UnitreeGo1RoughEnvCfg + + env_cfg, _ = _apply(UnitreeGo1RoughEnvCfg(), global_presets=["newton_mjwarp"]) + assert env_cfg.scene.robot.actuators["base_legs"].armature == 0.02 + + env_cfg, _ = _apply(UnitreeGo1RoughEnvCfg()) + assert env_cfg.scene.robot.actuators["base_legs"].armature == 0.0 + + +def test_go1_rough_legacy_newton_alias_resolves_to_newton_mjwarp(): + """Real-config alias path: ``presets=newton`` against an actual env cfg resolves to newton_mjwarp.""" + from isaaclab_tasks.manager_based.locomotion.velocity.config.go1.rough_env_cfg import UnitreeGo1RoughEnvCfg + + with pytest.warns(FutureWarning, match="Preset 'newton' is deprecated"): + env_cfg, _ = _apply(UnitreeGo1RoughEnvCfg(), global_presets=["newton"]) + assert env_cfg.scene.robot.actuators["base_legs"].armature == 0.02 + + +# ============================================================================= +# Tests: PresetCfg inside deeply nested dicts (e.g., event term params) +# ============================================================================= + + +def test_collect_presets_deep_nested_dicts(): + """collect_presets discovers PresetCfg inside dict->dict->configclass->dict chains.""" + cfg = DeepDictEnvCfg() + presets = collect_presets(cfg) + offset_path = "events.params.terms.step_one.params.offset" + fraction_path = "events.params.terms.step_one.params.fraction" + assert offset_path in presets, f"Expected '{offset_path}' in {list(presets.keys())}" + assert fraction_path in presets, f"Expected '{fraction_path}' in {list(presets.keys())}" + assert presets[offset_path]["task_a"] == (0.0, 0.0, 0.01) + assert presets[offset_path]["task_b"] == (0.02, 0.0, 0.005) + assert presets[fraction_path]["task_a"] == (0.05, 0.5) + assert presets[fraction_path]["task_b"] == (0.3, 1.0) + + +def test_resolve_presets_deep_nested_dicts(): + """resolve_presets resolves presets inside deeply nested dicts.""" + cfg = DeepDictEnvCfg() + resolved = resolve_presets(cfg) + inner = resolved.events.params["terms"]["step_one"] + assert inner.params["offset"] == (0.0, 0.0, 0.01) + assert inner.params["fraction"] == (0.05, 0.5) + assert not isinstance(inner.params["offset"], PresetCfg) + assert not isinstance(inner.params["fraction"], PresetCfg) + assert inner.params["robot_cfg"].joint_names is None + assert not isinstance(inner.params["robot_cfg"].joint_names, PresetCfg) + + +def test_deep_nested_dict_auto_default(): + """Deeply nested dict presets auto-apply default when no CLI override.""" + env_cfg, _ = _apply(DeepDictEnvCfg()) + inner = env_cfg.events.params["terms"]["step_one"] + assert inner.params["offset"] == (0.0, 0.0, 0.01) + assert inner.params["fraction"] == (0.05, 0.5) + + +def test_deep_nested_dict_global_preset(): + """Global preset=task_b replaces deeply nested dict presets.""" + env_cfg, _ = _apply(DeepDictEnvCfg(), global_presets=["task_b"]) + inner = env_cfg.events.params["terms"]["step_one"] + assert inner.params["offset"] == (0.02, 0.0, 0.005), f"offset should be task_b value, got {inner.params['offset']}" + assert inner.params["fraction"] == (0.3, 1.0), f"fraction should be task_b value, got {inner.params['fraction']}" + + +def test_deep_nested_dict_path_selection(): + """Path selection replaces a specific deeply nested dict preset.""" + sel = [("env", "events.params.terms.step_one.params.offset", "task_b")] + env_cfg, _ = _apply(DeepDictEnvCfg(), preset_sel=sel) + inner = env_cfg.events.params["terms"]["step_one"] + assert inner.params["offset"] == (0.02, 0.0, 0.005) + assert inner.params["fraction"] == (0.05, 0.5) + + +def test_deep_nested_dict_mixed_global_and_path(): + """Global preset applies to nested dicts, path selection overrides one.""" + sel = [("env", "events.params.terms.step_one.params.fraction", "task_a")] + env_cfg, _ = _apply(DeepDictEnvCfg(), global_presets=["task_b"], preset_sel=sel) + inner = env_cfg.events.params["terms"]["step_one"] + assert inner.params["offset"] == (0.02, 0.0, 0.005) + assert inner.params["fraction"] == (0.05, 0.5) + + +# ============================================================================= +# Tests: preset resolving to dict containing further presets +# ============================================================================= + + +def test_collect_presets_discovers_presets_inside_dict_valued_alternatives(): + """collect_presets must recurse into dict-valued preset alternatives to + discover further PresetCfg nodes nested inside them. + """ + cfg = PresetResolvesToDictEnvCfg() + presets = collect_presets(cfg) + offset_paths = [p for p in presets if "offset" in p] + fraction_paths = [p for p in presets if "fraction" in p] + assert len(offset_paths) > 0, ( + f"OffsetCfg inside dict-valued preset alternative not discovered. Found: {list(presets.keys())}" + ) + assert len(fraction_paths) > 0, ( + f"FractionCfg inside dict-valued preset alternative not discovered. Found: {list(presets.keys())}" + ) + + +def test_resolve_preset_resolving_to_dict_walks_contents(): + """When a preset resolves to a dict, presets inside that dict are also resolved. + + Also verifies that PresetCfg(default=None) nested inside the resolved dict + correctly resolves to None (not skipped). + """ + cfg = PresetResolvesToDictEnvCfg() + resolved = resolve_presets(cfg) + + terms = resolved.events.params["terms"] + assert isinstance(terms, dict), f"Expected dict, got {type(terms)}" + assert not isinstance(terms, PresetCfg), "Top-level preset was not resolved" + + for name, term in terms.items(): + entity = term.params["robot_cfg"] + assert not isinstance(entity.joint_names, PresetCfg), ( + f"PresetCfg leaked into {name}.params.robot_cfg.joint_names" + ) + assert entity.joint_names is None + assert not isinstance(term.params["offset"], PresetCfg) + assert not isinstance(term.params["fraction"], PresetCfg) + + +def test_resolve_preset_uses_class_level_override(): + """When a robot-specific module overrides PresetCfg.default at class level + after instances are created, resolve_presets picks up the override.""" + + @configclass + class BodyNameCfg(PresetCfg): + default: str = "generic_body" + + @configclass + class TermWithBody: + func: str = "some_fn" + params: dict = None + + def __post_init__(self): + if self.params is None: + self.params = {"cfg": EntityCfg(name="robot", joint_names=BodyNameCfg())} + + @configclass + class EnvWithBody: + events: TermWithBody = TermWithBody() + + BodyNameCfg.default = "robot_specific_body" + + cfg = EnvWithBody() + resolved = resolve_presets(cfg) + assert resolved.events.params["cfg"].joint_names == "robot_specific_body" + assert not isinstance(resolved.events.params["cfg"].joint_names, PresetCfg) + + +def test_resolve_presets_with_selected_name_in_deeply_nested_dict(): + """resolve_presets(cfg, {"task_b"}) must select task_b alternatives + for PresetCfg instances nested inside dict-valued preset alternatives. + """ + cfg = PresetResolvesToDictEnvCfg() + resolved = resolve_presets(cfg, {"task_b"}) + + terms = resolved.events.params["terms"] + assert isinstance(terms, dict) + for name, term in terms.items(): + assert term.params["offset"] == (0.02, 0.0, 0.005), ( + f"{name}: offset should be task_b, got {term.params['offset']}" + ) + assert term.params["fraction"] == (0.3, 1.0), ( + f"{name}: fraction should be task_b, got {term.params['fraction']}" + ) + + +# ============================================================================= +# Tests: preset() factory function +# ============================================================================= + + +def test_preset_factory_creates_presetcfg(): + """preset() returns a PresetCfg subclass instance with correct fields.""" + p = preset(default=0.0, high=1.0, low=-1.0) + assert isinstance(p, PresetCfg) + assert p.default == 0.0 + assert p.high == 1.0 + assert p.low == -1.0 + + +def test_preset_factory_collectable(): + """preset()-created instances are discovered by collect_presets.""" + + @configclass + class FactoryEnvCfg: + damping: object = None + + def __post_init__(self): + if self.damping is None: + self.damping = preset(default=5.0, high=20.0) + + cfg = FactoryEnvCfg() + presets = collect_presets(cfg) + assert "damping" in presets + assert presets["damping"]["default"] == 5.0 + assert presets["damping"]["high"] == 20.0 + + +def test_preset_factory_requires_default(): + """preset() raises ValueError when 'default' is not provided.""" + with pytest.raises(ValueError, match="default"): + preset(high=1.0, low=-1.0) + + +def test_preset_factory_string_values(): + """preset() works with string values.""" + p = preset(default="cpu", gpu="cuda:0") + assert isinstance(p, PresetCfg) + assert p.default == "cpu" + assert p.gpu == "cuda:0" + + +# ============================================================================= +# Tests: _collect_fields class-vs-instance priority +# ============================================================================= + + +def test_collect_fields_prefers_class_attr_over_instance(): + """Class-level attr mutations take priority over instance attrs in collection. + + This mirrors the pattern where robot-specific modules (e.g., joint_pos_env_cfg.py) + mutate PresetCfg class attributes after instances are already created. + """ + + @configclass + class MutablePresetCfg(PresetCfg): + default: str = "original_default" + alt: str = "alternative" + + instance = MutablePresetCfg() + assert instance.default == "original_default" + + MutablePresetCfg.default = "robot_specific_default" + + presets = collect_presets(instance) + assert "" in presets + assert presets[""]["default"] == "robot_specific_default" + + MutablePresetCfg.default = "original_default" + + +def test_collect_fields_includes_dynamic_class_attrs(): + """Fields added to PresetCfg class at runtime are discovered.""" + + @configclass + class ExtensiblePresetCfg(PresetCfg): + default: str = "base" + alt_a: str = "a" + + ExtensiblePresetCfg.alt_b = "b" + + instance = ExtensiblePresetCfg() + presets = collect_presets(instance) + assert "" in presets + assert "alt_b" in presets[""] + assert presets[""]["alt_b"] == "b" + + delattr(ExtensiblePresetCfg, "alt_b") + + +# ============================================================================= +# Tests: apply_overrides error handling +# ============================================================================= + + +def test_apply_overrides_unknown_preset_group_raises(): + """apply_overrides raises ValueError for unknown preset group paths.""" + env_cfg = PresetCfgEnvCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + with pytest.raises(ValueError, match="Unknown preset group"): + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], [("env", "nonexistent", "val")], [], presets) + + +def test_apply_overrides_unknown_preset_name_raises(): + """apply_overrides raises ValueError for unknown preset name.""" + env_cfg = PresetCfgEnvCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + with pytest.raises(ValueError, match="Unknown preset 'nonexistent'"): + apply_overrides(env_cfg, agent_cfg, hydra_cfg, [], [("env", "backend", "nonexistent")], [], presets) + + +def test_apply_overrides_conflicting_globals_raises(): + """Two global presets matching the same path cause ValueError.""" + + @configclass + class TwoAltsPresetCfg(PresetCfg): + default: str = "d" + opt_a: str = "a" + opt_b: str = "b" + + @configclass + class ConflictEnvCfg: + mode: TwoAltsPresetCfg = TwoAltsPresetCfg() + + env_cfg = ConflictEnvCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + with pytest.raises(ValueError, match="Conflicting global presets"): + apply_overrides(env_cfg, agent_cfg, hydra_cfg, ["opt_a", "opt_b"], [], [], presets) + + +def test_apply_overrides_aliased_globals_no_conflict(): + """Two global presets resolving to equal values do not raise. + + Mirrors the dexsuite ObjectCfg pattern where ``newton_mjwarp = cube`` creates + separate but equal dataclass instances after @configclass processing. + """ + + @configclass + class SharedCfg: + value: int = 42 + + cube_val = SharedCfg() + mjwarp_val = SharedCfg() + + @configclass + class AliasedPresetCfg(PresetCfg): + default: str = "d" + cube: SharedCfg = cube_val + newton_mjwarp: SharedCfg = mjwarp_val + + @configclass + class AliasedEnvCfg: + mode: AliasedPresetCfg = AliasedPresetCfg() + + env_cfg = AliasedEnvCfg() + agent_cfg = PresetCfgAgentCfg() + presets = {"env": collect_presets(env_cfg), "agent": collect_presets(agent_cfg)} + assert presets["env"]["mode"]["cube"] is not presets["env"]["mode"]["newton_mjwarp"] + assert presets["env"]["mode"]["cube"] == presets["env"]["mode"]["newton_mjwarp"] + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides(env_cfg, agent_cfg, hydra_cfg, ["cube", "newton_mjwarp"], [], [], presets) + assert env_cfg.mode == SharedCfg() + + +# ============================================================================= +# Tests: parse_overrides edge cases +# ============================================================================= + + +def test_parse_overrides_multiple_global_presets(): + """Multiple comma-separated global presets are split correctly.""" + presets = {"env": {"backend": {"default": None, "newton_mjwarp": None}}, "agent": {}} + global_p, _, _, _ = parse_overrides(["presets=fast,newton_mjwarp,debug"], presets) + assert global_p == ["fast", "newton_mjwarp", "debug"] + + +def test_parse_overrides_maps_legacy_newton_preset_to_newton_mjwarp(): + """Legacy ``newton`` preset selections resolve to ``newton_mjwarp`` when available.""" + presets = {"env": {"backend": {"default": None, "newton_mjwarp": None}}, "agent": {}} + legacy_name = "newton" + + global_p, sel, _, _ = parse_overrides(["presets=fast," + legacy_name, f"env.backend={legacy_name}"], presets) + + assert global_p == ["fast", "newton_mjwarp"] + assert sel == [("env", "backend", "newton_mjwarp")] + + +def test_parse_overrides_maps_legacy_kamino_preset_to_newton_kamino(): + """Legacy ``kamino`` preset selections resolve to ``newton_kamino`` when available.""" + presets = {"env": {"solver": {"default": None, "newton_kamino": None}}, "agent": {}} + legacy_name = "kamino" + + global_p, sel, _, _ = parse_overrides(["presets=" + legacy_name, f"env.solver={legacy_name}"], presets) + + assert global_p == ["newton_kamino"] + assert sel == [("env", "solver", "newton_kamino")] + + +def test_apply_overrides_resolves_legacy_alias_in_global_and_path_selection(class_presets): + """``apply_overrides`` resolves legacy names supplied directly (bypassing ``parse_overrides``).""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + with pytest.warns(FutureWarning, match="Preset 'newton' is deprecated"): + apply_overrides( + env_cfg, + agent_cfg, + hydra_cfg, + global_presets=["newton"], + preset_sel=[("env", "backend", "newton")], + preset_scalar=[], + presets=presets, + ) + assert isinstance(env_cfg.backend, NewtonCfg) + + +def test_apply_overrides_legacy_and_current_alias_do_not_conflict(class_presets): + """``presets=newton,newton_mjwarp`` (legacy + current) resolves to one preset, not a conflict.""" + env_cfg, agent_cfg, presets = class_presets + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + with pytest.warns(FutureWarning, match="Preset 'newton' is deprecated"): + apply_overrides(env_cfg, agent_cfg, hydra_cfg, ["newton", "newton_mjwarp"], [], [], presets) + assert isinstance(env_cfg.backend, NewtonCfg) + + +def test_parse_overrides_no_equals_treated_as_global_scalar(): + """Arguments without '=' are passed through as global scalars.""" + presets = {"env": {}, "agent": {}} + _, _, _, global_scalar = parse_overrides(["--flag", "positional"], presets) + assert "--flag" in global_scalar + assert "positional" in global_scalar + + +def test_parse_overrides_preset_scalar_detection(): + """Scalar within a preset path is detected as preset_scalar.""" + presets = {"env": {"backend": {"default": None}}, "agent": {}} + _, _, preset_scalar, _ = parse_overrides(["env.backend.dt=0.001", "env.backend.substeps=4"], presets) + assert ("env.backend.dt", "0.001") in preset_scalar + assert ("env.backend.substeps", "4") in preset_scalar + + +def test_parse_overrides_root_level_env_preset(): + """Root-level PresetCfg (path='') makes env= a valid preset selection.""" + presets = {"env": {"": {"default": None, "fast": None}}, "agent": {}} + _, sel, _, _ = parse_overrides(["env=fast"], presets) + assert sel == [("env", "", "fast")] + + +# ============================================================================= +# Tests: _parse_val +# ============================================================================= + + +def test_parse_val_types(): + """_parse_val converts strings to correct Python types.""" + from isaaclab_tasks.utils.hydra import _parse_val + + assert _parse_val("true") is True + assert _parse_val("True") is True + assert _parse_val("false") is False + assert _parse_val("none") is None + assert _parse_val("null") is None + assert _parse_val("42") == 42 + assert isinstance(_parse_val("42"), int) + assert _parse_val("3.14") == 3.14 + assert isinstance(_parse_val("3.14"), float) + assert _parse_val("hello") == "hello" + assert _parse_val('"quoted"') == "quoted" + assert _parse_val("'single'") == "single" + + +# ============================================================================= +# Tests: scalar override within preset path +# ============================================================================= + + +def test_scalar_override_within_preset_path(class_presets): + """Scalar overrides within preset paths are applied on top of the preset.""" + env_cfg, agent_cfg, presets = class_presets + env_cfg = resolve_presets(env_cfg) + agent_cfg = resolve_presets(agent_cfg) + hydra_cfg = {"env": env_cfg.to_dict(), "agent": agent_cfg.to_dict()} + apply_overrides( + env_cfg, + agent_cfg, + hydra_cfg, + [], + [("env", "backend", "newton_mjwarp")], + [("env.backend.dt", "0.001")], + presets, + ) + assert isinstance(env_cfg.backend, NewtonCfg) + assert env_cfg.backend.dt == 0.001 + assert env_cfg.backend.substeps == 4 + + +# ============================================================================= +# Tests: resolve_presets idempotency +# ============================================================================= + + +def test_resolve_presets_idempotent(): + """Calling resolve_presets twice yields the same result.""" + cfg = PresetCfgEnvCfg() + first = resolve_presets(cfg) + second = resolve_presets(first) + assert isinstance(second.backend, PhysxCfg) + assert isinstance(second.observations, NoiselessObservationsCfg) + assert second.backend.dt == first.backend.dt + + +def test_unknown_global_preset_name_detected(): + """A selected preset name that doesn't match any PresetCfg field is detected. + + This catches typos like presets=peg_insrt_4mm (missing 'e'). The validation + in register_task raises ValueError before resolution begins. + """ + cfg = PresetCfgEnvCfg() + presets = {"env": collect_presets(cfg), "agent": {}} + all_known = {name for alts in presets.values() for fields in alts.values() for name in fields if name != "default"} + + assert "newton_mjwarp" in all_known + assert "typo_preset" not in all_known + + +def test_resolve_presets_errors_on_no_default(): + """A PresetCfg with no 'default' field and no matching selected name + must raise ValueError, not silently linger or infinite loop.""" + + @configclass + class NoDefaultPreset(PresetCfg): + option_a: int = 1 + + @configclass + class EnvCfg: + mode: NoDefaultPreset = NoDefaultPreset() + + with pytest.raises(ValueError, match="no 'default' field"): + resolve_presets(EnvCfg()) + + +def test_resolve_presets_errors_on_chained_no_default(): + """A PresetCfg whose default is another PresetCfg with no 'default' + must raise ValueError on the inner preset.""" + + @configclass + class InnerNoDefault(PresetCfg): + option_a: int = 1 + + @configclass + class OuterPreset(PresetCfg): + default: InnerNoDefault = InnerNoDefault() + + @configclass + class EnvCfg: + mode: OuterPreset = OuterPreset() + + with pytest.raises(ValueError, match="no 'default' field"): + resolve_presets(EnvCfg()) + + +def test_resolve_presets_errors_on_cyclic_preset(): + """Cyclic PresetCfg chain (A.default -> B, B.default -> A) must raise + ValueError instead of looping forever.""" + + @configclass + class CyclicB(PresetCfg): + pass + + @configclass + class CyclicA(PresetCfg): + default: CyclicB = CyclicB() + + CyclicA.default = CyclicB() + CyclicB.default = CyclicA() + + @configclass + class EnvCfg: + mode: CyclicA = CyclicA() + + with pytest.raises(ValueError, match="[Cc]ycl"): + resolve_presets(EnvCfg()) + + +def test_resolve_presets_errors_on_cyclic_preset_at_root(): + """Cyclic PresetCfg at root level must raise ValueError, not RecursionError.""" + + @configclass + class RootCyclicB(PresetCfg): + pass + + @configclass + class RootCyclicA(PresetCfg): + default: RootCyclicB = RootCyclicB() + + RootCyclicA.default = RootCyclicB() + RootCyclicB.default = RootCyclicA() - @hydra_task_config_test("Isaac-Velocity-Flat-H1-v0", "rsl_rl_cfg_entry_point") - def main(env_cfg, agent_cfg): - # env - assert env_cfg.decimation == 42 - assert env_cfg.events.physics_material.params["asset_cfg"].joint_ids == slice(0, 1, 2) - assert env_cfg.scene.robot.init_state.joint_vel == {".*": 4.0} - assert env_cfg.rewards.feet_air_time is None - # agent - assert agent_cfg.max_iterations == 3 - - main() - # clean up - sys.argv = [sys.argv[0]] - hydra.core.global_hydra.GlobalHydra.instance().clear() - - -def test_nested_iterable_dict(): - """Test the hydra configuration system when dict is nested in an Iterable.""" - - @hydra_task_config_test("Isaac-Lift-Cube-Franka-v0", "rsl_rl_cfg_entry_point") - def main(env_cfg, agent_cfg): - # env - assert env_cfg.scene.ee_frame.target_frames[0].name == "end_effector" - assert env_cfg.scene.ee_frame.target_frames[0].offset.pos[2] == 0.1034 - - main() - # clean up - sys.argv = [sys.argv[0]] - hydra.core.global_hydra.GlobalHydra.instance().clear() + with pytest.raises(ValueError, match="[Cc]ycl"): + resolve_presets(RootCyclicA()) diff --git a/source/isaaclab_tasks/test/test_lazy_export_stubs.py b/source/isaaclab_tasks/test/test_lazy_export_stubs.py new file mode 100644 index 000000000000..b7e9a824d727 --- /dev/null +++ b/source/isaaclab_tasks/test/test_lazy_export_stubs.py @@ -0,0 +1,208 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Verify lazy_export() call-site conventions and _parse_stub behaviour. + +Every ``__init__.py`` that calls ``lazy_export()`` should pass no arguments. +Fallback packages and wildcard re-exports are inferred from the ``.pyi`` +stub. Passing ``packages=`` is deprecated and indicates a stub that has +not been updated with the corresponding ``from pkg import *`` line. + +This test is purely static (AST-based) and requires no simulator. +""" + +import ast +import os +import tempfile +from pathlib import Path + +import pytest + +from isaaclab.utils.module import _parse_stub + +_SOURCE_ROOT = Path(__file__).resolve().parent.parent.parent + + +def _find_lazy_export_calls() -> list[tuple[Path, int, str]]: + """Return ``(file, lineno, source_line)`` for every ``lazy_export(...)`` with args.""" + results: list[tuple[Path, int, str]] = [] + for root, _dirs, files in os.walk(_SOURCE_ROOT): + for fname in files: + if fname != "__init__.py": + continue + path = Path(root) / fname + try: + source = path.read_text() + except OSError: + continue + if "lazy_export" not in source: + continue + + tree = ast.parse(source, filename=str(path)) + for node in ast.walk(tree): + if not isinstance(node, ast.Call): + continue + func = node.func + is_lazy_export = (isinstance(func, ast.Attribute) and func.attr == "lazy_export") or ( + isinstance(func, ast.Name) and func.id == "lazy_export" + ) + if not is_lazy_export: + continue + if node.args or node.keywords: + line = source.splitlines()[node.lineno - 1].strip() + results.append((path, node.lineno, line)) + + return sorted(results) + + +_VIOLATIONS = _find_lazy_export_calls() +_IDS = [f"{p.relative_to(_SOURCE_ROOT)}:{lineno}" for p, lineno, _ in _VIOLATIONS] + + +@pytest.mark.parametrize("violation", _VIOLATIONS or [None], ids=_IDS or ["no-violations"]) +def test_lazy_export_has_no_args(violation: tuple[Path, int, str] | None): + """lazy_export() must be called with no arguments.""" + if violation is None: + return + path, lineno, line = violation + pytest.fail( + f"{path.relative_to(_SOURCE_ROOT)}:{lineno}: {line}\n\n" + "lazy_export() should take no arguments. Move fallback packages into\n" + "the .pyi stub as 'from import *' and remove the packages= arg." + ) + + +def test_no_lazy_export_violations_found(): + """Canary: confirm we actually scanned files (guard against broken discovery).""" + init_count = sum( + 1 + for root, _dirs, files in os.walk(_SOURCE_ROOT) + for f in files + if f == "__init__.py" and "lazy_export" in (Path(root) / f).read_text(errors="ignore") + ) + assert init_count > 0, "No __init__.py files with lazy_export() found — discovery may be broken" + + +# --------------------------------------------------------------------------- +# _parse_stub unit tests +# --------------------------------------------------------------------------- + + +def _write_stub(content: str) -> str: + """Write *content* to a temporary ``.pyi`` file and return its path.""" + fd, path = tempfile.mkstemp(suffix=".pyi") + with os.fdopen(fd, "w") as f: + f.write(content) + return path + + +def test_parse_stub_single_absolute_named_import(): + """Test single absolute named import extraction.""" + stub = _write_stub("from some.package import alpha, beta\n") + try: + _, _, _, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert "some.package" in absolute_named + assert absolute_named["some.package"] == ["alpha", "beta"] + + +def test_parse_stub_multiple_absolute_named_imports(): + """Test multiple absolute named imports from different packages.""" + stub = _write_stub("from pkg_a import foo\nfrom pkg_b import bar, baz\n") + try: + _, _, _, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert absolute_named["pkg_a"] == ["foo"] + assert absolute_named["pkg_b"] == ["bar", "baz"] + + +def test_parse_stub_same_package_multiple_lines_accumulates(): + """Test that imports from the same package on multiple lines accumulate.""" + stub = _write_stub("from pkg import a\nfrom pkg import b, c\n") + try: + _, _, _, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert absolute_named["pkg"] == ["a", "b", "c"] + + +def test_parse_stub_absolute_wildcard_not_in_absolute_named(): + """Test that absolute wildcard imports go to fallbacks, not absolute_named.""" + stub = _write_stub("from some.package import *\n") + try: + _, fallbacks, _, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert "some.package" in fallbacks + assert absolute_named == {} + + +def test_parse_stub_relative_import_not_in_absolute_named(): + """Test that relative imports are not included in absolute_named.""" + stub = _write_stub("from .sub import foo, bar\n") + try: + _, _, _, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert absolute_named == {} + + +def test_parse_stub_mixed_import_kinds(): + """All four import kinds in one stub are routed correctly.""" + stub = _write_stub( + "from .local import thing\nfrom .wildmod import *\nfrom abs.pkg import *\nfrom abs.other import x, y\n" + ) + try: + filtered_path, fallbacks, rel_wildcards, absolute_named = _parse_stub(stub) + finally: + if filtered_path is not None: + os.unlink(filtered_path) + os.unlink(stub) + + assert fallbacks == ["abs.pkg"] + assert rel_wildcards == ["wildmod"] + assert absolute_named == {"abs.other": ["x", "y"]} + assert filtered_path is not None + + +def test_parse_stub_no_imports_returns_empty(): + """Test that a stub with no imports returns empty collections.""" + stub = _write_stub("X: int\n") + try: + filtered_path, fallbacks, rel_wildcards, absolute_named = _parse_stub(stub) + finally: + os.unlink(stub) + + assert filtered_path is None + assert fallbacks == [] + assert rel_wildcards == [] + assert absolute_named == {} + + +def test_parse_stub_filtered_stub_excludes_absolute_named(): + """Absolute named imports must not leak into the filtered stub. + + The filtered stub is passed to lazy_loader which only handles relative named imports. + """ + stub = _write_stub("from .local import thing\nfrom abs.pkg import alpha\n") + try: + filtered_path, _, _, _ = _parse_stub(stub) + assert filtered_path is not None + with open(filtered_path) as f: + content = f.read() + assert "alpha" not in content + assert "abs" not in content + assert "thing" in content + finally: + if filtered_path is not None: + os.unlink(filtered_path) + os.unlink(stub) diff --git a/source/isaaclab_tasks/test/test_lift_teddy_bear.py b/source/isaaclab_tasks/test/test_lift_teddy_bear.py index e131e0357498..392b756357f4 100644 --- a/source/isaaclab_tasks/test/test_lift_teddy_bear.py +++ b/source/isaaclab_tasks/test/test_lift_teddy_bear.py @@ -5,14 +5,6 @@ """Launch Isaac Sim Simulator first.""" -import sys - -# Import pinocchio in the main script to force the use of the dependencies -# installed by IsaacLab and not the one installed by Isaac Sim. -# pinocchio is required by the Pink IK controller -if sys.platform != "win32": - import pinocchio # noqa: F401 - from isaaclab.app import AppLauncher # launch the simulator with specific settings for teddy bear environment @@ -31,7 +23,7 @@ from env_test_utils import _run_environments # isort: skip -@pytest.mark.parametrize("num_envs, device", [(32, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) def test_lift_teddy_bear_environment(num_envs, device): """Test the Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0 environment in isolation.""" task_name = "Isaac-Lift-Teddy-Bear-Franka-IK-Abs-v0" diff --git a/source/isaaclab_tasks/test/test_multi_agent_environments.py b/source/isaaclab_tasks/test/test_multi_agent_environments.py index 478cb3942e10..a6608fb8f95d 100644 --- a/source/isaaclab_tasks/test/test_multi_agent_environments.py +++ b/source/isaaclab_tasks/test/test_multi_agent_environments.py @@ -22,7 +22,7 @@ from env_test_utils import _check_random_actions, setup_environment # isort: skip -@pytest.mark.parametrize("num_envs, device", [(32, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) @pytest.mark.parametrize("task_name", setup_environment(multi_agent=True)) def test_environments(task_name, num_envs, device): """Run all environments with given parameters and check environments return valid signals.""" diff --git a/source/isaaclab_tasks/test/test_newton_solver_preset_names.py b/source/isaaclab_tasks/test/test_newton_solver_preset_names.py new file mode 100644 index 000000000000..a1324273f67d --- /dev/null +++ b/source/isaaclab_tasks/test/test_newton_solver_preset_names.py @@ -0,0 +1,138 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Regression checks for the public Newton-backend solver preset names. + +These tests prevent reintroduction of the legacy preset names ``newton`` and +``kamino``, which were renamed to ``newton_mjwarp`` and ``newton_kamino`` to +disambiguate from the Newton backend label, package, and visualizer (which all +retain the bare ``newton`` spelling). +""" + +from __future__ import annotations + +import re +from pathlib import Path + +_REPO_ROOT = Path(__file__).resolve().parents[3] + +_TEXT_ROOTS = ( + _REPO_ROOT / "docs" / "source", + _REPO_ROOT / "scripts", + _REPO_ROOT / "source" / "isaaclab_tasks", + _REPO_ROOT / "source" / "isaaclab", + _REPO_ROOT / "source" / "isaaclab_visualizers", +) +_TEXT_SUFFIXES = {".py", ".rst", ".md"} + +# Files that intentionally reference the deprecated names (deprecation tests, +# the alias map, and this scanner's own pattern strings). +_SCAN_EXCLUDE_FILES = frozenset( + { + "test_hydra.py", + "test_newton_solver_preset_names.py", + "hydra.py", + } +) + +# Legacy preset names that must not appear as public preset references in +# user-facing surfaces. Word-boundary anchors (``\b``) keep ``newton_mjwarp``, +# ``newton_renderer``, ``newton_kamino`` and similar prefixed names from +# matching, since ``_`` is a Python word character. +_LEGACY_TOKEN = r"(?:newton|kamino)" +_LEGACY_PRESET_PATTERNS = ( + # CLI: ``presets=...,newton`` or ``presets=newton,...`` + re.compile(rf"presets=[^\s`]*\b{_LEGACY_TOKEN}\b"), + # CLI: ``env.=newton`` or ``env.=kamino`` + re.compile(rf"\benv\.[\w.]*={_LEGACY_TOKEN}\b"), + # Dict-literal preset entry, e.g. ``"newton": NewtonCfg(...)`` + re.compile(rf"""[\"']{_LEGACY_TOKEN}[\"']\s*:\s*\w*Cfg\b"""), + # ``PresetCfg`` field declaration, e.g. `` newton: NewtonCfg = ...`` + re.compile(rf"^\s+{_LEGACY_TOKEN}\s*:\s*[A-Za-z_][\w.]*Cfg\b", re.MULTILINE), + # ``PresetCfg`` field assignment, e.g. `` kamino = NewtonCfg(...)`` + re.compile(rf"^\s+{_LEGACY_TOKEN}\s*=\s*[A-Za-z_][\w.]*Cfg\(", re.MULTILINE), +) + +# RST inline literals are caught only for files known to enumerate per-task +# preset choices, where bare ``newton``/``kamino`` literals must refer to the +# (renamed) physics preset rather than the Newton backend, package, or +# visualizer that share the spelling. +_PRESET_TABLE_FILES = ("docs/source/overview/environments.rst",) +_LEGACY_RST_LITERAL_PATTERN = re.compile(rf"``{_LEGACY_TOKEN}``") + +# Field declaration scoped to physics-related Cfg classes only (used by the +# task-level scanner to keep its message focused on env config files). +_LEGACY_PHYSICS_FIELD_PATTERN = re.compile( + rf"^\s+{_LEGACY_TOKEN}\s*(?::\s*(?:NewtonCfg|SimulationCfg)\b|=\s*(?:NewtonCfg|SimulationCfg)\()", + re.MULTILINE, +) + + +def _iter_text_files() -> list[Path]: + files: list[Path] = [] + for root in _TEXT_ROOTS: + files.extend(path for path in root.rglob("*") if path.suffix in _TEXT_SUFFIXES) + return sorted(files) + + +def test_public_examples_use_renamed_preset_names() -> None: + """Public examples should use ``newton_mjwarp``/``newton_kamino``, not the legacy names.""" + files = _iter_text_files() + assert files, f"no text files scanned -- _TEXT_ROOTS likely stale: {_TEXT_ROOTS!r}" + + offenders: list[str] = [] + for path in files: + if "CHANGELOG" in path.name or "changelog.d" in path.parts: + continue + if path.name in _SCAN_EXCLUDE_FILES: + continue + text = path.read_text(encoding="utf-8") + for pattern in _LEGACY_PRESET_PATTERNS: + for match in pattern.finditer(text): + line = text.count("\n", 0, match.start()) + 1 + offenders.append(f"{path.relative_to(_REPO_ROOT)}:{line}: {match.group(0)}") + if path.relative_to(_REPO_ROOT).as_posix() in _PRESET_TABLE_FILES: + for match in _LEGACY_RST_LITERAL_PATTERN.finditer(text): + line = text.count("\n", 0, match.start()) + 1 + offenders.append(f"{path.relative_to(_REPO_ROOT)}:{line}: {match.group(0)}") + + assert not offenders, "Legacy Newton-backend solver preset references found:\n" + "\n".join(offenders) + + +def test_rendering_test_utils_maps_newton_label_to_newton_mjwarp() -> None: + """Golden-image fixtures keep the ``"newton"`` backend label; the helper must + map it to the ``"newton_mjwarp"`` preset so Hydra resolves the right config. + """ + import sys + + import pytest + + pytest.importorskip("torch") # rendering_test_utils imports torch eagerly + + sys.path.insert(0, str(Path(__file__).resolve().parent)) + try: + from rendering_test_utils import _physics_preset_name + finally: + sys.path.pop(0) + + assert _physics_preset_name("newton") == "newton_mjwarp" + assert _physics_preset_name("physx") == "physx" + assert _physics_preset_name("ovphysx") == "ovphysx" + + +def test_task_physics_presets_use_renamed_field_names() -> None: + """Task ``PresetCfg`` field names should use ``newton_mjwarp`` / ``newton_kamino``.""" + tasks_root = _REPO_ROOT / "source" / "isaaclab_tasks" / "isaaclab_tasks" + files = sorted(tasks_root.rglob("*.py")) + assert files, f"no task config files scanned at {tasks_root}" + + offenders: list[str] = [] + for path in files: + text = path.read_text(encoding="utf-8") + for match in _LEGACY_PHYSICS_FIELD_PATTERN.finditer(text): + line = text.count("\n", 0, match.start()) + 1 + offenders.append(f"{path.relative_to(_REPO_ROOT)}:{line}: {match.group(0).strip()}") + + assert not offenders, "Legacy Newton-backend solver field declarations found:\n" + "\n".join(offenders) diff --git a/source/isaaclab_tasks/test/test_pickplace_stack_environments.py b/source/isaaclab_tasks/test/test_pickplace_stack_environments.py new file mode 100644 index 000000000000..cbc10c272b73 --- /dev/null +++ b/source/isaaclab_tasks/test/test_pickplace_stack_environments.py @@ -0,0 +1,50 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import sys + +# Import pinocchio in the main script to force the use of the dependencies +# installed by IsaacLab and not the one installed by Isaac Sim. +# pinocchio is required by the Pink IK controller used in some pick-place environments +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +# TODO: 2-env tests are too slow for pick-place environments +# @pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize("num_envs, device", [(1, "cuda")]) +@pytest.mark.parametrize( + "task_name", + setup_environment( + include_play=False, + factory_envs=False, + multi_agent=False, + teleop_envs=False, + cartpole_showcase_envs=False, + pickplace_stack_envs=True, + ), +) +@pytest.mark.isaacsim_ci +def test_pickplace_stack_environments(task_name, num_envs, device): + # run PickPlace and Stack environments without stage in memory + _run_environments(task_name, device, num_envs, create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_preset_kit_decision.py b/source/isaaclab_tasks/test/test_preset_kit_decision.py new file mode 100644 index 000000000000..62c51caaa73d --- /dev/null +++ b/source/isaaclab_tasks/test/test_preset_kit_decision.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for preset resolution and Kit decision logic. + +These tests verify that given presets (e.g. ``presets=newton_mjwarp,ovrtx_renderer``), +the config-based logic correctly decides whether Isaac Sim Kit is needed. +No Kit/GPU required — safe for CI and beginners. +""" + +import sys + +import isaaclab_tasks # noqa: F401 +from isaaclab_tasks.utils import compute_kit_requirements, resolve_task_config + +_CAMERA_PRESETS_TASK = "Isaac-Cartpole-Camera-Presets-Direct-v0" + + +def _resolve_with_presets(presets: str): + """Resolve env_cfg with given presets. Modifies sys.argv temporarily.""" + old_argv = sys.argv.copy() + try: + sys.argv = [sys.argv[0], f"presets={presets}"] + env_cfg, agent_cfg = resolve_task_config(_CAMERA_PRESETS_TASK, "rl_games_cfg_entry_point") + return env_cfg + finally: + sys.argv = old_argv + + +def test_preset_mjwarp_ovrtx_does_not_need_kit(): + """Newton + OVRTX renderer is kitless — no AppLauncher required.""" + env_cfg = _resolve_with_presets("newton_mjwarp,ovrtx_renderer") + needs_kit, _, _ = compute_kit_requirements(env_cfg) + assert needs_kit is False + + +def test_preset_mjwarp_newton_renderer_does_not_need_kit(): + """Newton + Newton Warp renderer is kitless.""" + env_cfg = _resolve_with_presets("newton_mjwarp,newton_renderer") + needs_kit, _, _ = compute_kit_requirements(env_cfg) + assert needs_kit is False + + +def test_preset_physx_needs_kit(): + """PhysX physics requires Kit.""" + env_cfg = _resolve_with_presets("physx") + needs_kit, _, _ = compute_kit_requirements(env_cfg) + assert needs_kit is True + + +def test_preset_default_needs_kit(): + """Default (PhysX + Isaac RTX) requires Kit.""" + env_cfg = _resolve_with_presets("default") + needs_kit, _, _ = compute_kit_requirements(env_cfg) + assert needs_kit is True + + +def test_preset_mjwarp_isaac_rtx_needs_kit(): + """Newton + Isaac RTX renderer requires Kit (RTX runs in Kit).""" + env_cfg = _resolve_with_presets("newton_mjwarp,isaacsim_rtx_renderer") + needs_kit, _, _ = compute_kit_requirements(env_cfg) + assert needs_kit is True diff --git a/source/isaaclab_tasks/test/test_record_video.py b/source/isaaclab_tasks/test/test_record_video.py index a84eb846e887..bb1d756fcfb7 100644 --- a/source/isaaclab_tasks/test/test_record_video.py +++ b/source/isaaclab_tasks/test/test_record_video.py @@ -19,7 +19,7 @@ import pytest import torch -import omni.usd +import isaaclab.sim as sim_utils import isaaclab_tasks # noqa: F401 from isaaclab_tasks.utils import parse_env_cfg @@ -28,6 +28,9 @@ from env_test_utils import setup_environment # isort: skip +pytestmark = pytest.mark.isaacsim_ci + + @pytest.fixture(scope="function") def setup_video_params(): # common parameters @@ -45,7 +48,7 @@ def test_record_video(task_name, setup_video_params): num_envs, device, step_trigger, video_length = setup_video_params videos_dir = os.path.join(os.path.dirname(__file__), "output", "videos", "train") # create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # parse configuration env_cfg = parse_env_cfg(task_name, device=device, num_envs=num_envs) diff --git a/source/isaaclab_tasks/test/test_rendering_cartpole.py b/source/isaaclab_tasks/test/test_rendering_cartpole.py new file mode 100644 index 000000000000..6f10a80a1d4b --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_cartpole.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rendering correctness tests for Cartpole environment backend combinations.""" + +# Launch Isaac Sim Simulator first for kit-based combinations. +from isaaclab.app import AppLauncher + +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from pathlib import Path # noqa: E402 + +import pytest # noqa: E402 +from rendering_test_utils import ( # noqa: E402 + PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + rendering_test_cartpole, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_cartpole(physics_backend, renderer, data_type): + """Test cartpole environment rendering correctness.""" + rendering_test_cartpole(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rendering_cartpole_kitless.py b/source/isaaclab_tasks/test/test_rendering_cartpole_kitless.py new file mode 100644 index 000000000000..802ecfd32cfc --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_cartpole_kitless.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit-less rendering correctness tests for Cartpole environment backend combinations.""" + +from pathlib import Path + +import pytest +from rendering_test_utils import ( + KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + make_require_ovrtx_install_fixture, + rendering_test_cartpole, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) +_require_ovrtx_install_fixture = make_require_ovrtx_install_fixture() + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_cartpole_kitless(physics_backend, renderer, data_type): + """Camera output must match golden images (Cartpole camera presets env).""" + rendering_test_cartpole(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka.py b/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka.py new file mode 100644 index 000000000000..0400c3386e63 --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rendering correctness tests for Dexsuite Kuka-Allegro Lift backend combinations.""" + +# Launch Isaac Sim Simulator first for kit-based combinations. +from isaaclab.app import AppLauncher + +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from pathlib import Path # noqa: E402 + +import pytest # noqa: E402 +from rendering_test_utils import ( # noqa: E402 + PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + rendering_test_dexsuite_kuka, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_dexsuite_kuka(physics_backend, renderer, data_type): + """Test dexsuite kuka allegro lift environment rendering correctness.""" + rendering_test_dexsuite_kuka(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka_kitless.py b/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka_kitless.py new file mode 100644 index 000000000000..15afbee806b1 --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_dexsuite_kuka_kitless.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit-less rendering correctness tests for Dexsuite Kuka-Allegro Lift backend combinations.""" + +from pathlib import Path + +import pytest +from rendering_test_utils import ( + KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + make_require_ovrtx_install_fixture, + rendering_test_dexsuite_kuka, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) +_require_ovrtx_install_fixture = make_require_ovrtx_install_fixture() + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_dexsuite_kuka_kitless(physics_backend, renderer, data_type): + """Camera output must match golden images (Dexsuite Kuka-Allegro Lift, single camera).""" + rendering_test_dexsuite_kuka(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rendering_registered_tasks.py b/source/isaaclab_tasks/test/test_rendering_registered_tasks.py new file mode 100644 index 000000000000..f1b208f4d727 --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_registered_tasks.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rendering correctness tests for camera-based registered tasks.""" + +# Launch Isaac Sim Simulator first for kit-based combinations. +from isaaclab.app import AppLauncher + +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from pathlib import Path # noqa: E402 +from typing import Any # noqa: E402 + +import gymnasium as gym # noqa: E402 +import pytest # noqa: E402 +import torch # noqa: E402 +from rendering_test_utils import ( # noqa: E402 + MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + maybe_save_stage, + validate_camera_outputs, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) + + +def _collect_camera_outputs(env: object) -> dict[str, dict[str, torch.Tensor]]: + """Collect camera outputs from env.scene.sensors.""" + base = getattr(env, "unwrapped", env) + outputs = {} + + scene = getattr(base, "scene", None) + if scene is not None: + sensors = getattr(scene, "sensors", None) + if sensors is not None: + for name, sensor in sensors.items(): + data = getattr(sensor, "data", None) + output = getattr(data, "output", None) if data is not None else None + if not isinstance(output, dict): + continue + + tensor_output = {k: v for k, v in output.items() if isinstance(v, torch.Tensor) and v.numel() > 0} + if tensor_output: + outputs[name] = tensor_output + + return outputs + + +# Task IDs that expose camera/tiled_camera image observations; each is validated for non-blank rendering. +# The max different pixels percentage is set based on the screen space taken up by the env. +_RENDER_CORRECTNESS_TASK_IDS = [ + ("Isaac-Cartpole-Albedo-Camera-Direct-v0", "cartpole"), + ("Isaac-Cartpole-Camera-Presets-Direct-v0", "cartpole"), + ("Isaac-Cartpole-Depth-Camera-Direct-v0", "cartpole"), + ("Isaac-Cartpole-RGB-Camera-Direct-v0", "cartpole"), + ("Isaac-Cartpole-SimpleShading-Constant-Camera-Direct-v0", "cartpole"), + ("Isaac-Cartpole-SimpleShading-Diffuse-Camera-Direct-v0", "cartpole"), + ("Isaac-Cartpole-SimpleShading-Full-Camera-Direct-v0", "cartpole"), + pytest.param( + "Isaac-Repose-Cube-Shadow-Vision-Direct-v0", + "shadow_hand", + # The Shadow-Vision render is right at the SSIM/diff-pixel tolerance and intermittently + # exceeds the 3% diff threshold by a fraction of a percent. Allow up to 3 attempts and + # require at least one pass while we tighten the validation tolerances for this scene. + marks=pytest.mark.flaky(max_runs=3, min_passes=1), + ), +] + + +@pytest.mark.parametrize("task_id, env_name", _RENDER_CORRECTNESS_TASK_IDS) +def test_rendering_registered_tasks(task_id: str, env_name: str): + """Test registered tasks rendering correctness.""" + env = None + + try: + from isaaclab_tasks.utils.parse_cfg import parse_env_cfg + + env_cfg = parse_env_cfg(task_id, num_envs=4) + + env = gym.make(task_id, cfg=env_cfg) + unwrapped: Any = env.unwrapped + sim = getattr(unwrapped, "sim", None) + if sim is not None: + sim._app_control_on_stop_handle = None + + maybe_save_stage(f"registered_tasks_{task_id}", "default_physics", "default_renderer", "stage") + + camera_outputs_nested_dict = _collect_camera_outputs(env) + num_camera_outputs = len(camera_outputs_nested_dict) + assert num_camera_outputs == 1, f"[{task_id}] Expected 1 camera output, got {num_camera_outputs}." + + camera_outputs = next(iter(camera_outputs_nested_dict.values())) + + validate_camera_outputs( + f"registered_tasks/{task_id}", + "default_physics", + "default_renderer", + camera_outputs, + max_different_pixels_percentage=MAX_DIFFERENT_PIXELS_PERCENTAGE_BY_ENV_NAME[env_name], + comparison_scores=_COMPARISON_SCORES, + ) + finally: + if env is not None: + env.close() + + # This invokes camera sensor and renderer cleanup explicitly before pytest teardown, otherwise OV + # native code could probably complain about leaks and trigger segmentation fault. + env = None diff --git a/source/isaaclab_tasks/test/test_rendering_shadow_hand.py b/source/isaaclab_tasks/test/test_rendering_shadow_hand.py new file mode 100644 index 000000000000..b49bff692170 --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_shadow_hand.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rendering correctness tests for Shadow Hand environment backend combinations.""" + +# Launch Isaac Sim Simulator first for kit-based combinations. +from isaaclab.app import AppLauncher + +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from pathlib import Path # noqa: E402 + +import pytest # noqa: E402 +from rendering_test_utils import ( # noqa: E402 + PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + rendering_test_shadow_hand, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_shadow_hand(physics_backend, renderer, data_type): + """Test shadow hand environment rendering correctness.""" + rendering_test_shadow_hand(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rendering_shadow_hand_kitless.py b/source/isaaclab_tasks/test/test_rendering_shadow_hand_kitless.py new file mode 100644 index 000000000000..2244dcce5fab --- /dev/null +++ b/source/isaaclab_tasks/test/test_rendering_shadow_hand_kitless.py @@ -0,0 +1,33 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit-less rendering correctness tests for Shadow Hand environment backend combinations.""" + +from pathlib import Path + +import pytest +from rendering_test_utils import ( + KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS, + make_attach_comparison_properties_fixture, + make_determinism_fixture, + make_generate_html_report_fixture, + make_require_ovrtx_install_fixture, + rendering_test_shadow_hand, +) + +pytestmark = pytest.mark.isaacsim_ci + +_COMPARISON_SCORES: list[dict] = [] + +_determinism_fixture = make_determinism_fixture() +_generate_html_report_fixture = make_generate_html_report_fixture(_COMPARISON_SCORES, Path(__file__).stem + ".html") +_attach_comparison_properties_fixture = make_attach_comparison_properties_fixture(_COMPARISON_SCORES) +_require_ovrtx_install_fixture = make_require_ovrtx_install_fixture() + + +@pytest.mark.parametrize("physics_backend,renderer,data_type", KITLESS_PHYSICS_RENDERER_AOV_COMBINATIONS) +def test_rendering_shadow_hand_kitless(physics_backend, renderer, data_type): + """Test shadow hand environment rendering correctness.""" + rendering_test_shadow_hand(physics_backend, renderer, data_type, _COMPARISON_SCORES) diff --git a/source/isaaclab_tasks/test/test_rl_device_separation.py b/source/isaaclab_tasks/test/test_rl_device_separation.py index ef6bd1e093f1..cafa3c777995 100644 --- a/source/isaaclab_tasks/test/test_rl_device_separation.py +++ b/source/isaaclab_tasks/test/test_rl_device_separation.py @@ -47,8 +47,7 @@ import pytest import torch -import carb -import omni.usd +import isaaclab.sim as sim_utils import isaaclab_tasks # noqa: F401 from isaaclab_tasks.utils.parse_cfg import parse_env_cfg @@ -68,9 +67,11 @@ def _create_env(sim_device: str): Initialized gym environment """ # Create a new stage - omni.usd.get_context().new_stage() - # Reset the rtx sensors carb setting to False - carb.settings.get_settings().set_bool("/isaaclab/render/rtx_sensors", False) + sim_utils.create_new_stage() + # Reset the rtx sensors setting to False + from isaaclab.app.settings_manager import get_settings_manager + + get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", False) try: env_cfg = parse_env_cfg(TEST_ENV, device=sim_device, num_envs=NUM_ENVS) diff --git a/source/isaaclab_tasks/test/test_shadow_hand_vision_presets.py b/source/isaaclab_tasks/test/test_shadow_hand_vision_presets.py new file mode 100644 index 000000000000..5faf370f1cdb --- /dev/null +++ b/source/isaaclab_tasks/test/test_shadow_hand_vision_presets.py @@ -0,0 +1,424 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for shadow hand vision environment preset combinations. + +Two test suites are provided: + +1. **Validation unit tests** — use lightweight ``types.SimpleNamespace`` mocks. + These exercise :meth:`ShadowHandVisionEnvCfg.validate_config` directly and + do not require Isaac Sim. + +2. **Preset resolution tests** — verify that each named preset in + :class:`ShadowHandVisionTiledCameraCfg` and + :class:`~isaaclab_tasks.utils.renderer_cfg.RendererPresetCfg` resolves to the expected + concrete config class and data types, using the real config classes. + These require Isaac Sim to be launched so that the renderer cfg imports + are available. +""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +import copy # noqa: E402 +import types # noqa: E402 + +import pytest # noqa: E402 +import torch # noqa: E402 +from isaaclab_newton.renderers import NewtonWarpRendererCfg # noqa: E402 +from isaaclab_ov.renderers import OVRTXRendererCfg # noqa: E402 +from isaaclab_physx.renderers import IsaacRtxRendererCfg # noqa: E402 + +from isaaclab_tasks.direct.shadow_hand.shadow_hand_vision_env import ShadowHandVisionEnv # noqa: E402 +from isaaclab_tasks.direct.shadow_hand.shadow_hand_vision_env_cfg import ( # noqa: E402 + ShadowHandVisionBenchmarkEnvCfg, + ShadowHandVisionEnvCfg, +) +from isaaclab_tasks.utils.hydra import collect_presets, resolve_presets # noqa: E402 + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_cfg(renderer_type: str | None, data_types: list[str], feature_extractor_enabled: bool = True): + """Build a minimal mock cfg with a :meth:`validate_config` method. + + The mock reuses the real validation logic from :class:`ShadowHandVisionEnvCfg`. + """ + cfg = types.SimpleNamespace() + cfg.tiled_camera = types.SimpleNamespace( + renderer_cfg=types.SimpleNamespace(renderer_type=renderer_type), + data_types=data_types, + ) + cfg.feature_extractor = types.SimpleNamespace(enabled=feature_extractor_enabled) + cfg.validate_config = lambda: ShadowHandVisionEnvCfg.validate_config(cfg) + return cfg + + +# --------------------------------------------------------------------------- +# Valid combinations — must not raise +# --------------------------------------------------------------------------- + +_VALID_COMBOS = [ + # renderer_type, data_types, feature_extractor_enabled + # ── Non-warp renderers accept every data type ── + (None, ["rgb"], True), + (None, ["rgb", "depth", "semantic_segmentation"], True), + (None, ["albedo"], True), + (None, ["simple_shading_constant_diffuse"], True), + (None, ["simple_shading_diffuse_mdl"], True), + (None, ["simple_shading_full_mdl"], True), + (None, ["depth"], False), # depth-only OK when CNN disabled + ("isaac_rtx", ["rgb"], True), + ("isaac_rtx", ["albedo"], True), + ("isaac_rtx", ["simple_shading_full_mdl"], True), + ("isaac_rtx", ["rgb", "depth", "semantic_segmentation"], True), + ("isaac_rtx", ["depth"], False), + pytest.param("ovrtx", ["rgb"], True, marks=pytest.mark.skip(reason="OVRTX testing disabled")), + pytest.param("ovrtx", ["albedo"], True, marks=pytest.mark.skip(reason="OVRTX testing disabled")), + pytest.param("ovrtx", ["depth"], False, marks=pytest.mark.skip(reason="OVRTX testing disabled")), + # ── Warp renderer: only rgb and depth are supported ── + ("newton_warp", ["rgb"], True), + ("newton_warp", ["depth"], False), # depth-only OK when CNN disabled + ("newton_warp", ["rgb", "depth"], True), # multiple supported types +] + + +@pytest.mark.parametrize("renderer_type,data_types,enabled", _VALID_COMBOS) +def test_valid_combinations_do_not_raise(renderer_type, data_types, enabled): + cfg = _make_cfg(renderer_type, data_types, enabled) + cfg.validate_config() # must not raise + + +# --------------------------------------------------------------------------- +# Invalid combinations — must raise ValueError with a descriptive message +# --------------------------------------------------------------------------- + +_INVALID_COMBOS = [ + # renderer_type, data_types, enabled, substring expected in error message + # ── Warp does not support colour-space data types ── + ( + "newton_warp", + ["albedo"], + True, + "albedo", + ), + ( + "newton_warp", + ["simple_shading_constant_diffuse"], + True, + "simple_shading_constant_diffuse", + ), + ( + "newton_warp", + ["simple_shading_diffuse_mdl"], + True, + "simple_shading_diffuse_mdl", + ), + ( + "newton_warp", + ["simple_shading_full_mdl"], + True, + "simple_shading_full_mdl", + ), + ( + "newton_warp", + ["rgb", "depth", "semantic_segmentation"], + True, + "semantic_segmentation", + ), + # ── Depth-only with CNN enabled is not valid for training ── + ( + None, + ["depth"], + True, + "Depth-only", + ), + ( + "isaac_rtx", + ["depth"], + True, + "Depth-only", + ), + pytest.param( + "ovrtx", + ["depth"], + True, + "Depth-only", + marks=pytest.mark.skip(reason="OVRTX testing disabled"), + ), + ( + "newton_warp", + ["depth"], + True, + "Depth-only", # depth is warp-supported but CNN can't train on it + ), +] + + +@pytest.mark.parametrize("renderer_type,data_types,enabled,match", _INVALID_COMBOS) +def test_invalid_combinations_raise_value_error(renderer_type, data_types, enabled, match): + cfg = _make_cfg(renderer_type, data_types, enabled) + with pytest.raises(ValueError, match=match): + cfg.validate_config() + + +# --------------------------------------------------------------------------- +# Preset resolution — camera data types +# --------------------------------------------------------------------------- + + +@pytest.fixture(scope="module") +def shadow_hand_vision_presets(): + """Collect all presets from ShadowHandVisionEnvCfg once for the module.""" + return collect_presets(ShadowHandVisionEnvCfg()) + + +_CAMERA_DATA_TYPE_PRESETS = [ + # preset_name, expected_data_types + ("default", ["rgb", "depth", "semantic_segmentation"]), + ("full", ["rgb", "depth", "semantic_segmentation"]), + ("rgb", ["rgb"]), + ("albedo", ["albedo"]), + ("simple_shading_constant_diffuse", ["simple_shading_constant_diffuse"]), + ("simple_shading_diffuse_mdl", ["simple_shading_diffuse_mdl"]), + ("simple_shading_full_mdl", ["simple_shading_full_mdl"]), + ("depth", ["depth"]), +] + + +@pytest.mark.parametrize("preset_name,expected_data_types", _CAMERA_DATA_TYPE_PRESETS) +def test_camera_preset_data_types(shadow_hand_vision_presets, preset_name, expected_data_types): + camera_presets = shadow_hand_vision_presets["tiled_camera"] + assert preset_name in camera_presets, f"Preset '{preset_name}' not found in tiled_camera presets" + resolved = camera_presets[preset_name] + assert resolved.data_types == expected_data_types, ( + f"Preset '{preset_name}': expected data_types={expected_data_types}, got {resolved.data_types}" + ) + + +@pytest.mark.parametrize("preset_name,_", _CAMERA_DATA_TYPE_PRESETS) +def test_camera_preset_cfg_is_valid(shadow_hand_vision_presets, preset_name, _): + """Every camera preset config must request at least one data type and have valid dimensions. + + Note: this is a config-level check only. It verifies that the preset is correctly wired up + (non-empty data_types, positive width/height) but does NOT run the renderer or inspect actual + pixel values. Verifying that rendered frames are non-empty requires a full integration test + that steps the simulation and checks the camera output tensors. + """ + resolved = shadow_hand_vision_presets["tiled_camera"][preset_name] + assert len(resolved.data_types) > 0, ( + f"Camera preset '{preset_name}' has an empty data_types list — nothing would be rendered." + ) + assert resolved.width > 0, f"Camera preset '{preset_name}' has non-positive width: {resolved.width}" + assert resolved.height > 0, f"Camera preset '{preset_name}' has non-positive height: {resolved.height}" + + +def test_all_camera_presets_present(shadow_hand_vision_presets): + """Every preset defined in ShadowHandVisionTiledCameraCfg is discoverable.""" + camera_presets = shadow_hand_vision_presets["tiled_camera"] + expected_names = {name for name, _ in _CAMERA_DATA_TYPE_PRESETS} + missing = expected_names - set(camera_presets.keys()) + assert not missing, f"Camera presets missing from collected presets: {missing}" + + +# --------------------------------------------------------------------------- +# Preset resolution — renderer +# --------------------------------------------------------------------------- + +_RENDERER_PRESETS = [ + # preset_name, expected_class + ("default", IsaacRtxRendererCfg), + ("isaacsim_rtx_renderer", IsaacRtxRendererCfg), + ("newton_renderer", NewtonWarpRendererCfg), + pytest.param("ovrtx_renderer", OVRTXRendererCfg, marks=pytest.mark.skip(reason="OVRTX testing disabled")), +] + + +@pytest.mark.parametrize("preset_name,expected_class", _RENDERER_PRESETS) +def test_renderer_preset_class(shadow_hand_vision_presets, preset_name, expected_class): + renderer_presets = shadow_hand_vision_presets["tiled_camera.renderer_cfg"] + assert preset_name in renderer_presets, f"Preset '{preset_name}' not found in renderer presets" + resolved = renderer_presets[preset_name] + assert isinstance(resolved, expected_class), ( + f"Renderer preset '{preset_name}': expected {expected_class.__name__}, got {type(resolved).__name__}" + ) + + +def test_warp_renderer_has_correct_renderer_type(shadow_hand_vision_presets): + """NewtonWarpRendererCfg must expose renderer_type='newton_warp' for validation to work.""" + warp_cfg = shadow_hand_vision_presets["tiled_camera.renderer_cfg"]["newton_renderer"] + assert warp_cfg.renderer_type == "newton_warp" + + +def test_all_renderer_presets_present(shadow_hand_vision_presets): + """Every preset in MultiBackendRendererCfg is discoverable.""" + renderer_presets = shadow_hand_vision_presets["tiled_camera.renderer_cfg"] + expected_names = {"default", "isaacsim_rtx_renderer", "newton_renderer", "ovrtx_renderer"} + missing = expected_names - set(renderer_presets.keys()) + assert not missing, f"Renderer presets missing from collected presets: {missing}" + + +# --------------------------------------------------------------------------- +# Cross-validation: every camera preset resolves to a valid warp combination +# when paired with the warp renderer preset +# --------------------------------------------------------------------------- + +_WARP_VALID_CAMERA_PRESETS = ["rgb", "depth"] +_WARP_INVALID_CAMERA_PRESETS = [ + "default", + "full", + "albedo", + "simple_shading_constant_diffuse", + "simple_shading_diffuse_mdl", + "simple_shading_full_mdl", +] + + +@pytest.mark.parametrize("camera_preset", _WARP_VALID_CAMERA_PRESETS) +def test_warp_with_valid_camera_preset(shadow_hand_vision_presets, camera_preset): + """Warp + {rgb, depth} camera presets must not raise (depth with CNN disabled).""" + camera_cfg = shadow_hand_vision_presets["tiled_camera"][camera_preset] + warp_cfg = shadow_hand_vision_presets["tiled_camera.renderer_cfg"]["newton_renderer"] + enabled = camera_cfg.data_types != ["depth"] # disable CNN for depth-only + cfg = _make_cfg(warp_cfg.renderer_type, camera_cfg.data_types, enabled) + cfg.validate_config() # must not raise + + +@pytest.mark.parametrize("camera_preset", _WARP_INVALID_CAMERA_PRESETS) +def test_warp_with_invalid_camera_preset(shadow_hand_vision_presets, camera_preset): + """Warp + unsupported camera presets must raise ValueError.""" + camera_cfg = shadow_hand_vision_presets["tiled_camera"][camera_preset] + warp_cfg = shadow_hand_vision_presets["tiled_camera.renderer_cfg"]["newton_renderer"] + cfg = _make_cfg(warp_cfg.renderer_type, camera_cfg.data_types, True) + with pytest.raises(ValueError): + cfg.validate_config() + + +# --------------------------------------------------------------------------- +# Integration: camera output tensors must contain non-zero pixel values +# --------------------------------------------------------------------------- + +_RENDER_CORRECTNESS_CASES = [ + # (renderer_preset, camera_preset, physics) - physics is "physx" or "newton" + # ── PhysX physics (default) + IsaacRTX: supports all data types ── + pytest.param(("isaacsim_rtx_renderer", "rgb", "physx"), id="physx-isaacsim_rtx-rgb"), + pytest.param(("isaacsim_rtx_renderer", "depth", "physx"), id="physx-isaacsim_rtx-depth"), + pytest.param(("isaacsim_rtx_renderer", "albedo", "physx"), id="physx-isaacsim_rtx-albedo"), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_constant_diffuse", "physx"), + id="physx-isaacsim_rtx-simple_shading_constant_diffuse", + ), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_diffuse_mdl", "physx"), + id="physx-isaacsim_rtx-simple_shading_diffuse_mdl", + ), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_full_mdl", "physx"), + id="physx-isaacsim_rtx-simple_shading_full_mdl", + ), + # ── PhysX physics + Warp: only rgb and depth are supported ── + # xfail: standard Shadow Hand USD contains PhysX tendons that Newton's ModelBuilder cannot parse, + # so the Newton model build fails and the Warp renderer cannot initialise. + pytest.param( + ("newton_renderer", "rgb", "physx"), + id="physx-warp-rgb", + marks=pytest.mark.xfail(raises=RuntimeError, reason="PhysX tendon schemas unsupported by Newton ModelBuilder"), + ), + pytest.param( + ("newton_renderer", "depth", "physx"), + id="physx-warp-depth", + marks=pytest.mark.xfail(raises=RuntimeError, reason="PhysX tendon schemas unsupported by Newton ModelBuilder"), + ), + # ── Newton physics + Warp: Warp renderer is physics-backend agnostic ── + pytest.param(("newton_renderer", "rgb", "newton"), id="newton-warp-rgb"), + pytest.param(("newton_renderer", "depth", "newton"), id="newton-warp-depth"), + # ── Newton physics + IsaacRTX ── + pytest.param(("isaacsim_rtx_renderer", "rgb", "newton"), id="newton-isaacsim_rtx-rgb"), + pytest.param(("isaacsim_rtx_renderer", "depth", "newton"), id="newton-isaacsim_rtx-depth"), + pytest.param(("isaacsim_rtx_renderer", "albedo", "newton"), id="newton-isaacsim_rtx-albedo"), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_constant_diffuse", "newton"), + id="newton-isaacsim_rtx-simple_shading_constant_diffuse", + ), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_diffuse_mdl", "newton"), + id="newton-isaacsim_rtx-simple_shading_diffuse_mdl", + ), + pytest.param( + ("isaacsim_rtx_renderer", "simple_shading_full_mdl", "newton"), + id="newton-isaacsim_rtx-simple_shading_full_mdl", + ), + # ── OVRTX: disabled ── + pytest.param( + ("ovrtx_renderer", "rgb", "physx"), + id="physx-ovrtx-rgb", + marks=pytest.mark.skip(reason="OVRTX testing disabled"), + ), +] + + +@pytest.fixture(params=_RENDER_CORRECTNESS_CASES) +def render_correctness_env(request, shadow_hand_vision_presets): + """Build an env with the specified renderer+camera+physics combination, step once, yield, close. + + Function-scoped so each parametrized case creates and closes its own env sequentially. + ``SimulationContext.clear_instance()`` (called by ``env.close()``) fully tears down the + singleton, allowing a new env with a different physics backend to be created next. + + The shared ``shadow_hand_vision_presets`` fixture is deepcopied before mutation so that + subsequent parametrized cases see clean preset configs. + """ + renderer_preset, camera_preset, physics = request.param + cfg = ShadowHandVisionBenchmarkEnvCfg() + # Wire in the requested camera and renderer presets. + camera_cfg = copy.deepcopy(shadow_hand_vision_presets["tiled_camera"][camera_preset]) + camera_cfg.renderer_cfg = copy.deepcopy(shadow_hand_vision_presets["tiled_camera.renderer_cfg"][renderer_preset]) + cfg.tiled_camera = camera_cfg + # Apply MJWarp presets before resolve_presets so they are not overwritten by defaults. + # Newton needs a specific solver config, a different robot USD, an articulation-based object, + # and a stripped-down event cfg (no PhysX-specific material randomization). + if physics == "newton": + cfg.sim.physics = copy.deepcopy(shadow_hand_vision_presets["sim.physics"]["newton_mjwarp"]) + cfg.robot_cfg = copy.deepcopy(shadow_hand_vision_presets["robot_cfg"]["newton_mjwarp"]) + cfg.object_cfg = copy.deepcopy(shadow_hand_vision_presets["object_cfg"]["newton_mjwarp"]) + if "events" in shadow_hand_vision_presets: + cfg.events = copy.deepcopy(shadow_hand_vision_presets["events"]["newton_mjwarp"]) + cfg = resolve_presets(cfg) + cfg.scene.num_envs = 4 + cfg.feature_extractor.write_image_to_file = False + env = ShadowHandVisionEnv(cfg) + env.reset() + actions = torch.zeros(cfg.scene.num_envs, env.action_space.shape[-1], device=env.device) + env.step(actions) + yield renderer_preset, camera_preset, physics, env + env.close() + + +def test_camera_renders_not_empty(render_correctness_env): + """Camera output must contain at least one non-zero pixel for every valid renderer+camera combo. + + Depth tensors may contain ``inf`` for background pixels (empty space). ``inf`` is replaced + with 0 before checking ``max()``; a non-zero max confirms the renderer produced geometry pixels. + + All renderer+camera+physics combinations are expected to produce non-empty frames. + """ + renderer_preset, camera_preset, physics, env = render_correctness_env + label = f"{physics}-{renderer_preset}+{camera_preset}" + camera_output = env._tiled_camera.data.output + assert len(camera_output) > 0, f"[{label}] Camera produced no output tensors at all." + for dt, tensor in camera_output.items(): + finite = torch.where(torch.isinf(tensor), torch.zeros_like(tensor), tensor) + # import pdb; pdb.set_trace() + assert finite.max() > 0.2, ( + f"[{label}] Camera output '{dt}' is all zeros or all inf " + f"after stepping. Tensor shape: {tensor.shape}, dtype: {tensor.dtype}." + ) diff --git a/source/isaaclab_tasks/test/test_sim_launcher_visualizer_intent.py b/source/isaaclab_tasks/test/test_sim_launcher_visualizer_intent.py new file mode 100644 index 000000000000..c6bad5c19f1e --- /dev/null +++ b/source/isaaclab_tasks/test/test_sim_launcher_visualizer_intent.py @@ -0,0 +1,92 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration-style tests for visualizer intent plumbing in sim launcher.""" + +from __future__ import annotations + +import argparse +import sys +import types + +import isaaclab_tasks.utils.sim_launcher as sim_launcher + + +class _DummyVizCfg: + def __init__(self, visualizer_type: str): + self.visualizer_type = visualizer_type + + +class _DummySimCfg: + def __init__(self, visualizer_cfgs): + self.visualizer_cfgs = visualizer_cfgs + + +class _DummyEnvCfg: + def __init__(self, sim_cfg): + self.sim = sim_cfg + + +def test_launch_simulation_passes_visualizer_intent_to_applauncher(monkeypatch): + """Ensure canonical launcher path forwards visualizer intent upstream.""" + captured: dict[str, object] = {} + + class _FakeAppLauncher: + def __init__(self, launcher_args): + captured["launcher_args"] = launcher_args + captured["closed"] = False + self.app = types.SimpleNamespace(close=lambda: captured.update({"closed": True})) + + monkeypatch.setitem(sys.modules, "isaaclab.app", types.SimpleNamespace(AppLauncher=_FakeAppLauncher)) + monkeypatch.setattr("importlib.util.find_spec", lambda name: object() if name == "omni.kit" else None) + + env_cfg = _DummyEnvCfg(_DummySimCfg([_DummyVizCfg("kit"), _DummyVizCfg("newton")])) + launcher_args = argparse.Namespace() + + with sim_launcher.launch_simulation(env_cfg, launcher_args): + pass + + forwarded_args = captured["launcher_args"] + assert isinstance(forwarded_args, argparse.Namespace) + assert getattr(forwarded_args, "visualizer_intent") == { + "has_any_visualizers": True, + "has_kit_visualizer": True, + } + assert captured["closed"] is True + + +def test_launch_simulation_kitless_viz_none_sets_disable_all(monkeypatch): + """Kitless mode should persist explicit disable-all semantics for --viz none.""" + captured = {"types": None, "explicit": None, "disable_all": None} + + class _FakeSettings: + def set_string(self, path: str, value: str) -> None: + if path == "/isaaclab/visualizer/types": + captured["types"] = value + + def set_bool(self, path: str, value: bool) -> None: + if path == "/isaaclab/visualizer/explicit": + captured["explicit"] = value + elif path == "/isaaclab/visualizer/disable_all": + captured["disable_all"] = value + + monkeypatch.setattr( + sim_launcher, "compute_kit_requirements", lambda env_cfg, launcher_args: (False, False, {"none"}) + ) + # `app_launcher` imports both names from settings_manager; provide a full stub module + # so `from isaaclab.app import AppLauncher` succeeds in kitless mode. + _sm = types.ModuleType("isaaclab.app.settings_manager") + _sm.get_settings_manager = lambda: _FakeSettings() + _sm.initialize_carb_settings = lambda: None + monkeypatch.setitem(sys.modules, "isaaclab.app.settings_manager", _sm) + + env_cfg = _DummyEnvCfg(_DummySimCfg(None)) + launcher_args = argparse.Namespace(visualizer=["none"]) + with sim_launcher.launch_simulation(env_cfg, launcher_args): + pass + + # `sync_visualizer_cli_settings_to_carb` uses ``" ".join(visualizer)`` → ``"none"`` for ``["none"]``, + # not an empty string (empty only when *visualizer* is missing/empty). + assert captured == {"types": "none", "explicit": True, "disable_all": True} diff --git a/source/isaaclab_tasks/test/test_teleop_environments.py b/source/isaaclab_tasks/test/test_teleop_environments.py new file mode 100644 index 000000000000..c30af100038a --- /dev/null +++ b/source/isaaclab_tasks/test/test_teleop_environments.py @@ -0,0 +1,45 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import sys + +# Import pinocchio in the main script to force the use of the dependencies +# installed by IsaacLab and not the one installed by Isaac Sim. +# pinocchio is required by the Pink IK controller used in some teleop environments +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +# Teleop environments require isaacteleop / isaaclab_teleop and may interfere +# with other environment tests when run in the same process. They are collected +# separately here so they execute in their own test session. + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda"), (1, "cuda")]) +@pytest.mark.parametrize( + "task_name", setup_environment(include_play=False, factory_envs=False, multi_agent=False, teleop_envs=True) +) +@pytest.mark.isaacsim_ci +def test_teleop_environments(task_name, num_envs, device): + # run teleop environments without stage in memory + _run_environments(task_name, device, num_envs, create_stage_in_memory=False) diff --git a/source/isaaclab_tasks/test/test_teleop_environments_with_stage_in_memory.py b/source/isaaclab_tasks/test/test_teleop_environments_with_stage_in_memory.py new file mode 100644 index 000000000000..155543d0ed70 --- /dev/null +++ b/source/isaaclab_tasks/test/test_teleop_environments_with_stage_in_memory.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import sys + +# Import pinocchio in the main script to force the use of the dependencies +# installed by IsaacLab and not the one installed by Isaac Sim. +# pinocchio is required by the Pink IK controller used in some teleop environments +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch the simulator +app_launcher = AppLauncher(headless=True, enable_cameras=True) +simulation_app = app_launcher.app + +from isaaclab.utils.version import get_isaac_sim_version + +"""Rest everything follows.""" + +import pytest + +import isaaclab_tasks # noqa: F401 + +# Local imports should be imported last +from env_test_utils import _run_environments, setup_environment # isort: skip + + +# Teleop environments require isaacteleop / isaaclab_teleop and may interfere +# with other environment tests when run in the same process. They are collected +# separately here so they execute in their own test session. + + +@pytest.mark.parametrize("num_envs, device", [(2, "cuda")]) +@pytest.mark.parametrize( + "task_name", setup_environment(include_play=False, factory_envs=False, multi_agent=False, teleop_envs=True) +) +def test_teleop_environments_with_stage_in_memory_and_clone_in_fabric_disabled(task_name, num_envs, device): + # skip test if stage in memory is not supported + if get_isaac_sim_version().major < 5: + pytest.skip("Stage in memory is not supported in this version of Isaac Sim") + + # run teleop environments with stage in memory + _run_environments(task_name, device, num_envs, create_stage_in_memory=True, disable_clone_in_fabric=True) diff --git a/source/isaaclab_tasks_experimental/config/extension.toml b/source/isaaclab_tasks_experimental/config/extension.toml new file mode 100644 index 000000000000..75de8d6da0ef --- /dev/null +++ b/source/isaaclab_tasks_experimental/config/extension.toml @@ -0,0 +1,22 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.0.1" + +# Description +title = "Experimental environments for IsaacLab" +description="Extension containing suite of experimental environments for robot learning." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "rl", "il", "learning"] + +[dependencies] +"isaaclab" = {} +"isaaclab_assets" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_tasks.experimental" diff --git a/source/isaaclab_tasks_experimental/docs/README.md b/source/isaaclab_tasks_experimental/docs/README.md new file mode 100644 index 000000000000..d9e681518d64 --- /dev/null +++ b/source/isaaclab_tasks_experimental/docs/README.md @@ -0,0 +1,3 @@ +# Isaac Lab: Experimental Environment Suite + +Experimental environments for robot learning built on top of Isaac Lab. diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/__init__.py new file mode 100644 index 000000000000..918c41d73d7b --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing task implementations for various robotic environments.""" + +import os +import toml + +# Conveniences to other module directories via relative paths +ISAACLAB_TASKS_EXPERIMENTAL_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +ISAACLAB_TASKS_EXPERIMENTAL_METADATA = toml.load( + os.path.join(ISAACLAB_TASKS_EXPERIMENTAL_EXT_DIR, "config", "extension.toml") +) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +# Configure the module-level variables +__version__ = ISAACLAB_TASKS_EXPERIMENTAL_METADATA["package"]["version"] + +## +# Register Gym environments. +## + +from isaaclab_tasks.utils import import_packages + +# The blacklist is used to prevent importing configs from sub-packages +_BLACKLIST_PKGS = ["utils", ".mdp"] +# Import all configs in this package +import_packages(__name__, _BLACKLIST_PKGS) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/__init__.py new file mode 100644 index 000000000000..3e2b7945ebde --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Direct workflow environments. +""" + +import gymnasium as gym diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/__init__.py new file mode 100644 index 000000000000..c954081b9c54 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Allegro Inhand Manipulation environment. +""" + +import gymnasium as gym + +## +# Register Gym environments. +## + +inhand_task_entry = "isaaclab_tasks_experimental.direct.inhand_manipulation" +stable_agents = "isaaclab_tasks.direct.allegro_hand.agents" + +gym.register( + id="Isaac-Repose-Cube-Allegro-Direct-Warp-v0", + entry_point=f"{inhand_task_entry}.inhand_manipulation_warp_env:InHandManipulationWarpEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.allegro_hand_warp_env_cfg:AllegroHandWarpEnvCfg", + "rl_games_cfg_entry_point": f"{stable_agents}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{stable_agents}.rsl_rl_ppo_cfg:AllegroHandPPORunnerCfg", + "skrl_cfg_entry_point": f"{stable_agents}:skrl_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/allegro_hand_warp_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/allegro_hand_warp_env_cfg.py new file mode 100644 index 000000000000..2d607d475329 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/allegro_hand/allegro_hand_warp_env_cfg.py @@ -0,0 +1,136 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg # , RigidObjectCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.markers import VisualizationMarkersCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + +from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG + + +@configclass +class AllegroHandWarpEnvCfg(DirectRLEnvCfg): + # env + decimation = 4 + episode_length_s = 10.0 + action_space = 16 + observation_space = 124 # (full) + state_space = 0 + asymmetric_obs = False + obs_type = "full" + + solver_cfg = MJWarpSolverCfg( + solver="newton", + integrator="implicitfast", + njmax=80, + nconmax=70, + impratio=10.0, + cone="elliptic", + update_data_interval=2, + iterations=100, + ls_iterations=15, + ls_parallel=False, + # save_to_mjcf="AllegroHand.xml", + ) + + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, + num_substeps=2, + debug_mode=False, + ) + # simulation + sim: SimulationCfg = SimulationCfg( + dt=1 / 120, + render_interval=decimation, + physics_material=RigidBodyMaterialCfg( + static_friction=1.0, + dynamic_friction=1.0, + ), + physics=newton_cfg, + ) + # robot + robot_cfg: ArticulationCfg = ALLEGRO_HAND_CFG.replace(prim_path="/World/envs/env_.*/Robot") + + actuated_joint_names = [ + "index_joint_0", + "middle_joint_0", + "ring_joint_0", + "thumb_joint_0", + "index_joint_1", + "index_joint_2", + "index_joint_3", + "middle_joint_1", + "middle_joint_2", + "middle_joint_3", + "ring_joint_1", + "ring_joint_2", + "ring_joint_3", + "thumb_joint_1", + "thumb_joint_2", + "thumb_joint_3", + ] + fingertip_body_names = [ + "index_link_3", + "middle_link_3", + "ring_link_3", + "thumb_link_3", + ] + + # in-hand object + object_cfg: ArticulationCfg = ArticulationCfg( + prim_path="/World/envs/env_.*/object", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + mass_props=sim_utils.MassPropertiesCfg(density=400.0), + scale=(1.2, 1.2, 1.2), + ), + # FIXME: it does seem to be a bug for ArticulationCfg for handling empty joint list + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, -0.17, 0.565), rot=(0.0, 0.0, 0.0, 1.0), joint_pos={}, joint_vel={} + ), + actuators={}, + articulation_root_prim_path="", + ) + # goal object + goal_object_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg( + prim_path="/Visuals/goal_marker", + markers={ + "goal": sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + scale=(1.2, 1.2, 1.2), + ) + }, + ) + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=8192, env_spacing=0.75, replicate_physics=True, clone_in_fabric=True + ) + # reset + reset_position_noise = 0.01 # range of position at reset + reset_dof_pos_noise = 0.2 # range of dof pos at reset + reset_dof_vel_noise = 0.0 # range of dof vel at reset + # reward scales + dist_reward_scale = -10.0 + rot_reward_scale = 1.0 + rot_eps = 0.1 + action_penalty_scale = -0.0002 + reach_goal_bonus = 250 + fall_penalty = 0 + fall_dist = 0.24 + vel_obs_scale = 0.2 + success_tolerance = 0.2 + max_consecutive_success = 0 + av_factor = 0.1 + act_moving_average = 1.0 + force_torque_obs_scale = 10.0 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/__init__.py new file mode 100644 index 000000000000..d5f4b459a8a6 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Ant locomotion environment. +""" + +import gymnasium as gym + +## +# Register Gym environments. +## + +stable_agents = "isaaclab_tasks.direct.ant.agents" + +gym.register( + id="Isaac-Ant-Direct-Warp-v0", + entry_point=f"{__name__}.ant_env_warp:AntWarpEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.ant_env_warp:AntWarpEnvCfg", + "rl_games_cfg_entry_point": f"{stable_agents}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{stable_agents}.rsl_rl_ppo_cfg:AntPPORunnerCfg", + "skrl_cfg_entry_point": f"{stable_agents}:skrl_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/ant_env_warp.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/ant_env_warp.py new file mode 100644 index 000000000000..a2c8f91fa3db --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/ant/ant_env_warp.py @@ -0,0 +1,91 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_assets import ANT_CFG + +from isaaclab_tasks_experimental.direct.locomotion.locomotion_env_warp import LocomotionWarpEnv + + +@configclass +class AntWarpEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 15.0 + decimation = 2 + action_scale = 0.5 + action_space = 8 + observation_space = 36 + state_space = 0 + + solver_cfg = MJWarpSolverCfg( + njmax=45, + nconmax=25, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ) + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=newton_cfg) + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="average", + restitution_combine_mode="average", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot: ArticulationCfg = ANT_CFG.replace(prim_path="/World/envs/env_.*/Robot") + joint_gears: list = [15, 15, 15, 15, 15, 15, 15, 15] + + heading_weight: float = 0.5 + up_weight: float = 0.1 + + energy_cost_scale: float = 0.05 + actions_cost_scale: float = 0.005 + alive_reward_scale: float = 0.5 + dof_vel_scale: float = 0.2 + + death_cost: float = -2.0 + termination_height: float = 0.31 + + angular_velocity_scale: float = 1.0 + contact_force_scale: float = 0.1 + + +class AntWarpEnv(LocomotionWarpEnv): + cfg: AntWarpEnvCfg + + def __init__(self, cfg: AntWarpEnvCfg, render_mode: str | None = None, **kwargs): + super().__init__(cfg, render_mode, **kwargs) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/__init__.py new file mode 100644 index 000000000000..19b9f26a287c --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Cartpole balancing environment. +""" + +import gymnasium as gym + +## +# Register Gym environments. +## + +stable_agents = "isaaclab_tasks.direct.cartpole.agents" + +gym.register( + id="Isaac-Cartpole-Direct-Warp-v0", + entry_point=f"{__name__}.cartpole_warp_env:CartpoleWarpEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_warp_env:CartpoleWarpEnvCfg", + "rl_games_cfg_entry_point": f"{stable_agents}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{stable_agents}.rsl_rl_ppo_cfg:CartpolePPORunnerCfg", + "skrl_cfg_entry_point": f"{stable_agents}:skrl_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{stable_agents}:sb3_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/cartpole_warp_env.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/cartpole_warp_env.py new file mode 100644 index 000000000000..ff3c87da847e --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/cartpole/cartpole_warp_env.py @@ -0,0 +1,357 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import math + +import warp as wp +from isaaclab_experimental.envs import DirectRLEnvWarp +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation, ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane +from isaaclab.utils import configclass + +from isaaclab_assets.robots.cartpole import CARTPOLE_CFG + + +@configclass +class CartpoleWarpEnvCfg(DirectRLEnvCfg): + # env + decimation = 2 + episode_length_s = 5.0 + action_scale = 100.0 # [N] + action_space = 1 + observation_space = 4 + state_space = 0 + + solver_cfg = MJWarpSolverCfg( + njmax=5, + nconmax=3, + cone="pyramidal", + integrator="implicitfast", + impratio=1, + ) + + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=newton_cfg) + + # robot + robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot") + cart_dof_name = "slider_to_cart" + pole_dof_name = "cart_to_pole" + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # reset + max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m] + initial_pole_angle_range = [-0.25, 0.25] # the range in which the pole angle is sampled from on reset [x pi rad] + + # reward scales + rew_scale_alive = 1.0 + rew_scale_terminated = -2.0 + rew_scale_pole_pos = -1.0 + rew_scale_cart_vel = -0.01 + rew_scale_pole_vel = -0.005 + + +@wp.kernel +def get_observations( + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + cart_dof_idx: wp.int32, + pole_dof_idx: wp.int32, + observations: wp.array(dtype=wp.vec4f), +): + env_index = wp.tid() + observations[env_index][0] = joint_pos[env_index, pole_dof_idx] + observations[env_index][1] = joint_vel[env_index, pole_dof_idx] + observations[env_index][2] = joint_pos[env_index, cart_dof_idx] + observations[env_index][3] = joint_vel[env_index, cart_dof_idx] + + +@wp.kernel +def update_actions( + input_actions: wp.array2d(dtype=wp.float32), + actions: wp.array2d(dtype=wp.float32), + action_scale: wp.float32, + cart_dof_idx: wp.int32, +): + env_index = wp.tid() + actions[env_index, cart_dof_idx] = action_scale * input_actions[env_index, 0] + + +@wp.kernel +def get_dones( + joint_pos: wp.array2d(dtype=wp.float32), + episode_length_buf: wp.array(dtype=wp.int32), + cart_dof_idx: wp.int32, + pole_dof_idx: wp.int32, + max_episode_length: wp.int32, + max_cart_pos: wp.float32, + out_of_bounds: wp.array(dtype=wp.bool), + time_out: wp.array(dtype=wp.bool), + reset: wp.array(dtype=wp.bool), +): + env_index = wp.tid() + out_of_bounds[env_index] = (wp.abs(joint_pos[env_index, cart_dof_idx]) > max_cart_pos) or ( + wp.abs(joint_pos[env_index, pole_dof_idx]) > math.pi / 2.0 + ) + time_out[env_index] = episode_length_buf[env_index] >= (max_episode_length - 1) + reset[env_index] = out_of_bounds[env_index] or time_out[env_index] + + +@wp.func +def compute_rew_alive(rew_scale_alive: wp.float32, reset_terminated: bool) -> wp.float32: + if reset_terminated: + return wp.float32(0.0) + return rew_scale_alive + + +@wp.func +def compute_rew_termination(rew_scale_terminated: wp.float32, reset_terminated: bool) -> wp.float32: + if reset_terminated: + return rew_scale_terminated + return wp.float32(0.0) + + +@wp.func +def compute_rew_pole_pos( + rew_scale_pole_pos: wp.float32, + pole_pos: wp.float32, +) -> wp.float32: + return rew_scale_pole_pos * pole_pos * pole_pos + + +@wp.func +def compute_rew_cart_vel( + rew_scale_cart_vel: wp.float32, + cart_vel: wp.float32, +) -> wp.float32: + return rew_scale_cart_vel * wp.abs(cart_vel) + + +@wp.func +def compute_rew_pole_vel( + rew_scale_pole_vel: wp.float32, + pole_vel: wp.float32, +) -> wp.float32: + return rew_scale_pole_vel * wp.abs(pole_vel) + + +@wp.kernel +def compute_rewards( + rew_scale_alive: wp.float32, + rew_scale_terminated: wp.float32, + rew_scale_pole_pos: wp.float32, + rew_scale_cart_vel: wp.float32, + rew_scale_pole_vel: wp.float32, + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + cart_dof_idx: wp.int32, + pole_dof_idx: wp.int32, + reset_terminated: wp.array(dtype=wp.bool), + reward: wp.array(dtype=wp.float32), +): + env_index = wp.tid() + reward[env_index] = ( + compute_rew_alive(rew_scale_alive, reset_terminated[env_index]) + + compute_rew_termination(rew_scale_terminated, reset_terminated[env_index]) + + compute_rew_pole_pos(rew_scale_pole_pos, joint_pos[env_index, pole_dof_idx]) + + compute_rew_cart_vel(rew_scale_cart_vel, joint_vel[env_index, cart_dof_idx]) + + compute_rew_pole_vel(rew_scale_pole_vel, joint_vel[env_index, pole_dof_idx]) + ) + + +@wp.kernel +def reset( + default_joint_pos: wp.array2d(dtype=wp.float32), + default_joint_vel: wp.array2d(dtype=wp.float32), + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + cart_dof_idx: wp.int32, + pole_dof_idx: wp.int32, + initial_pose_angle_range: wp.vec2f, + env_mask: wp.array(dtype=wp.bool), + state: wp.array(dtype=wp.uint32), +): + env_index = wp.tid() + if env_mask[env_index]: + joint_pos[env_index, cart_dof_idx] = default_joint_pos[env_index, cart_dof_idx] + joint_pos[env_index, pole_dof_idx] = default_joint_pos[env_index, pole_dof_idx] + wp.randf( + state[env_index], initial_pose_angle_range[0] * wp.pi, initial_pose_angle_range[1] * wp.pi + ) + joint_vel[env_index, cart_dof_idx] = default_joint_vel[env_index, cart_dof_idx] + joint_vel[env_index, pole_dof_idx] = default_joint_vel[env_index, pole_dof_idx] + state[env_index] += wp.uint32(1) + + +@wp.kernel +def initialize_state( + state: wp.array(dtype=wp.uint32), + seed: wp.int32, +): + env_index = wp.tid() + state[env_index] = wp.rand_init(seed, env_index) + + +class CartpoleWarpEnv(DirectRLEnvWarp): + cfg: CartpoleWarpEnvCfg + + def __init__(self, cfg: CartpoleWarpEnvCfg, render_mode: str | None = None, **kwargs) -> None: + super().__init__(cfg, render_mode, **kwargs) + + # Get the indices (develop API: find_joints returns (indices, names)) + self._cart_dof_idx, _ = self.cartpole.find_joints(self.cfg.cart_dof_name) + self._pole_dof_idx, _ = self.cartpole.find_joints(self.cfg.pole_dof_name) + + self.action_scale = self.cfg.action_scale + + # Simulation bindings + # Note: these are direct memory views into the Newton simulation data, they should not be modified directly + self.joint_pos = self.cartpole.data.joint_pos.warp + self.joint_vel = self.cartpole.data.joint_vel.warp + + # Buffers + self.observations = wp.zeros((self.num_envs), dtype=wp.vec4f, device=self.device) + self.actions = wp.zeros((self.num_envs, self.cartpole.num_joints), dtype=wp.float32, device=self.device) + self.rewards = wp.zeros((self.num_envs), dtype=wp.float32, device=self.device) + self.states = wp.zeros((self.num_envs), dtype=wp.uint32, device=self.device) + + if self.cfg.seed is None: + self.cfg.seed = -1 + + wp.launch( + initialize_state, + dim=self.num_envs, + inputs=[ + self.states, + self.cfg.seed, + ], + ) + + # Bind torch buffers to warp buffers + self.torch_obs_buf = wp.to_torch(self.observations) + self.torch_reward_buf = wp.to_torch(self.rewards) + self.torch_reset_terminated = wp.to_torch(self.reset_terminated) + self.torch_reset_time_outs = wp.to_torch(self.reset_time_outs) + self.torch_episode_length_buf = self.episode_length_buf # already a torch tensor via wp.to_torch + + def _setup_scene(self) -> None: + self.cartpole = Articulation(self.cfg.robot_cfg) + # add ground plane + spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg()) + # clone and replicate + self.scene.clone_environments(copy_from_source=False) + # we need to explicitly filter collisions for CPU simulation + if self.device == "cpu": + self.scene.filter_collisions(global_prim_paths=[]) + # add articulation to scene + self.scene.articulations["cartpole"] = self.cartpole + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + def _pre_physics_step(self, actions: wp.array) -> None: + wp.launch( + update_actions, + dim=self.num_envs, + inputs=[ + actions, + self.actions, + self.action_scale, + self._cart_dof_idx[0], + ], + ) + + def _apply_action(self) -> None: + self.cartpole.set_joint_effort_target_mask(target=self.actions) + + def _get_observations(self) -> dict: + wp.launch( + get_observations, + dim=self.num_envs, + inputs=[ + self.joint_pos, + self.joint_vel, + self._cart_dof_idx[0], + self._pole_dof_idx[0], + self.observations, + ], + ) + return {"policy": self.torch_obs_buf} + + def _get_rewards(self) -> None: + wp.launch( + compute_rewards, + dim=self.num_envs, + inputs=[ + self.cfg.rew_scale_alive, + self.cfg.rew_scale_terminated, + self.cfg.rew_scale_pole_pos, + self.cfg.rew_scale_cart_vel, + self.cfg.rew_scale_pole_vel, + self.joint_pos, + self.joint_vel, + self._cart_dof_idx[0], + self._pole_dof_idx[0], + self.reset_terminated, + self.rewards, + ], + ) + + def _get_dones(self) -> None: + wp.launch( + get_dones, + dim=self.num_envs, + inputs=[ + self.joint_pos, + self._episode_length_buf_wp, + self._cart_dof_idx[0], + self._pole_dof_idx[0], + self.max_episode_length, + self.cfg.max_cart_pos, + self.reset_terminated, + self.reset_time_outs, + self.reset_buf, + ], + ) + + def _reset_idx(self, mask: wp.array | None = None) -> None: + if mask is None: + mask = self._ALL_ENV_MASK + + super()._reset_idx(mask) + + wp.launch( + reset, + dim=self.num_envs, + inputs=[ + self.cartpole.data.default_joint_pos.warp, + self.cartpole.data.default_joint_vel.warp, + self.joint_pos, + self.joint_vel, + self._cart_dof_idx[0], + self._pole_dof_idx[0], + self.cfg.initial_pole_angle_range, + mask, + self.states, + ], + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/__init__.py new file mode 100644 index 000000000000..8dcb10a57bfd --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Humanoid locomotion environment. +""" + +import gymnasium as gym + +## +# Register Gym environments. +## + +stable_agents = "isaaclab_tasks.direct.humanoid.agents" + +gym.register( + id="Isaac-Humanoid-Direct-Warp-v0", + entry_point=f"{__name__}.humanoid_warp_env:HumanoidWarpEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.humanoid_warp_env:HumanoidWarpEnvCfg", + "rl_games_cfg_entry_point": f"{stable_agents}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{stable_agents}.rsl_rl_ppo_cfg:HumanoidPPORunnerCfg", + "skrl_cfg_entry_point": f"{stable_agents}:skrl_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/humanoid_warp_env.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/humanoid_warp_env.py new file mode 100644 index 000000000000..1dc4d23f7814 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/humanoid/humanoid_warp_env.py @@ -0,0 +1,114 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +from isaaclab_assets import HUMANOID_CFG + +from isaaclab_tasks_experimental.direct.locomotion.locomotion_env_warp import LocomotionWarpEnv + + +@configclass +class HumanoidWarpEnvCfg(DirectRLEnvCfg): + # env + episode_length_s = 15.0 + decimation = 2 + action_scale = 1.0 + action_space = 21 + observation_space = 75 + state_space = 0 + + solver_cfg = MJWarpSolverCfg( + njmax=80, + nconmax=25, + cone="pyramidal", + integrator="implicitfast", + update_data_interval=2, + impratio=1, + ) + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, + num_substeps=2, + debug_mode=False, + use_cuda_graph=True, + ) + + # simulation + sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=decimation, physics=newton_cfg) + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="average", + restitution_combine_mode="average", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # scene + scene: InteractiveSceneCfg = InteractiveSceneCfg( + num_envs=4096, env_spacing=4.0, replicate_physics=True, clone_in_fabric=True + ) + + # robot + robot: ArticulationCfg = HUMANOID_CFG.replace(prim_path="/World/envs/env_.*/Robot") + joint_gears: list = [ + 67.5000, # left_upper_arm + 67.5000, # left_upper_arm + 45.0000, # left_lower_arm + 67.5000, # lower_waist + 67.5000, # lower_waist + 67.5000, # pelvis + 45.0000, # left_thigh: x + 135.0000, # left_thigh: y + 45.0000, # left_thigh: z + 90.0000, # left_knee + 22.5, # left_foot + 22.5, # left_foot + 45.0000, # right_thigh: x + 135.0000, # right_thigh: y + 45.0000, # right_thigh: z + 90.0000, # right_knee + 22.5, # right_foot + 22.5, # right_foot + 67.5000, # right_upper_arm + 67.5000, # right_upper_arm + 45.0000, # right_lower_arm + ] + + heading_weight: float = 0.5 + up_weight: float = 0.1 + + energy_cost_scale: float = 0.05 + actions_cost_scale: float = 0.01 + alive_reward_scale: float = 2.0 + dof_vel_scale: float = 0.1 + + death_cost: float = -1.0 + termination_height: float = 0.8 + + angular_velocity_scale: float = 0.25 + contact_force_scale: float = 0.01 + + +class HumanoidWarpEnv(LocomotionWarpEnv): + cfg: HumanoidWarpEnvCfg + + def __init__(self, cfg: HumanoidWarpEnvCfg, render_mode: str | None = None, **kwargs): + super().__init__(cfg, render_mode, **kwargs) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/inhand_manipulation_warp_env.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/inhand_manipulation_warp_env.py new file mode 100644 index 000000000000..d5bc6a2ad529 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/inhand_manipulation/inhand_manipulation_warp_env.py @@ -0,0 +1,980 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from isaaclab_experimental.envs import DirectRLEnvWarp + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation # , RigidObject +from isaaclab.markers import VisualizationMarkers +from isaaclab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane + +if TYPE_CHECKING: + from isaaclab_tasks_experimental.direct.allegro_hand.allegro_hand_warp_env_cfg import AllegroHandWarpEnvCfg + + +@wp.kernel +def initialize_rng_state( + # input + seed: wp.int32, + # output + state: wp.array(dtype=wp.uint32), +): + env_id = wp.tid() + state[env_id] = wp.rand_init(seed, wp.int32(env_id)) + + +@wp.kernel +def apply_actions_to_targets( + # input + actions: wp.array2d(dtype=wp.float32), + lower_limits: wp.array2d(dtype=wp.float32), + upper_limits: wp.array2d(dtype=wp.float32), + actuated_dof_indices: wp.array(dtype=wp.int32), + act_moving_average: wp.float32, + # input/output + prev_targets: wp.array2d(dtype=wp.float32), + # output + cur_targets: wp.array2d(dtype=wp.float32), +): + env_id, i = wp.tid() + dof_id = actuated_dof_indices[i] + + # clamp and scale action to target range + a = wp.clamp(actions[env_id, i], wp.float32(-1.0), wp.float32(1.0)) + lower = lower_limits[env_id, dof_id] + upper = upper_limits[env_id, dof_id] + t = scale(a, lower, upper) + + # smoothing and boundary clamping + t = act_moving_average * t + (wp.float32(1.0) - act_moving_average) * prev_targets[env_id, dof_id] + t = wp.clamp(t, lower, upper) + + # update targets + cur_targets[env_id, dof_id] = t + prev_targets[env_id, dof_id] = t + + +@wp.kernel +def reset_target_pose( + # input + env_mask: wp.array(dtype=wp.bool), + x_unit_vec: wp.vec3f, + y_unit_vec: wp.vec3f, + env_origins: wp.array(dtype=wp.vec3f), + goal_pos: wp.array(dtype=wp.vec3f), + # input/output + rng_state: wp.array(dtype=wp.uint32), + # output + goal_rot: wp.array(dtype=wp.quatf), + reset_goal_buf: wp.array(dtype=wp.bool), + goal_pos_w: wp.array(dtype=wp.vec3f), +): + env_id = wp.tid() + if env_mask[env_id]: + rand0 = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + rand1 = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + + goal_rot[env_id] = randomize_rotation(rand0, rand1, x_unit_vec, y_unit_vec) + reset_goal_buf[env_id] = False + + # Warp-native addition: goal position in world frame. + goal_pos_w[env_id] = goal_pos[env_id] + env_origins[env_id] + + +@wp.kernel +def reset_object( + # input + default_root_pose: wp.array(dtype=wp.transformf), + env_origins: wp.array(dtype=wp.vec3f), + reset_position_noise: wp.float32, + x_unit_vec: wp.vec3f, + y_unit_vec: wp.vec3f, + env_mask: wp.array(dtype=wp.bool), + # input/output + rng_state: wp.array(dtype=wp.uint32), + # output + root_pose_w: wp.array(dtype=wp.transformf), + root_vel_w: wp.array(dtype=wp.spatial_vectorf), +): + env_id = wp.tid() + if env_mask[env_id]: + nx = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + ny = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + nz = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + + pos_noise = reset_position_noise * wp.vec3f(nx, ny, nz) + base_pos = wp.transform_get_translation(default_root_pose[env_id]) + pos_w = base_pos + env_origins[env_id] + pos_noise + + rand0 = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + rand1 = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + rot_w = randomize_rotation(rand0, rand1, x_unit_vec, y_unit_vec) + + # The following should be equivalent, but consider using write_root_pose_to_sim and write_root_velocity_to_sim + root_pose_w[env_id] = wp.transform(pos_w, rot_w) + root_vel_w[env_id] = wp.spatial_vectorf( + wp.float32(0.0), wp.float32(0.0), wp.float32(0.0), wp.float32(0.0), wp.float32(0.0), wp.float32(0.0) + ) + + +@wp.kernel +def reset_hand( + # input + default_joint_pos: wp.array2d(dtype=wp.float32), + default_joint_vel: wp.array2d(dtype=wp.float32), + lower_limits: wp.array2d(dtype=wp.float32), + upper_limits: wp.array2d(dtype=wp.float32), + reset_dof_pos_noise: wp.float32, + reset_dof_vel_noise: wp.float32, + env_mask: wp.array(dtype=wp.bool), + num_dofs: wp.int32, + # input/output + rng_state: wp.array(dtype=wp.uint32), + # output + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + prev_targets: wp.array2d(dtype=wp.float32), + cur_targets: wp.array2d(dtype=wp.float32), + hand_dof_targets: wp.array2d(dtype=wp.float32), +): + env_id = wp.tid() + if env_mask[env_id]: + # Each env runs sequentially inside this kernel (avoids RNG races across DOFs). + for dof_id in range(num_dofs): + dof_pos_noise = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + + delta_max = upper_limits[env_id, dof_id] - default_joint_pos[env_id, dof_id] + delta_min = lower_limits[env_id, dof_id] - default_joint_pos[env_id, dof_id] + rand_delta = delta_min + (delta_max - delta_min) * 0.5 * dof_pos_noise + pos = default_joint_pos[env_id, dof_id] + reset_dof_pos_noise * rand_delta + + dof_vel_noise = wp.randf(rng_state[env_id], wp.float32(-1.0), wp.float32(1.0)) + rng_state[env_id] += wp.uint32(1) + vel = default_joint_vel[env_id, dof_id] + reset_dof_vel_noise * dof_vel_noise + + # The following lines should be equivalent to the following: + # self.hand.write_joint_state_to_sim(dof_pos, dof_vel, env_ids=env_ids) + joint_pos[env_id, dof_id] = pos + joint_vel[env_id, dof_id] = vel + + prev_targets[env_id, dof_id] = pos + cur_targets[env_id, dof_id] = pos + hand_dof_targets[env_id, dof_id] = pos + + +@wp.kernel +def reset_successes( + # input + env_mask: wp.array(dtype=wp.bool), + # output + successes: wp.array(dtype=wp.float32), +): + env_id = wp.tid() + if env_mask[env_id]: + successes[env_id] = wp.float32(0.0) + + +@wp.kernel +def compute_intermediate_values( + # input + body_pos_w: wp.array2d(dtype=wp.vec3f), + body_quat_w: wp.array2d(dtype=wp.quatf), + body_vel_w: wp.array2d(dtype=wp.spatial_vectorf), + finger_bodies: wp.array(dtype=wp.int32), + env_origins: wp.array(dtype=wp.vec3f), + object_root_pose_w: wp.array(dtype=wp.transformf), + object_root_vel_w: wp.array(dtype=wp.spatial_vectorf), + num_fingertips: wp.int32, + # output + fingertip_pos: wp.array2d(dtype=wp.vec3f), + fingertip_rot: wp.array2d(dtype=wp.quatf), + fingertip_velocities: wp.array2d(dtype=wp.spatial_vectorf), + object_pose: wp.array(dtype=wp.transformf), + object_vels: wp.array(dtype=wp.spatial_vectorf), +): + env_id = wp.tid() + + for i in range(num_fingertips): + body_id = finger_bodies[i] + fingertip_pos[env_id, i] = body_pos_w[env_id, body_id] - env_origins[env_id] + fingertip_rot[env_id, i] = body_quat_w[env_id, body_id] + fingertip_velocities[env_id, i] = body_vel_w[env_id, body_id] + + # Store object pose in env-local frame (translation only; orientation unchanged). + pos_w = wp.transform_get_translation(object_root_pose_w[env_id]) + pos = pos_w - env_origins[env_id] + rot = wp.transform_get_rotation(object_root_pose_w[env_id]) + object_pose[env_id] = wp.transform(pos, rot) + object_vels[env_id] = object_root_vel_w[env_id] + + +@wp.kernel +def get_dones( + # input + max_episode_length: wp.int32, + object_pose: wp.array(dtype=wp.transformf), + in_hand_pos: wp.array(dtype=wp.vec3f), + goal_rot: wp.array(dtype=wp.quatf), + fall_dist: wp.float32, + success_tolerance: wp.float32, + max_consecutive_success: wp.int32, + successes: wp.array(dtype=wp.float32), + # input/output + episode_length_buf: wp.array(dtype=wp.int32), + # output + out_of_reach: wp.array(dtype=wp.bool), + time_out: wp.array(dtype=wp.bool), + reset: wp.array(dtype=wp.bool), +): + env_id = wp.tid() + + object_pos = wp.transform_get_translation(object_pose[env_id]) + object_rot = wp.transform_get_rotation(object_pose[env_id]) + + goal_dist = wp.length(object_pos - in_hand_pos[env_id]) + out_of_reach[env_id] = goal_dist >= fall_dist + + max_success_reached = False + if max_consecutive_success > 0: + # Reset progress (episode length buf) on goal envs if max_consecutive_success > 0 + rot_dist = rotation_distance(object_rot, goal_rot[env_id]) + if wp.abs(rot_dist) <= success_tolerance: + episode_length_buf[env_id] = 0 + max_success_reached = successes[env_id] >= wp.float32(max_consecutive_success) + + time_out[env_id] = episode_length_buf[env_id] >= (max_episode_length - 1) or max_success_reached + reset[env_id] = out_of_reach[env_id] or time_out[env_id] + + +@wp.kernel +def compute_reduced_observations( + # input + fingertip_pos: wp.array2d(dtype=wp.vec3f), + object_pose: wp.array(dtype=wp.transformf), + goal_rot: wp.array(dtype=wp.quatf), + actions: wp.array2d(dtype=wp.float32), + num_fingertips: wp.int32, + action_dim: wp.int32, + # output + observations: wp.array2d(dtype=wp.float32), +): + env_id = wp.tid() + + obj_pos = wp.transform_get_translation(object_pose[env_id]) + obj_rot = wp.transform_get_rotation(object_pose[env_id]) + + idx = int(0) + for i in range(num_fingertips): + observations[env_id, idx + 0] = fingertip_pos[env_id, i][0] + observations[env_id, idx + 1] = fingertip_pos[env_id, i][1] + observations[env_id, idx + 2] = fingertip_pos[env_id, i][2] + idx += 3 + + observations[env_id, idx + 0] = obj_pos[0] + observations[env_id, idx + 1] = obj_pos[1] + observations[env_id, idx + 2] = obj_pos[2] + idx += 3 + + rel = obj_rot * wp.quat_inverse(goal_rot[env_id]) + observations[env_id, idx + 0] = rel[0] + observations[env_id, idx + 1] = rel[1] + observations[env_id, idx + 2] = rel[2] + observations[env_id, idx + 3] = rel[3] + idx += 4 + + for i in range(action_dim): + observations[env_id, idx + i] = actions[env_id, i] + + +@wp.kernel +def compute_full_observations( + # input + hand_dof_pos: wp.array2d(dtype=wp.float32), + hand_dof_vel: wp.array2d(dtype=wp.float32), + hand_dof_lower_limits: wp.array2d(dtype=wp.float32), + hand_dof_upper_limits: wp.array2d(dtype=wp.float32), + vel_obs_scale: wp.float32, + object_pose: wp.array(dtype=wp.transformf), + object_vels: wp.array(dtype=wp.spatial_vectorf), + in_hand_pos: wp.array(dtype=wp.vec3f), + goal_rot: wp.array(dtype=wp.quatf), + fingertip_pos: wp.array2d(dtype=wp.vec3f), + fingertip_rot: wp.array2d(dtype=wp.quatf), + fingertip_velocities: wp.array2d(dtype=wp.spatial_vectorf), + actions: wp.array2d(dtype=wp.float32), + num_hand_dofs: wp.int32, + num_fingertips: wp.int32, + action_dim: wp.int32, + # output + observations: wp.array2d(dtype=wp.float32), +): + env_id = wp.tid() + + # hand + for i in range(num_hand_dofs): + observations[env_id, i] = unscale( + hand_dof_pos[env_id, i], hand_dof_lower_limits[env_id, i], hand_dof_upper_limits[env_id, i] + ) + + offset = num_hand_dofs + for i in range(num_hand_dofs): + observations[env_id, offset + i] = vel_obs_scale * hand_dof_vel[env_id, i] + offset += num_hand_dofs + + # object + obj_pos = wp.transform_get_translation(object_pose[env_id]) + obj_rot = wp.transform_get_rotation(object_pose[env_id]) + + observations[env_id, offset + 0] = obj_pos[0] + observations[env_id, offset + 1] = obj_pos[1] + observations[env_id, offset + 2] = obj_pos[2] + offset += 3 + + observations[env_id, offset + 0] = obj_rot[0] + observations[env_id, offset + 1] = obj_rot[1] + observations[env_id, offset + 2] = obj_rot[2] + observations[env_id, offset + 3] = obj_rot[3] + offset += 4 + + # spatial_vectorf layout: [0:3]=angular, [3:6]=linear + # torch reference order: linear (unscaled) first, then angular (scaled) + observations[env_id, offset + 0] = object_vels[env_id][3] + observations[env_id, offset + 1] = object_vels[env_id][4] + observations[env_id, offset + 2] = object_vels[env_id][5] + offset += 3 + + observations[env_id, offset + 0] = vel_obs_scale * object_vels[env_id][0] + observations[env_id, offset + 1] = vel_obs_scale * object_vels[env_id][1] + observations[env_id, offset + 2] = vel_obs_scale * object_vels[env_id][2] + offset += 3 + + # goal + observations[env_id, offset + 0] = in_hand_pos[env_id][0] + observations[env_id, offset + 1] = in_hand_pos[env_id][1] + observations[env_id, offset + 2] = in_hand_pos[env_id][2] + offset += 3 + + observations[env_id, offset + 0] = goal_rot[env_id][0] + observations[env_id, offset + 1] = goal_rot[env_id][1] + observations[env_id, offset + 2] = goal_rot[env_id][2] + observations[env_id, offset + 3] = goal_rot[env_id][3] + offset += 4 + + rel = obj_rot * wp.quat_inverse(goal_rot[env_id]) + observations[env_id, offset + 0] = rel[0] + observations[env_id, offset + 1] = rel[1] + observations[env_id, offset + 2] = rel[2] + observations[env_id, offset + 3] = rel[3] + offset += 4 + + # fingertips + for i in range(num_fingertips): + observations[env_id, offset + 0] = fingertip_pos[env_id, i][0] + observations[env_id, offset + 1] = fingertip_pos[env_id, i][1] + observations[env_id, offset + 2] = fingertip_pos[env_id, i][2] + offset += 3 + + for i in range(num_fingertips): + observations[env_id, offset + 0] = fingertip_rot[env_id, i][0] + observations[env_id, offset + 1] = fingertip_rot[env_id, i][1] + observations[env_id, offset + 2] = fingertip_rot[env_id, i][2] + observations[env_id, offset + 3] = fingertip_rot[env_id, i][3] + offset += 4 + + for i in range(num_fingertips): + for j in range(6): + observations[env_id, offset + j] = fingertip_velocities[env_id, i][j] + offset += 6 + + # actions + for i in range(action_dim): + observations[env_id, offset + i] = actions[env_id, i] + + +@wp.kernel +def sanitize_and_print_once( + # input/output + obs: wp.array(dtype=wp.float32), + printed_flag: wp.array(dtype=wp.int32), +): + i = wp.tid() + v = obs[i] + + if not wp.isfinite(v): + # Try to claim the "print token" + if wp.atomic_cas(printed_flag, 0, 0, 1) == 0: + wp.printf("Non-finite values in observations") + + obs[i] = wp.float32(0.0) + + +@wp.kernel +def compute_rewards( + # input + reset_buf: wp.array(dtype=wp.bool), + object_pose: wp.array(dtype=wp.transformf), + target_pos: wp.array(dtype=wp.vec3f), + target_rot: wp.array(dtype=wp.quatf), + dist_reward_scale: wp.float32, + rot_reward_scale: wp.float32, + rot_eps: wp.float32, + actions: wp.array2d(dtype=wp.float32), + action_penalty_scale: wp.float32, + success_tolerance: wp.float32, + reach_goal_bonus: wp.float32, + fall_dist: wp.float32, + fall_penalty: wp.float32, + action_dim: wp.int32, + # input/output + reset_goal_buf: wp.array(dtype=wp.bool), + successes: wp.array(dtype=wp.float32), + num_resets_out: wp.array(dtype=wp.float32), + finished_cons_successes_out: wp.array(dtype=wp.float32), + # output + reward_out: wp.array(dtype=wp.float32), +): + env_id = wp.tid() + + obj_pos = wp.transform_get_translation(object_pose[env_id]) + obj_rot = wp.transform_get_rotation(object_pose[env_id]) + + goal_dist = wp.length(obj_pos - target_pos[env_id]) + rot_dist = rotation_distance(obj_rot, target_rot[env_id]) + + dist_rew = goal_dist * dist_reward_scale + rot_rew = wp.float32(1.0) / (wp.abs(rot_dist) + rot_eps) * rot_reward_scale + + action_penalty = wp.float32(0.0) + for i in range(action_dim): + action_penalty += actions[env_id, i] * actions[env_id, i] + + # Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty + reward = dist_rew + rot_rew + action_penalty * action_penalty_scale + + # Find out which envs hit the goal and update successes count + reached = wp.abs(rot_dist) <= success_tolerance + goal_resets = reached or reset_goal_buf[env_id] + reset_goal_buf[env_id] = goal_resets + if goal_resets: + successes[env_id] = successes[env_id] + wp.float32(1.0) + + # Success bonus: orientation is within `success_tolerance` of goal orientation + if goal_resets: + reward = reward + reach_goal_bonus + + # Fall penalty: distance to the goal is larger than a threshold + if goal_dist >= fall_dist: + reward = reward + fall_penalty + + # Consecutive-successes stats (mirrors Torch env): + # resets = torch.where(goal_dist >= fall_dist, ones_like(reset_buf), reset_buf) + resets = (goal_dist >= fall_dist) or reset_buf[env_id] + if resets: + wp.atomic_add(num_resets_out, 0, wp.float32(1.0)) + wp.atomic_add(finished_cons_successes_out, 0, successes[env_id]) + + reward_out[env_id] = reward + + +@wp.kernel +def update_consecutive_successes_from_stats( + # input + num_resets: wp.array(dtype=wp.float32), + finished_cons_successes: wp.array(dtype=wp.float32), + av_factor: wp.float32, + # input/output + consecutive_successes: wp.array(dtype=wp.float32), +): + """Finalize the Torch env's EMA update for consecutive_successes and clear the accumulators.""" + # single-thread kernel (dim=1) + n = num_resets[0] + prev = consecutive_successes[0] + if n > wp.float32(0.0): + consecutive_successes[0] = av_factor * (finished_cons_successes[0] / n) + (wp.float32(1.0) - av_factor) * prev + + +@wp.func +def scale(x: wp.float32, lower: wp.float32, upper: wp.float32) -> wp.float32: + return wp.float32(0.5) * (x + wp.float32(1.0)) * (upper - lower) + lower + + +@wp.func +def unscale(x: wp.float32, lower: wp.float32, upper: wp.float32) -> wp.float32: + return (wp.float32(2.0) * x - upper - lower) / (upper - lower) + + +@wp.func +def randomize_rotation(rand0: wp.float32, rand1: wp.float32, x_axis: wp.vec3f, y_axis: wp.vec3f) -> wp.quatf: + return wp.quat_from_axis_angle(x_axis, rand0 * wp.pi) * wp.quat_from_axis_angle(y_axis, rand1 * wp.pi) + + +@wp.func +def rotation_distance(object_rot: wp.quatf, target_rot: wp.quatf) -> wp.float32: + # Orientation alignment for the cube in hand and goal cube + quat_diff = object_rot * wp.quat_inverse(target_rot) + # Match Torch env convention: uses indices [1:4] for the vector part (see `rotation_distance` in Torch env). + v_norm = wp.sqrt(quat_diff[1] * quat_diff[1] + quat_diff[2] * quat_diff[2] + quat_diff[3] * quat_diff[3]) + v_norm = wp.min(v_norm, wp.float32(1.0)) + return wp.float32(2.0) * wp.asin(v_norm) + + +class InHandManipulationWarpEnv(DirectRLEnvWarp): + cfg: AllegroHandWarpEnvCfg # | ShadowHandWarpEnvCfg + + # def __init__(self, cfg: AllegroHandWarpEnvCfg | ShadowHandWarpEnvCfg, render_mode: str | None = None, **kwargs): + def __init__(self, cfg: AllegroHandWarpEnvCfg, render_mode: str | None = None, **kwargs): + super().__init__(cfg, render_mode, **kwargs) + + # --------------------------------------------------------------------- + # Constants + # --------------------------------------------------------------------- + + # dof used for joint related init and sample + self.num_hand_dofs = self.hand.num_joints + + # list of actuated joints + actuated_dof_indices: list[int] = list() + for joint_name in cfg.actuated_joint_names: + actuated_dof_indices.append(self.hand.joint_names.index(joint_name)) + actuated_dof_indices.sort() + self.num_actuated_dofs = len(actuated_dof_indices) + + # Warp index/mask helpers for kernels and articulation APIs. + self.actuated_dof_indices = wp.array(actuated_dof_indices, dtype=wp.int32, device=self.device) + actuated_mask = [False] * self.num_hand_dofs + for idx in actuated_dof_indices: + actuated_mask[idx] = True + self.actuated_dof_mask = wp.array(actuated_mask, dtype=wp.bool, device=self.device) + + # finger bodies + finger_bodies: list[int] = list() + for body_name in self.cfg.fingertip_body_names: + finger_bodies.append(self.hand.body_names.index(body_name)) + finger_bodies.sort() + self.num_fingertips = len(finger_bodies) + self.finger_bodies = wp.array(finger_bodies, dtype=wp.int32, device=self.device) + + # joint limits + self.hand_dof_lower_limits = self.hand.data.joint_pos_limits_lower.warp + self.hand_dof_upper_limits = self.hand.data.joint_pos_limits_upper.warp + + # unit vectors + self.x_unit_vec = wp.vec3f(1.0, 0.0, 0.0) + self.y_unit_vec = wp.vec3f(0.0, 1.0, 0.0) + self.z_unit_vec = wp.vec3f(0.0, 0.0, 1.0) + + # Per-env origins (Warp view for kernels; Torch env uses `self.scene.env_origins` directly). + self.env_origins = wp.from_torch(self.scene.env_origins, dtype=wp.vec3f) + + # --------------------------------------------------------------------- + # Warp buffers + # --------------------------------------------------------------------- + + # buffers for position targets + self.hand_dof_targets = wp.zeros((self.num_envs, self.num_hand_dofs), dtype=wp.float32, device=self.device) + self.prev_targets = wp.zeros((self.num_envs, self.num_hand_dofs), dtype=wp.float32, device=self.device) + self.cur_targets = wp.zeros((self.num_envs, self.num_hand_dofs), dtype=wp.float32, device=self.device) + + # track goal resets + self.reset_goal_buf = wp.zeros(self.num_envs, dtype=wp.bool, device=self.device) + # used to compare object position + self.in_hand_pos = wp.zeros(self.num_envs, dtype=wp.vec3f, device=self.device) + # default goal positions + self.goal_rot = wp.zeros(self.num_envs, dtype=wp.quatf, device=self.device) + self.goal_pos = wp.zeros(self.num_envs, dtype=wp.vec3f, device=self.device) + self.goal_pos_w = wp.zeros(self.num_envs, dtype=wp.vec3f, device=self.device) + + # Initialize goal constants from Torch (avoid a one-off kernel launch). + default_root_pose = self.object.data.default_root_pose.torch.to(self.device) + in_hand_pos = default_root_pose[:, 0:3].clone() + in_hand_pos[:, 2] -= 0.04 + self.in_hand_pos.assign(wp.from_torch(in_hand_pos, dtype=wp.vec3f)) + + goal_pos = torch.tensor([-0.2, -0.45, 0.68], device=self.device, dtype=torch.float32).repeat((self.num_envs, 1)) + self.goal_pos.assign(wp.from_torch(goal_pos, dtype=wp.vec3f)) + + goal_rot = torch.zeros((self.num_envs, 4), device=self.device, dtype=torch.float32) + goal_rot[:, 3] = 1.0 # (x, y, z, w) + self.goal_rot.assign(wp.from_torch(goal_rot, dtype=wp.quatf)) + + # initialize goal marker + self.goal_markers = VisualizationMarkers(self.cfg.goal_object_cfg) + + # Reduction buffers for consecutive_successes update (Warp-only). + self._num_resets = wp.zeros(1, dtype=wp.float32, device=self.device) + self._finished_cons_successes = wp.zeros(1, dtype=wp.float32, device=self.device) + # track successes + self.successes = wp.zeros(self.num_envs, dtype=wp.float32, device=self.device) + self.consecutive_successes = wp.zeros(1, dtype=wp.float32, device=self.device) + + # Persistent RL buffers (Warp). + self.actions = wp.zeros((self.num_envs, self.cfg.action_space), dtype=wp.float32, device=self.device) + self.observations = wp.zeros((self.num_envs, self.cfg.observation_space), dtype=wp.float32, device=self.device) + self.rewards = wp.zeros((self.num_envs,), dtype=wp.float32, device=self.device) + # Flag used as a print token for non-finite observations (Warp). + self.obs_nonfinite_flag = wp.zeros(1, dtype=wp.int32, device=self.device) + + # Intermediate values (Warp) -- mirrors the Torch env's `_compute_intermediate_values` fields. + self.fingertip_pos = wp.zeros((self.num_envs, self.num_fingertips), dtype=wp.vec3f, device=self.device) + self.fingertip_rot = wp.zeros((self.num_envs, self.num_fingertips), dtype=wp.quatf, device=self.device) + self.fingertip_velocities = wp.zeros( + (self.num_envs, self.num_fingertips), dtype=wp.spatial_vectorf, device=self.device + ) + + self.object_pose = wp.zeros(self.num_envs, dtype=wp.transformf, device=self.device) + self.object_vels = wp.zeros(self.num_envs, dtype=wp.spatial_vectorf, device=self.device) + + # RNG state (per-env) for randomizations in reset/goal resets. + self.rng_state = wp.zeros(self.num_envs, dtype=wp.uint32, device=self.device) + if self.cfg.seed is None: + self.cfg.seed = -1 + wp.launch( + initialize_rng_state, + dim=self.num_envs, + inputs=[ + self.cfg.seed, + self.rng_state, + ], + device=self.device, + ) + + # --------------------------------------------------------------------- + # Torch views / aliases + # --------------------------------------------------------------------- + + # Bind torch buffers to warp buffers (same pattern as Warp Cartpole). + self.torch_obs_buf = wp.to_torch(self.observations) + self.torch_reward_buf = wp.to_torch(self.rewards) + self.torch_reset_terminated = wp.to_torch(self.reset_terminated) + self.torch_reset_time_outs = wp.to_torch(self.reset_time_outs) + self.torch_episode_length_buf = self.episode_length_buf # already a torch tensor via wp.to_torch + + def _setup_scene(self): + # add hand, in-hand object, and goal object + self.hand = Articulation(self.cfg.robot_cfg) + self.object = Articulation(self.cfg.object_cfg) + # add ground plane + spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg()) + # clone and replicate (no need to filter for this environment) + self.scene.clone_environments(copy_from_source=False) + # add articulation to scene - we must register to scene to randomize with EventManager + self.scene.articulations["robot"] = self.hand + self.scene.articulations["object"] = self.object + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + def _pre_physics_step(self, actions: wp.array) -> None: + # Store actions in a persistent Warp buffer (analogous to `actions.clone()` in the Torch env). + wp.copy(self.actions, actions) + + def _apply_action(self) -> None: + wp.launch( + apply_actions_to_targets, + dim=(self.num_envs, self.num_actuated_dofs), + inputs=[ + self.actions, + self.hand_dof_lower_limits, + self.hand_dof_upper_limits, + self.actuated_dof_indices, + self.cfg.act_moving_average, + self.prev_targets, + self.cur_targets, + ], + device=self.device, + ) + + # Apply position targets using mask method (CUDA graph safe). + # All joints are actuated for Allegro, so default masks (None = all) are correct. + self.hand.set_joint_position_target_mask(target=self.cur_targets) + + def _get_observations(self) -> dict: + # if self.cfg.asymmetric_obs: + # self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[ + # :, self.finger_bodies + # ] + if self.cfg.obs_type == "openai": + self.compute_reduced_observations() + elif self.cfg.obs_type == "full": + self.compute_full_observations() + else: + raise ValueError(f"Unknown obs_type: {self.cfg.obs_type}") + return {"policy": self.torch_obs_buf} + + def _get_rewards(self) -> None: + # Clear reduction buffers before launching the reward kernel. + # wp.assign(self._num_resets, 0.0) + # wp.assign(self._finished_cons_successes, 0.0) + self._num_resets.zero_() + self._finished_cons_successes.zero_() + wp.launch( + compute_rewards, + dim=self.num_envs, + inputs=[ + self.reset_buf, + self.object_pose, + self.in_hand_pos, + self.goal_rot, + self.cfg.dist_reward_scale, + self.cfg.rot_reward_scale, + self.cfg.rot_eps, + self.actions, + self.cfg.action_penalty_scale, + self.cfg.success_tolerance, + self.cfg.reach_goal_bonus, + self.cfg.fall_dist, + self.cfg.fall_penalty, + self.cfg.action_space, + self.reset_goal_buf, + self.successes, + self._num_resets, + self._finished_cons_successes, + self.rewards, + ], + device=self.device, + ) + + # A separate kernel is needed as Warp does not support thread synchronization for reductions. + wp.launch( + update_consecutive_successes_from_stats, + dim=1, + inputs=[ + self._num_resets, + self._finished_cons_successes, + self.cfg.av_factor, + self.consecutive_successes, + ], + device=self.device, + ) + + if "log" not in self.extras: + self.extras["log"] = dict() + # .mean() cannot be called here as it causes problems on stream + self.extras["log"]["consecutive_successes"] = wp.to_torch(self.consecutive_successes) + + # Reset goals for envs that reached the target (mask is `reset_goal_buf`). + # This avoids Torch-side index extraction and keeps the step graphable. + self._reset_target_pose(mask=self.reset_goal_buf) + + def _get_dones(self) -> None: + self._compute_intermediate_values() + + wp.launch( + get_dones, + dim=self.num_envs, + inputs=[ + self.max_episode_length, + self.object_pose, + self.in_hand_pos, + self.goal_rot, + self.cfg.fall_dist, + self.cfg.success_tolerance, + self.cfg.max_consecutive_success, + self.successes, + self._episode_length_buf_wp, + self.reset_terminated, + self.reset_time_outs, + self.reset_buf, + ], + device=self.device, + ) + + def _reset_idx(self, mask: wp.array | None = None): + if mask is None: + mask = self._ALL_ENV_MASK + + # resets articulation and rigid body attributes + super()._reset_idx(mask) + + # reset goals + self._reset_target_pose(mask=mask) + + # reset object + wp.launch( + reset_object, + dim=self.num_envs, + inputs=[ + self.object.data.default_root_pose.warp, + self.env_origins, + self.cfg.reset_position_noise, + self.x_unit_vec, + self.y_unit_vec, + mask, + self.rng_state, + self.object.data.root_link_pose_w.warp, + self.object.data.root_com_vel_w.warp, + ], + device=self.device, + ) + + # reset hand + wp.launch( + reset_hand, + dim=self.num_envs, + inputs=[ + self.hand.data.default_joint_pos.warp, + self.hand.data.default_joint_vel.warp, + self.hand_dof_lower_limits, + self.hand_dof_upper_limits, + self.cfg.reset_dof_pos_noise, + self.cfg.reset_dof_vel_noise, + mask, + self.num_hand_dofs, + self.rng_state, + self.hand.data.joint_pos.warp, + self.hand.data.joint_vel.warp, + self.prev_targets, + self.cur_targets, + self.hand_dof_targets, + ], + device=self.device, + ) + + self.hand.set_joint_position_target_mask(target=self.cur_targets, env_mask=mask) + + wp.launch( + reset_successes, + dim=self.num_envs, + inputs=[ + mask, + self.successes, + ], + device=self.device, + ) + + self._compute_intermediate_values() + + def _reset_target_pose(self, env_ids: Sequence[int] | None = None, mask: wp.array | None = None): + # reset goal rotation + if mask is None: + if env_ids is None: + return + env_mask_list = [False] * self.num_envs + for env_id in env_ids: + env_mask_list[int(env_id)] = True + mask = wp.array(env_mask_list, dtype=wp.bool, device=self.device) + + # update goal pose and markers + wp.launch( + reset_target_pose, + dim=self.num_envs, + inputs=[ + mask, + self.x_unit_vec, + self.y_unit_vec, + self.env_origins, + self.goal_pos, + self.rng_state, + self.goal_rot, + self.reset_goal_buf, + self.goal_pos_w, + ], + device=self.device, + ) + + def _post_step_visualize(self) -> None: + """Update goal markers outside CUDA graph scope.""" + self.goal_markers.visualize(wp.to_torch(self.goal_pos_w), wp.to_torch(self.goal_rot)) + + def _compute_intermediate_values(self): + # data for hand/object (Warp version of the Torch env's `_compute_intermediate_values`) + wp.launch( + compute_intermediate_values, + dim=self.num_envs, + inputs=[ + self.hand.data.body_pos_w.warp, + self.hand.data.body_quat_w.warp, + self.hand.data.body_vel_w.warp, + self.finger_bodies, + self.env_origins, + self.object.data.root_link_pose_w.warp, + self.object.data.root_com_vel_w.warp, + self.num_fingertips, + self.fingertip_pos, + self.fingertip_rot, + self.fingertip_velocities, + self.object_pose, + self.object_vels, + ], + device=self.device, + ) + + def compute_reduced_observations(self): + # Per https://arxiv.org/pdf/1808.00177.pdf Table 2 + # Fingertip positions + # Object Position, but not orientation + # Relative target orientation + wp.launch( + compute_reduced_observations, + dim=self.num_envs, + inputs=[ + self.fingertip_pos, + self.object_pose, + self.goal_rot, + self.actions, + self.num_fingertips, + self.cfg.action_space, + self.observations, + ], + device=self.device, + ) + # Warp-native non-finite sanitization + print-once. + wp.launch( + sanitize_and_print_once, + dim=(self.num_envs * self.cfg.observation_space), + inputs=[self.observations.flatten(), self.obs_nonfinite_flag], + device=self.device, + ) + self.obs_nonfinite_flag.zero_() + + def compute_full_observations(self): + wp.launch( + compute_full_observations, + dim=self.num_envs, + inputs=[ + self.hand.data.joint_pos.warp, + self.hand.data.joint_vel.warp, + self.hand_dof_lower_limits, + self.hand_dof_upper_limits, + self.cfg.vel_obs_scale, + self.object_pose, + self.object_vels, + self.in_hand_pos, + self.goal_rot, + self.fingertip_pos, + self.fingertip_rot, + self.fingertip_velocities, + self.actions, + self.num_hand_dofs, + self.num_fingertips, + self.cfg.action_space, + self.observations, + ], + device=self.device, + ) + # Warp-native non-finite sanitization + print-once. + wp.launch( + sanitize_and_print_once, + dim=(self.num_envs * self.cfg.observation_space), + inputs=[self.observations.flatten(), self.obs_nonfinite_flag], + device=self.device, + ) + self.obs_nonfinite_flag.zero_() diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/locomotion_env_warp.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/locomotion_env_warp.py new file mode 100644 index 000000000000..d8d712a655e9 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/direct/locomotion/locomotion_env_warp.py @@ -0,0 +1,575 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import warp as wp +from isaaclab_experimental.envs import DirectRLEnvWarp + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnvCfg + + +@wp.func +def safe_normalize(v: wp.vec3f) -> wp.vec3f: + """Normalize a vector with epsilon to avoid NaN on zero-length vectors.""" + length = wp.max(wp.length(v), 1e-9) + return v / length + + +@wp.func +def fmod(x: wp.float32, y: wp.float32) -> wp.float32: + return x - y * wp.floor(x / y) + + +@wp.func +def euler_from_quat(q: wp.quatf) -> wp.vec3f: + sinr_cosp = 2.0 * (q[3] * q[0] + q[1] * q[2]) + cosr_cosp = q[3] * q[3] - q[0] * q[0] - q[1] * q[1] + q[2] * q[2] + sinp = 2.0 * (q[3] * q[1] - q[2] * q[0]) + siny_cosp = 2.0 * (q[3] * q[2] + q[0] * q[1]) + cosy_cosp = q[3] * q[3] + q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + roll = wp.atan2(sinr_cosp, cosr_cosp) + if wp.abs(sinp) >= 1: + pitch = wp.sign(sinp) * wp.pi / 2.0 + else: + pitch = wp.asin(sinp) + yaw = wp.atan2(siny_cosp, cosy_cosp) + + return wp.vec3f( + fmod(roll, wp.static(2.0 * wp.pi)), + fmod(pitch, wp.static(2.0 * wp.pi)), + fmod(yaw, wp.static(2.0 * wp.pi)), + ) + + +@wp.kernel +def get_dones( + episode_length_buf: wp.array(dtype=wp.int32), + torso_pose: wp.array(dtype=wp.transformf), + max_episode_length: wp.int32, + termination_height: wp.float32, + out_of_bounds: wp.array(dtype=wp.bool), + time_out: wp.array(dtype=wp.bool), + reset: wp.array(dtype=wp.bool), +): + env_index = wp.tid() + out_of_bounds[env_index] = wp.abs(torso_pose[env_index][2]) < termination_height + time_out[env_index] = episode_length_buf[env_index] >= (max_episode_length - 1) + reset[env_index] = out_of_bounds[env_index] or time_out[env_index] + + +@wp.kernel +def observations( + torso_pose: wp.array(dtype=wp.transformf), + velocity: wp.array(dtype=wp.spatial_vectorf), + rpy: wp.array(dtype=wp.vec3f), + angle_to_target: wp.array(dtype=wp.float32), + up_proj: wp.array(dtype=wp.float32), + heading_proj: wp.array(dtype=wp.float32), + dof_pos_scaled: wp.array2d(dtype=wp.float32), + dof_vel: wp.array2d(dtype=wp.float32), + actions: wp.array2d(dtype=wp.float32), + observations: wp.array2d(dtype=wp.float32), + dof_vel_scale: wp.float32, + angular_velocity_scale: wp.float32, + num_dof: wp.int32, +): + env_index = wp.tid() + observations[env_index, 0] = torso_pose[env_index][2] + observations[env_index, 1] = velocity[env_index][0] + observations[env_index, 2] = velocity[env_index][1] + observations[env_index, 3] = velocity[env_index][2] + observations[env_index, 4] = velocity[env_index][3] * angular_velocity_scale + observations[env_index, 5] = velocity[env_index][4] * angular_velocity_scale + observations[env_index, 6] = velocity[env_index][5] * angular_velocity_scale + observations[env_index, 7] = wp.atan2(wp.sin(rpy[env_index][2]), wp.cos(rpy[env_index][2])) + observations[env_index, 8] = wp.atan2(wp.sin(rpy[env_index][0]), wp.cos(rpy[env_index][0])) + observations[env_index, 9] = wp.atan2(wp.sin(angle_to_target[env_index]), wp.cos(angle_to_target[env_index])) + observations[env_index, 10] = up_proj[env_index] + observations[env_index, 11] = heading_proj[env_index] + + offset_1 = 12 + num_dof + offset_2 = offset_1 + num_dof + + for i in range(num_dof): + observations[env_index, 12 + i] = dof_pos_scaled[env_index, i] + for i in range(num_dof): + observations[env_index, offset_1 + i] = dof_vel[env_index, i] * dof_vel_scale + for i in range(num_dof): + observations[env_index, offset_2 + i] = actions[env_index, i] + + +@wp.func +def translate_transform( + transform: wp.transformf, + translation: wp.vec3f, +) -> wp.transformf: + return wp.transform( + wp.transform_get_translation(transform) + translation, + wp.transform_get_rotation(transform), + ) + + +@wp.kernel +def reset_root( + default_root_pose: wp.array(dtype=wp.transformf), + default_root_vel: wp.array(dtype=wp.spatial_vectorf), + env_origins: wp.array(dtype=wp.vec3f), + dt: wp.float32, + targets: wp.array(dtype=wp.vec3f), + to_targets: wp.array(dtype=wp.vec3f), + potentials: wp.array(dtype=wp.float32), + root_pose: wp.array(dtype=wp.transformf), + root_vel: wp.array(dtype=wp.spatial_vectorf), + env_mask: wp.array(dtype=wp.bool), +): + env_index = wp.tid() + if env_mask[env_index]: + root_pose[env_index] = default_root_pose[env_index] + root_pose[env_index] = translate_transform(root_pose[env_index], env_origins[env_index]) + root_vel[env_index] = default_root_vel[env_index] + to_targets[env_index] = targets[env_index] - wp.transform_get_translation(root_pose[env_index]) + to_targets[env_index][2] = 0.0 + potentials[env_index] = -wp.length(to_targets[env_index]) / dt + + +@wp.kernel +def reset_joints( + default_joint_pos: wp.array2d(dtype=wp.float32), + default_joint_vel: wp.array2d(dtype=wp.float32), + joint_pos: wp.array2d(dtype=wp.float32), + joint_vel: wp.array2d(dtype=wp.float32), + env_mask: wp.array(dtype=wp.bool), +): + env_index, joint_index = wp.tid() + if env_mask[env_index]: + joint_pos[env_index, joint_index] = default_joint_pos[env_index, joint_index] + joint_vel[env_index, joint_index] = default_joint_vel[env_index, joint_index] + + +@wp.func +def heading_reward( + heading_proj: wp.float32, + heading_weight: wp.float32, +) -> wp.float32: + if heading_proj > 0.8: + return heading_weight + else: + return heading_weight * heading_proj / 0.8 + + +@wp.func +def up_reward( + up_proj: wp.float32, + up_weight: wp.float32, +) -> wp.float32: + if up_proj > 0.93: + return up_weight + else: + return 0.0 + + +@wp.func +def progress_reward( + current_value: wp.float32, + prev_value: wp.float32, +) -> wp.float32: + return current_value - prev_value + + +@wp.func +def actions_cost( + actions: wp.array(dtype=wp.float32), +) -> wp.float32: + sum_ = wp.float32(0.0) + for i in range(len(actions)): + sum_ += actions[i] * actions[i] + return sum_ + + +@wp.func +def electricity_cost( + actions: wp.array(dtype=wp.float32), + dof_vel: wp.array(dtype=wp.float32), + dof_vel_scale: wp.float32, + motor_effort_ratio: wp.array(dtype=wp.float32), +) -> wp.float32: + sum_ = wp.float32(0.0) + for i in range(len(actions)): + sum_ += wp.abs(actions[i] * dof_vel[i] * dof_vel_scale) * motor_effort_ratio[i] + return sum_ + + +@wp.func +def dof_at_limit_cost( + dof_pos_scaled: wp.array(dtype=wp.float32), +) -> wp.float32: + sum_ = wp.float32(0.0) + for i in range(len(dof_pos_scaled)): + if dof_pos_scaled[i] > 0.98: + sum_ += 1.0 + return sum_ + + +@wp.kernel +def compute_rewards( + actions: wp.array2d(dtype=wp.float32), + dof_vel: wp.array2d(dtype=wp.float32), + dof_pos_scaled: wp.array2d(dtype=wp.float32), + reset_terminated: wp.array(dtype=wp.bool), + heading_proj: wp.array(dtype=wp.float32), + up_proj: wp.array(dtype=wp.float32), + potentials: wp.array(dtype=wp.float32), + prev_potentials: wp.array(dtype=wp.float32), + motor_effort_ratio: wp.array(dtype=wp.float32), + up_weight: wp.float32, + heading_weight: wp.float32, + actions_cost_scale: wp.float32, + energy_cost_scale: wp.float32, + dof_vel_scale: wp.float32, + death_cost: wp.float32, + alive_reward_scale: wp.float32, + reward: wp.array(dtype=wp.float32), +): + env_index = wp.tid() + if reset_terminated[env_index]: + reward[env_index] = death_cost + else: + reward[env_index] = ( + progress_reward(potentials[env_index], prev_potentials[env_index]) + + alive_reward_scale + + up_reward(up_proj[env_index], up_weight) + + heading_reward(heading_proj[env_index], heading_weight) + - actions_cost_scale * actions_cost(actions[env_index]) + - energy_cost_scale + * electricity_cost(actions[env_index], dof_vel[env_index], dof_vel_scale, motor_effort_ratio) + - dof_at_limit_cost(dof_pos_scaled[env_index]) + ) + + +@wp.kernel +def compute_heading_and_up( + torso_pose: wp.array(dtype=wp.transformf), + targets: wp.array(dtype=wp.vec3f), + dt: wp.float32, + to_targets: wp.array(dtype=wp.vec3f), + up_proj: wp.array(dtype=wp.float32), + heading_proj: wp.array(dtype=wp.float32), + up_vec: wp.array(dtype=wp.vec3f), + heading_vec: wp.array(dtype=wp.vec3f), + potentials: wp.array(dtype=wp.float32), + prev_potentials: wp.array(dtype=wp.float32), +): + env_index = wp.tid() + up_vec[env_index] = wp.quat_rotate(wp.transform_get_rotation(torso_pose[env_index]), wp.static(wp.vec3f(0, 0, 1))) + heading_vec[env_index] = wp.quat_rotate( + wp.transform_get_rotation(torso_pose[env_index]), wp.static(wp.vec3f(1, 0, 0)) + ) + up_proj[env_index] = up_vec[env_index][2] + to_targets[env_index] = targets[env_index] - wp.transform_get_translation(torso_pose[env_index]) + to_targets[env_index][2] = 0.0 + heading_proj[env_index] = wp.dot(heading_vec[env_index], safe_normalize(to_targets[env_index])) + prev_potentials[env_index] = potentials[env_index] + potentials[env_index] = -wp.length(to_targets[env_index]) / dt + + +@wp.func +def spatial_rotate_inv(quat: wp.quatf, vec: wp.spatial_vectorf) -> wp.spatial_vectorf: + return wp.spatial_vector( + wp.quat_rotate_inv(quat, wp.spatial_top(vec)), + wp.quat_rotate_inv(quat, wp.spatial_bottom(vec)), + ) + + +@wp.func +def unscale(x: wp.float32, lower: wp.float32, upper: wp.float32) -> wp.float32: + return (2.0 * x - upper - lower) / (upper - lower) + + +@wp.kernel +def compute_rot( + torso_pose: wp.array(dtype=wp.transformf), + velocity: wp.array(dtype=wp.spatial_vectorf), + targets: wp.array(dtype=wp.vec3f), + vec_loc: wp.array(dtype=wp.spatial_vectorf), + rpy: wp.array(dtype=wp.vec3f), + angle_to_target: wp.array(dtype=wp.float32), +): + env_index = wp.tid() + vec_loc[env_index] = spatial_rotate_inv(wp.transform_get_rotation(torso_pose[env_index]), velocity[env_index]) + rpy[env_index] = euler_from_quat(wp.transform_get_rotation(torso_pose[env_index])) + angle_to_target[env_index] = ( + wp.atan2(targets[env_index][1] - torso_pose[env_index][1], targets[env_index][0] - torso_pose[env_index][0]) + - rpy[env_index][2] + ) + + +@wp.kernel +def scale_dof_pos( + dof_pos: wp.array2d(dtype=wp.float32), + dof_limits: wp.array2d(dtype=wp.vec2f), + dof_pos_scaled: wp.array2d(dtype=wp.float32), +): + env_index, joint_index = wp.tid() + dof_pos_scaled[env_index, joint_index] = unscale( + dof_pos[env_index, joint_index], dof_limits[env_index, joint_index][0], dof_limits[env_index, joint_index][1] + ) + + +@wp.kernel +def update_actions( + input_actions: wp.array2d(dtype=wp.float32), + actions: wp.array2d(dtype=wp.float32), + joint_gears: wp.array(dtype=wp.float32), + action_scale: wp.float32, +): + env_index, joint_index = wp.tid() + actions[env_index, joint_index] = ( + action_scale * joint_gears[joint_index] * wp.clamp(input_actions[env_index, joint_index], -1.0, 1.0) + ) + + +@wp.kernel +def initialize_state( + env_origins: wp.array(dtype=wp.vec3f), + targets: wp.array(dtype=wp.vec3f), + state: wp.array(dtype=wp.uint32), + seed: wp.int32, +): + env_index = wp.tid() + state[env_index] = wp.rand_init(seed, env_index) + targets[env_index] = env_origins[env_index] + targets[env_index] += wp.static(wp.vec3f(1000.0, 0.0, 0.0)) + + +class LocomotionWarpEnv(DirectRLEnvWarp): + cfg: DirectRLEnvCfg + + def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs): + super().__init__(cfg, render_mode, **kwargs) + + self.action_scale = self.cfg.action_scale + self.joint_gears = wp.array(self.cfg.joint_gears, dtype=wp.float32, device=self.sim.device) + self.motor_effort_ratio = wp.ones_like(self.joint_gears, device=self.sim.device) + self._joint_dof_idx, _ = self.robot.find_joints(".*") + + # Simulation bindings + # Note: these are direct memory views into the Newton simulation data, they should not be modified directly + self.joint_pos = self.robot.data.joint_pos.warp + self.joint_vel = self.robot.data.joint_vel.warp + self.root_pose_w = self.robot.data.root_pose_w.warp + self.root_vel_w = self.robot.data.root_vel_w.warp + self.soft_joint_pos_limits = self.robot.data.soft_joint_pos_limits.warp + + # Buffers + self.observations = wp.zeros( + (self.num_envs, self.cfg.observation_space), dtype=wp.float32, device=self.sim.device + ) + self.rewards = wp.zeros((self.num_envs), dtype=wp.float32, device=self.sim.device) + self.actions = wp.zeros((self.num_envs, self.robot.num_joints), dtype=wp.float32, device=self.sim.device) + self.states = wp.zeros((self.num_envs), dtype=wp.uint32, device=self.sim.device) + self.potentials = wp.zeros(self.num_envs, dtype=wp.float32, device=self.sim.device) + self.prev_potentials = wp.zeros_like(self.potentials) + self.targets = wp.zeros((self.num_envs), dtype=wp.vec3f, device=self.sim.device) + self.up_vec = wp.zeros((self.num_envs), dtype=wp.vec3f, device=self.sim.device) + self.heading_vec = wp.zeros((self.num_envs), dtype=wp.vec3f, device=self.sim.device) + self.to_targets = wp.zeros((self.num_envs), dtype=wp.vec3f, device=self.sim.device) + self.up_proj = wp.zeros((self.num_envs), dtype=wp.float32, device=self.sim.device) + self.heading_proj = wp.zeros((self.num_envs), dtype=wp.float32, device=self.sim.device) + self.vec_loc = wp.zeros((self.num_envs), dtype=wp.spatial_vectorf, device=self.sim.device) + self.rpy = wp.zeros((self.num_envs), dtype=wp.vec3f, device=self.sim.device) + self.angle_to_target = wp.zeros((self.num_envs), dtype=wp.float32, device=self.sim.device) + self.dof_pos_scaled = wp.zeros((self.num_envs, self.robot.num_joints), dtype=wp.float32, device=self.sim.device) + self.env_origins = wp.from_torch(self.scene.env_origins, dtype=wp.vec3f) + self.actions_mapped = wp.zeros((self.num_envs, self.robot.num_joints), dtype=wp.float32, device=self.sim.device) + + # Initial states and targets + if self.cfg.seed is None: + self.cfg.seed = -1 + + wp.launch( + initialize_state, + dim=self.num_envs, + inputs=[ + self.env_origins, + self.targets, + self.states, + self.cfg.seed, + ], + ) + + # Bind torch buffers to warp buffers + self.torch_obs_buf = wp.to_torch(self.observations) + self.torch_reward_buf = wp.to_torch(self.rewards) + self.torch_reset_terminated = wp.to_torch(self.reset_terminated) + self.torch_reset_time_outs = wp.to_torch(self.reset_time_outs) + self.torch_episode_length_buf = self.episode_length_buf # already a torch tensor via wp.to_torch + + def _setup_scene(self) -> None: + self.robot = Articulation(self.cfg.robot) + # add ground plane + self.cfg.terrain.num_envs = self.scene.cfg.num_envs + self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing + self.terrain = self.cfg.terrain.class_type(self.cfg.terrain) + # clone and replicate + self.scene.clone_environments(copy_from_source=False) + # add articulation to scene + self.scene.articulations["robot"] = self.robot + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + def _pre_physics_step(self, actions: wp.array) -> None: + self.actions.assign(actions) + wp.launch( + update_actions, + dim=(self.num_envs, self.robot.num_joints), + inputs=[actions, self.actions_mapped, self.joint_gears, self.action_scale], + ) + + def _apply_action(self) -> None: + self.robot.set_joint_effort_target_mask(target=self.actions_mapped) + + def _compute_intermediate_values(self) -> None: + wp.launch( + compute_heading_and_up, + dim=self.num_envs, + inputs=[ + self.root_pose_w, + self.targets, + self.cfg.sim.dt, + self.to_targets, + self.up_proj, + self.heading_proj, + self.up_vec, + self.heading_vec, + self.potentials, + self.prev_potentials, + ], + ) + + wp.launch( + compute_rot, + dim=self.num_envs, + inputs=[ + self.root_pose_w, + self.root_vel_w, + self.targets, + self.vec_loc, + self.rpy, + self.angle_to_target, + ], + ) + wp.launch( + scale_dof_pos, + dim=(self.num_envs, self.robot.num_joints), + inputs=[ + self.joint_pos, + self.soft_joint_pos_limits, + self.dof_pos_scaled, + ], + ) + + def _get_observations(self) -> dict: + wp.launch( + observations, + dim=self.num_envs, + inputs=[ + self.root_pose_w, + self.vec_loc, + self.rpy, + self.angle_to_target, + self.up_proj, + self.heading_proj, + self.dof_pos_scaled, + self.joint_vel, + self.actions, + self.observations, + self.cfg.dof_vel_scale, + self.cfg.angular_velocity_scale, + self.robot.num_joints, + ], + ) + return {"policy": self.torch_obs_buf} + + def _get_rewards(self) -> None: + wp.launch( + compute_rewards, + dim=self.num_envs, + inputs=[ + self.actions, + self.joint_vel, + self.dof_pos_scaled, + self.reset_terminated, + self.heading_proj, + self.up_proj, + self.potentials, + self.prev_potentials, + self.motor_effort_ratio, + self.cfg.up_weight, + self.cfg.heading_weight, + self.cfg.actions_cost_scale, + self.cfg.energy_cost_scale, + self.cfg.dof_vel_scale, + self.cfg.death_cost, + self.cfg.alive_reward_scale, + self.rewards, + ], + ) + + def _get_dones(self) -> None: + self._compute_intermediate_values() + + wp.launch( + get_dones, + dim=self.num_envs, + inputs=[ + self._episode_length_buf_wp, + self.root_pose_w, + self.max_episode_length, + self.cfg.termination_height, + self.reset_terminated, + self.reset_time_outs, + self.reset_buf, + ], + ) + + def _reset_idx(self, mask: wp.array | None = None): + if mask is None: + mask = self._ALL_ENV_MASK + + super()._reset_idx(mask) + + wp.launch( + reset_root, + dim=self.num_envs, + inputs=[ + self.robot.data.default_root_pose.warp, + self.robot.data.default_root_vel.warp, + self.env_origins, + self.cfg.sim.dt, + self.targets, + self.to_targets, + self.potentials, + self.root_pose_w, + self.root_vel_w, + mask, + ], + ) + wp.launch( + reset_joints, + dim=(self.num_envs, self.robot.num_joints), + inputs=[ + self.robot.data.default_joint_pos.warp, + self.robot.data.default_joint_vel.warp, + self.joint_pos, + self.joint_vel, + mask, + ], + ) + + self._compute_intermediate_values() diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/__init__.py new file mode 100644 index 000000000000..7f23883e6332 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Experimental registrations for manager-based tasks. + +We intentionally only register new Gym IDs pointing at experimental entry points. +Task definitions (configs/mdp) remain in `isaaclab_tasks` to avoid duplication. +""" diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/__init__.py new file mode 100644 index 000000000000..79c13e2aa8f4 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Classic environments for control. + +These environments are based on the MuJoCo environments provided by OpenAI. + +Reference: + https://github.com/openai/gym/tree/master/gym/envs/mujoco +""" diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/__init__.py new file mode 100644 index 000000000000..5f123abaa756 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Ant locomotion environment (experimental manager-based entry point). +""" + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.classic.ant import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Ant-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.ant_env_cfg:AntEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AntPPORunnerCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/ant_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/ant_env_cfg.py new file mode 100644 index 000000000000..106d1a78ba09 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/ant/ant_env_cfg.py @@ -0,0 +1,198 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ant reuses humanoid's experimental MDP (mirrors stable pattern). +from isaaclab_experimental.managers import ObservationTermCfg as ObsTerm +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.classic.humanoid.mdp as mdp + +## +# Pre-defined configs +## +from isaaclab_assets.robots.ant import ANT_CFG # isort: skip + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Configuration for the terrain scene with an ant robot.""" + + # terrain + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="average", + restitution_combine_mode="average", + static_friction=1.0, + dynamic_friction=1.0, + restitution=0.0, + ), + debug_vis=False, + ) + + # robot + robot = ANT_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +## +# MDP settings +## + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_effort = mdp.JointEffortActionCfg(asset_name="robot", joint_names=[".*"], scale=7.5) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for the policy.""" + + base_height = ObsTerm(func=mdp.base_pos_z) + base_lin_vel = ObsTerm(func=mdp.base_lin_vel) + base_ang_vel = ObsTerm(func=mdp.base_ang_vel) + base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll) + base_angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)}) + base_up_proj = ObsTerm(func=mdp.base_up_proj) + base_heading_proj = ObsTerm(func=mdp.base_heading_proj, params={"target_pos": (1000.0, 0.0, 0.0)}) + joint_pos_norm = ObsTerm(func=mdp.joint_pos_limit_normalized) + joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel, scale=0.2) + actions = ObsTerm(func=mdp.last_action) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + reset_base = EventTerm( + func=mdp.reset_root_state_uniform, + mode="reset", + params={"pose_range": {}, "velocity_range": {}}, + ) + + reset_robot_joints = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "position_range": (-0.2, 0.2), + "velocity_range": (-0.1, 0.1), + }, + ) + + +@configclass +class RewardsCfg: + """Reward terms for the MDP.""" + + # (1) Reward for moving forward + progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)}) + # (2) Stay alive bonus + alive = RewTerm(func=mdp.is_alive, weight=0.5) + # (3) Reward for non-upright posture + upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93}) + # (4) Reward for moving in the right direction + move_to_target = RewTerm( + func=mdp.move_to_target_bonus, weight=0.5, params={"threshold": 0.8, "target_pos": (1000.0, 0.0, 0.0)} + ) + # (5) Penalty for large action commands + action_l2 = RewTerm(func=mdp.action_l2, weight=-0.005) + # (6) Penalty for energy consumption + energy = RewTerm(func=mdp.power_consumption, weight=-0.05, params={"gear_ratio": {".*": 15.0}}) + # (7) Penalty for reaching close to joint limits + joint_pos_limits = RewTerm( + func=mdp.joint_pos_limits_penalty_ratio, weight=-0.1, params={"threshold": 0.99, "gear_ratio": {".*": 15.0}} + ) + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + # (1) Terminate if the episode length is exceeded + time_out = DoneTerm(func=mdp.time_out, time_out=True) + # (2) Terminate if the robot falls + torso_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.31}) + + +## +# Environment configuration +## + + +@configclass +class AntEnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the MuJoCo-style Ant walking environment.""" + + # Simulation settings + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=38, + nconmax=15, + ls_iterations=10, + cone="pyramidal", + ls_parallel=True, + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + # Scene settings + scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0, clone_in_fabric=True) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + # MDP settings + rewards: RewardsCfg = RewardsCfg() + terminations: TerminationsCfg = TerminationsCfg() + events: EventCfg = EventCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 2 + self.episode_length_s = 16.0 + # simulation settings + self.sim.dt = 1 / 120.0 + self.sim.render_interval = self.decimation + # default friction material + self.sim.physics_material.static_friction = 1.0 + self.sim.physics_material.dynamic_friction = 1.0 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/__init__.py new file mode 100644 index 000000000000..4e3324264949 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Cartpole balancing environment (experimental manager-based entry point). +""" + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.classic.cartpole import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Cartpole-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.cartpole_env_cfg:CartpoleEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:CartpolePPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/cartpole_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/cartpole_env_cfg.py new file mode 100644 index 000000000000..898ac8be4fd0 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/cartpole_env_cfg.py @@ -0,0 +1,199 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +from isaaclab_experimental.managers import ObservationTermCfg as ObsTerm +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.classic.cartpole.mdp as mdp + +## +# Pre-defined configs +## +from isaaclab_assets.robots.cartpole import CARTPOLE_CFG # isort:skip + + +## +# Scene definition +## + + +@configclass +class CartpoleSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + # ground = AssetBaseCfg( + # prim_path="/World/ground", + # spawn=sim_utils.GroundPlaneCfg(size=(100.0, 100.0)), + # ) + + # cartpole + robot: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/DomeLight", + spawn=sim_utils.DomeLightCfg(color=(0.9, 0.9, 0.9), intensity=500.0), + ) + + +## +# MDP settings +## + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_effort = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=100.0) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # observation terms (order preserved) + joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel) + joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel) + + def __post_init__(self) -> None: + self.enable_corruption = False + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + # reset + reset_cart_position = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]), + "position_range": (-1.0, 1.0), + "velocity_range": (-0.5, 0.5), + }, + ) + + reset_pole_position = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), + "position_range": (-0.25 * math.pi, 0.25 * math.pi), + "velocity_range": (-0.25 * math.pi, 0.25 * math.pi), + }, + ) + + +@configclass +class RewardsCfg: + """Reward terms for the MDP.""" + + # (1) Constant running reward + alive = RewTerm(func=mdp.is_alive, weight=1.0) + # (2) Failure penalty + terminating = RewTerm(func=mdp.is_terminated, weight=-2.0) + # (3) Primary task: keep pole upright + pole_pos = RewTerm( + func=mdp.joint_pos_target_l2, + weight=-1.0, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), "target": 0.0}, + ) + # (4) Shaping tasks: lower cart velocity + cart_vel = RewTerm( + func=mdp.joint_vel_l1, + weight=-0.01, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"])}, + ) + # (5) Shaping tasks: lower pole angular velocity + pole_vel = RewTerm( + func=mdp.joint_vel_l1, + weight=-0.005, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"])}, + ) + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + # (1) Time out + time_out = DoneTerm(func=mdp.time_out, time_out=True) + # (2) Cart out of bounds + cart_out_of_bounds = DoneTerm( + func=mdp.joint_pos_out_of_manual_limit, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]), "bounds": (-3.0, 3.0)}, + ) + + +## +# Environment configuration +## + + +@configclass +class CartpoleEnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the cartpole environment.""" + + # Scene settings + scene: CartpoleSceneCfg = CartpoleSceneCfg(num_envs=4096, env_spacing=4.0, clone_in_fabric=True) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + events: EventCfg = EventCfg() + # MDP settings + rewards: RewardsCfg = RewardsCfg() + terminations: TerminationsCfg = TerminationsCfg() + # Simulation settings + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=5, + nconmax=3, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + ) + + # Post initialization + def __post_init__(self) -> None: + """Post initialization.""" + # general settings + self.decimation = 2 + self.episode_length_s = 5 + # viewer settings + self.viewer.eye = (8.0, 0.0, 5.0) + # simulation settings + self.sim.dt = 1 / 120 + self.sim.render_interval = self.decimation diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/__init__.py new file mode 100644 index 000000000000..73b1cf4fb2c5 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This sub-module contains the functions that are specific to the cartpole environments.""" + +from isaaclab_experimental.envs.mdp import * # noqa: F401, F403 + +from .rewards import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/rewards.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/rewards.py new file mode 100644 index 000000000000..b83d2ea61706 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/cartpole/mdp/rewards.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.utils.warp.utils import wrap_to_pi + +from isaaclab.assets import Articulation + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +@wp.kernel +def _joint_pos_target_l2_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + joint_mask: wp.array(dtype=wp.bool), + out: wp.array(dtype=wp.float32), + target: float, +): + i = wp.tid() + s = float(0.0) + for j in range(joint_pos.shape[1]): + if joint_mask[j]: + a = wrap_to_pi(joint_pos[i, j]) + d = a - target + s += d * d + out[i] = s + + +def joint_pos_target_l2(env: ManagerBasedRLEnv, out, target: float, asset_cfg: SceneEntityCfg) -> None: + """Penalize joint position deviation from a target value. Writes into ``out``.""" + asset: Articulation = env.scene[asset_cfg.name] + assert asset.data.joint_pos.warp.shape[1] == asset_cfg.joint_mask.shape[0] + wp.launch( + kernel=_joint_pos_target_l2_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_pos.warp, asset_cfg.joint_mask, out, target], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/__init__.py new file mode 100644 index 000000000000..c08e5156b926 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Humanoid locomotion environment (experimental manager-based entry point). +""" + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.classic.humanoid import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Humanoid-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.humanoid_env_cfg:HumanoidEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:HumanoidPPORunnerCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/humanoid_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/humanoid_env_cfg.py new file mode 100644 index 000000000000..781541a495e6 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/humanoid_env_cfg.py @@ -0,0 +1,233 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import ObservationTermCfg as ObsTerm +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.classic.humanoid.mdp as mdp + +from isaaclab_assets.robots.humanoid import HUMANOID_CFG # isort:skip + + +## +# Scene definition +## + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Configuration for the terrain scene with a humanoid robot.""" + + # terrain + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0, dynamic_friction=1.0, restitution=0.0), + debug_vis=False, + ) + + # robot + robot = HUMANOID_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +## +# MDP settings +## + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_effort = mdp.JointEffortActionCfg( + asset_name="robot", + joint_names=[".*"], + scale={ + ".*_waist.*": 67.5, + ".*_upper_arm.*": 67.5, + "pelvis": 67.5, + ".*_lower_arm": 45.0, + ".*_thigh:0": 45.0, + ".*_thigh:1": 135.0, + ".*_thigh:2": 45.0, + ".*_shin": 90.0, + ".*_foot.*": 22.5, + }, + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for the policy.""" + + base_height = ObsTerm(func=mdp.base_pos_z) + base_lin_vel = ObsTerm(func=mdp.base_lin_vel) + base_ang_vel = ObsTerm(func=mdp.base_ang_vel, scale=0.25) + base_yaw_roll = ObsTerm(func=mdp.base_yaw_roll) + base_angle_to_target = ObsTerm(func=mdp.base_angle_to_target, params={"target_pos": (1000.0, 0.0, 0.0)}) + base_up_proj = ObsTerm(func=mdp.base_up_proj) + base_heading_proj = ObsTerm(func=mdp.base_heading_proj, params={"target_pos": (1000.0, 0.0, 0.0)}) + joint_pos_norm = ObsTerm(func=mdp.joint_pos_limit_normalized) + joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel, scale=0.1) + actions = ObsTerm(func=mdp.last_action) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + reset_base = EventTerm( + func=mdp.reset_root_state_uniform, + mode="reset", + params={"pose_range": {}, "velocity_range": {}}, + ) + + reset_robot_joints = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "position_range": (-0.2, 0.2), + "velocity_range": (-0.1, 0.1), + }, + ) + + +@configclass +class RewardsCfg: + """Reward terms for the MDP.""" + + # (1) Reward for moving forward + progress = RewTerm(func=mdp.progress_reward, weight=1.0, params={"target_pos": (1000.0, 0.0, 0.0)}) + # (2) Stay alive bonus + alive = RewTerm(func=mdp.is_alive, weight=2.0) + # (3) Reward for non-upright posture + upright = RewTerm(func=mdp.upright_posture_bonus, weight=0.1, params={"threshold": 0.93}) + # (4) Reward for moving in the right direction + move_to_target = RewTerm( + func=mdp.move_to_target_bonus, weight=0.5, params={"threshold": 0.8, "target_pos": (1000.0, 0.0, 0.0)} + ) + # (5) Penalty for large action commands + action_l2 = RewTerm(func=mdp.action_l2, weight=-0.01) + # (6) Penalty for energy consumption + energy = RewTerm( + func=mdp.power_consumption, + weight=-0.005, + params={ + "gear_ratio": { + ".*_waist.*": 67.5, + ".*_upper_arm.*": 67.5, + "pelvis": 67.5, + ".*_lower_arm": 45.0, + ".*_thigh:0": 45.0, + ".*_thigh:1": 135.0, + ".*_thigh:2": 45.0, + ".*_shin": 90.0, + ".*_foot.*": 22.5, + } + }, + ) + # (7) Penalty for reaching close to joint limits + joint_pos_limits = RewTerm( + func=mdp.joint_pos_limits_penalty_ratio, + weight=-0.25, + params={ + "threshold": 0.98, + "gear_ratio": { + ".*_waist.*": 67.5, + ".*_upper_arm.*": 67.5, + "pelvis": 67.5, + ".*_lower_arm": 45.0, + ".*_thigh:0": 45.0, + ".*_thigh:1": 135.0, + ".*_thigh:2": 45.0, + ".*_shin": 90.0, + ".*_foot.*": 22.5, + }, + }, + ) + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + # (1) Terminate if the episode length is exceeded + time_out = DoneTerm(func=mdp.time_out, time_out=True) + # (2) Terminate if the robot falls + torso_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.8}) + + +@configclass +class HumanoidEnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the MuJoCo-style Humanoid walking environment.""" + + # Scene settings + scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=5.0, clone_in_fabric=True) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + # MDP settings + rewards: RewardsCfg = RewardsCfg() + terminations: TerminationsCfg = TerminationsCfg() + events: EventCfg = EventCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 2 + self.episode_length_s = 16.0 + # simulation settings + self.sim: SimulationCfg = SimulationCfg( + dt=1 / 120.0, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=80, + nconmax=25, + ls_iterations=15, + cone="pyramidal", + ls_parallel=True, + update_data_interval=2, + impratio=1, + integrator="implicitfast", + ), + num_substeps=2, + debug_mode=False, + ), + ) + # self.sim.dt = 1 / 120.0 + self.sim.render_interval = self.decimation + # default friction material + self.sim.physics_material.static_friction = 1.0 + self.sim.physics_material.dynamic_friction = 1.0 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/__init__.py new file mode 100644 index 000000000000..df0802edf058 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This sub-module contains the functions that are specific to the humanoid environments.""" + +from isaaclab_experimental.envs.mdp import * # noqa: F401, F403 + +from .observations import * # noqa: F401, F403 +from .rewards import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/observations.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/observations.py new file mode 100644 index 000000000000..23c3c779eab9 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/observations.py @@ -0,0 +1,186 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first observation terms for the humanoid task. + +All observation functions follow the ``func(env, out, **params) -> None`` signature. +Dimensions are declared via ``out_dim`` on the ``@generic_io_descriptor_warp`` decorator. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp +from isaaclab_experimental.envs.utils.io_descriptors import generic_io_descriptor_warp +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_newton.kernels.state_kernels import rotate_vec_to_body_frame + +from isaaclab.assets import Articulation + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +@wp.kernel +def _base_yaw_roll_kernel( + root_quat_w: wp.array(dtype=wp.quatf), + out: wp.array(dtype=wp.float32, ndim=2), +): + """Extract yaw and roll angles from root quaternion (x, y, z, w layout).""" + i = wp.tid() + q = root_quat_w[i] + qx = q[0] + qy = q[1] + qz = q[2] + qw = q[3] + # roll = atan2(2*(qw*qx + qy*qz), 1 - 2*(qx^2 + qy^2)) + sin_roll = 2.0 * (qw * qx + qy * qz) + cos_roll = 1.0 - 2.0 * (qx * qx + qy * qy) + roll = wp.atan2(sin_roll, cos_roll) + # yaw = atan2(2*(qw*qz + qx*qy), 1 - 2*(qy^2 + qz^2)) + sin_yaw = 2.0 * (qw * qz + qx * qy) + cos_yaw = 1.0 - 2.0 * (qy * qy + qz * qz) + yaw = wp.atan2(sin_yaw, cos_yaw) + out[i, 0] = yaw + out[i, 1] = roll + + +@generic_io_descriptor_warp(out_dim=2, observation_type="RootState") +def base_yaw_roll(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Yaw and roll of the base in the simulation world frame. Shape: (num_envs, 2).""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_yaw_roll_kernel, + dim=env.num_envs, + inputs=[asset.data.root_quat_w.warp, out], + device=env.device, + ) + + +# Inline Tier 1 access: derives projected gravity directly from root_link_pose_w, +# avoiding the lazy TimestampedWarpBuffer which is not CUDA-graph-capturable. +# See GRAPH_CAPTURE_MIGRATION.md in isaaclab_newton for background. + + +@wp.kernel +def _base_up_proj_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + gravity_w: wp.array(dtype=wp.vec3f), + out: wp.array(dtype=wp.float32, ndim=2), +): + """Project base up vector onto world up: -gravity_b[2].""" + i = wp.tid() + out[i, 0] = -rotate_vec_to_body_frame(gravity_w[0], root_pose_w[i])[2] + + +@generic_io_descriptor_warp(out_dim=1, observation_type="RootState") +def base_up_proj(env: ManagerBasedEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Projection of the base up vector onto the world up vector. Shape: (num_envs, 1).""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_up_proj_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.GRAVITY_VEC_W.warp, out], + device=env.device, + ) + + +@wp.kernel +def _base_heading_proj_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + root_quat_w: wp.array(dtype=wp.quatf), + target_x: float, + target_y: float, + target_z: float, + out: wp.array(dtype=wp.float32, ndim=2), +): + """Dot product between robot forward and direction to target.""" + i = wp.tid() + pos = root_pos_w[i] + q = root_quat_w[i] + # compute direction to target (zeroed z) + dx = target_x - pos[0] + dy = target_y - pos[1] + dist = wp.sqrt(dx * dx + dy * dy) + # avoid division by zero + inv_dist = wp.where(dist > 1.0e-6, 1.0 / dist, 0.0) + to_target_x = dx * inv_dist + to_target_y = dy * inv_dist + # compute forward vector via quaternion rotation of (1,0,0) + fwd = wp.quat_rotate(q, wp.vec3f(1.0, 0.0, 0.0)) + # dot product (xy only) + heading_proj = fwd[0] * to_target_x + fwd[1] * to_target_y + out[i, 0] = heading_proj + + +@generic_io_descriptor_warp(out_dim=1, observation_type="RootState") +def base_heading_proj( + env: ManagerBasedEnv, + out, + target_pos: tuple[float, float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> None: + """Dot product between the base forward direction and direction to target. Shape: (num_envs, 1).""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_heading_proj_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_pos_w.warp, + asset.data.root_quat_w.warp, + target_pos[0], + target_pos[1], + target_pos[2], + out, + ], + device=env.device, + ) + + +@wp.kernel +def _base_angle_to_target_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + root_quat_w: wp.array(dtype=wp.quatf), + target_x: float, + target_y: float, + out: wp.array(dtype=wp.float32, ndim=2), +): + """Angle between base forward and vector to target, normalized to [-pi, pi].""" + i = wp.tid() + pos = root_pos_w[i] + q = root_quat_w[i] + # angle to target in world frame + dx = target_x - pos[0] + dy = target_y - pos[1] + walk_target_angle = wp.atan2(dy, dx) + # extract yaw from quaternion + qx = q[0] + qy = q[1] + qz = q[2] + qw = q[3] + sin_yaw = 2.0 * (qw * qz + qx * qy) + cos_yaw = 1.0 - 2.0 * (qy * qy + qz * qz) + yaw = wp.atan2(sin_yaw, cos_yaw) + # normalize to [-pi, pi] + angle = walk_target_angle - yaw + out[i, 0] = wp.atan2(wp.sin(angle), wp.cos(angle)) + + +@generic_io_descriptor_warp(out_dim=1, observation_type="RootState") +def base_angle_to_target( + env: ManagerBasedEnv, + out, + target_pos: tuple[float, float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> None: + """Angle between the base forward vector and the vector to the target. Shape: (num_envs, 1).""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_base_angle_to_target_kernel, + dim=env.num_envs, + inputs=[asset.data.root_pos_w.warp, asset.data.root_quat_w.warp, target_pos[0], target_pos[1], out], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/rewards.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/rewards.py new file mode 100644 index 000000000000..f0e47dfbbf05 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/classic/humanoid/mdp/rewards.py @@ -0,0 +1,321 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first reward terms for the humanoid task. + +All reward functions follow the ``func(env, out, **params) -> None`` signature +where ``out`` is a pre-allocated Warp array of shape ``(num_envs,)`` with float32 dtype. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch +import warp as wp +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.managers.manager_base import ManagerTermBase +from isaaclab_newton.kernels.state_kernels import rotate_vec_to_body_frame + +import isaaclab.utils.string as string_utils +from isaaclab.assets import Articulation + +if TYPE_CHECKING: + from isaaclab_experimental.managers.manager_term_cfg import RewardTermCfg + + from isaaclab.envs import ManagerBasedRLEnv + + +# --------------------------------------------------------------------------- +# Function-based reward terms +# --------------------------------------------------------------------------- + + +# Inline Tier 1 access: derives projected gravity directly from root_link_pose_w, +# avoiding the lazy TimestampedWarpBuffer which is not CUDA-graph-capturable. +# See GRAPH_CAPTURE_MIGRATION.md in isaaclab_newton for background. +# If ArticulationData Tier 2 lazy update is made graph-safe, this can revert to +# reading asset.data.projected_gravity_b directly. + + +@wp.kernel +def _upright_posture_bonus_kernel( + root_pose_w: wp.array(dtype=wp.transformf), + gravity_w: wp.array(dtype=wp.vec3f), + threshold: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + up_proj = -rotate_vec_to_body_frame(gravity_w[0], root_pose_w[i])[2] + out[i] = wp.where(up_proj > threshold, 1.0, 0.0) + + +def upright_posture_bonus( + env: ManagerBasedRLEnv, out, threshold: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> None: + """Reward for maintaining an upright posture. Writes 1.0 if up_proj > threshold, else 0.0.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_upright_posture_bonus_kernel, + dim=env.num_envs, + inputs=[asset.data.root_link_pose_w.warp, asset.data.GRAVITY_VEC_W.warp, threshold, out], + device=env.device, + ) + + +@wp.kernel +def _move_to_target_bonus_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + root_quat_w: wp.array(dtype=wp.quatf), + target_x: float, + target_y: float, + threshold: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + pos = root_pos_w[i] + q = root_quat_w[i] + # direction to target + dx = target_x - pos[0] + dy = target_y - pos[1] + dist = wp.sqrt(dx * dx + dy * dy) + inv_dist = wp.where(dist > 1.0e-6, 1.0 / dist, 0.0) + to_target_x = dx * inv_dist + to_target_y = dy * inv_dist + # forward vector + fwd = wp.quat_rotate(q, wp.vec3f(1.0, 0.0, 0.0)) + heading_proj = fwd[0] * to_target_x + fwd[1] * to_target_y + out[i] = wp.where(heading_proj > threshold, 1.0, heading_proj / threshold) + + +def move_to_target_bonus( + env: ManagerBasedRLEnv, + out, + threshold: float, + target_pos: tuple[float, float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> None: + """Reward for heading towards the target.""" + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_move_to_target_bonus_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_pos_w.warp, + asset.data.root_quat_w.warp, + target_pos[0], + target_pos[1], + threshold, + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# Class-based reward terms +# --------------------------------------------------------------------------- + + +@wp.kernel +def _progress_reward_reset_kernel( + env_mask: wp.array(dtype=wp.bool), + root_pos_w: wp.array(dtype=wp.vec3f), + target_x: float, + target_y: float, + target_z: float, + inv_step_dt: float, + potentials: wp.array(dtype=wp.float32), +): + i = wp.tid() + if env_mask[i]: + pos = root_pos_w[i] + dx = target_x - pos[0] + dy = target_y - pos[1] + dz = target_z - pos[2] + dist = wp.sqrt(dx * dx + dy * dy + dz * dz) + potentials[i] = -dist * inv_step_dt + + +@wp.kernel +def _progress_reward_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + target_x: float, + target_y: float, + inv_step_dt: float, + potentials: wp.array(dtype=wp.float32), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + pos = root_pos_w[i] + dx = target_x - pos[0] + dy = target_y - pos[1] + # z component is zeroed (xy distance only, matching stable) + dist = wp.sqrt(dx * dx + dy * dy) + prev = potentials[i] + pot = -dist * inv_step_dt + potentials[i] = pot + out[i] = pot - prev + + +class progress_reward(ManagerTermBase): + """Reward for making progress towards the target (potential-based).""" + + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + super().__init__(cfg, env) + self.potentials = wp.zeros(env.num_envs, dtype=wp.float32, device=env.device) + self._target_pos = cfg.params["target_pos"] + + def reset(self, env_mask: wp.array | None = None) -> None: + if env_mask is None: + self.potentials.zero_() + return + asset: Articulation = self._env.scene["robot"] + inv_dt = 1.0 / self._env.step_dt + wp.launch( + kernel=_progress_reward_reset_kernel, + dim=self.num_envs, + inputs=[ + env_mask, + asset.data.root_pos_w.warp, + self._target_pos[0], + self._target_pos[1], + self._target_pos[2], + inv_dt, + self.potentials, + ], + device=self.device, + ) + + def __call__( + self, + env: ManagerBasedRLEnv, + out, + target_pos: tuple[float, float, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + ) -> None: + asset: Articulation = env.scene[asset_cfg.name] + inv_dt = 1.0 / env.step_dt + wp.launch( + kernel=_progress_reward_kernel, + dim=env.num_envs, + inputs=[asset.data.root_pos_w.warp, target_pos[0], target_pos[1], inv_dt, self.potentials, out], + device=env.device, + ) + + +@wp.kernel +def _joint_pos_limits_penalty_ratio_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + soft_limits: wp.array(dtype=wp.vec2f, ndim=2), + gear_ratio_scaled: wp.array(dtype=wp.float32, ndim=2), + threshold: float, + inv_range: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + n_joints = joint_pos.shape[1] + s = float(0.0) + for j in range(n_joints): + lim = soft_limits[i, j] + lower = lim.x + upper = lim.y + mid = (lower + upper) * 0.5 + half_range = (upper - lower) * 0.5 + scaled = float(0.0) + if half_range > 0.0: + scaled = (joint_pos[i, j] - mid) / half_range + abs_scaled = wp.abs(scaled) + if abs_scaled > threshold: + violation = (abs_scaled - threshold) * inv_range + s += violation * gear_ratio_scaled[i, j] + out[i] = s + + +class joint_pos_limits_penalty_ratio(ManagerTermBase): + """Penalty for violating joint position limits weighted by the gear ratio.""" + + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + asset_cfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot")) + asset: Articulation = env.scene[asset_cfg.name] + + # resolve the gear ratio for each joint (torch in __init__ is fine) + gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device) + index_list, _, value_list = string_utils.resolve_matching_names_values( + cfg.params["gear_ratio"], asset.joint_names + ) + gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device) + gear_ratio_scaled = gear_ratio / torch.max(gear_ratio) + self._gear_ratio_scaled_wp = wp.from_torch(gear_ratio_scaled) + + def __call__( + self, + env: ManagerBasedRLEnv, + out, + threshold: float, + gear_ratio: dict[str, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + ) -> None: + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_joint_pos_limits_penalty_ratio_kernel, + dim=env.num_envs, + inputs=[ + asset.data.joint_pos.warp, + asset.data.soft_joint_pos_limits.warp, + self._gear_ratio_scaled_wp, + threshold, + 1.0 / (1.0 - threshold), + out, + ], + device=env.device, + ) + + +@wp.kernel +def _power_consumption_kernel( + action: wp.array(dtype=wp.float32, ndim=2), + joint_vel: wp.array(dtype=wp.float32, ndim=2), + gear_ratio_scaled: wp.array(dtype=wp.float32, ndim=2), + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + n_joints = action.shape[1] + s = float(0.0) + for j in range(n_joints): + s += wp.abs(action[i, j] * joint_vel[i, j] * gear_ratio_scaled[i, j]) + out[i] = s + + +class power_consumption(ManagerTermBase): + """Penalty for the power consumed by the actions to the environment.""" + + def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRLEnv): + asset_cfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot")) + asset: Articulation = env.scene[asset_cfg.name] + + # resolve the gear ratio for each joint (torch in __init__ is fine) + gear_ratio = torch.ones(env.num_envs, asset.num_joints, device=env.device) + index_list, _, value_list = string_utils.resolve_matching_names_values( + cfg.params["gear_ratio"], asset.joint_names + ) + gear_ratio[:, index_list] = torch.tensor(value_list, device=env.device) + gear_ratio_scaled = gear_ratio / torch.max(gear_ratio) + self._gear_ratio_scaled_wp = wp.from_torch(gear_ratio_scaled) + + def __call__( + self, + env: ManagerBasedRLEnv, + out, + gear_ratio: dict[str, float], + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), + ) -> None: + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_power_consumption_kernel, + dim=env.num_envs, + inputs=[env.action_manager.action, asset.data.joint_vel.warp, self._gear_ratio_scaled_wp, out], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/__init__.py new file mode 100644 index 000000000000..0660d38f0658 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Locomotion experimental task registrations (manager-based).""" diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/__init__.py new file mode 100644 index 000000000000..0857176a3fc7 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Velocity locomotion experimental task registrations (manager-based).""" diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/__init__.py new file mode 100644 index 000000000000..26f3257daef4 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configurations for velocity-based locomotion environments.""" + +# We leave this file empty since we don't want to expose any configs in this package directly. +# We still need this file to import the "config" module in the parent package. diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/__init__.py new file mode 100644 index 000000000000..6c79524e8532 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.a1 import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Unitree-A1-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeA1FlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeA1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Unitree-A1-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeA1FlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeA1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + "sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py new file mode 100644 index 000000000000..b27f8098d629 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/flat_env_cfg.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import UnitreeA1RoughEnvCfg + + +@configclass +class UnitreeA1FlatEnvCfg(UnitreeA1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=30, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # override rewards + self.rewards.flat_orientation_l2.weight = -2.5 + self.rewards.feet_air_time.weight = 0.25 + + # change terrain to flat + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + # no height scan + # self.scene.height_scanner = None + # self.observations.policy.height_scan = None + # no terrain curriculum + self.curriculum.terrain_levels = None + + +class UnitreeA1FlatEnvCfg_PLAY(UnitreeA1FlatEnvCfg): + def __post_init__(self) -> None: + # post init of parent + super().__post_init__() + + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # disable randomization for play + self.observations.policy.enable_corruption = False + # remove random pushing event + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py new file mode 100644 index 000000000000..03aec88a4018 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/a1/rough_env_cfg.py @@ -0,0 +1,91 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm + +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.locomotion.velocity.mdp as mdp +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + TerminationsCfg, +) + +from isaaclab_assets.robots.unitree import UNITREE_A1_CFG # isort: skip + + +class TerminationsCfg_A1(TerminationsCfg): + base_too_low = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.2}) + + +@configclass +class UnitreeA1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): + terminations: TerminationsCfg_A1 = TerminationsCfg_A1() + + def __post_init__(self): + # post init of parent + super().__post_init__() + + self.scene.robot = UNITREE_A1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 + + # reduce action scale + self.actions.joint_pos.scale = 0.25 + + # event + self.events.push_robot = None + # TODO: TEMPORARILY DISABLED - adding this causes NaNs in the simulation + # self.events.add_base_mass.params["mass_distribution_params"] = (-1.0, 3.0) + # self.events.add_base_mass.params["asset_cfg"].body_names = "trunk" + self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + + # rewards + self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" + self.rewards.feet_air_time.weight = 0.01 + self.rewards.undesired_contacts.params["sensor_cfg"].body_names = ".*thigh" + self.rewards.dof_torques_l2.weight = -0.0002 + self.rewards.track_lin_vel_xy_exp.weight = 1.5 + self.rewards.track_ang_vel_z_exp.weight = 0.75 + self.rewards.dof_acc_l2.weight = -2.5e-7 + self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk" + + +@configclass +class UnitreeA1RoughEnvCfg_PLAY(UnitreeA1RoughEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # spawn the robot randomly in the grid (instead of their terrain levels) + self.scene.terrain.max_init_terrain_level = None + # reduce the number of terrains to save memory + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + + # disable randomization for play + self.observations.policy.enable_corruption = False + # remove random pushing event + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/__init__.py new file mode 100644 index 000000000000..cbbf5290e82a --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.anymal_b import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Anymal-B-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalBFlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalBFlatPPORunnerCfg", + "rsl_rl_with_symmetry_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalBFlatPPORunnerWithSymmetryCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Anymal-B-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalBFlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalBFlatPPORunnerCfg", + "rsl_rl_with_symmetry_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalBFlatPPORunnerWithSymmetryCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py new file mode 100644 index 000000000000..28c0dc5c26b6 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import AnymalBRoughEnvCfg + + +@configclass +class AnymalBFlatEnvCfg(AnymalBRoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=75, + nconmax=15, + cone="elliptic", + impratio=100, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # override rewards + self.rewards.flat_orientation_l2.weight = -5.0 + self.rewards.dof_torques_l2.weight = -2.5e-5 + self.rewards.feet_air_time.weight = 0.5 + # change terrain to flat + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + # no height scan + self.scene.height_scanner = None + self.observations.policy.height_scan = None + # no terrain curriculum + self.curriculum.terrain_levels = None + + +class AnymalBFlatEnvCfg_PLAY(AnymalBFlatEnvCfg): + def __post_init__(self) -> None: + # post init of parent + super().__post_init__() + + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # disable randomization for play + self.observations.policy.enable_corruption = False + # remove random pushing event + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py new file mode 100644 index 000000000000..9811356ef220 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg + +from isaaclab_assets import ANYMAL_B_CFG # isort: skip + + +@configclass +class AnymalBRoughEnvCfg(LocomotionVelocityRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.robot = ANYMAL_B_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.manager_call_max_mode = {"Scene": 1} + + +@configclass +class AnymalBRoughEnvCfg_PLAY(AnymalBRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/__init__.py new file mode 100644 index 000000000000..318b13cc4707 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/__init__.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.anymal_c import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Anymal-C-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalCFlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalCFlatPPORunnerCfg", + "rsl_rl_with_symmetry_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalCFlatPPORunnerWithSymmetryCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_flat_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Anymal-C-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalCFlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalCFlatPPORunnerCfg", + "rsl_rl_with_symmetry_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalCFlatPPORunnerWithSymmetryCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_flat_ppo_cfg.yaml", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py new file mode 100644 index 000000000000..e82cf9559d61 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py @@ -0,0 +1,52 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import AnymalCRoughEnvCfg + + +@configclass +class AnymalCFlatEnvCfg(AnymalCRoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=120, + nconmax=15, + cone="elliptic", + impratio=100, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # override rewards + self.rewards.flat_orientation_l2.weight = -5.0 + self.rewards.dof_torques_l2.weight = -2.5e-5 + self.rewards.feet_air_time.weight = 0.5 + # change terrain to flat + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + # no terrain curriculum + self.curriculum.terrain_levels = None + + +class AnymalCFlatEnvCfg_PLAY(AnymalCFlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py new file mode 100644 index 000000000000..36af75613c07 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort: skip + + +@configclass +class AnymalCRoughEnvCfg(LocomotionVelocityRoughEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + # switch robot to anymal-c + self.scene.robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot.actuators["legs"].armature = 0.01 + self.manager_call_max_mode = {"Scene": 1} + + +@configclass +class AnymalCRoughEnvCfg_PLAY(AnymalCRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/__init__.py new file mode 100644 index 000000000000..e5e75d19dc2d --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/__init__.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.anymal_d import agents + +## +# Register Gym environments. +## + +# Rough env disabled: requires isaaclab_physx which is not yet available on dev/newton. +# The package exists on upstream/develop (commit 308400f1d35) but has not been merged. +# Re-enable once dev/newton picks up isaaclab_physx. +# gym.register( +# id="Isaac-Velocity-Rough-Anymal-D-Warp-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:AnymalDRoughEnvCfg", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalDRoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + +# gym.register( +# id="Isaac-Velocity-Rough-Anymal-D-Warp-Play-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:AnymalDRoughEnvCfg_PLAY", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalDRoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + +gym.register( + id="Isaac-Velocity-Flat-Anymal-D-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalDFlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalDFlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Anymal-D-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:AnymalDFlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:AnymalDFlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py new file mode 100644 index 000000000000..1f98c1b56120 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import AnymalDRoughEnvCfg + + +@configclass +class AnymalDFlatEnvCfg(AnymalDRoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=25, + cone="elliptic", + impratio=100.0, + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.rewards.flat_orientation_l2.weight = -5.0 + self.rewards.dof_torques_l2.weight = -2.5e-5 + self.rewards.feet_air_time.weight = 0.5 + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + + +class AnymalDFlatEnvCfg_PLAY(AnymalDFlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py new file mode 100644 index 000000000000..1cafa006ee64 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_D_CFG # isort: skip + + +@configclass +class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.manager_call_max_mode = {"Scene": 1} + + +@configclass +class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/__init__.py new file mode 100644 index 000000000000..4d9d4a77883a --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.cassie import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Cassie-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:CassieFlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:CassieFlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Cassie-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:CassieFlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:CassieFlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py new file mode 100644 index 000000000000..bcc670cf3788 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/flat_env_cfg.py @@ -0,0 +1,45 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import CassieRoughEnvCfg + + +@configclass +class CassieFlatEnvCfg(CassieRoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=52, + nconmax=15, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.rewards.flat_orientation_l2.weight = -2.5 + self.rewards.feet_air_time.weight = 5.0 + self.rewards.joint_deviation_hip.params["asset_cfg"].joint_names = ["hip_rotation_.*"] + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + + +class CassieFlatEnvCfg_PLAY(CassieFlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py new file mode 100644 index 000000000000..43341babeb3d --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/cassie/rough_env_cfg.py @@ -0,0 +1,96 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg + +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.locomotion.velocity.mdp as mdp +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) + +from isaaclab_assets.robots.cassie import CASSIE_CFG # isort: skip + + +@configclass +class CassieRewardsCfg(RewardsCfg): + termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0) + feet_air_time = RewTerm( + func=mdp.feet_air_time_positive_biped, + weight=2.5, + params={ + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*toe"), + "command_name": "base_velocity", + "threshold": 0.3, + }, + ) + joint_deviation_hip = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.2, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["hip_abduction_.*", "hip_rotation_.*"])}, + ) + joint_deviation_toes = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.2, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=["toe_joint_.*"])}, + ) + dof_pos_limits = RewTerm( + func=mdp.joint_pos_limits, + weight=-1.0, + params={"asset_cfg": SceneEntityCfg("robot", joint_names="toe_joint_.*")}, + ) + + +@configclass +class CassieRoughEnvCfg(LocomotionVelocityRoughEnvCfg): + rewards: CassieRewardsCfg = CassieRewardsCfg() + + def __post_init__(self): + super().__post_init__() + self.scene.robot = CASSIE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.actions.joint_pos.scale = 0.5 + self.events.push_robot = None + # TODO: TEMPORARILY DISABLED - adding this causes NaNs in the simulation + # self.events.add_base_mass = None + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*pelvis"] + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + self.terminations.base_contact.params["sensor_cfg"].body_names = [".*pelvis"] + self.rewards.undesired_contacts = None + self.rewards.dof_torques_l2.weight = -5.0e-6 + self.rewards.track_lin_vel_xy_exp.weight = 2.0 + self.rewards.track_ang_vel_z_exp.weight = 1.0 + self.rewards.action_rate_l2.weight *= 1.5 + self.rewards.dof_acc_l2.weight *= 1.5 + + +@configclass +class CassieRoughEnvCfg_PLAY(CassieRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/__init__.py new file mode 100644 index 000000000000..83bdf047a488 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/__init__.py @@ -0,0 +1,58 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.g1 import agents + +## +# Register Gym environments. +## + +# gym.register( +# id="Isaac-Velocity-Rough-G1-Warp-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:G1RoughEnvCfg", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:G1RoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + + +# gym.register( +# id="Isaac-Velocity-Rough-G1-Warp-Play-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:G1RoughEnvCfg_PLAY", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:G1RoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + +gym.register( + id="Isaac-Velocity-Flat-G1-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:G1FlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:G1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-G1-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:G1FlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:G1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py new file mode 100644 index 000000000000..3d9047a98506 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/flat_env_cfg.py @@ -0,0 +1,58 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import G1RoughEnvCfg + + +@configclass +class G1FlatEnvCfg(G1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=95, + nconmax=10, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + self.rewards.track_ang_vel_z_exp.weight = 1.0 + self.rewards.lin_vel_z_l2.weight = -0.2 + self.rewards.action_rate_l2.weight = -0.005 + self.rewards.dof_acc_l2.weight = -1.0e-7 + self.rewards.feet_air_time.weight = 0.75 + self.rewards.feet_air_time.params["threshold"] = 0.4 + self.rewards.dof_torques_l2.weight = -2.0e-6 + self.rewards.dof_torques_l2.params["asset_cfg"] = SceneEntityCfg( + "robot", joint_names=[".*_hip_.*", ".*_knee_joint"] + ) + self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0) + self.commands.base_velocity.ranges.lin_vel_y = (-0.5, 0.5) + self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0) + + +class G1FlatEnvCfg_PLAY(G1FlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py new file mode 100644 index 000000000000..db4cc159e4c5 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/g1/rough_env_cfg.py @@ -0,0 +1,178 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg + +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.locomotion.velocity.mdp as mdp +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) + +## +# Pre-defined configs +## +from isaaclab_assets import G1_MINIMAL_CFG # isort: skip + + +@configclass +class G1Rewards(RewardsCfg): + """Reward terms for the MDP.""" + + termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0) + track_lin_vel_xy_exp = RewTerm( + func=mdp.track_lin_vel_xy_yaw_frame_exp, + weight=1.0, + params={"command_name": "base_velocity", "std": 0.5}, + ) + track_ang_vel_z_exp = RewTerm( + func=mdp.track_ang_vel_z_world_exp, weight=2.0, params={"command_name": "base_velocity", "std": 0.5} + ) + feet_air_time = RewTerm( + func=mdp.feet_air_time_positive_biped, + weight=0.25, + params={ + "command_name": "base_velocity", + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*_ankle_roll_link"), + "threshold": 0.4, + }, + ) + feet_slide = RewTerm( + func=mdp.feet_slide, + weight=-0.1, + params={ + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*_ankle_roll_link"), + "asset_cfg": SceneEntityCfg("robot", body_names=".*_ankle_roll_link"), + }, + ) + dof_pos_limits = RewTerm( + func=mdp.joint_pos_limits, + weight=-1.0, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_ankle_pitch_joint", ".*_ankle_roll_joint"])}, + ) + joint_deviation_hip = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.1, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_hip_yaw_joint", ".*_hip_roll_joint"])}, + ) + joint_deviation_arms = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.1, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + joint_names=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_pitch_joint", + ".*_elbow_roll_joint", + ], + ) + }, + ) + joint_deviation_fingers = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.05, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + joint_names=[ + ".*_five_joint", + ".*_three_joint", + ".*_six_joint", + ".*_four_joint", + ".*_zero_joint", + ".*_one_joint", + ".*_two_joint", + ], + ) + }, + ) + joint_deviation_torso = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.1, + params={"asset_cfg": SceneEntityCfg("robot", joint_names="torso_joint")}, + ) + + +@configclass +class G1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): + rewards: G1Rewards = G1Rewards() + + def __post_init__(self): + super().__post_init__() + self.scene.robot = G1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.events.push_robot = None + # TODO: TEMPORARILY DISABLED - adding this causes NaNs in the simulation + # self.events.add_base_mass = None + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.base_external_force_torque.params["asset_cfg"].body_names = ["torso_link"] + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + + # Rewards + self.rewards.lin_vel_z_l2.weight = 0.0 + self.rewards.undesired_contacts = None + self.rewards.flat_orientation_l2.weight = -1.0 + self.rewards.action_rate_l2.weight = -0.005 + self.rewards.dof_acc_l2.weight = -1.25e-7 + self.rewards.dof_acc_l2.params["asset_cfg"] = SceneEntityCfg( + "robot", joint_names=[".*_hip_.*", ".*_knee_joint"] + ) + self.rewards.dof_torques_l2.weight = -1.5e-7 + self.rewards.dof_torques_l2.params["asset_cfg"] = SceneEntityCfg( + "robot", joint_names=[".*_hip_.*", ".*_knee_joint", ".*_ankle_.*"] + ) + + # Commands + self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0) + self.commands.base_velocity.ranges.lin_vel_y = (-0.0, 0.0) + self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0) + + # terminations + self.terminations.base_contact.params["sensor_cfg"].body_names = "torso_link" + + +@configclass +class G1RoughEnvCfg_PLAY(G1RoughEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.episode_length_s = 40.0 + # spawn the robot randomly in the grid (instead of their terrain levels) + self.scene.terrain.max_init_terrain_level = None + # reduce the number of terrains to save memory + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + + self.commands.base_velocity.ranges.lin_vel_x = (1.0, 1.0) + self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0) + self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0) + self.commands.base_velocity.ranges.heading = (0.0, 0.0) + # disable randomization for play + self.observations.policy.enable_corruption = False + # remove random pushing + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/__init__.py new file mode 100644 index 000000000000..038f55740728 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.go1 import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Unitree-Go1-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeGo1FlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeGo1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Unitree-Go1-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeGo1FlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeGo1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py new file mode 100644 index 000000000000..e4fbc73e1d08 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/flat_env_cfg.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import UnitreeGo1RoughEnvCfg + + +@configclass +class UnitreeGo1FlatEnvCfg(UnitreeGo1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=60, + nconmax=25, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.rewards.flat_orientation_l2.weight = -2.5 + self.rewards.feet_air_time.weight = 0.25 + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + + +class UnitreeGo1FlatEnvCfg_PLAY(UnitreeGo1FlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py new file mode 100644 index 000000000000..2864bbba3130 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go1/rough_env_cfg.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg + +from isaaclab_assets.robots.unitree import UNITREE_GO1_CFG # isort: skip + + +@configclass +class UnitreeGo1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.robot = UNITREE_GO1_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.manager_call_max_mode = {"Scene": 1} + self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 + self.actions.joint_pos.scale = 0.25 + self.events.push_robot = None + self.events.base_external_force_torque.params["asset_cfg"].body_names = "trunk" + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" + self.rewards.feet_air_time.weight = 0.01 + self.rewards.undesired_contacts = None + self.rewards.dof_torques_l2.weight = -0.0002 + self.rewards.track_lin_vel_xy_exp.weight = 1.5 + self.rewards.track_ang_vel_z_exp.weight = 0.75 + self.rewards.dof_acc_l2.weight = -2.5e-7 + self.terminations.base_contact.params["sensor_cfg"].body_names = "trunk" + + +@configclass +class UnitreeGo1RoughEnvCfg_PLAY(UnitreeGo1RoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/__init__.py new file mode 100644 index 000000000000..7e124029c682 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.go2 import agents + +## +# Register Gym environments. +## + +gym.register( + id="Isaac-Velocity-Flat-Unitree-Go2-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeGo2FlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeGo2FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-Unitree-Go2-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:UnitreeGo2FlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UnitreeGo2FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py new file mode 100644 index 000000000000..ad8a8aa862f0 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/flat_env_cfg.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import UnitreeGo2RoughEnvCfg + + +@configclass +class UnitreeGo2FlatEnvCfg(UnitreeGo2RoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=65, + nconmax=35, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.rewards.flat_orientation_l2.weight = -2.5 + self.rewards.feet_air_time.weight = 0.25 + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + + +class UnitreeGo2FlatEnvCfg_PLAY(UnitreeGo2FlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py new file mode 100644 index 000000000000..ff13b7e86176 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/go2/rough_env_cfg.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import LocomotionVelocityRoughEnvCfg + +from isaaclab_assets.robots.unitree import UNITREE_GO2_CFG # isort: skip + + +@configclass +class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.terrain.terrain_generator.sub_terrains["boxes"].grid_height_range = (0.025, 0.1) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_range = (0.01, 0.06) + self.scene.terrain.terrain_generator.sub_terrains["random_rough"].noise_step = 0.01 + self.actions.joint_pos.scale = 0.25 + self.events.push_robot = None + self.events.base_external_force_torque.params["asset_cfg"].body_names = "base" + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot" + self.rewards.feet_air_time.weight = 0.01 + self.rewards.undesired_contacts = None + self.rewards.dof_torques_l2.weight = -0.0002 + self.rewards.track_lin_vel_xy_exp.weight = 1.5 + self.rewards.track_ang_vel_z_exp.weight = 0.75 + self.rewards.dof_acc_l2.weight = -2.5e-7 + self.terminations.base_contact.params["sensor_cfg"].body_names = "base" + + +@configclass +class UnitreeGo2RoughEnvCfg_PLAY(UnitreeGo2RoughEnvCfg): + def __post_init__(self): + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.scene.terrain.max_init_terrain_level = None + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/__init__.py new file mode 100644 index 000000000000..95a1e8f29e3f --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/__init__.py @@ -0,0 +1,57 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.locomotion.velocity.config.h1 import agents + +## +# Register Gym environments. +## + +# gym.register( +# id="Isaac-Velocity-Rough-H1-Warp-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:H1RoughEnvCfg", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:H1RoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + +# gym.register( +# id="Isaac-Velocity-Rough-H1-Warp-Play-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.rough_env_cfg:H1RoughEnvCfg_PLAY", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:H1RoughPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml", +# }, +# ) + +gym.register( + id="Isaac-Velocity-Flat-H1-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:H1FlatEnvCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:H1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Velocity-Flat-H1-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.flat_env_cfg:H1FlatEnvCfg_PLAY", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:H1FlatPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py new file mode 100644 index 000000000000..22648c27a2c5 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +from .rough_env_cfg import H1RoughEnvCfg + + +@configclass +class H1FlatEnvCfg(H1RoughEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=65, + nconmax=15, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + super().__post_init__() + self.scene.terrain.terrain_type = "plane" + self.scene.terrain.terrain_generator = None + self.curriculum.terrain_levels = None + self.rewards.feet_air_time.weight = 1.0 + self.rewards.feet_air_time.params["threshold"] = 0.6 + + +class H1FlatEnvCfg_PLAY(H1FlatEnvCfg): + def __post_init__(self) -> None: + super().__post_init__() + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.observations.policy.enable_corruption = False + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py new file mode 100644 index 000000000000..edb71956a3fc --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py @@ -0,0 +1,131 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg + +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.locomotion.velocity.mdp as mdp +from isaaclab_tasks_experimental.manager_based.locomotion.velocity.velocity_env_cfg import ( + LocomotionVelocityRoughEnvCfg, + RewardsCfg, +) + +## +# Pre-defined configs +## +from isaaclab_assets import H1_MINIMAL_CFG # isort: skip + + +@configclass +class H1Rewards(RewardsCfg): + """Reward terms for the MDP.""" + + termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0) + lin_vel_z_l2 = None + track_lin_vel_xy_exp = RewTerm( + func=mdp.track_lin_vel_xy_yaw_frame_exp, + weight=1.0, + params={"command_name": "base_velocity", "std": 0.5}, + ) + track_ang_vel_z_exp = RewTerm( + func=mdp.track_ang_vel_z_world_exp, weight=1.0, params={"command_name": "base_velocity", "std": 0.5} + ) + feet_air_time = RewTerm( + func=mdp.feet_air_time_positive_biped, + weight=0.25, + params={ + "command_name": "base_velocity", + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"), + "threshold": 0.4, + }, + ) + feet_slide = RewTerm( + func=mdp.feet_slide, + weight=-0.25, + params={ + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"), + "asset_cfg": SceneEntityCfg("robot", body_names=".*ankle_link"), + }, + ) + dof_pos_limits = RewTerm( + func=mdp.joint_pos_limits, weight=-1.0, params={"asset_cfg": SceneEntityCfg("robot", joint_names=".*_ankle")} + ) + joint_deviation_hip = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.2, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_hip_yaw", ".*_hip_roll"])}, + ) + joint_deviation_arms = RewTerm( + func=mdp.joint_deviation_l1, + weight=-0.2, + params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_shoulder_.*", ".*_elbow"])}, + ) + joint_deviation_torso = RewTerm( + func=mdp.joint_deviation_l1, weight=-0.1, params={"asset_cfg": SceneEntityCfg("robot", joint_names="torso")} + ) + + +@configclass +class H1RoughEnvCfg(LocomotionVelocityRoughEnvCfg): + rewards: H1Rewards = H1Rewards() + + def __post_init__(self): + super().__post_init__() + self.scene.robot = H1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.events.push_robot = None + self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0) + self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*torso_link"] + self.events.reset_base.params = { + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (0.0, 0.0), + "y": (0.0, 0.0), + "z": (0.0, 0.0), + "roll": (0.0, 0.0), + "pitch": (0.0, 0.0), + "yaw": (0.0, 0.0), + }, + } + self.events.base_com = None + self.rewards.undesired_contacts = None + self.rewards.flat_orientation_l2.weight = -1.0 + self.rewards.dof_torques_l2.weight = 0.0 + self.rewards.action_rate_l2.weight = -0.005 + self.rewards.dof_acc_l2.weight = -1.25e-7 + self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0) + self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0) + self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0) + self.terminations.base_contact.params["sensor_cfg"].body_names = ".*torso_link" + + +@configclass +class H1RoughEnvCfg_PLAY(H1RoughEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + self.episode_length_s = 40.0 + # spawn the robot randomly in the grid (instead of their terrain levels) + self.scene.terrain.max_init_terrain_level = None + # reduce the number of terrains to save memory + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.num_rows = 5 + self.scene.terrain.terrain_generator.num_cols = 5 + self.scene.terrain.terrain_generator.curriculum = False + + self.commands.base_velocity.ranges.lin_vel_x = (1.0, 1.0) + self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0) + self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0) + self.commands.base_velocity.ranges.heading = (0.0, 0.0) + # disable randomization for play + self.observations.policy.enable_corruption = False + # remove random pushing + self.events.base_external_force_torque = None + self.events.push_robot = None diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/__init__.py new file mode 100644 index 000000000000..cdc532db4255 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This sub-module contains the functions that are specific to the locomotion environments.""" + +from isaaclab_experimental.envs.mdp import * # noqa: F401, F403 + +from .curriculums import * # noqa: F401, F403 +from .rewards import * # noqa: F401, F403 +from .terminations import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/curriculums.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/curriculums.py new file mode 100644 index 000000000000..01653af7d96b --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/curriculums.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Curriculum functions for the velocity locomotion environment. + +Curriculum terms are not warp-managed (they run at reset time, not per-step), +so they remain torch-based. +""" + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaaclab.assets import Articulation +from isaaclab.managers import SceneEntityCfg +from isaaclab.terrains import TerrainImporter + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +def terrain_levels_vel( + env: ManagerBasedRLEnv, env_ids: Sequence[int], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> torch.Tensor: + """Curriculum based on the distance the robot walked when commanded to move at a desired velocity.""" + asset: Articulation = env.scene[asset_cfg.name] + terrain: TerrainImporter = env.scene.terrain + command = env.command_manager.get_command("base_velocity") + distance = torch.norm(asset.data.root_pos_w.torch[env_ids, :2] - env.scene.env_origins[env_ids, :2], dim=1) + move_up = distance > terrain.cfg.terrain_generator.size[0] / 2 + move_down = distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5 + move_down *= ~move_up + terrain.update_env_origins(env_ids, move_up, move_down) + return torch.mean(terrain.terrain_levels.float()) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/rewards.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/rewards.py new file mode 100644 index 000000000000..6daea9d45499 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/rewards.py @@ -0,0 +1,319 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first reward functions for the velocity locomotion environment. + +All functions follow the ``func(env, out, **params) -> None`` signature. +Cross-manager torch tensors (contact sensor, commands) are cached as zero-copy +warp views on first call via ``wp.from_torch``. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.managers import SceneEntityCfg +from isaaclab.sensors import ContactSensor + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + +# --------------------------------------------------------------------------- +# feet_air_time +# --------------------------------------------------------------------------- + + +@wp.kernel +def _feet_air_time_kernel( + last_air_time: wp.array(dtype=wp.float32, ndim=2), + first_contact: wp.array(dtype=wp.float32, ndim=2), + body_ids: wp.array(dtype=wp.int32), + cmd_xy: wp.array(dtype=wp.float32, ndim=2), + threshold: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + s = float(0.0) + for k in range(body_ids.shape[0]): + b = body_ids[k] + s += (last_air_time[i, b] - threshold) * first_contact[i, b] + # gate by command magnitude + cx = cmd_xy[i, 0] + cy = cmd_xy[i, 1] + cmd_norm = wp.sqrt(cx * cx + cy * cy) + out[i] = wp.where(cmd_norm > 0.1, s, 0.0) + + +def feet_air_time(env: ManagerBasedRLEnv, out, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float) -> None: + """Reward long steps taken by the feet using L2-kernel.""" + fn = feet_air_time + contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] + # Cache command bridge (persistent pointer) + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + # Newton contact sensor returns persistent wp.arrays — use directly, no wp.from_torch needed + first_contact = contact_sensor.compute_first_contact(env.step_dt) + wp.launch( + kernel=_feet_air_time_kernel, + dim=env.num_envs, + inputs=[ + contact_sensor.data.last_air_time.warp, + first_contact, + sensor_cfg.body_ids_wp, + fn._cmd_wp, + threshold, + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# feet_air_time_positive_biped +# --------------------------------------------------------------------------- + + +@wp.kernel +def _feet_air_time_positive_biped_kernel( + air_time: wp.array(dtype=wp.float32, ndim=2), + contact_time: wp.array(dtype=wp.float32, ndim=2), + body_ids: wp.array(dtype=wp.int32), + cmd_xy: wp.array(dtype=wp.float32, ndim=2), + threshold: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + n_feet = body_ids.shape[0] + # count feet in contact and find single-stance min mode time + n_contact = int(0) + for k in range(n_feet): + b = body_ids[k] + if contact_time[i, b] > 0.0: + n_contact += 1 + single_stance = n_contact == 1 + min_val = threshold # clamp upper bound + for k in range(n_feet): + b = body_ids[k] + in_contact = contact_time[i, b] > 0.0 + mode_time = wp.where(in_contact, contact_time[i, b], air_time[i, b]) + val = wp.where(single_stance, mode_time, 0.0) + min_val = wp.min(min_val, val) + # gate by command magnitude + cx = cmd_xy[i, 0] + cy = cmd_xy[i, 1] + cmd_norm = wp.sqrt(cx * cx + cy * cy) + out[i] = wp.where(cmd_norm > 0.1, min_val, 0.0) + + +def feet_air_time_positive_biped(env, out, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> None: + """Reward long steps taken by the feet for bipeds.""" + fn = feet_air_time_positive_biped + contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + wp.launch( + kernel=_feet_air_time_positive_biped_kernel, + dim=env.num_envs, + inputs=[ + contact_sensor.data.current_air_time.warp, + contact_sensor.data.current_contact_time.warp, + sensor_cfg.body_ids_wp, + fn._cmd_wp, + threshold, + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# feet_slide +# --------------------------------------------------------------------------- + + +@wp.kernel +def _feet_slide_kernel( + body_lin_vel_w: wp.array(dtype=wp.vec3f, ndim=2), + net_forces_w: wp.array(dtype=wp.vec3f, ndim=3), + body_ids: wp.array(dtype=wp.int32), + n_history: int, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + s = float(0.0) + for k in range(body_ids.shape[0]): + b = body_ids[k] + # check if in contact: max force norm over history > 1.0 + max_force = float(0.0) + for h in range(n_history): + f = net_forces_w[i, h, b] + f_norm = wp.sqrt(f[0] * f[0] + f[1] * f[1] + f[2] * f[2]) + max_force = wp.max(max_force, f_norm) + in_contact = wp.where(max_force > 1.0, 1.0, 0.0) + # planar velocity norm + vx = body_lin_vel_w[i, b][0] + vy = body_lin_vel_w[i, b][1] + vel_norm = wp.sqrt(vx * vx + vy * vy) + s += vel_norm * in_contact + out[i] = s + + +def feet_slide(env, out, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> None: + """Penalize feet sliding.""" + contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name] + asset = env.scene[asset_cfg.name] + wp.launch( + kernel=_feet_slide_kernel, + dim=env.num_envs, + inputs=[ + asset.data.body_lin_vel_w.warp, + contact_sensor.data.net_forces_w_history.warp, + sensor_cfg.body_ids_wp, + contact_sensor.data.net_forces_w_history.warp.shape[1], + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# track_lin_vel_xy_yaw_frame_exp +# --------------------------------------------------------------------------- + + +@wp.kernel +def _track_lin_vel_xy_yaw_frame_exp_kernel( + root_quat_w: wp.array(dtype=wp.quatf), + root_lin_vel_w: wp.array(dtype=wp.vec3f), + cmd: wp.array(dtype=wp.float32, ndim=2), + inv_std_sq: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + q = root_quat_w[i] + # extract yaw-only quaternion + qx = q[0] + qy = q[1] + qz = q[2] + qw = q[3] + sin_yaw = 2.0 * (qw * qz + qx * qy) + cos_yaw = 1.0 - 2.0 * (qy * qy + qz * qz) + yaw_half = wp.atan2(sin_yaw, cos_yaw) * 0.5 + yaw_q = wp.quatf(0.0, 0.0, wp.sin(yaw_half), wp.cos(yaw_half)) + # rotate world velocity into yaw frame (inverse = conjugate for unit quat) + vel_w = root_lin_vel_w[i] + vel_yaw = wp.quat_rotate(wp.quat_inverse(yaw_q), vel_w) + # error + ex = cmd[i, 0] - vel_yaw[0] + ey = cmd[i, 1] - vel_yaw[1] + err_sq = ex * ex + ey * ey + out[i] = wp.exp(-err_sq * inv_std_sq) + + +def track_lin_vel_xy_yaw_frame_exp( + env, out, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> None: + """Reward tracking of linear velocity commands (xy axes) in the gravity aligned robot frame.""" + fn = track_lin_vel_xy_yaw_frame_exp + asset = env.scene[asset_cfg.name] + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + wp.launch( + kernel=_track_lin_vel_xy_yaw_frame_exp_kernel, + dim=env.num_envs, + inputs=[asset.data.root_quat_w.warp, asset.data.root_lin_vel_w.warp, fn._cmd_wp, 1.0 / (std * std), out], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# track_ang_vel_z_world_exp +# --------------------------------------------------------------------------- + + +@wp.kernel +def _track_ang_vel_z_world_exp_kernel( + root_ang_vel_w: wp.array(dtype=wp.vec3f), + cmd: wp.array(dtype=wp.float32, ndim=2), + inv_std_sq: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + err = cmd[i, 2] - root_ang_vel_w[i][2] + out[i] = wp.exp(-(err * err) * inv_std_sq) + + +def track_ang_vel_z_world_exp( + env, out, command_name: str, std: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> None: + """Reward tracking of angular velocity commands (yaw) in world frame.""" + fn = track_ang_vel_z_world_exp + asset = env.scene[asset_cfg.name] + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + wp.launch( + kernel=_track_ang_vel_z_world_exp_kernel, + dim=env.num_envs, + inputs=[asset.data.root_ang_vel_w.warp, fn._cmd_wp, 1.0 / (std * std), out], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# stand_still_joint_deviation_l1 +# --------------------------------------------------------------------------- + + +@wp.kernel +def _stand_still_joint_deviation_l1_kernel( + joint_pos: wp.array(dtype=wp.float32, ndim=2), + default_joint_pos: wp.array(dtype=wp.float32, ndim=2), + cmd: wp.array(dtype=wp.float32, ndim=2), + command_threshold: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + n_joints = joint_pos.shape[1] + dev = float(0.0) + for j in range(n_joints): + dev += wp.abs(joint_pos[i, j] - default_joint_pos[i, j]) + # gate: only penalize when command is small + cx = cmd[i, 0] + cy = cmd[i, 1] + cmd_norm = wp.sqrt(cx * cx + cy * cy) + out[i] = wp.where(cmd_norm < command_threshold, dev, 0.0) + + +def stand_still_joint_deviation_l1( + env, out, command_name: str, command_threshold: float = 0.06, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") +) -> None: + """Penalize offsets from the default joint positions when the command is very small.""" + fn = stand_still_joint_deviation_l1 + asset = env.scene[asset_cfg.name] + if not getattr(fn, "_is_warmed_up", False) or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + fn._is_warmed_up = True + wp.launch( + kernel=_stand_still_joint_deviation_l1_kernel, + dim=env.num_envs, + inputs=[asset.data.joint_pos.warp, asset.data.default_joint_pos.warp, fn._cmd_wp, command_threshold, out], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/terminations.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/terminations.py new file mode 100644 index 000000000000..4b3699e0e0bf --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/mdp/terminations.py @@ -0,0 +1,66 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first termination functions for the velocity locomotion environment.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.assets import Articulation +from isaaclab.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +@wp.kernel +def _terrain_out_of_bounds_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + half_width: float, + half_height: float, + distance_buffer: float, + out: wp.array(dtype=wp.bool), +): + i = wp.tid() + px = wp.abs(root_pos_w[i][0]) + py = wp.abs(root_pos_w[i][1]) + out[i] = px > half_width - distance_buffer or py > half_height - distance_buffer + + +def terrain_out_of_bounds( + env: ManagerBasedRLEnv, out, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), distance_buffer: float = 3.0 +) -> None: + """Terminate when the actor moves too close to the edge of the terrain.""" + fn = terrain_out_of_bounds + if not getattr(fn, "_is_warmed_up", False): + terrain_type = env.scene.cfg.terrain.terrain_type + if terrain_type == "plane": + fn._is_plane = True + elif terrain_type == "generator": + fn._is_plane = False + terrain_gen_cfg = env.scene.terrain.cfg.terrain_generator + grid_width, grid_length = terrain_gen_cfg.size + n_rows, n_cols = terrain_gen_cfg.num_rows, terrain_gen_cfg.num_cols + border_width = terrain_gen_cfg.border_width + fn._half_width = 0.5 * (n_rows * grid_width + 2 * border_width) + fn._half_height = 0.5 * (n_cols * grid_length + 2 * border_width) + else: + raise ValueError("Received unsupported terrain type, must be either 'plane' or 'generator'.") + fn._is_warmed_up = True + + if fn._is_plane: + out.zero_() + return + + asset: Articulation = env.scene[asset_cfg.name] + wp.launch( + kernel=_terrain_out_of_bounds_kernel, + dim=env.num_envs, + inputs=[asset.data.root_pos_w.warp, fn._half_width, fn._half_height, distance_buffer, out], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/velocity_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/velocity_env_cfg.py new file mode 100644 index 000000000000..7442d3654ba3 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/locomotion/velocity/velocity_env_cfg.py @@ -0,0 +1,296 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math +from dataclasses import MISSING + +from isaaclab_experimental.managers import ObservationTermCfg as ObsTerm +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import CurriculumTermCfg as CurrTerm +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import ContactSensorCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR +from isaaclab.utils.noise import UniformNoiseCfg as Unoise + +import isaaclab_tasks_experimental.manager_based.locomotion.velocity.mdp as mdp + +## +# Pre-defined configs +## +from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip + + +## +# Scene definition +## + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Configuration for the terrain scene with a legged robot.""" + + # ground terrain + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="generator", + terrain_generator=ROUGH_TERRAINS_CFG, + max_init_terrain_level=5, + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + ), + visual_material=sim_utils.MdlFileCfg( + mdl_path=f"{ISAACLAB_NUCLEUS_DIR}/Materials/TilesMarbleSpiderWhiteBrickBondHoned/TilesMarbleSpiderWhiteBrickBondHoned.mdl", + project_uvw=True, + texture_scale=(0.25, 0.25), + ), + debug_vis=False, + ) + # robots + robot: ArticulationCfg = MISSING + # sensors + contact_forces = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*", + filter_prim_paths_expr=[], + history_length=3, + track_air_time=True, + ) + # lights + sky_light = AssetBaseCfg( + prim_path="/World/skyLight", + spawn=sim_utils.DomeLightCfg( + intensity=750.0, + texture_file=f"{ISAAC_NUCLEUS_DIR}/Materials/Textures/Skies/PolyHaven/kloofendal_43d_clear_puresky_4k.hdr", + ), + ) + + +## +# MDP settings +## + + +@configclass +class CommandsCfg: + """Command specifications for the MDP.""" + + base_velocity = mdp.UniformVelocityCommandCfg( + asset_name="robot", + resampling_time_range=(10.0, 10.0), + rel_standing_envs=0.02, + rel_heading_envs=1.0, + heading_command=True, + heading_control_stiffness=0.5, + debug_vis=True, + ranges=mdp.UniformVelocityCommandCfg.Ranges( + lin_vel_x=(-1.0, 1.0), lin_vel_y=(-1.0, 1.0), ang_vel_z=(-1.0, 1.0), heading=(-math.pi, math.pi) + ), + ) + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True) + + +@configclass +class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # observation terms (order preserved) + base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1)) + base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2)) + projected_gravity = ObsTerm( + func=mdp.projected_gravity, + noise=Unoise(n_min=-0.05, n_max=0.05), + ) + velocity_commands = ObsTerm(func=mdp.generated_commands, params={"command_name": "base_velocity"}) + joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) + joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5)) + actions = ObsTerm(func=mdp.last_action) + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + # FIXME(warp-migration): COM randomization in exp manager-based locomotion currently causes + # NaNs and is temporarily disabled. + # base_com = EventTerm( + # func=mdp.randomize_rigid_body_com, + # mode="startup", + # params={ + # "asset_cfg": SceneEntityCfg("robot", body_names="base"), + # "com_range": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "z": (-0.01, 0.01)}, + # }, + # ) + base_com = None + + # reset + base_external_force_torque = EventTerm( + func=mdp.apply_external_force_torque, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names="base"), + "force_range": (0.0, 0.0), + "torque_range": (-0.0, 0.0), + }, + ) + + reset_base = EventTerm( + func=mdp.reset_root_state_uniform, + mode="reset", + params={ + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (-0.5, 0.5), + "y": (-0.5, 0.5), + "z": (-0.5, 0.5), + "roll": (-0.5, 0.5), + "pitch": (-0.5, 0.5), + "yaw": (-0.5, 0.5), + }, + }, + ) + + reset_robot_joints = EventTerm( + func=mdp.reset_joints_by_scale, + mode="reset", + params={ + "position_range": (0.5, 1.5), + "velocity_range": (0.0, 0.0), + }, + ) + + # interval + push_robot = EventTerm( + func=mdp.push_by_setting_velocity, + mode="interval", + interval_range_s=(10.0, 15.0), + params={"velocity_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5)}}, + ) + + +@configclass +class RewardsCfg: + """Reward terms for the MDP.""" + + # -- task + track_lin_vel_xy_exp = RewTerm( + func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} + ) + track_ang_vel_z_exp = RewTerm( + func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)} + ) + # -- penalties + lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0) + ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05) + dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5) + dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7) + action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01) + feet_air_time = RewTerm( + func=mdp.feet_air_time, + weight=0.125, + params={ + "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"), + "command_name": "base_velocity", + "threshold": 0.5, + }, + ) + undesired_contacts = RewTerm( + func=mdp.undesired_contacts, + weight=-1.0, + params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0}, + ) + # -- optional penalties + flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0) + dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0) + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + time_out = DoneTerm(func=mdp.time_out, time_out=True) + base_contact = DoneTerm( + func=mdp.illegal_contact, + params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0}, + ) + + +@configclass +class CurriculumCfg: + """Curriculum terms for the MDP.""" + + terrain_levels = CurrTerm(func=mdp.terrain_levels_vel) + + +## +# Environment configuration +## + + +@configclass +class LocomotionVelocityRoughEnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the locomotion velocity-tracking environment.""" + + # Scene settings + scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5, replicate_physics=True) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + commands: CommandsCfg = CommandsCfg() + # MDP settings + rewards: RewardsCfg = RewardsCfg() + terminations: TerminationsCfg = TerminationsCfg() + events: EventCfg = EventCfg() + curriculum: CurriculumCfg = CurriculumCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 + self.episode_length_s = 20.0 + # simulation settings + self.sim.dt = 1.0 / 200.0 + self.sim.render_interval = self.decimation + self.sim.physics_material = self.scene.terrain.physics_material + # update sensor update periods + if self.scene.contact_forces is not None: + self.scene.contact_forces.update_period = self.sim.dt + # check if terrain levels curriculum is enabled + if getattr(self.curriculum, "terrain_levels", None) is not None: + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.curriculum = True + else: + if self.scene.terrain.terrain_generator is not None: + self.scene.terrain.terrain_generator.curriculum = False diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/__init__.py new file mode 100644 index 000000000000..6cd56351b6e5 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from .reach import * diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/__init__.py new file mode 100644 index 000000000000..fe34199f2321 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Reach experimental task registrations (manager-based).""" diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/__init__.py new file mode 100644 index 000000000000..b08612ccc741 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +# Reuse agent configs from the stable task package. +from isaaclab_tasks.manager_based.manipulation.reach.config.franka import agents + +## +# Register Gym environments. +## + +## +# Joint Position Control +## + +gym.register( + id="Isaac-Reach-Franka-Warp-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:FrankaReachEnvCfg", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:FrankaReachPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", + }, +) + +gym.register( + id="Isaac-Reach-Franka-Warp-Play-v0", + entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:FrankaReachEnvCfg_PLAY", + "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:FrankaReachPPORunnerCfg", + "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", + }, +) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py new file mode 100644 index 000000000000..5b3c7ec0c2f4 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py @@ -0,0 +1,74 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.manipulation.reach.mdp as mdp +from isaaclab_tasks_experimental.manager_based.manipulation.reach.reach_env_cfg import ReachEnvCfg + +## +# Pre-defined configs +## +from isaaclab_assets import FRANKA_PANDA_CFG # isort: skip + + +## +# Environment configuration +## + + +@configclass +class FrankaReachEnvCfg(ReachEnvCfg): + sim: SimulationCfg = SimulationCfg( + dt=1 / 60, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=50, + nconmax=20, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ), + ) + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # switch robot to franka + self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + # override rewards + self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["panda_hand"] + self.rewards.end_effector_position_tracking_fine_grained.params["asset_cfg"].body_names = ["panda_hand"] + self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["panda_hand"] + + # override actions + self.actions.arm_action = mdp.JointPositionActionCfg( + asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True + ) + # override command generator body + # end-effector is along z-direction + self.commands.ee_pose.body_name = "panda_hand" + self.commands.ee_pose.ranges.pitch = (math.pi, math.pi) + + +@configclass +class FrankaReachEnvCfg_PLAY(FrankaReachEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # disable randomization for play + self.observations.policy.enable_corruption = False diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/__init__.py new file mode 100644 index 000000000000..85908c158052 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/__init__.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# UR10 env disabled: USD asset has composition errors (broken asset file). +# Fails on both torch baseline and warp with: +# RuntimeError: USD stage has composition errors while loading provided stage +# Re-enable once the UR10 USD asset is fixed. + +# import gymnasium as gym +# from isaaclab_tasks.manager_based.manipulation.reach.config.ur_10 import agents + +# gym.register( +# id="Isaac-Reach-UR10-Warp-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:UR10ReachEnvCfg", +# "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", +# }, +# ) + +# gym.register( +# id="Isaac-Reach-UR10-Warp-Play-v0", +# entry_point="isaaclab_experimental.envs:ManagerBasedRLEnvWarp", +# disable_env_checker=True, +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.joint_pos_env_cfg:UR10ReachEnvCfg_PLAY", +# "rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml", +# "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_ppo_cfg:UR10ReachPPORunnerCfg", +# "skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml", +# }, +# ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/joint_pos_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/joint_pos_env_cfg.py new file mode 100644 index 000000000000..7eddda91f5ac --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/config/ur_10/joint_pos_env_cfg.py @@ -0,0 +1,74 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import math + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.utils import configclass + +import isaaclab_tasks_experimental.manager_based.manipulation.reach.mdp as mdp +from isaaclab_tasks_experimental.manager_based.manipulation.reach.reach_env_cfg import ReachEnvCfg + +## +# Pre-defined configs +## +from isaaclab_assets import UR10_CFG # isort: skip + + +## +# Environment configuration +## + + +@configclass +class UR10ReachEnvCfg(ReachEnvCfg): + sim: SimulationCfg = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=50, + nconmax=20, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + ) + ) + + def __post_init__(self): + # post init of parent + super().__post_init__() + + # switch robot to ur10 + self.scene.robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + # override events + self.events.reset_robot_joints.params["position_range"] = (0.75, 1.25) + # override rewards + self.rewards.end_effector_position_tracking.params["asset_cfg"].body_names = ["ee_link"] + self.rewards.end_effector_position_tracking_fine_grained.params["asset_cfg"].body_names = ["ee_link"] + self.rewards.end_effector_orientation_tracking.params["asset_cfg"].body_names = ["ee_link"] + # override actions + self.actions.arm_action = mdp.JointPositionActionCfg( + asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True + ) + # override command generator body + # end-effector is along x-direction + self.commands.ee_pose.body_name = "ee_link" + self.commands.ee_pose.ranges.pitch = (math.pi / 2, math.pi / 2) + + +@configclass +class UR10ReachEnvCfg_PLAY(UR10ReachEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + # make a smaller scene for play + self.scene.num_envs = 50 + self.scene.env_spacing = 2.5 + # disable randomization for play + self.observations.policy.enable_corruption = False diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/__init__.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/__init__.py new file mode 100644 index 000000000000..b0845d6735b6 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This sub-module contains the functions that are specific to the reach environments.""" + +from isaaclab_experimental.envs.mdp import * # noqa: F401, F403 + +from .rewards import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/rewards.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/rewards.py new file mode 100644 index 000000000000..e378bbfcee80 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/mdp/rewards.py @@ -0,0 +1,166 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp-first reward terms for the reach task. + +All functions follow the ``func(env, out, **params) -> None`` signature. +Command tensors are cached as zero-copy warp views on first call. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import warp as wp + +from isaaclab.assets import Articulation +from isaaclab.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedRLEnv + + +# --------------------------------------------------------------------------- +# position_command_error +# --------------------------------------------------------------------------- + + +@wp.kernel +def _position_command_error_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + root_quat_w: wp.array(dtype=wp.quatf), + body_pos_w: wp.array(dtype=wp.vec3f, ndim=2), + cmd: wp.array(dtype=wp.float32, ndim=2), + body_idx: int, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + # desired position in body frame -> world frame + des_b = wp.vec3f(cmd[i, 0], cmd[i, 1], cmd[i, 2]) + des_w = root_pos_w[i] + wp.quat_rotate(root_quat_w[i], des_b) + # current end-effector position + cur_w = body_pos_w[i, body_idx] + dx = cur_w[0] - des_w[0] + dy = cur_w[1] - des_w[1] + dz = cur_w[2] - des_w[2] + out[i] = wp.sqrt(dx * dx + dy * dy + dz * dz) + + +def position_command_error(env: ManagerBasedRLEnv, out, command_name: str, asset_cfg: SceneEntityCfg) -> None: + """Penalize tracking of the position error using L2-norm.""" + fn = position_command_error + asset: Articulation = env.scene[asset_cfg.name] + if not hasattr(fn, "_cmd_wp") or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + wp.launch( + kernel=_position_command_error_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_pos_w.warp, + asset.data.root_quat_w.warp, + asset.data.body_pos_w.warp, + fn._cmd_wp, + asset_cfg.body_ids[0], + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# position_command_error_tanh +# --------------------------------------------------------------------------- + + +@wp.kernel +def _position_command_error_tanh_kernel( + root_pos_w: wp.array(dtype=wp.vec3f), + root_quat_w: wp.array(dtype=wp.quatf), + body_pos_w: wp.array(dtype=wp.vec3f, ndim=2), + cmd: wp.array(dtype=wp.float32, ndim=2), + body_idx: int, + inv_std: float, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + des_b = wp.vec3f(cmd[i, 0], cmd[i, 1], cmd[i, 2]) + des_w = root_pos_w[i] + wp.quat_rotate(root_quat_w[i], des_b) + cur_w = body_pos_w[i, body_idx] + dx = cur_w[0] - des_w[0] + dy = cur_w[1] - des_w[1] + dz = cur_w[2] - des_w[2] + dist = wp.sqrt(dx * dx + dy * dy + dz * dz) + out[i] = 1.0 - wp.tanh(dist * inv_std) + + +def position_command_error_tanh( + env: ManagerBasedRLEnv, out, std: float, command_name: str, asset_cfg: SceneEntityCfg +) -> None: + """Reward tracking of the position using the tanh kernel.""" + fn = position_command_error_tanh + asset: Articulation = env.scene[asset_cfg.name] + if not hasattr(fn, "_cmd_wp") or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + wp.launch( + kernel=_position_command_error_tanh_kernel, + dim=env.num_envs, + inputs=[ + asset.data.root_pos_w.warp, + asset.data.root_quat_w.warp, + asset.data.body_pos_w.warp, + fn._cmd_wp, + asset_cfg.body_ids[0], + 1.0 / std, + out, + ], + device=env.device, + ) + + +# --------------------------------------------------------------------------- +# orientation_command_error +# --------------------------------------------------------------------------- + + +@wp.kernel +def _orientation_command_error_kernel( + root_quat_w: wp.array(dtype=wp.quatf), + body_quat_w: wp.array(dtype=wp.quatf, ndim=2), + cmd: wp.array(dtype=wp.float32, ndim=2), + body_idx: int, + out: wp.array(dtype=wp.float32), +): + i = wp.tid() + # desired quat in body frame -> world frame: q_des_w = q_root * q_des_b + des_b = wp.quatf(cmd[i, 3], cmd[i, 4], cmd[i, 5], cmd[i, 6]) + des_w = root_quat_w[i] * des_b + # current ee orientation + cur_w = body_quat_w[i, body_idx] + # shortest-path error: angle of q_err = cur^-1 * des + q_err = wp.quat_inverse(cur_w) * des_w + # error magnitude = 2 * acos(|w|) (w component of the error quaternion) + qw = wp.abs(q_err[3]) + qw = wp.clamp(qw, 0.0, 1.0) + out[i] = 2.0 * wp.acos(qw) + + +def orientation_command_error(env: ManagerBasedRLEnv, out, command_name: str, asset_cfg: SceneEntityCfg) -> None: + """Penalize tracking orientation error using shortest path.""" + fn = orientation_command_error + asset: Articulation = env.scene[asset_cfg.name] + if not hasattr(fn, "_cmd_wp") or fn._cmd_name != command_name: + cmd = env.command_manager.get_command(command_name) + fn._cmd_wp = cmd if isinstance(cmd, wp.array) else wp.from_torch(cmd) + fn._cmd_name = command_name + wp.launch( + kernel=_orientation_command_error_kernel, + dim=env.num_envs, + inputs=[asset.data.root_quat_w.warp, asset.data.body_quat_w.warp, fn._cmd_wp, asset_cfg.body_ids[0], out], + device=env.device, + ) diff --git a/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/reach_env_cfg.py b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/reach_env_cfg.py new file mode 100644 index 000000000000..d019b0531402 --- /dev/null +++ b/source/isaaclab_tasks_experimental/isaaclab_tasks_experimental/manager_based/manipulation/reach/reach_env_cfg.py @@ -0,0 +1,206 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab_experimental.managers import ObservationTermCfg as ObsTerm +from isaaclab_experimental.managers import RewardTermCfg as RewTerm +from isaaclab_experimental.managers import SceneEntityCfg +from isaaclab_experimental.managers import TerminationTermCfg as DoneTerm + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ActionTermCfg as ActionTerm +from isaaclab.managers import CurriculumTermCfg as CurrTerm +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.utils import configclass +from isaaclab.utils.noise import UniformNoiseCfg as Unoise + +import isaaclab_tasks_experimental.manager_based.manipulation.reach.mdp as mdp + +## +# Scene definition +## + + +@configclass +class ReachSceneCfg(InteractiveSceneCfg): + """Configuration for the scene with a robotic arm.""" + + # world + ground = AssetBaseCfg( + prim_path="/World/ground", + spawn=sim_utils.GroundPlaneCfg(), + init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)), + ) + + # robots + robot: ArticulationCfg = MISSING + + # lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=2500.0), + ) + + +## +# MDP settings +## + + +@configclass +class CommandsCfg: + """Command terms for the MDP.""" + + ee_pose = mdp.UniformPoseCommandCfg( + asset_name="robot", + body_name=MISSING, + resampling_time_range=(4.0, 4.0), + debug_vis=True, + ranges=mdp.UniformPoseCommandCfg.Ranges( + pos_x=(0.35, 0.65), + pos_y=(-0.2, 0.2), + pos_z=(0.15, 0.5), + roll=(0.0, 0.0), + pitch=MISSING, # depends on end-effector axis + yaw=(-3.14, 3.14), + ), + ) + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + arm_action: ActionTerm = MISSING + gripper_action: ActionTerm | None = None + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # observation terms (order preserved) + joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) + joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) + pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"}) + actions = ObsTerm(func=mdp.last_action) + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + reset_base = EventTerm( + func=mdp.reset_root_state_uniform, + mode="reset", + params={"pose_range": {}, "velocity_range": {}}, + ) + + reset_robot_joints = EventTerm( + func=mdp.reset_joints_by_scale, + mode="reset", + params={ + "position_range": (0.5, 1.5), + "velocity_range": (0.0, 0.0), + }, + ) + + +@configclass +class RewardsCfg: + """Reward terms for the MDP.""" + + # task terms + end_effector_position_tracking = RewTerm( + func=mdp.position_command_error, + weight=-0.2, + params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, + ) + end_effector_position_tracking_fine_grained = RewTerm( + func=mdp.position_command_error_tanh, + weight=0.1, + params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "std": 0.1, "command_name": "ee_pose"}, + ) + end_effector_orientation_tracking = RewTerm( + func=mdp.orientation_command_error, + weight=-0.1, + params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"}, + ) + + # action penalty + action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001) + joint_vel = RewTerm( + func=mdp.joint_vel_l2, + weight=-0.0001, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + + +@configclass +class TerminationsCfg: + """Termination terms for the MDP.""" + + time_out = DoneTerm(func=mdp.time_out, time_out=True) + + +@configclass +class CurriculumCfg: + """Curriculum terms for the MDP.""" + + action_rate = CurrTerm( + func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500} + ) + + joint_vel = CurrTerm( + func=mdp.modify_reward_weight, params={"term_name": "joint_vel", "weight": -0.001, "num_steps": 4500} + ) + + +## +# Environment configuration +## + + +@configclass +class ReachEnvCfg(ManagerBasedRLEnvCfg): + """Configuration for the reach end-effector pose tracking environment.""" + + # Scene settings + scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + commands: CommandsCfg = CommandsCfg() + # MDP settings + rewards: RewardsCfg = RewardsCfg() + terminations: TerminationsCfg = TerminationsCfg() + events: EventCfg = EventCfg() + curriculum: CurriculumCfg = CurriculumCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 2 + self.sim.render_interval = self.decimation + self.episode_length_s = 12.0 + self.viewer.eye = (3.5, 3.5, 3.5) + # simulation settings + self.sim.dt = 1.0 / 60.0 diff --git a/source/isaaclab_tasks_experimental/pyproject.toml b/source/isaaclab_tasks_experimental/pyproject.toml new file mode 100644 index 000000000000..d90ac3536f16 --- /dev/null +++ b/source/isaaclab_tasks_experimental/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_tasks_experimental/setup.py b/source/isaaclab_tasks_experimental/setup.py new file mode 100644 index 000000000000..4e6efc076704 --- /dev/null +++ b/source/isaaclab_tasks_experimental/setup.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_tasks_experimental' python package.""" + +import os + +import toml +from setuptools import find_packages, setup + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +# Installation operation +setup( + name="isaaclab_tasks_experimental", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + include_package_data=True, + python_requires=">=3.12", + install_requires=["isaaclab_tasks"], + packages=find_packages(), + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_teleop/changelog.d/.gitkeep b/source/isaaclab_teleop/changelog.d/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/source/isaaclab_teleop/changelog.d/rwiltz-restore-legacy-teleop.rst b/source/isaaclab_teleop/changelog.d/rwiltz-restore-legacy-teleop.rst new file mode 100644 index 000000000000..4c534f674c8a --- /dev/null +++ b/source/isaaclab_teleop/changelog.d/rwiltz-restore-legacy-teleop.rst @@ -0,0 +1,11 @@ +Changed +^^^^^^^ + +* Changed ``--teleop_device`` default to ``None`` in ``teleop_se3_agent.py`` + and ``record_demos.py``. When omitted, the IsaacTeleop pipeline is used if + the env configures ``isaac_teleop``; otherwise keyboard is used as fallback. + When explicitly provided, the scripts use the legacy ``teleop_devices`` path + and error out if no matching entry exists. +* Removed automatic ``--xr`` detection from ``--teleop_device`` containing + ``"handtracking"``. Users who need XR with the legacy path should pass + ``--xr`` explicitly. diff --git a/source/isaaclab_teleop/config/extension.toml b/source/isaaclab_teleop/config/extension.toml new file mode 100644 index 000000000000..947103618b9a --- /dev/null +++ b/source/isaaclab_teleop/config/extension.toml @@ -0,0 +1,21 @@ +[package] +# Semantic Versioning is used: https://semver.org/ +version = "0.3.9" + +# Description +title = "Isaac Lab Teleop" +description="Extension providing IsaacTeleop-based teleoperation device for Isaac Lab" +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["kit", "robotics", "teleoperation", "xr", "isaaclab"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +# Main python module this extension provides. +[[python.module]] +name = "isaaclab_teleop" diff --git a/source/isaaclab_teleop/docs/CHANGELOG.rst b/source/isaaclab_teleop/docs/CHANGELOG.rst new file mode 100644 index 000000000000..3856e6ec1346 --- /dev/null +++ b/source/isaaclab_teleop/docs/CHANGELOG.rst @@ -0,0 +1,188 @@ +Changelog +--------- + +0.3.9 (2026-04-29) +~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed installation failure on Windows by adding ``platform_system == 'Linux'`` + marker to the ``isaacteleop`` dependency, which is only available on Linux. + + +0.3.8 (2026-04-24) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Switched :class:`~isaaclab_teleop.xr_anchor_utils.XrAnchorSynchronizer` to import + ``get_current_stage`` from :mod:`isaaclab.sim.utils.stage` instead of + ``isaacsim.core.experimental.utils.stage``, aligning with the Isaac Lab API. + + +0.3.7 (2026-04-22) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated XR anchor prim creation to use :func:`isaaclab.sim.utils.prims.create_prim` + instead of ``isaacsim.core.experimental.prims.XformPrim``. + + +0.3.6 (2026-04-21) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :attr:`~isaaclab_teleop.IsaacTeleopCfg.control_channel_uuid` for + receiving teleop control commands (start/stop/reset) from the headset via + an OpenXR message channel. The channel is managed by TeleopCore's native + ``teleop_control_pipeline`` mechanism. + +* Added :class:`~isaaclab_teleop.teleop_message_processor.TeleopMessageProcessor` + retargeter that converts raw message-channel payloads into boolean control + signals for :class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager`. + +* Added :func:`~isaaclab_teleop.poll_control_events` helper, + :class:`~isaaclab_teleop.ControlEvents` dataclass, and + :class:`~isaaclab_teleop.SupportsControlEvents` protocol for polling + start/stop/reset signals from any teleop device in a single call. + +* Added :attr:`~isaaclab_teleop.IsaacTeleopDevice.last_control_events` + property exposing the most recent control events from the message channel. + Control events are automatically bridged to legacy + :meth:`~isaaclab_teleop.IsaacTeleopDevice.add_callback` callbacks. + +Changed +^^^^^^^ + +* :meth:`~isaaclab_teleop.IsaacTeleopDevice.reset` now injects a + ``reset`` :class:`ExecutionEvents` into TeleopCore's ``ComputeContext`` + on the next pipeline step, resetting retargeter cross-step state. + Previously only the XR anchor was reset. + +Fixed +^^^^^ + +* Fixed ``record_demos.py`` not resetting the teleop device when a + success condition triggers an environment reset. Retargeters now + reinitialize their state on success-triggered resets. + +* Fixed shutdown hang caused by Kit's pre-shutdown callback calling + ``stop()`` while the simulation loop was still running. The callback + now uses the same graceful teardown path as the XR-disabled handler. + +0.3.5 (2026-04-06) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``cloudxr_env_file`` and ``auto_launch_cloudxr`` parameters to + :func:`~isaaclab_teleop.create_isaac_teleop_device`, + :class:`~isaaclab_teleop.IsaacTeleopDevice`, and + :class:`~isaaclab_teleop.session_lifecycle.TeleopSessionLifecycle` for + auto-launching the CloudXR runtime and WSS proxy during session startup. + When a ``.env`` file path is provided via ``--cloudxr_env``, users no + longer need to run ``python -m isaacteleop.cloudxr`` in a separate + terminal. +* Added device-specific CloudXR ``.env`` profiles: + :data:`~isaaclab_teleop.CLOUDXR_JS_ENV` (Quest/Pico, ``auto-webrtc``) and + :data:`~isaaclab_teleop.CLOUDXR_AVP_ENV` (Apple Vision Pro, ``auto-native``). +* Added ``dex-retargeting==0.5.0`` as a required dependency on Linux x86_64. + +Changed +^^^^^^^ + +* Made ``isaacteleop[retargeters,ui,cloudxr]~=1.2.0`` a required dependency of + ``isaaclab_teleop`` (previously an optional extra via + ``isaaclab_teleop[teleop]``). + + +0.3.4 (2026-03-17) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :attr:`~isaaclab_teleop.IsaacTeleopCfg.target_frame_prim_path` for + config-driven frame rebasing. When set to a USD prim path, the device + automatically reads the prim's world transform each frame and uses its + inverse as the ``target_T_world`` rebase matrix, so all output poses are + expressed in the target frame (e.g. robot base link for IK). + +* Added ``target_T_world`` parameter to + :meth:`~isaaclab_teleop.IsaacTeleopDevice.advance` for rebasing all output + poses into an arbitrary target coordinate frame (e.g. robot base link for + IK). Accepts :class:`numpy.ndarray`, :class:`torch.Tensor`, or + ``wp.array``. + + +0.3.3 (2026-03-13) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed race condition in headless XR where ``xr.profile.ar.enabled`` was set + in the ``.kit`` file before the teleop bridge extension finished loading, + causing ``BridgeComponent`` to miss its lifecycle callbacks. The setting is + now deferred to + :meth:`~isaaclab_teleop.session_lifecycle.TeleopSessionLifecycle._ensure_xr_ar_profile_enabled` + after all extensions have loaded. + + +0.3.2 (2026-03-12) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Add nvidia-srl-usd-to-urdf dependency to isaaclab_teleop extension. + + +0.3.1 (2026-02-26) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Add cleanup for Isaac Teleop session when Stop XR button is clicked and when Kit is closed. + + +0.3.0 (2026-02-26) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Update Isaac Teleop API usage for querying controller button states. + + +0.2.0 (2026-02-24) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :meth:`~isaaclab_teleop.session_lifecycle.TeleopSessionLifecycle._on_request_required_extensions` to request required + OpenXR extensions at runtime based on Teleop devices needed for the specified environment. + +0.1.0 (2026-02-18) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Initial release of the ``isaaclab_teleop`` extension. + +* Added :class:`~isaaclab_teleop.IsaacTeleopDevice` providing a unified teleoperation interface + that manages IsaacTeleop sessions, XR anchor synchronization, and retargeting pipelines within + Isaac Lab environments. + +* Added :class:`~isaaclab_teleop.IsaacTeleopCfg` for pipeline-based configuration of + retargeting, XR anchors, and device settings directly in environment configs. diff --git a/source/isaaclab_teleop/docs/README.md b/source/isaaclab_teleop/docs/README.md new file mode 100644 index 000000000000..bd6a3c7f6c7b --- /dev/null +++ b/source/isaaclab_teleop/docs/README.md @@ -0,0 +1,165 @@ +# Isaac Lab Teleop + +`isaaclab_teleop` integrates the [IsaacTeleop](https://github.com/NVIDIA/IsaacTeleop) retargeting +framework with Isaac Lab, providing a single teleoperation device class that manages OpenXR sessions, +XR anchor synchronization, retargeting pipelines, and action-tensor generation. + +## Key Features + +- **`IsaacTeleopDevice`** -- unified device that wraps an IsaacTeleop `TeleopSession` behind a + context-manager interface. Returns a flat `torch.Tensor` action each frame. +- **`IsaacTeleopCfg` / `XrCfg`** -- declarative configuration for retargeting pipelines, XR anchor + placement, rotation modes, and tuning UI. +- **Deferred session creation** -- if OpenXR handles are not yet available (e.g. the user has not + clicked *Start AR*), the session is created transparently on the first `advance()` call once + handles appear. +- **Teleop commands** -- register callbacks for `START`, `STOP`, and `RESET` commands dispatched via + XR controller buttons or the Carbonite message bus. +- **XR anchor rotation modes** -- `FIXED`, `FOLLOW_PRIM`, `FOLLOW_PRIM_SMOOTHED`, and `CUSTOM` + modes for controlling how the anchor orientation tracks the reference prim. +- **Retargeting tuning UI** -- optional ImGui window for real-time adjustment of retargeter + parameters when `retargeters_to_tune` is provided. + +## Architecture + +`IsaacTeleopDevice` composes three focused collaborators: + +| Component | Responsibility | +|---|---| +| `XrAnchorManager` | XR anchor prim setup, dynamic/static synchronization, coordinate-frame transform computation | +| `TeleopSessionLifecycle` | Pipeline building, OpenXR handle acquisition, session create/destroy, action-tensor extraction | +| `CommandHandler` | Callback registration and XR message-bus command dispatch | + +## Usage + +### 1. Configure Your Environment + +Add an `isaac_teleop` attribute to your environment config: + +```python +from isaaclab_teleop import IsaacTeleopCfg, XrCfg + +@configclass +class MyEnvCfg(ManagerBasedRLEnvCfg): + def __post_init__(self): + super().__post_init__() + + pipeline, retargeters = my_pipeline_builder() + self.isaac_teleop = IsaacTeleopCfg( + xr_cfg=XrCfg( + anchor_pos=(0.5, 0.0, 0.5), + anchor_prim_path="{ENV_REGEX_NS}/Robot/base_link", + ), + pipeline_builder=lambda: pipeline, + retargeters_to_tune=lambda: retargeters, + ) +``` + +> Both `pipeline_builder` and `retargeters_to_tune` must be **callables** (lambdas or functions) +> because `@configclass` deep-copies mutable attributes and retargeter objects often contain +> non-picklable handles. + +### 2. Define a Pipeline Builder + +Create a function that builds your IsaacTeleop retargeting pipeline. The builder should return an +`OutputCombiner` with an `"action"` key containing the flattened action tensor (typically via +`TensorReorderer`). Optionally return a list of retargeters to expose in the tuning UI: + +```python +from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource +from isaacteleop.retargeters import ( + GripperRetargeter, Se3AbsRetargeter, TensorReorderer, +) +from isaacteleop.retargeting_engine.interface import OutputCombiner + +def my_pipeline_builder(): + controllers = ControllersSource(name="controllers") + se3 = Se3AbsRetargeter(cfg, name="ee_pose") + # ... connect retargeters and flatten with TensorReorderer ... + pipeline = OutputCombiner({"action": reorderer.output("output")}) + return pipeline, [se3] +``` + +### 3. Run Teleoperation + +The existing teleop scripts automatically detect `isaac_teleop` in the environment config: + +```bash +./isaaclab.sh -p scripts/environments/teleoperation/teleop_se3_agent.py \ + --task My-IsaacTeleop-Env-v0 +``` + +### 4. Programmatic Usage + +`IsaacTeleopDevice` supports Python's context-manager protocol: + +```python +from isaaclab_teleop import IsaacTeleopCfg, IsaacTeleopDevice + +cfg = IsaacTeleopCfg(pipeline_builder=my_pipeline_builder) + +with IsaacTeleopDevice(cfg) as device: + device.add_callback("RESET", env.reset) + while running: + action = device.advance() + if action is not None: + env.step(action.repeat(num_envs, 1)) +``` + +`advance()` returns `None` while waiting for the OpenXR session, so callers can continue +rendering without blocking. + +## Configuration Reference + +### `IsaacTeleopCfg` + +| Field | Type | Default | Description | +|---|---|---|---| +| `xr_cfg` | `XrCfg` | `XrCfg()` | XR anchor position, rotation, and dynamic-anchoring settings | +| `pipeline_builder` | `Callable[[], OutputCombiner]` | *required* | Builds the retargeting pipeline | +| `retargeters_to_tune` | `Callable[[], list[BaseRetargeter]] \| None` | `None` | Retargeters to expose in the tuning UI | +| `plugins` | `list[PluginConfig]` | `[]` | IsaacTeleop plugin configurations | +| `sim_device` | `str` | `"cuda:0"` | Torch device for output action tensors | +| `teleoperation_active_default` | `bool` | `False` | Whether teleoperation is active on session start | +| `app_name` | `str` | `"IsaacLabTeleop"` | Application name for the IsaacTeleop session | + +### `XrCfg` + +| Field | Type | Default | Description | +|---|---|---|---| +| `anchor_pos` | `tuple[float, float, float]` | `(0, 0, 0)` | XR anchor position in world frame | +| `anchor_rot` | `tuple[float, float, float, float]` | `(0, 0, 0, 1)` | XR anchor rotation (quaternion xyzw) | +| `anchor_prim_path` | `str \| None` | `None` | Prim to attach anchor to for dynamic positioning | +| `anchor_rotation_mode` | `XrAnchorRotationMode` | `FIXED` | How anchor rotation tracks the reference prim | +| `anchor_rotation_smoothing_time` | `float` | `1.0` | Slerp time constant (seconds) for `FOLLOW_PRIM_SMOOTHED` mode | +| `anchor_rotation_custom_func` | `Callable` | identity | Custom rotation function for `CUSTOM` mode | +| `near_plane` | `float` | `0.15` | Near clipping plane distance for the XR device | +| `fixed_anchor_height` | `bool` | `True` | Fix anchor height to initial value of the reference prim | + +## Utilities + +- **`remove_camera_configs(env_cfg)`** -- strips camera sensors and their associated observation + terms from an environment config. XR does not support additional cameras as they cause rendering + conflicts. + +## Run with Docker + +Teleoperation with Isaac Lab runs in a **single container**. Build the image yourself and run a single container. **Do not use Docker Compose** for this workflow (no multi-container setup). Everything runs inside one container with Isaac Lab. + +Inside the container: install Isaac Teleop once (`./isaaclab.sh -p -m pip install 'isaacteleop[retargeters,cloudxr]~=1.0.0' --extra-index-url https://pypi.nvidia.com`), then start the CloudXR runtime with `--accept-eula` so there is no interactive EULA prompt, and run your teleop script. Example: + +```bash +./isaaclab.sh -p -m isaacteleop.cloudxr --accept-eula & +source ~/.cloudxr/run/cloudxr.env +./isaaclab.sh -p scripts/tools/record_demos.py --task Isaac-PickPlace-Locomanipulation-G1-Abs-v0 --num_demos 5 --dataset_file ./datasets/dataset.hdf5 --xr --visualizer kit +``` + +In the Isaac Sim UI, set the AR panel to **System OpenXR Runtime** and click **Start XR**. For the full flow and options, see the [CloudXR teleoperation how-to](https://isaac-sim.github.io/IsaacLab/main/source/how-to/cloudxr_teleoperation.html) and [Isaac Teleop Quick Start](https://nvidia.github.io/IsaacTeleop/main/getting_started/quick_start.html). + +For a fully headless experience, replace `--visualizer kit` with `--headless` when running docker and XR teleop session will run automatically. + +## Dependencies + +- **`isaaclab`** -- core Isaac Lab framework +- **`isaacteleop`** -- IsaacTeleop retargeting engine, device I/O, and session management +- **`isaacsim`** -- Isaac Sim runtime (provides the Kit XR bridge for OpenXR handle acquisition) diff --git a/source/isaaclab_teleop/isaaclab_teleop/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/__init__.py new file mode 100644 index 000000000000..930cf1225e26 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package providing IsaacTeleop-based teleoperation for Isaac Lab.""" + +import os +import toml +ISAACLAB_TELEOP_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" +ISAACLAB_TELEOP_METADATA = toml.load(os.path.join(ISAACLAB_TELEOP_EXT_DIR, "config", "extension.toml")) +"""Extension metadata dictionary parsed from the extension.toml file.""" +__version__ = ISAACLAB_TELEOP_METADATA["package"]["version"] + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_teleop/isaaclab_teleop/__init__.pyi b/source/isaaclab_teleop/isaaclab_teleop/__init__.pyi new file mode 100644 index 000000000000..045f16f0c690 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/__init__.pyi @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "CLOUDXR_AVP_ENV", + "CLOUDXR_JS_ENV", + "ControlEvents", + "IsaacTeleopCfg", + "IsaacTeleopDevice", + "SupportsControlEvents", + "TELEOP_CONTROL_CHANNEL_UUID", + "XrAnchorRotationMode", + "XrAnchorSynchronizer", + "XrCfg", + "create_isaac_teleop_device", + "poll_control_events", + "remove_camera_configs", +] + +from .control_events import TELEOP_CONTROL_CHANNEL_UUID, ControlEvents, SupportsControlEvents, poll_control_events +from .isaac_teleop_cfg import CLOUDXR_AVP_ENV, CLOUDXR_JS_ENV, IsaacTeleopCfg +from .isaac_teleop_device import IsaacTeleopDevice, create_isaac_teleop_device +from .xr_anchor_utils import XrAnchorSynchronizer +from .xr_cfg import XrAnchorRotationMode, XrCfg, remove_camera_configs diff --git a/source/isaaclab_teleop/isaaclab_teleop/avp-cloudxr.env b/source/isaaclab_teleop/isaaclab_teleop/avp-cloudxr.env new file mode 100644 index 000000000000..df2de8eb3811 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/avp-cloudxr.env @@ -0,0 +1,2 @@ +NV_DEVICE_PROFILE=auto-native +NV_CXR_ENABLE_PUSH_DEVICES=0 diff --git a/source/isaaclab_teleop/isaaclab_teleop/cloudxrjs-cloudxr.env b/source/isaaclab_teleop/isaaclab_teleop/cloudxrjs-cloudxr.env new file mode 100644 index 000000000000..55e16229f3fa --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/cloudxrjs-cloudxr.env @@ -0,0 +1,2 @@ +NV_DEVICE_PROFILE=auto-webrtc +NV_CXR_ENABLE_PUSH_DEVICES=0 diff --git a/source/isaaclab_teleop/isaaclab_teleop/command_handler.py b/source/isaaclab_teleop/isaaclab_teleop/command_handler.py new file mode 100644 index 000000000000..7e999e638f5e --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/command_handler.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Teleop command callback registry for IsaacTeleop-based teleoperation.""" + +from __future__ import annotations + +from collections.abc import Callable + + +class CommandHandler: + """Lightweight callback registry for teleop commands. + + Scripts can register callbacks for ``START``, ``STOP``, and ``RESET`` + commands via :meth:`add_callback`. The callbacks are dispatched by + :meth:`fire` when the corresponding command is received. + + Note: + In the current architecture control signals arrive through + TeleopCore's ``teleop_control_pipeline`` and are consumed via + :func:`~isaaclab_teleop.poll_control_events`. This registry is + retained for backward compatibility with scripts that register + callbacks before the pipeline-based path was introduced. + """ + + def __init__(self) -> None: + self._callbacks: dict[str, Callable] = {} + + @property + def callbacks(self) -> dict[str, Callable]: + """The registered callbacks dictionary (read-only view).""" + return self._callbacks + + def add_callback(self, key: str, func: Callable) -> None: + """Add a callback function for a teleop command. + + Args: + key: The command type to bind to. Valid values are + ``"START"``, ``"STOP"``, ``"RESET"``, and ``"R"`` + (``"R"`` is mapped to ``"RESET"`` for compatibility). + func: The function to call when the command is received. + Should take no arguments. + """ + if key == "R": + key = "RESET" + self._callbacks[key] = func + + def fire(self, command: str) -> None: + """Dispatch a named command callback if registered. + + Args: + command: The command name (e.g. ``"START"``, ``"STOP"``, ``"RESET"``). + """ + if command in self._callbacks: + self._callbacks[command]() + + def cleanup(self) -> None: + """Release resources (no-op; retained for API compatibility).""" diff --git a/source/isaaclab_teleop/isaaclab_teleop/control_events.py b/source/isaaclab_teleop/isaaclab_teleop/control_events.py new file mode 100644 index 000000000000..69ddd7c1ed21 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/control_events.py @@ -0,0 +1,78 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Teleop control events dataclass, polling helper, and well-known channel UUID.""" + +from __future__ import annotations + +import dataclasses +import uuid +from typing import Protocol, runtime_checkable + +TELEOP_CONTROL_CHANNEL_UUID: bytes = uuid.uuid5(uuid.NAMESPACE_DNS, "teleop_command").bytes +"""Well-known 16-byte UUID for the teleop control message channel. + +Derived deterministically as ``uuid5(NAMESPACE_DNS, "teleop_command")`` +so that both the Isaac Lab server and the Quest client can independently +compute the same channel identifier from the string ``"teleop_command"``. + +Pass this value as :attr:`~isaaclab_teleop.IsaacTeleopCfg.control_channel_uuid` +when configuring a teleop session with message-channel-based control. +""" + + +@dataclasses.dataclass(frozen=True) +class ControlEvents: + """Result of :func:`poll_control_events`. + + Attributes: + is_active: ``True`` when the teleop state machine is in RUNNING, + ``False`` when PAUSED or STOPPED, or ``None`` when no control + channel is configured (callers should leave their own active + flag unchanged). + should_reset: ``True`` when a reset was triggered this frame. + """ + + is_active: bool | None = None + should_reset: bool = False + + +_NO_OP_EVENTS = ControlEvents() +"""Shared immutable sentinel returned when no control channel is active.""" + + +@runtime_checkable +class SupportsControlEvents(Protocol): + """Duck type for teleop devices that expose control events.""" + + @property + def last_control_events(self) -> ControlEvents: ... + + +def poll_control_events(teleop_interface: SupportsControlEvents | object) -> ControlEvents: + """Poll control events from any teleop interface. + + Safe to call with any device type (keyboard, spacemouse, etc.). + Devices that do not expose the message-channel protocol return + a no-op :class:`ControlEvents`. + + Args: + teleop_interface: The teleop device to poll. Devices implementing + :class:`SupportsControlEvents` provide full type safety; other + devices are handled gracefully via duck typing. + + Returns: + A :class:`ControlEvents` with the latest start/stop and reset + signals. + """ + events = getattr(teleop_interface, "last_control_events", None) + if events is None: + return _NO_OP_EVENTS + if isinstance(events, ControlEvents): + return events + return ControlEvents( + is_active=getattr(events, "is_active", None), + should_reset=getattr(events, "should_reset", False), + ) diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/__init__.py new file mode 100644 index 000000000000..08f3fd2be574 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Deprecated teleoperation classes consolidated under isaaclab_teleop. + +This sub-package contains legacy OpenXR device classes, retargeters, and the +teleop device factory that were previously located in +:mod:`isaaclab.devices.openxr` and :mod:`isaaclab.devices`. They are preserved +here for backward compatibility while users migrate to the new +:class:`~isaaclab_teleop.IsaacTeleopDevice` API. +""" diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.py new file mode 100644 index 000000000000..e1e354cc726b --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Legacy OpenXR teleoperation devices. + +.. deprecated:: + This package is deprecated. Please migrate to :mod:`isaaclab_teleop` which + provides the :class:`~isaaclab_teleop.IsaacTeleopDevice` as a replacement + for :class:`OpenXRDevice` and :class:`ManusVive`. + + XR configuration classes (:class:`XrCfg`, :class:`XrAnchorRotationMode`, + :func:`remove_camera_configs`) have moved to :mod:`isaaclab_teleop.xr_cfg`. + Anchor utilities (:class:`XrAnchorSynchronizer`) have moved to + :mod:`isaaclab_teleop.xr_anchor_utils`. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.pyi b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.pyi new file mode 100644 index 000000000000..a7a2d80b6c0d --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/__init__.pyi @@ -0,0 +1,18 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ManusVive", + "ManusViveCfg", + "OpenXRDevice", + "OpenXRDeviceCfg", + "XrAnchorRotationMode", + "XrCfg", + "remove_camera_configs", +] + +from .manus_vive import ManusVive, ManusViveCfg +from .openxr_device import OpenXRDevice, OpenXRDeviceCfg +from .xr_cfg import XrAnchorRotationMode, XrCfg, remove_camera_configs diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/common.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/common.py new file mode 100644 index 000000000000..088641c2886a --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/common.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Standard set of hand joint names based on OpenXR specification. +# Input devices for dexterous hands can use this as a reference, +# but may provide any subset or superset of these joints. +HAND_JOINT_NAMES = [ + # Palm + "palm", + # Wrist + "wrist", + # Thumb + "thumb_metacarpal", + "thumb_proximal", + "thumb_distal", + "thumb_tip", + # Index + "index_metacarpal", + "index_proximal", + "index_intermediate", + "index_distal", + "index_tip", + # Middle + "middle_metacarpal", + "middle_proximal", + "middle_intermediate", + "middle_distal", + "middle_tip", + # Ring + "ring_metacarpal", + "ring_proximal", + "ring_intermediate", + "ring_distal", + "ring_tip", + # Little + "little_metacarpal", + "little_proximal", + "little_intermediate", + "little_distal", + "little_tip", +] diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive.py new file mode 100644 index 000000000000..0f76d31c7b6b --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive.py @@ -0,0 +1,262 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Manus and Vive for teleoperation and interaction. + +.. deprecated:: + :class:`ManusVive` and :class:`ManusViveCfg` are deprecated. + Please use :class:`isaaclab_teleop.IsaacTeleopDevice` and + :class:`isaaclab_teleop.IsaacTeleopCfg` instead. +""" + +from __future__ import annotations + +import contextlib +from collections.abc import Callable +from dataclasses import dataclass + +import numpy as np +from packaging import version + +import carb + +from isaaclab.devices.device_base import DeviceBase, DeviceCfg +from isaaclab.devices.retargeter_base import RetargeterBase +from isaaclab.utils.version import get_isaac_sim_version + +from .common import HAND_JOINT_NAMES +from .xr_cfg import XrCfg + +# For testing purposes, we need to mock the XRCore +XRCore = None + +with contextlib.suppress(ModuleNotFoundError): + from omni.kit.xr.core import XRCore + +import isaaclab.sim as sim_utils + +from .manus_vive_utils import HAND_JOINT_MAP, ManusViveIntegration + + +class ManusVive(DeviceBase): + """Manus gloves and Vive trackers for teleoperation and interaction. + + This device tracks hand joints using Manus gloves and Vive trackers and makes them available as: + + 1. A dictionary of tracking data (when used without retargeters) + 2. Retargeted commands for robot control (when retargeters are provided) + + The user needs to install the Manus SDK and add `{path_to_manus_sdk}/manus_sdk/lib` to `LD_LIBRARY_PATH`. + Data are acquired by `ManusViveIntegration` from `isaaclab_teleop.deprecated.openxr.manus_vive_utils`, including + + * Vive tracker poses in scene frame, calibrated from AVP wrist poses. + * Hand joints calculated from Vive wrist joints and Manus hand joints (relative to wrist). + * Vive trackers are automatically mapped to the left and right wrist joints. + + Raw data format (_get_raw_data output): consistent with :class:`OpenXRDevice`. + Joint names are defined in `HAND_JOINT_MAP` from `isaaclab_teleop.deprecated.openxr.manus_vive_utils`. + + Teleop commands: consistent with :class:`OpenXRDevice`. + + The device tracks the left hand, right hand, head position, or any combination of these + based on the TrackingTarget enum values. When retargeters are provided, the raw tracking + data is transformed into robot control commands suitable for teleoperation. + """ + + TELEOP_COMMAND_EVENT_TYPE = "teleop_command" + + def __init__(self, cfg: ManusViveCfg, retargeters: list[RetargeterBase] | None = None): + """Initialize the Manus+Vive device. + + Args: + cfg: Configuration object for Manus+Vive settings. + retargeters: List of retargeter instances to use for transforming raw tracking data. + """ + import warnings + + warnings.warn( + "ManusVive is deprecated. Please use isaaclab_teleop.IsaacTeleopDevice instead.", + DeprecationWarning, + stacklevel=2, + ) + super().__init__(retargeters) + # Enforce minimum Isaac Sim version (>= 5.1) + isaac_sim_version = get_isaac_sim_version() + if isaac_sim_version < version.parse("5.1"): + raise RuntimeError(f"ManusVive requires Isaac Sim >= 5.1. Detected version: '{isaac_sim_version}'.") + self._xr_cfg = cfg.xr_cfg or XrCfg() + self._additional_callbacks = dict() + self._vc_subscription = ( + XRCore.get_singleton() + .get_message_bus() + .create_subscription_to_pop_by_type( + carb.events.type_from_string(self.TELEOP_COMMAND_EVENT_TYPE), self._on_teleop_command + ) + ) + self._manus_vive = ManusViveIntegration() + + # Initialize dictionaries instead of arrays + default_pose = np.array([0, 0, 0, 1, 0, 0, 0], dtype=np.float32) + self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_headpose = default_pose.copy() + + xr_anchor_prim_path = "/XRAnchor" + sim_utils.create_prim( + xr_anchor_prim_path, + prim_type="Xform", + position=self._xr_cfg.anchor_pos, + orientation=self._xr_cfg.anchor_rot, + ) + carb.settings.get_settings().set_float("/persistent/xr/render/nearPlane", self._xr_cfg.near_plane) + carb.settings.get_settings().set_string("/persistent/xr/anchorMode", "custom anchor") + carb.settings.get_settings().set_string("/xrstage/customAnchor", xr_anchor_prim_path) + + def __del__(self): + """Clean up resources when the object is destroyed. + Properly unsubscribes from the XR message bus to prevent memory leaks + and resource issues when the device is no longer needed. + """ + if hasattr(self, "_vc_subscription") and self._vc_subscription is not None: + self._vc_subscription = None + + # No need to explicitly clean up OpenXR instance as it's managed by NVIDIA Isaac Sim + + def __str__(self) -> str: + """Provide details about the device configuration, tracking settings, + and available gesture commands. + + Returns: + Formatted string with device information. + """ + + msg = f"Manus+Vive Hand Tracking Device: {self.__class__.__name__}\n" + msg += f"\tAnchor Position: {self._xr_cfg.anchor_pos}\n" + msg += f"\tAnchor Rotation: {self._xr_cfg.anchor_rot}\n" + + # Add retargeter information + if self._retargeters: + msg += "\tRetargeters:\n" + for i, retargeter in enumerate(self._retargeters): + msg += f"\t\t{i + 1}. {retargeter.__class__.__name__}\n" + else: + msg += "\tRetargeters: None (raw joint data output)\n" + + # Add available gesture commands + msg += "\t----------------------------------------------\n" + msg += "\tAvailable Gesture Commands:\n" + + # Check which callbacks are registered + start_avail = "START" in self._additional_callbacks + stop_avail = "STOP" in self._additional_callbacks + reset_avail = "RESET" in self._additional_callbacks + + msg += f"\t\tStart Teleoperation: {'✓' if start_avail else '✗'}\n" + msg += f"\t\tStop Teleoperation: {'✓' if stop_avail else '✗'}\n" + msg += f"\t\tReset Environment: {'✓' if reset_avail else '✗'}\n" + + # Add joint tracking information + msg += "\t----------------------------------------------\n" + msg += "\tTracked Joints: 26 XR hand joints including:\n" + msg += "\t\t- Wrist, palm\n" + msg += "\t\t- Thumb (tip, intermediate joints)\n" + msg += "\t\t- Fingers (tip, distal, intermediate, proximal)\n" + + return msg + + def reset(self): + """Reset cached joint and head poses.""" + default_pose = np.array([0, 0, 0, 0, 0, 0, 1], dtype=np.float32) + self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_headpose = default_pose.copy() + + def add_callback(self, key: str, func: Callable): + """Register a callback for a given key. + + Args: + key: The message key to bind ('START', 'STOP', 'RESET'). + func: The function to invoke when the message key is received. + """ + self._additional_callbacks[key] = func + + def _get_raw_data(self) -> dict: + """Get the latest tracking data from Manus and Vive. + + Returns: + Dictionary with TrackingTarget enum keys (HAND_LEFT, HAND_RIGHT, HEAD) containing: + - Left hand joint poses: Dictionary of 26 joints with position and orientation + - Right hand joint poses: Dictionary of 26 joints with position and orientation + - Head pose: Single 7-element array with position and orientation + + Each pose is represented as a 7-element array: [x, y, z, qx, qy, qz, qw] + where the first 3 elements are position and the last 4 are quaternion orientation. + """ + hand_tracking_data = self._manus_vive.get_all_device_data()["manus_gloves"] + result = {"left": self._previous_joint_poses_left, "right": self._previous_joint_poses_right} + for joint, pose in hand_tracking_data.items(): + hand, index = joint.split("_") + joint_name = HAND_JOINT_MAP[int(index)] + result[hand][joint_name] = np.array(pose["position"] + pose["orientation"], dtype=np.float32) + return { + DeviceBase.TrackingTarget.HAND_LEFT: result["left"], + DeviceBase.TrackingTarget.HAND_RIGHT: result["right"], + DeviceBase.TrackingTarget.HEAD: self._calculate_headpose(), + } + + def _calculate_headpose(self) -> np.ndarray: + """Calculate the head pose from OpenXR. + + Returns: + 7-element numpy.ndarray [x, y, z, qx, qy, qz, qw]. + """ + head_device = XRCore.get_singleton().get_input_device("/user/head") + if head_device: + hmd = head_device.get_virtual_world_pose("") + position = hmd.ExtractTranslation() + quat = hmd.ExtractRotationQuat() + quati = quat.GetImaginary() + quatw = quat.GetReal() + + # Store in x, y, z, w order to match our convention + self._previous_headpose = np.array( + [ + position[0], + position[1], + position[2], + quati[0], + quati[1], + quati[2], + quatw, + ] + ) + + return self._previous_headpose + + def _on_teleop_command(self, event: carb.events.IEvent): + """Handle teleoperation command events. + + Args: + event: The XR message-bus event containing a 'message' payload. + """ + msg = event.payload["message"] + + if "start" in msg: + if "START" in self._additional_callbacks: + self._additional_callbacks["START"]() + elif "stop" in msg: + if "STOP" in self._additional_callbacks: + self._additional_callbacks["STOP"]() + elif "reset" in msg: + if "RESET" in self._additional_callbacks: + self._additional_callbacks["RESET"]() + + +@dataclass +class ManusViveCfg(DeviceCfg): + """Configuration for Manus and Vive.""" + + xr_cfg: XrCfg | None = None + class_type: type[DeviceBase] = ManusVive diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive_utils.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive_utils.py new file mode 100644 index 000000000000..a67a738c0c83 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/manus_vive_utils.py @@ -0,0 +1,527 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import contextlib +import logging +from time import time + +import numpy as np + +from isaacsim.core.experimental.utils.app import enable_extension + +# For testing purposes, we need to mock the XRCore +XRCore, XRPoseValidityFlags = None, None + +with contextlib.suppress(ModuleNotFoundError): + from omni.kit.xr.core import XRCore, XRPoseValidityFlags + +from pxr import Gf + +# import logger +logger = logging.getLogger(__name__) + +# Mapping from Manus joint index (0-24) to joint name. Palm (25) is calculated from middle metacarpal and proximal. +HAND_JOINT_MAP = { + # Wrist + 0: "wrist", + # Thumb + 1: "thumb_metacarpal", + 2: "thumb_proximal", + 3: "thumb_distal", + 4: "thumb_tip", + # Index + 5: "index_metacarpal", + 6: "index_proximal", + 7: "index_intermediate", + 8: "index_distal", + 9: "index_tip", + # Middle + 10: "middle_metacarpal", + 11: "middle_proximal", + 12: "middle_intermediate", + 13: "middle_distal", + 14: "middle_tip", + # Ring + 15: "ring_metacarpal", + 16: "ring_proximal", + 17: "ring_intermediate", + 18: "ring_distal", + 19: "ring_tip", + # Little + 20: "little_metacarpal", + 21: "little_proximal", + 22: "little_intermediate", + 23: "little_distal", + 24: "little_tip", + # Palm + 25: "palm", +} + + +class ManusViveIntegration: + def __init__(self): + enable_extension("isaacsim.xr.input_devices") + from isaacsim.xr.input_devices.impl.manus_vive_integration import get_manus_vive_integration + + _manus_vive_integration = get_manus_vive_integration() + self.manus = _manus_vive_integration.manus_tracker + self.vive_tracker = _manus_vive_integration.vive_tracker + self.device_status = _manus_vive_integration.device_status + self.default_pose = {"position": [0, 0, 0], "orientation": [0, 0, 0, 1]} + # 90-degree ccw rotation on Y-axis and 90-degree ccw rotation on Z-axis + self.rot_adjust = Gf.Matrix3d().SetRotate(Gf.Quatd(0.5, Gf.Vec3d(-0.5, 0.5, 0.5))) + self.scene_T_lighthouse_static = None + self._vive_left_id = None + self._vive_right_id = None + self._pairA_candidates = [] # Pair A: WM0->Left, WM1->Right + self._pairB_candidates = [] # Pair B: WM1->Left, WM0->Right + self._pairA_trans_errs = [] + self._pairA_rot_errs = [] + self._pairB_trans_errs = [] + self._pairB_rot_errs = [] + + def get_all_device_data(self) -> dict: + """Get all tracked device data in scene coordinates. + + Returns: + Manus glove joint data and Vive tracker data. + { + 'manus_gloves': { + '{left/right}_{joint_index}': { + 'position': [x, y, z], + 'orientation': [x, y, z, w] + }, + ... + }, + 'vive_trackers': { + '{vive_tracker_id}': { + 'position': [x, y, z], + 'orientation': [x, y, z, w] + }, + ... + } + } + """ + self.update_manus() + self.update_vive() + # Get raw data from trackers + manus_data = self.manus.get_data() + vive_data = self.vive_tracker.get_data() + vive_transformed = self._transform_vive_data(vive_data) + scene_T_wrist = self._get_scene_T_wrist_matrix(vive_transformed) + + return { + "manus_gloves": self._transform_manus_data(manus_data, scene_T_wrist), + "vive_trackers": vive_transformed, + } + + def get_device_status(self) -> dict: + """Get connection and data freshness status for Manus gloves and Vive trackers. + + Returns: + Dictionary containing connection flags and last-data timestamps. + Format: { + 'manus_gloves': {'connected': bool, 'last_data_time': float}, + 'vive_trackers': {'connected': bool, 'last_data_time': float}, + 'left_hand_connected': bool, + 'right_hand_connected': bool + } + """ + return self.device_status + + def update_manus(self): + """Update raw Manus glove data and status flags.""" + self.manus.update() + self.device_status["manus_gloves"]["last_data_time"] = time() + manus_data = self.manus.get_data() + self.device_status["left_hand_connected"] = "left_0" in manus_data + self.device_status["right_hand_connected"] = "right_0" in manus_data + + def update_vive(self): + """Update raw Vive tracker data, and initialize coordinate transformation if it is the first data update.""" + self.vive_tracker.update() + self.device_status["vive_trackers"]["last_data_time"] = time() + try: + # Initialize coordinate transformation from first Vive wrist position + if self.scene_T_lighthouse_static is None: + self._initialize_coordinate_transformation() + except Exception as e: + logger.error(f"Vive tracker update failed: {e}") + + def _initialize_coordinate_transformation(self): + """Initialize the scene to lighthouse coordinate transformation. + + The coordinate transformation is used to transform the wrist pose from lighthouse + coordinate system to isaac sim scene coordinate. It is computed from multiple + frames of AVP/OpenXR wrist pose and Vive wrist pose samples at the beginning of the session. + """ + min_frames = 6 + tolerance = 3.0 + vive_data = self.vive_tracker.get_data() + wm0_id, wm1_id = get_vive_wrist_ids(vive_data) + if wm0_id is None and wm1_id is None: + return + + try: + # Fetch OpenXR wrists + L, R, gloves = None, None, [] + if self.device_status["left_hand_connected"]: + gloves.append("left") + L = get_openxr_wrist_matrix("left") + if self.device_status["right_hand_connected"]: + gloves.append("right") + R = get_openxr_wrist_matrix("right") + + M0, M1, vives = None, None, [] + if wm0_id is not None: + vives.append(wm0_id) + M0 = pose_to_matrix(vive_data[wm0_id]) + if wm1_id is not None: + vives.append(wm1_id) + M1 = pose_to_matrix(vive_data[wm1_id]) + + TL0, TL1, TR0, TR1 = None, None, None, None + # Compute transforms for available pairs + if wm0_id is not None and L is not None: + TL0 = M0.GetInverse() * L + self._pairA_candidates.append(TL0) + if wm1_id is not None and L is not None: + TL1 = M1.GetInverse() * L + self._pairB_candidates.append(TL1) + if wm1_id is not None and R is not None: + TR1 = M1.GetInverse() * R + self._pairA_candidates.append(TR1) + if wm0_id is not None and R is not None: + TR0 = M0.GetInverse() * R + self._pairB_candidates.append(TR0) + + # Per-frame pairing error if both candidates present + if TL0 is not None and TR1 is not None and TL1 is not None and TR0 is not None: + eT, eR = compute_delta_errors(TL0, TR1) + self._pairA_trans_errs.append(eT) + self._pairA_rot_errs.append(eR) + eT, eR = compute_delta_errors(TL1, TR0) + self._pairB_trans_errs.append(eT) + self._pairB_rot_errs.append(eR) + + # Choose a mapping + choose_A = None + if len(self._pairA_candidates) == 0 and len(self._pairB_candidates) >= min_frames: + choose_A = False + elif len(self._pairB_candidates) == 0 and len(self._pairA_candidates) >= min_frames: + choose_A = True + elif len(self._pairA_trans_errs) >= min_frames and len(self._pairB_trans_errs) >= min_frames: + errA = get_pairing_error(self._pairA_trans_errs, self._pairA_rot_errs) + errB = get_pairing_error(self._pairB_trans_errs, self._pairB_rot_errs) + if errA < errB and errA < tolerance: + choose_A = True + elif errB < errA and errB < tolerance: + choose_A = False + elif len(self._pairA_trans_errs) % 10 == 0 or len(self._pairB_trans_errs) % 10 == 0: + print("Computing pairing of Vive trackers with wrists") + logger.info( + f"Pairing Vive trackers with wrists: error of pairing A: {errA}, error of pairing B: {errB}" + ) + if choose_A is None: + return + + if choose_A: + chosen_list = self._pairA_candidates + self._vive_left_id, self._vive_right_id = wm0_id, wm1_id + else: + chosen_list = self._pairB_candidates + self._vive_left_id, self._vive_right_id = wm1_id, wm0_id + + if len(chosen_list) >= min_frames: + cluster = select_mode_cluster(chosen_list) + if len(chosen_list) % 10 == 0: + print( + f"Computing wrist calibration: formed size {len(cluster)} cluster from" + f" {len(chosen_list)} samples" + ) + if len(cluster) >= min_frames // 2: + averaged = average_transforms(cluster) + self.scene_T_lighthouse_static = averaged + print( + f"Wrist calibration computed. Resolved mapping: {self._vive_left_id}->Left," + f" {self._vive_right_id}->Right" + ) + + except Exception as e: + logger.error(f"Failed to initialize coordinate transformation: {e}") + + def _transform_vive_data(self, device_data: dict) -> dict: + """Transform Vive tracker poses to scene coordinates. + + The returned data is in xyzw format. + + Args: + device_data: raw vive tracker poses, with device id as keys. + + Returns: + Vive tracker poses in scene coordinates, with device id as keys. + """ + transformed_data = {} + for joint_name, joint_data in device_data.items(): + transformed_pose = self.default_pose + if self.scene_T_lighthouse_static is not None: + transformed_matrix = pose_to_matrix(joint_data) * self.scene_T_lighthouse_static + transformed_pose = matrix_to_pose(transformed_matrix) + transformed_data[joint_name] = transformed_pose + return transformed_data + + def _get_scene_T_wrist_matrix(self, vive_data: dict) -> dict: + """Compute scene-frame wrist transforms for left and right hands. + + Args: + vive_data: Vive tracker poses expressed in scene coordinates. + + Returns: + Dictionary with 'left' and 'right' keys mapping to 4x4 transforms. + """ + scene_T_wrist = {"left": Gf.Matrix4d().SetIdentity(), "right": Gf.Matrix4d().SetIdentity()} + # 10 cm offset on Y-axis for change in vive tracker position after flipping the palm + Rcorr = Gf.Matrix4d(self.rot_adjust, Gf.Vec3d(0, -0.1, 0)) + if self._vive_left_id is not None: + scene_T_wrist["left"] = Rcorr * pose_to_matrix(vive_data[self._vive_left_id]) + if self._vive_right_id is not None: + scene_T_wrist["right"] = Rcorr * pose_to_matrix(vive_data[self._vive_right_id]) + return scene_T_wrist + + def _transform_manus_data(self, manus_data: dict, scene_T_wrist: dict) -> dict: + """Transform Manus glove joints from wrist-relative to scene coordinates. + + The returned data is in xyzw format. + + Args: + manus_data: Raw Manus joint pose dictionary, wrist-relative. + scene_T_wrist: Dictionary of scene transforms for left and right wrists. + + Returns: + Dictionary of Manus joint poses in scene coordinates. + """ + Rcorr = Gf.Matrix4d(self.rot_adjust, Gf.Vec3d(0, 0, 0)).GetInverse() + transformed_data = {} + for joint_name, joint_data in manus_data.items(): + hand, _ = joint_name.split("_") + joint_mat = Rcorr * pose_to_matrix(joint_data) * scene_T_wrist[hand] + transformed_data[joint_name] = matrix_to_pose(joint_mat) + # Calculate palm with middle metacarpal and proximal + transformed_data["left_25"] = self._get_palm(transformed_data, "left") + transformed_data["right_25"] = self._get_palm(transformed_data, "right") + return transformed_data + + def _get_palm(self, transformed_data: dict, hand: str) -> dict: + """Compute palm pose from middle metacarpal and proximal joints. + + Args: + transformed_data: Manus joint poses in scene coordinates. + hand: The hand side, either 'left' or 'right'. + + Returns: + Pose dictionary with 'position' and 'orientation'. + """ + if f"{hand}_6" not in transformed_data or f"{hand}_7" not in transformed_data: + # Joint data not arrived yet + return self.default_pose + metacarpal = transformed_data[f"{hand}_6"] + proximal = transformed_data[f"{hand}_7"] + pos = (np.array(metacarpal["position"]) + np.array(proximal["position"])) / 2.0 + return {"position": [pos[0], pos[1], pos[2]], "orientation": metacarpal["orientation"]} + + +def compute_delta_errors(a: Gf.Matrix4d, b: Gf.Matrix4d) -> tuple[float, float]: + """Compute translation and rotation error between two transforms. + + Args: + a: The first transform. + b: The second transform. + + Returns: + Tuple containing (translation_error_m, rotation_error_deg). + """ + try: + delta = a * b.GetInverse() + t = delta.ExtractTranslation() + trans_err = float(np.linalg.norm([t[0], t[1], t[2]])) + q = delta.ExtractRotation().GetQuat() + w = float(max(min(q.GetReal(), 1.0), -1.0)) + ang = 2.0 * float(np.arccos(w)) + ang_deg = float(np.degrees(ang)) + if ang_deg > 180.0: + ang_deg = 360.0 - ang_deg + return trans_err, ang_deg + except Exception: + return float("inf"), float("inf") + + +def average_transforms(mats: list[Gf.Matrix4d]) -> Gf.Matrix4d: + """Average rigid transforms across translations and quaternions. + + Args: + mats: The list of 4x4 transforms to average. + + Returns: + Averaged 4x4 transform, or None if the list is empty. + """ + if not mats: + return None + ref_quat = mats[0].ExtractRotation().GetQuat() + ref = np.array([ref_quat.GetReal(), *ref_quat.GetImaginary()]) + acc_q = np.zeros(4, dtype=np.float64) + acc_t = np.zeros(3, dtype=np.float64) + for m in mats: + t = m.ExtractTranslation() + acc_t += np.array([t[0], t[1], t[2]], dtype=np.float64) + q = m.ExtractRotation().GetQuat() + qi = np.array([q.GetReal(), *q.GetImaginary()], dtype=np.float64) + if np.dot(qi, ref) < 0.0: + qi = -qi + acc_q += qi + mean_t = acc_t / float(len(mats)) + norm = np.linalg.norm(acc_q) + if norm <= 1e-12: + quat_avg = Gf.Quatd(1.0, Gf.Vec3d(0.0, 0.0, 0.0)) + else: + qn = acc_q / norm + quat_avg = Gf.Quatd(float(qn[0]), Gf.Vec3d(float(qn[1]), float(qn[2]), float(qn[3]))) + rot3 = Gf.Matrix3d().SetRotate(quat_avg) + trans = Gf.Vec3d(float(mean_t[0]), float(mean_t[1]), float(mean_t[2])) + return Gf.Matrix4d(rot3, trans) + + +def select_mode_cluster( + mats: list[Gf.Matrix4d], trans_thresh_m: float = 0.03, rot_thresh_deg: float = 10.0 +) -> list[Gf.Matrix4d]: + """Select the largest cluster of transforms under proximity thresholds. + + Args: + mats: The list of 4x4 transforms to cluster. + trans_thresh_m: The translation threshold in meters. + rot_thresh_deg: The rotation threshold in degrees. + + Returns: + The largest cluster (mode) of transforms. + """ + if not mats: + return [] + best_cluster: list[Gf.Matrix4d] = [] + for center in mats: + cluster: list[Gf.Matrix4d] = [] + for m in mats: + trans_err, rot_err = compute_delta_errors(m, center) + if trans_err <= trans_thresh_m and rot_err <= rot_thresh_deg: + cluster.append(m) + if len(cluster) > len(best_cluster): + best_cluster = cluster + return best_cluster + + +def get_openxr_wrist_matrix(hand: str) -> Gf.Matrix4d: + """Get the OpenXR wrist matrix if valid. + + Args: + hand: The hand side ('left' or 'right'). + + Returns: + 4x4 transform for the wrist if valid, otherwise None. + """ + hand = hand.lower() + try: + hand_device = XRCore.get_singleton().get_input_device(f"/user/hand/{hand}") + if hand_device is None: + return None + joints = hand_device.get_all_virtual_world_poses() + if "wrist" not in joints: + return None + joint = joints["wrist"] + required = XRPoseValidityFlags.POSITION_VALID | XRPoseValidityFlags.ORIENTATION_VALID + if (joint.validity_flags & required) != required: + return None + return joint.pose_matrix + except Exception as e: + logger.warning(f"OpenXR {hand} wrist fetch failed: {e}") + return None + + +def get_vive_wrist_ids(vive_data: dict) -> tuple[str, str]: + """Get the Vive wrist tracker IDs if available. + + Args: + vive_data: The raw Vive data dictionary. + + Returns: + (wm0_id, wm1_id) if available, otherwise None values. + """ + wm_ids = [k for k in vive_data.keys() if len(k) >= 2 and k[:2] == "WM"] + wm_ids.sort() + if len(wm_ids) >= 2: # Assumes the first two vive trackers are the wrist trackers + return wm_ids[0], wm_ids[1] + if len(wm_ids) == 1: + return wm_ids[0], None + return None, None + + +def pose_to_matrix(pose: dict) -> Gf.Matrix4d: + """Convert a pose dictionary to a 4x4 transform matrix. + + pose is a dictionary with 'position' and 'orientation' fields. + position is a tuple of 3 floats. + orientation is a tuple of 4 floats. (x, y, z, w) + + Args: + pose: The pose with 'position' and 'orientation' fields. + + Returns: + A 4x4 transform representing the pose. + """ + pos, ori = pose["position"], pose["orientation"] + # ori is (x, y, z, w) - Gf.Quatd takes (real, imaginary_vec) + quat = Gf.Quatd(ori[3], Gf.Vec3d(ori[0], ori[1], ori[2])) + rot = Gf.Matrix3d().SetRotate(quat) + trans = Gf.Vec3d(pos[0], pos[1], pos[2]) + return Gf.Matrix4d(rot, trans) + + +def matrix_to_pose(matrix: Gf.Matrix4d) -> dict: + """Convert a 4x4 transform matrix to a pose dictionary. + + pose is a dictionary with 'position' and 'orientation' fields. + position is a tuple of 3 floats. + orientation is a tuple of 4 floats. (x, y, z, w) + + Args: + matrix: The 4x4 transform matrix to convert. + + Returns: + Pose dictionary with 'position' and 'orientation'. + """ + pos = matrix.ExtractTranslation() + rot = matrix.ExtractRotation() + quat = rot.GetQuat() + return { + "position": [pos[0], pos[1], pos[2]], + "orientation": [quat.GetImaginary()[0], quat.GetImaginary()[1], quat.GetImaginary()[2], quat.GetReal()], + } + + +def get_pairing_error(trans_errs: list, rot_errs: list) -> float: + """Compute a scalar pairing error from translation and rotation errors. + + Args: + trans_errs: The list of translation errors across samples. + rot_errs: The list of rotation errors across samples. + + Returns: + The weighted sum of medians of translation and rotation errors. + """ + + def _median(values: list) -> float: + try: + return float(np.median(np.asarray(values, dtype=np.float64))) + except Exception: + return float("inf") + + return _median(trans_errs) + 0.01 * _median(rot_errs) diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/openxr_device.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/openxr_device.py new file mode 100644 index 000000000000..3135e8e5bb4d --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/openxr_device.py @@ -0,0 +1,528 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""OpenXR-powered device for teleoperation and interaction. + +.. deprecated:: + :class:`OpenXRDevice` and :class:`OpenXRDeviceCfg` are deprecated. + Please use :class:`isaaclab_teleop.IsaacTeleopDevice` and + :class:`isaaclab_teleop.IsaacTeleopCfg` instead. +""" + +from __future__ import annotations + +import contextlib +import logging +from collections.abc import Callable +from dataclasses import dataclass +from typing import Any + +import numpy as np + +import carb + +# import logger +logger = logging.getLogger(__name__) + +from isaaclab.devices.device_base import DeviceBase, DeviceCfg +from isaaclab.devices.retargeter_base import RetargeterBase + +from .common import HAND_JOINT_NAMES +from .xr_anchor_utils import XrAnchorSynchronizer +from .xr_cfg import XrCfg + +# For testing purposes, we need to mock the XRCore, XRPoseValidityFlags classes +XRCore = None +XRPoseValidityFlags = None +XRCoreEventType = None + +with contextlib.suppress(ModuleNotFoundError): + from omni.kit.xr.core import XRCore, XRCoreEventType, XRPoseValidityFlags + +import isaaclab.sim as sim_utils + + +class OpenXRDevice(DeviceBase): + """An OpenXR-powered device for teleoperation and interaction. + + This device tracks hand joints using OpenXR and makes them available as: + + 1. A dictionary of tracking data (when used without retargeters) + 2. Retargeted commands for robot control (when retargeters are provided) + + Raw data format (_get_raw_data output): + + * A dictionary with keys matching TrackingTarget enum values (HAND_LEFT, HAND_RIGHT, HEAD) + * Each hand tracking entry contains a dictionary of joint poses + * Each joint pose is a 7D vector (x, y, z, qw, qx, qy, qz) in meters and quaternion units + * Joint names are defined in HAND_JOINT_NAMES from isaaclab_teleop.deprecated.openxr.common + * Supported joints include palm, wrist, and joints for thumb, index, middle, ring and little fingers + + Teleop commands: + The device responds to several teleop commands that can be subscribed to via add_callback(): + + * "START": Resume hand tracking data flow + * "STOP": Pause hand tracking data flow + * "RESET": Reset the tracking and signal simulation reset + + The device tracks the left hand, right hand, head position, or any combination of these + based on the TrackingTarget enum values. When retargeters are provided, the raw tracking + data is transformed into robot control commands suitable for teleoperation. + """ + + TELEOP_COMMAND_EVENT_TYPE = "teleop_command" + + def __init__( + self, + cfg: OpenXRDeviceCfg, + retargeters: list[RetargeterBase] | None = None, + ): + """Initialize the OpenXR device. + + Args: + cfg: Configuration object for OpenXR settings. + retargeters: List of retargeter instances to use for transforming raw tracking data. + """ + import warnings + + warnings.warn( + "OpenXRDevice is deprecated. Please use isaaclab_teleop.IsaacTeleopDevice instead.", + DeprecationWarning, + stacklevel=2, + ) + super().__init__(retargeters) + self._xr_cfg = cfg.xr_cfg or XrCfg() + self._additional_callbacks = dict() + self._xr_core = XRCore.get_singleton() if XRCore is not None else None + self._vc_subscription = ( + self._xr_core.get_message_bus().create_subscription_to_pop_by_type( + carb.events.type_from_string(self.TELEOP_COMMAND_EVENT_TYPE), self._on_teleop_command + ) + if self._xr_core is not None + else None + ) + + # Initialize dictionaries instead of arrays + default_pose = np.array([0, 0, 0, 1, 0, 0, 0], dtype=np.float32) + self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_headpose = default_pose.copy() + + if self._xr_cfg.anchor_prim_path is not None: + anchor_path = self._xr_cfg.anchor_prim_path + if anchor_path.endswith("/"): + anchor_path = anchor_path[:-1] + self._xr_anchor_headset_path = f"{anchor_path}/XRAnchor" + else: + self._xr_anchor_headset_path = "/World/XRAnchor" + + # Only create the anchor prim if it doesn't already exist (supports multiple devices sharing anchor) + stage = sim_utils.get_current_stage() + if not stage.GetPrimAtPath(self._xr_anchor_headset_path).IsValid(): + sim_utils.create_prim( + self._xr_anchor_headset_path, + prim_type="Xform", + position=self._xr_cfg.anchor_pos, + orientation=self._xr_cfg.anchor_rot, + ) + + if hasattr(carb, "settings"): + carb.settings.get_settings().set_float("/persistent/xr/render/nearPlane", self._xr_cfg.near_plane) + carb.settings.get_settings().set_string("/persistent/xr/anchorMode", "custom anchor") + carb.settings.get_settings().set_string("/xrstage/customAnchor", self._xr_anchor_headset_path) + + # Button binding support + self.__button_subscriptions: dict[str, dict] = {} + + # Optional anchor synchronizer + self._anchor_sync: XrAnchorSynchronizer | None = None + if self._xr_core is not None and self._xr_cfg.anchor_prim_path is not None: + try: + self._anchor_sync = XrAnchorSynchronizer( + xr_core=self._xr_core, + xr_cfg=self._xr_cfg, + xr_anchor_headset_path=self._xr_anchor_headset_path, + ) + # Subscribe to pre_sync_update to keep anchor in sync + if XRCoreEventType is not None: + self._xr_pre_sync_update_subscription = ( + self._xr_core.get_message_bus().create_subscription_to_pop_by_type( + XRCoreEventType.pre_sync_update, + lambda _: self._anchor_sync.sync_headset_to_anchor(), + name="isaaclab_xr_pre_sync_update", + ) + ) + except Exception as e: + logger.warning(f"XR: Failed to initialize anchor synchronizer: {e}") + + # Default convenience binding: toggle anchor rotation with right controller 'a' button + with contextlib.suppress(Exception): + self._bind_button_press( + "/user/hand/right", + "a", + "isaaclab_right_a", + lambda ev: self._toggle_anchor_rotation(), + ) + + def __del__(self): + """Clean up resources when the object is destroyed. + + Properly unsubscribes from the XR message bus to prevent memory leaks + and resource issues when the device is no longer needed. + """ + if hasattr(self, "_vc_subscription") and self._vc_subscription is not None: + self._vc_subscription = None + if hasattr(self, "_xr_pre_sync_update_subscription") and self._xr_pre_sync_update_subscription is not None: + self._xr_pre_sync_update_subscription = None + # clear button subscriptions + if hasattr(self, "__button_subscriptions"): + self._unbind_all_buttons() + + # No need to explicitly clean up OpenXR instance as it's managed by NVIDIA Isaac Sim + + def __str__(self) -> str: + """Returns a string containing information about the OpenXR hand tracking device. + + This provides details about the device configuration, tracking settings, + and available gesture commands. + + Returns: + Formatted string with device information + """ + + msg = f"OpenXR Hand Tracking Device: {self.__class__.__name__}\n" + msg += f"\tAnchor Position: {self._xr_cfg.anchor_pos}\n" + msg += f"\tAnchor Rotation: {self._xr_cfg.anchor_rot}\n" + if self._xr_cfg.anchor_prim_path is not None: + msg += f"\tAnchor Prim Path: {self._xr_cfg.anchor_prim_path} (Dynamic Anchoring)\n" + else: + msg += "\tAnchor Mode: Static (Root Level)\n" + + # Add retargeter information + if self._retargeters: + msg += "\tRetargeters:\n" + for i, retargeter in enumerate(self._retargeters): + msg += f"\t\t{i + 1}. {retargeter.__class__.__name__}\n" + else: + msg += "\tRetargeters: None (raw joint data output)\n" + + # Add available gesture commands + msg += "\t----------------------------------------------\n" + msg += "\tAvailable Gesture Commands:\n" + + # Check which callbacks are registered + start_avail = "START" in self._additional_callbacks + stop_avail = "STOP" in self._additional_callbacks + reset_avail = "RESET" in self._additional_callbacks + + msg += f"\t\tStart Teleoperation: {'✓' if start_avail else '✗'}\n" + msg += f"\t\tStop Teleoperation: {'✓' if stop_avail else '✗'}\n" + msg += f"\t\tReset Environment: {'✓' if reset_avail else '✗'}\n" + + # Add joint tracking information + msg += "\t----------------------------------------------\n" + msg += "\tTracked Joints: All 26 XR hand joints including:\n" + msg += "\t\t- Wrist, palm\n" + msg += "\t\t- Thumb (tip, intermediate joints)\n" + msg += "\t\t- Fingers (tip, distal, intermediate, proximal)\n" + + return msg + + """ + Operations + """ + + def reset(self): + default_pose = np.array([0, 0, 0, 1, 0, 0, 0], dtype=np.float32) + self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} + self._previous_headpose = default_pose.copy() + if hasattr(self, "_anchor_sync") and self._anchor_sync is not None: + self._anchor_sync.reset() + + def add_callback(self, key: str, func: Callable): + """Add additional functions to bind to client messages. + + Args: + key: The message type to bind to. Valid values are "START", "STOP", and "RESET". + func: The function to call when the message is received. The callback function should not + take any arguments. + """ + self._additional_callbacks[key] = func + + def _get_raw_data(self) -> Any: + """Get the latest tracking data from the OpenXR runtime. + + Returns: + Dictionary with TrackingTarget enum keys (HAND_LEFT, HAND_RIGHT, HEAD) containing: + - Left hand joint poses: Dictionary of 26 joints with position and orientation + - Right hand joint poses: Dictionary of 26 joints with position and orientation + - Head pose: Single 7-element array with position and orientation + + Each pose is represented as a 7-element array: [x, y, z, qw, qx, qy, qz] + where the first 3 elements are position and the last 4 are quaternion orientation. + """ + data = {} + + if RetargeterBase.Requirement.HAND_TRACKING in self._required_features: + data[DeviceBase.TrackingTarget.HAND_LEFT] = self._calculate_joint_poses( + XRCore.get_singleton().get_input_device("/user/hand/left"), + self._previous_joint_poses_left, + ) + data[DeviceBase.TrackingTarget.HAND_RIGHT] = self._calculate_joint_poses( + XRCore.get_singleton().get_input_device("/user/hand/right"), + self._previous_joint_poses_right, + ) + + if RetargeterBase.Requirement.HEAD_TRACKING in self._required_features: + data[DeviceBase.TrackingTarget.HEAD] = self._calculate_headpose() + + if RetargeterBase.Requirement.MOTION_CONTROLLER in self._required_features: + # Optionally include motion controller pose+inputs if devices are available + try: + left_dev = XRCore.get_singleton().get_input_device("/user/hand/left") + right_dev = XRCore.get_singleton().get_input_device("/user/hand/right") + left_ctrl = self._query_controller(left_dev) if left_dev is not None else np.array([]) + right_ctrl = self._query_controller(right_dev) if right_dev is not None else np.array([]) + if left_ctrl.size: + data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] = left_ctrl + if right_ctrl.size: + data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] = right_ctrl + except Exception: + # Ignore controller data if XRCore/controller APIs are unavailable + pass + + return data + + """ + Internal helpers. + """ + + def _calculate_joint_poses( + self, hand_device: Any, previous_joint_poses: dict[str, np.ndarray] + ) -> dict[str, np.ndarray]: + """Calculate and update joint poses for a hand device. + + This function retrieves the current joint poses from the OpenXR hand device and updates + the previous joint poses with the new data. If a joint's position or orientation is not + valid, it will use the previous values. + + Args: + hand_device: The OpenXR input device for a hand (/user/hand/left or /user/hand/right). + previous_joint_poses: Dictionary mapping joint names to their previous poses. + Each pose is a 7-element array: [x, y, z, qw, qx, qy, qz]. + + Returns: + Updated dictionary of joint poses with the same structure as previous_joint_poses. + Each pose is represented as a 7-element numpy array: [x, y, z, qw, qx, qy, qz] + where the first 3 elements are position and the last 4 are quaternion orientation. + """ + if hand_device is None: + return previous_joint_poses + + joint_poses = hand_device.get_all_virtual_world_poses() + + # Update each joint that is present in the current data + for joint_name, joint_pose in joint_poses.items(): + if joint_name in HAND_JOINT_NAMES: + # Extract translation and rotation + if joint_pose.validity_flags & XRPoseValidityFlags.POSITION_VALID: + position = joint_pose.pose_matrix.ExtractTranslation() + else: + position = previous_joint_poses[joint_name][:3] + + if joint_pose.validity_flags & XRPoseValidityFlags.ORIENTATION_VALID: + quat = joint_pose.pose_matrix.ExtractRotationQuat() + quati = quat.GetImaginary() + quatw = quat.GetReal() + else: + quati = previous_joint_poses[joint_name][3:6] + quatw = previous_joint_poses[joint_name][6] + + # Directly update the dictionary with new data + previous_joint_poses[joint_name] = np.array( + [position[0], position[1], position[2], quati[0], quati[1], quati[2], quatw], dtype=np.float32 + ) + + # No need for conversion, just return the updated dictionary + return previous_joint_poses + + def _calculate_headpose(self) -> np.ndarray: + """Calculate the head pose from OpenXR. + + Returns: + numpy.ndarray: 7-element array containing head position (xyz) and orientation (wxyz) + """ + head_device = XRCore.get_singleton().get_input_device("/user/head") + if head_device: + hmd = head_device.get_virtual_world_pose("") + position = hmd.ExtractTranslation() + quat = hmd.ExtractRotationQuat() + quati = quat.GetImaginary() + quatw = quat.GetReal() + + # Store in x, y, z, w order to match our convention + self._previous_headpose = np.array( + [ + position[0], + position[1], + position[2], + quati[0], + quati[1], + quati[2], + quatw, + ] + ) + + return self._previous_headpose + + # ----------------------------- + # Controller button binding utilities + # ----------------------------- + def _bind_button_press( + self, + device_path: str, + button_name: str, + event_name: str, + on_button_press: Callable[[carb.events.IEvent], None], + ) -> None: + if self._xr_core is None: + logger.warning("XR core not available; skipping button binding") + return + + sub_key = f"{device_path}/{button_name}" + self.__button_subscriptions[sub_key] = {} + + def try_emit_button_events(): + if self.__button_subscriptions[sub_key].get("generator"): + return + device = self._xr_core.get_input_device(device_path) + if not device: + return + names = {str(n) for n in (device.get_input_names() or ())} + if button_name not in names: + return + gen = device.bind_event_generator(button_name, event_name, ("press",)) + if gen is not None: + logger.info(f"XR: Bound event generator for {sub_key}, {event_name}") + self.__button_subscriptions[sub_key]["generator"] = gen + + def on_inputs_change(_ev: carb.events.IEvent) -> None: + try_emit_button_events() + + def on_disable(_ev: carb.events.IEvent) -> None: + self.__button_subscriptions[sub_key]["generator"] = None + + message_bus = self._xr_core.get_message_bus() + event_suffix = device_path.strip("/").replace("/", "_") + self.__button_subscriptions[sub_key]["press_sub"] = message_bus.create_subscription_to_pop_by_type( + carb.events.type_from_string(f"{event_name}.press"), on_button_press + ) + self.__button_subscriptions[sub_key]["inputs_change_sub"] = message_bus.create_subscription_to_pop_by_type( + carb.events.type_from_string(f"xr_input.{event_suffix}.inputs_change"), on_inputs_change + ) + self.__button_subscriptions[sub_key]["disable_sub"] = message_bus.create_subscription_to_pop_by_type( + carb.events.type_from_string(f"xr_input.{event_suffix}.disable"), on_disable + ) + try_emit_button_events() + + def _unbind_all_buttons(self) -> None: + for sub_key, subs in self.__button_subscriptions.items(): + if "generator" in subs: + subs["generator"] = None + for key in ["press_sub", "inputs_change_sub", "disable_sub"]: + if key in subs: + subs[key] = None + self.__button_subscriptions.clear() + logger.info("XR: Unbound all button event handlers") + + def _toggle_anchor_rotation(self): + if self._anchor_sync is not None: + self._anchor_sync.toggle_anchor_rotation() + + def _query_controller(self, input_device) -> np.ndarray: + """Query motion controller pose and inputs as a 2x7 array. + + Row 0 (POSE): [x, y, z, qx, qy, qz, qw] + Row 1 (INPUTS): [thumbstick_x, thumbstick_y, trigger, squeeze, button_0, button_1, padding] + """ + if input_device is None: + return np.array([]) + + pose = input_device.get_virtual_world_pose() + position = pose.ExtractTranslation() + quat = pose.ExtractRotationQuat() + + thumbstick_x = 0.0 + thumbstick_y = 0.0 + trigger = 0.0 + squeeze = 0.0 + button_0 = 0.0 + button_1 = 0.0 + + if input_device.has_input_gesture("thumbstick", "x"): + thumbstick_x = float(input_device.get_input_gesture_value("thumbstick", "x")) + if input_device.has_input_gesture("thumbstick", "y"): + thumbstick_y = float(input_device.get_input_gesture_value("thumbstick", "y")) + if input_device.has_input_gesture("trigger", "value"): + trigger = float(input_device.get_input_gesture_value("trigger", "value")) + if input_device.has_input_gesture("squeeze", "value"): + squeeze = float(input_device.get_input_gesture_value("squeeze", "value")) + + # Determine which button pair exists on this device + if input_device.has_input_gesture("x", "click") or input_device.has_input_gesture("y", "click"): + if input_device.has_input_gesture("x", "click"): + button_0 = float(input_device.get_input_gesture_value("x", "click")) + if input_device.has_input_gesture("y", "click"): + button_1 = float(input_device.get_input_gesture_value("y", "click")) + else: + if input_device.has_input_gesture("a", "click"): + button_0 = float(input_device.get_input_gesture_value("a", "click")) + if input_device.has_input_gesture("b", "click"): + button_1 = float(input_device.get_input_gesture_value("b", "click")) + + pose_row = [ + position[0], + position[1], + position[2], + quat.GetImaginary()[0], + quat.GetImaginary()[1], + quat.GetImaginary()[2], + quat.GetReal(), + ] + + input_row = [ + thumbstick_x, + thumbstick_y, + trigger, + squeeze, + button_0, + button_1, + 0.0, + ] + + return np.array([pose_row, input_row], dtype=np.float32) + + def _on_teleop_command(self, event: carb.events.IEvent): + msg = event.payload["message"] + + if "start" in msg: + if "START" in self._additional_callbacks: + self._additional_callbacks["START"]() + elif "stop" in msg: + if "STOP" in self._additional_callbacks: + self._additional_callbacks["STOP"]() + elif "reset" in msg: + if "RESET" in self._additional_callbacks: + self._additional_callbacks["RESET"]() + self.reset() + + +@dataclass +class OpenXRDeviceCfg(DeviceCfg): + """Configuration for OpenXR devices.""" + + xr_cfg: XrCfg | None = None + class_type: type[DeviceBase] = OpenXRDevice diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.py new file mode 100644 index 000000000000..0089a30899ac --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +"""Retargeters for mapping input device data to robot commands.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.pyi b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.pyi new file mode 100644 index 000000000000..69a907d98ce1 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/__init__.pyi @@ -0,0 +1,56 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "GR1T2Retargeter", + "GR1T2RetargeterCfg", + "G1LowerBodyStandingRetargeter", + "G1LowerBodyStandingRetargeterCfg", + "G1LowerBodyStandingMotionControllerRetargeter", + "G1LowerBodyStandingMotionControllerRetargeterCfg", + "UnitreeG1Retargeter", + "UnitreeG1RetargeterCfg", + "G1TriHandUpperBodyMotionControllerGripperRetargeter", + "G1TriHandUpperBodyMotionControllerGripperRetargeterCfg", + "G1TriHandUpperBodyMotionControllerRetargeter", + "G1TriHandUpperBodyMotionControllerRetargeterCfg", + "G1TriHandUpperBodyRetargeter", + "G1TriHandUpperBodyRetargeterCfg", + "GripperRetargeter", + "GripperRetargeterCfg", + "Se3AbsRetargeter", + "Se3AbsRetargeterCfg", + "Se3RelRetargeter", + "Se3RelRetargeterCfg", +] + +from .humanoid.fourier.gr1t2_retargeter import GR1T2Retargeter, GR1T2RetargeterCfg +from .humanoid.unitree.g1_lower_body_standing import ( + G1LowerBodyStandingRetargeter, + G1LowerBodyStandingRetargeterCfg, +) +from .humanoid.unitree.g1_motion_controller_locomotion import ( + G1LowerBodyStandingMotionControllerRetargeter, + G1LowerBodyStandingMotionControllerRetargeterCfg, +) +from .humanoid.unitree.inspire.g1_upper_body_retargeter import ( + UnitreeG1Retargeter, + UnitreeG1RetargeterCfg, +) +from .humanoid.unitree.trihand.g1_upper_body_motion_ctrl_gripper import ( + G1TriHandUpperBodyMotionControllerGripperRetargeter, + G1TriHandUpperBodyMotionControllerGripperRetargeterCfg, +) +from .humanoid.unitree.trihand.g1_upper_body_motion_ctrl_retargeter import ( + G1TriHandUpperBodyMotionControllerRetargeter, + G1TriHandUpperBodyMotionControllerRetargeterCfg, +) +from .humanoid.unitree.trihand.g1_upper_body_retargeter import ( + G1TriHandUpperBodyRetargeter, + G1TriHandUpperBodyRetargeterCfg, +) +from .manipulator.gripper_retargeter import GripperRetargeter, GripperRetargeterCfg +from .manipulator.se3_abs_retargeter import Se3AbsRetargeter, Se3AbsRetargeterCfg +from .manipulator.se3_rel_retargeter import Se3RelRetargeter, Se3RelRetargeterCfg diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml new file mode 100644 index 000000000000..1e203d11e7e8 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_left_dexpilot.yml @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - GR1T2_fourier_hand_6dof_L_thumb_distal_link + - GR1T2_fourier_hand_6dof_L_index_intermediate_link + - GR1T2_fourier_hand_6dof_L_middle_intermediate_link + - GR1T2_fourier_hand_6dof_L_ring_intermediate_link + - GR1T2_fourier_hand_6dof_L_pinky_intermediate_link + low_pass_alpha: 0.2 + scaling_factor: 1.2 + target_joint_names: + - L_index_proximal_joint + - L_middle_proximal_joint + - L_pinky_proximal_joint + - L_ring_proximal_joint + - L_index_intermediate_joint + - L_middle_intermediate_joint + - L_pinky_intermediate_joint + - L_ring_intermediate_joint + - L_thumb_proximal_yaw_joint + - L_thumb_proximal_pitch_joint + - L_thumb_distal_joint + - L_thumb_distal_joint + type: DexPilot + urdf_path: /tmp/GR1_T2_left_hand.urdf + wrist_link_name: l_hand_base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml new file mode 100644 index 000000000000..f67041bd9b60 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/data/configs/dex-retargeting/fourier_hand_right_dexpilot.yml @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - GR1T2_fourier_hand_6dof_R_thumb_distal_link + - GR1T2_fourier_hand_6dof_R_index_intermediate_link + - GR1T2_fourier_hand_6dof_R_middle_intermediate_link + - GR1T2_fourier_hand_6dof_R_ring_intermediate_link + - GR1T2_fourier_hand_6dof_R_pinky_intermediate_link + low_pass_alpha: 0.2 + scaling_factor: 1.2 + target_joint_names: + - R_index_proximal_joint + - R_middle_proximal_joint + - R_pinky_proximal_joint + - R_ring_proximal_joint + - R_index_intermediate_joint + - R_middle_intermediate_joint + - R_pinky_intermediate_joint + - R_ring_intermediate_joint + - R_thumb_proximal_yaw_joint + - R_thumb_proximal_pitch_joint + - R_thumb_distal_joint + type: DexPilot + urdf_path: /tmp/GR1_T2_right_hand.urdf + wrist_link_name: r_hand_base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1_t2_dex_retargeting_utils.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1_t2_dex_retargeting_utils.py new file mode 100644 index 000000000000..e832f441e403 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1_t2_dex_retargeting_utils.py @@ -0,0 +1,262 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import os + +import numpy as np +import torch +import yaml +from dex_retargeting.retargeting_config import RetargetingConfig +from scipy.spatial.transform import Rotation as R + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +# import logger +logger = logging.getLogger(__name__) + +# The index to map the OpenXR hand joints to the hand joints used +# in Dex-retargeting. +_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25] + +# The transformation matrices to convert hand pose to canonical view. +_OPERATOR2MANO_RIGHT = np.array( + [ + [0, -1, 0], + [-1, 0, 0], + [0, 0, -1], + ] +) + +_OPERATOR2MANO_LEFT = np.array( + [ + [0, -1, 0], + [-1, 0, 0], + [0, 0, -1], + ] +) + +_LEFT_HAND_JOINT_NAMES = [ + "L_index_proximal_joint", + "L_index_intermediate_joint", + "L_middle_proximal_joint", + "L_middle_intermediate_joint", + "L_pinky_proximal_joint", + "L_pinky_intermediate_joint", + "L_ring_proximal_joint", + "L_ring_intermediate_joint", + "L_thumb_proximal_yaw_joint", + "L_thumb_proximal_pitch_joint", + "L_thumb_distal_joint", +] + + +_RIGHT_HAND_JOINT_NAMES = [ + "R_index_proximal_joint", + "R_index_intermediate_joint", + "R_middle_proximal_joint", + "R_middle_intermediate_joint", + "R_pinky_proximal_joint", + "R_pinky_intermediate_joint", + "R_ring_proximal_joint", + "R_ring_intermediate_joint", + "R_thumb_proximal_yaw_joint", + "R_thumb_proximal_pitch_joint", + "R_thumb_distal_joint", +] + + +class GR1TR2DexRetargeting: + """A class for hand retargeting with GR1Fourier. + + Handles retargeting of OpenXRhand tracking data to GR1T2 robot hand joint angles. + """ + + def __init__( + self, + hand_joint_names: list[str], + right_hand_config_filename: str = "fourier_hand_right_dexpilot.yml", + left_hand_config_filename: str = "fourier_hand_left_dexpilot.yml", + left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_left_hand.urdf", + right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/GR1T2_assets/GR1_T2_right_hand.urdf", + ): + """Initialize the hand retargeting. + + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + + # Download urdf files from aws + local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True) + local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True) + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + # Update the YAML files with the correct URDF paths + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + logger.info("[GR1T2DexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. + + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + logger.warning(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + logger.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i in range(len(_HAND_JOINTS_INDEX)): + joint = hand_joints[_HAND_JOINTS_INDEX[i]] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. + joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand data is in xyzw order, matching scipy's convention + wrist_rot = R.from_quat(xr_wrist_quat).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. + + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. + + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. + + Args: + right_hand_poses: Dictionary of right hand joint poses + + Returns: + Retargeted joint angles for right hand + """ + if right_hand_poses is not None: + right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT) + else: + right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES)) + return right_hand_q diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py new file mode 100644 index 000000000000..29698d4be696 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py @@ -0,0 +1,168 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + +# This import exception is suppressed because gr1_t2_dex_retargeting_utils depends +# on pinocchio which is not available on Windows. +with contextlib.suppress(Exception): + from .gr1_t2_dex_retargeting_utils import GR1TR2DexRetargeting + + +class GR1T2Retargeter(RetargeterBase): + """Retargets OpenXR hand tracking data to GR1T2 hand end-effector commands. + + This retargeter maps hand tracking data from OpenXR to joint commands for the GR1T2 robot's hands. + It handles both left and right hands, converting poses of the hands in OpenXR format joint angles + for the GR1T2 robot's hands. + """ + + def __init__( + self, + cfg: GR1T2RetargeterCfg, + ): + """Initialize the GR1T2 hand retargeter. + + Args: + enable_visualization: If True, visualize tracked hand joints + num_open_xr_hand_joints: Number of joints tracked by OpenXR + device: PyTorch device for computations + hand_joint_names: List of robot hand joint names + """ + + super().__init__(cfg) + self._hand_joint_names = cfg.hand_joint_names + self._hands_controller = GR1TR2DexRetargeting(self._hand_joint_names) + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints + self._sim_device = cfg.sim_device + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.005, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector commands. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + + Returns: + tuple containing: + Left wrist pose + Right wrist pose in USD frame + Retargeted hand joint angles + """ + + # Access the left and right hand data using the enum key + left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT] + right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT] + + left_wrist = left_hand_poses.get("wrist") + right_wrist = right_hand_poses.get("wrist") + + if self._enable_visualization: + joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + + joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) + joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + + # Create array of zeros with length matching number of joint names + left_hands_pos = self._hands_controller.compute_left(left_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] + left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + left_retargeted_hand_joints[indexes] = left_hands_pos + left_hand_joints = left_retargeted_hand_joints + + right_hands_pos = self._hands_controller.compute_right(right_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] + right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + right_retargeted_hand_joints[indexes] = right_hands_pos + right_hand_joints = right_retargeted_hand_joints + retargeted_hand_joints = left_hand_joints + right_hand_joints + + # Convert numpy arrays to tensors and concatenate them + left_wrist_tensor = torch.tensor(left_wrist, dtype=torch.float32, device=self._sim_device) + right_wrist_tensor = torch.tensor(self._retarget_abs(right_wrist), dtype=torch.float32, device=self._sim_device) + hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) + + # Combine all tensors into a single tensor + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + wrist: Wrist pose data from OpenXR + + Returns: + Retargeted wrist pose in USD control frame + """ + + # Convert wrist data in openxr frame to usd control frame + + # Create pose object for openxr_right_wrist_in_world + # Note: The pose utils require torch tensors + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + openxr_right_wrist_in_world = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + + # The usd control frame is 180 degrees rotated around z axis wrt to the openxr frame + # This was determined through trial and error + zero_pos = torch.zeros(3, dtype=torch.float32) + # 180 degree rotation around z axis + z_axis_rot_quat = torch.tensor([0, 0, 1, 0], dtype=torch.float32) + usd_right_roll_link_in_openxr_right_wrist = PoseUtils.make_pose( + zero_pos, PoseUtils.matrix_from_quat(z_axis_rot_quat) + ) + + # Convert wrist pose in openxr frame to usd control frame + usd_right_roll_link_in_world = PoseUtils.pose_in_A_to_pose_in_B( + usd_right_roll_link_in_openxr_right_wrist, openxr_right_wrist_in_world + ) + + # extract position and rotation + usd_right_roll_link_in_world_pos, usd_right_roll_link_in_world_mat = PoseUtils.unmake_pose( + usd_right_roll_link_in_world + ) + usd_right_roll_link_in_world_quat = PoseUtils.quat_from_matrix(usd_right_roll_link_in_world_mat) + + return np.concatenate([usd_right_roll_link_in_world_pos, usd_right_roll_link_in_world_quat]) + + +@dataclass +class GR1T2RetargeterCfg(RetargeterCfg): + """Configuration for the GR1T2 retargeter.""" + + enable_visualization: bool = False + num_open_xr_hand_joints: int = 100 + hand_joint_names: list[str] | None = None # List of robot hand joint names + retargeter_type: type[RetargeterBase] = GR1T2Retargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py new file mode 100644 index 000000000000..1692b4a86d9b --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_lower_body_standing.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import torch + +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg + + +class G1LowerBodyStandingRetargeter(RetargeterBase): + """Provides lower body standing commands for the G1 robot.""" + + def __init__(self, cfg: G1LowerBodyStandingRetargeterCfg): + """Initialize the retargeter.""" + super().__init__(cfg) + self.cfg = cfg + + def retarget(self, data: dict) -> torch.Tensor: + return torch.tensor([0.0, 0.0, 0.0, self.cfg.hip_height], device=self.cfg.sim_device) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + # This retargeter does not consume any device data + return [] + + +@dataclass +class G1LowerBodyStandingRetargeterCfg(RetargeterCfg): + """Configuration for the G1 lower body standing retargeter.""" + + hip_height: float = 0.72 + """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation.""" + retargeter_type: type[RetargeterBase] = G1LowerBodyStandingRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_motion_controller_locomotion.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_motion_controller_locomotion.py new file mode 100644 index 000000000000..8acfe0abc027 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/g1_motion_controller_locomotion.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import torch + +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.sim import SimulationContext + + +class G1LowerBodyStandingMotionControllerRetargeter(RetargeterBase): + """Provides lower body standing commands for the G1 robot.""" + + def __init__(self, cfg: G1LowerBodyStandingMotionControllerRetargeterCfg): + """Initialize the retargeter.""" + super().__init__(cfg) + self.cfg = cfg + self._hip_height = cfg.hip_height + + def retarget(self, data: dict) -> torch.Tensor: + left_thumbstick_x = 0.0 + left_thumbstick_y = 0.0 + right_thumbstick_x = 0.0 + right_thumbstick_y = 0.0 + + # Get controller data using enums + if ( + DeviceBase.TrackingTarget.CONTROLLER_LEFT in data + and data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] is not None + ): + left_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_LEFT] + if len(left_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + left_inputs = left_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + if len(left_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value: + left_thumbstick_x = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value] + left_thumbstick_y = left_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value] + + if ( + DeviceBase.TrackingTarget.CONTROLLER_RIGHT in data + and data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] is not None + ): + right_controller_data = data[DeviceBase.TrackingTarget.CONTROLLER_RIGHT] + if len(right_controller_data) > DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + right_inputs = right_controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + if len(right_inputs) > DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value: + right_thumbstick_x = right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_X.value] + right_thumbstick_y = right_inputs[DeviceBase.MotionControllerInputIndex.THUMBSTICK_Y.value] + + # Thumbstick values are in the range of [-1, 1], so we need to scale them to the range of + # [-movement_scale, movement_scale] + left_thumbstick_x = left_thumbstick_x * self.cfg.movement_scale + left_thumbstick_y = left_thumbstick_y * self.cfg.movement_scale + + # Use rendering time step for deterministic hip height adjustment regardless of wall clock time. + dt = SimulationContext.instance().get_rendering_dt() + self._hip_height -= right_thumbstick_y * dt * self.cfg.rotation_scale + self._hip_height = max(0.4, min(1.0, self._hip_height)) + + return torch.tensor( + [-left_thumbstick_y, -left_thumbstick_x, -right_thumbstick_x, self._hip_height], + device=self.cfg.sim_device, + dtype=torch.float32, + ) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.MOTION_CONTROLLER] + + +@dataclass +class G1LowerBodyStandingMotionControllerRetargeterCfg(RetargeterCfg): + """Configuration for the G1 lower body standing retargeter.""" + + hip_height: float = 0.72 + """Height of the G1 robot hip in meters. The value is a fixed height suitable for G1 to do tabletop manipulation.""" + + movement_scale: float = 0.5 + """Scale the movement of the robot to the range of [-movement_scale, movement_scale].""" + + rotation_scale: float = 0.35 + """Scale the rotation of the robot to the range of [-rotation_scale, rotation_scale].""" + retargeter_type: type[RetargeterBase] = G1LowerBodyStandingMotionControllerRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml new file mode 100644 index 000000000000..de72352738fd --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_left_dexpilot.yml @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - L_thumb_tip + - L_index_tip + - L_middle_tip + - L_ring_tip + - L_pinky_tip + low_pass_alpha: 0.2 + scaling_factor: 1.2 + target_joint_names: + - L_thumb_proximal_yaw_joint + - L_thumb_proximal_pitch_joint + - L_index_proximal_joint + - L_middle_proximal_joint + - L_ring_proximal_joint + - L_pinky_proximal_joint + type: DexPilot + urdf_path: /tmp/retarget_inspire_white_left_hand.urdf + wrist_link_name: L_hand_base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml new file mode 100644 index 000000000000..5d0406da4365 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/data/configs/dex-retargeting/unitree_hand_right_dexpilot.yml @@ -0,0 +1,24 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - R_thumb_tip + - R_index_tip + - R_middle_tip + - R_ring_tip + - R_pinky_tip + low_pass_alpha: 0.2 + scaling_factor: 1.2 + target_joint_names: + - R_thumb_proximal_yaw_joint + - R_thumb_proximal_pitch_joint + - R_index_proximal_joint + - R_middle_proximal_joint + - R_ring_proximal_joint + - R_pinky_proximal_joint + type: DexPilot + urdf_path: /tmp/retarget_inspire_white_right_hand.urdf + wrist_link_name: R_hand_base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py new file mode 100644 index 000000000000..3d759003f854 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_dex_retargeting_utils.py @@ -0,0 +1,266 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import os + +import numpy as np +import torch +import yaml +from dex_retargeting.retargeting_config import RetargetingConfig +from scipy.spatial.transform import Rotation as R + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +# import logger +logger = logging.getLogger(__name__) + +# The index to map the OpenXR hand joints to the hand joints used +# in Dex-retargeting. +_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25] + +# The transformation matrices to convert hand pose to canonical view. +_OPERATOR2MANO_RIGHT = np.array( + [ + [0, -1, 0], + [-1, 0, 0], + [0, 0, -1], + ] +) + +_OPERATOR2MANO_LEFT = np.array( + [ + [0, -1, 0], + [-1, 0, 0], + [0, 0, -1], + ] +) + +_LEFT_HAND_JOINT_NAMES = [ + "L_thumb_proximal_yaw_joint", + "L_thumb_proximal_pitch_joint", + "L_thumb_intermediate_joint", + "L_thumb_distal_joint", + "L_index_proximal_joint", + "L_index_intermediate_joint", + "L_middle_proximal_joint", + "L_middle_intermediate_joint", + "L_ring_proximal_joint", + "L_ring_intermediate_joint", + "L_pinky_proximal_joint", + "L_pinky_intermediate_joint", +] + + +_RIGHT_HAND_JOINT_NAMES = [ + "R_thumb_proximal_yaw_joint", + "R_thumb_proximal_pitch_joint", + "R_thumb_intermediate_joint", + "R_thumb_distal_joint", + "R_index_proximal_joint", + "R_index_intermediate_joint", + "R_middle_proximal_joint", + "R_middle_intermediate_joint", + "R_ring_proximal_joint", + "R_ring_intermediate_joint", + "R_pinky_proximal_joint", + "R_pinky_intermediate_joint", +] + + +class UnitreeG1DexRetargeting: + """A class for hand retargeting with GR1Fourier. + + Handles retargeting of OpenXRhand tracking data to GR1T2 robot hand joint angles. + """ + + def __init__( + self, + hand_joint_names: list[str], + right_hand_config_filename: str = "unitree_hand_right_dexpilot.yml", + left_hand_config_filename: str = "unitree_hand_left_dexpilot.yml", + left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_left_hand.urdf", # noqa: E501 + right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Mimic/G1_inspire_assets/retarget_inspire_white_right_hand.urdf", # noqa: E501 + ): + """Initialize the hand retargeting. + + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + + # Download urdf files from aws + local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True) + local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True) + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + # Update the YAML files with the correct URDF paths + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + logger.info("[UnitreeG1DexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. + + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + logger.warning(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + logger.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i in range(len(_HAND_JOINTS_INDEX)): + joint = hand_joints[_HAND_JOINTS_INDEX[i]] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. + joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order + wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. + + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. + + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. + + Args: + right_hand_poses: Dictionary of right hand joint poses + + Returns: + Retargeted joint angles for right hand + """ + if right_hand_poses is not None: + right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT) + else: + right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES)) + return right_hand_q diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py new file mode 100644 index 000000000000..d7976bc14264 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/inspire/g1_upper_body_retargeter.py @@ -0,0 +1,164 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + +# This import exception is suppressed because g1_dex_retargeting_utils +# depends on pinocchio which is not available on Windows. +with contextlib.suppress(Exception): + from .g1_dex_retargeting_utils import UnitreeG1DexRetargeting + + +class UnitreeG1Retargeter(RetargeterBase): + """Retargets OpenXR hand tracking data to GR1T2 hand end-effector commands. + + This retargeter maps hand tracking data from OpenXR to joint commands for the GR1T2 robot's hands. + It handles both left and right hands, converting poses of the hands in OpenXR format joint angles + for the GR1T2 robot's hands. + """ + + def __init__( + self, + cfg: UnitreeG1RetargeterCfg, + ): + """Initialize the UnitreeG1 hand retargeter. + + Args: + enable_visualization: If True, visualize tracked hand joints + num_open_xr_hand_joints: Number of joints tracked by OpenXR + device: PyTorch device for computations + hand_joint_names: List of robot hand joint names + """ + + super().__init__(cfg) + self._hand_joint_names = cfg.hand_joint_names + self._hands_controller = UnitreeG1DexRetargeting(self._hand_joint_names) + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints + self._sim_device = cfg.sim_device + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.005, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector commands. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + + Returns: + tuple containing: + Left wrist pose + Right wrist pose in USD frame + Retargeted hand joint angles + """ + + # Access the left and right hand data using the enum key + left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT] + right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT] + + left_wrist = left_hand_poses.get("wrist") + right_wrist = right_hand_poses.get("wrist") + + if self._enable_visualization: + joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + + joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) + joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + + # Create array of zeros with length matching number of joint names + left_hands_pos = self._hands_controller.compute_left(left_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] + left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + left_retargeted_hand_joints[indexes] = left_hands_pos + left_hand_joints = left_retargeted_hand_joints + + right_hands_pos = self._hands_controller.compute_right(right_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] + right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + right_retargeted_hand_joints[indexes] = right_hands_pos + right_hand_joints = right_retargeted_hand_joints + retargeted_hand_joints = left_hand_joints + right_hand_joints + + # Convert numpy arrays to tensors and concatenate them + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) + + # Combine all tensors into a single tensor + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + wrist: Wrist pose data from OpenXR. + is_left: True for the left hand, False for the right hand. + + Returns: + Retargeted wrist pose in USD control frame. + """ + # Note: This was determined through trial, use the target quat and cloudXR quat, + # to estimate a most reasonable transformation matrix + + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + if is_left: + # Corresponds to a rotation of (0, 180, 0) in euler angles (x,y,z) + combined_quat = torch.tensor([0, 0.7071, 0, 0.7071], dtype=torch.float32) + else: + # Corresponds to a rotation of (180, 0, 0) in euler angles (x,y,z) + combined_quat = torch.tensor([0.7071, 0, -0.7071, 0], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + +@dataclass +class UnitreeG1RetargeterCfg(RetargeterCfg): + """Configuration for the UnitreeG1 retargeter.""" + + enable_visualization: bool = False + num_open_xr_hand_joints: int = 100 + hand_joint_names: list[str] | None = None # List of robot hand joint names + retargeter_type: type[RetargeterBase] = UnitreeG1Retargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml new file mode 100644 index 000000000000..0f9f5416e1ca --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_left_dexpilot.yml @@ -0,0 +1,23 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - thumb_tip + - index_tip + - middle_tip + low_pass_alpha: 0.2 + scaling_factor: 1.0 + target_joint_names: + - left_hand_thumb_0_joint + - left_hand_thumb_1_joint + - left_hand_thumb_2_joint + - left_hand_middle_0_joint + - left_hand_middle_1_joint + - left_hand_index_0_joint + - left_hand_index_1_joint + type: DexPilot + urdf_path: /tmp/G1_left_hand.urdf + wrist_link_name: base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml new file mode 100644 index 000000000000..3908adcce0f6 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/data/configs/dex-retargeting/g1_hand_right_dexpilot.yml @@ -0,0 +1,23 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +retargeting: + finger_tip_link_names: + - thumb_tip + - index_tip + - middle_tip + low_pass_alpha: 0.2 + scaling_factor: 1.0 + target_joint_names: + - right_hand_thumb_0_joint + - right_hand_thumb_1_joint + - right_hand_thumb_2_joint + - right_hand_middle_0_joint + - right_hand_middle_1_joint + - right_hand_index_0_joint + - right_hand_index_1_joint + type: DexPilot + urdf_path: /tmp/G1_right_hand.urdf + wrist_link_name: base_link diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py new file mode 100644 index 000000000000..6575eaaba41c --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_dex_retargeting_utils.py @@ -0,0 +1,258 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import os + +import numpy as np +import torch +import yaml +from dex_retargeting.retargeting_config import RetargetingConfig +from scipy.spatial.transform import Rotation as R + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +# import logger +logger = logging.getLogger(__name__) + +# yourdfpy loads visual/collision meshes with the hand URDFs; these aren't needed for +# retargeting and clutter the logs, so we suppress them. +logging.getLogger("dex_retargeting.yourdfpy").setLevel(logging.ERROR) + +# The index to map the OpenXR hand joints to the hand joints used +# in Dex-retargeting. +_HAND_JOINTS_INDEX = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25] + +# The transformation matrices to convert hand pose to canonical view. +_OPERATOR2MANO_RIGHT = np.array( + [ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0], + ] +) + +_OPERATOR2MANO_LEFT = np.array( + [ + [0, 0, 1], + [1, 0, 0], + [0, 1, 0], + ] +) + +# G1 robot hand joint names - 2 fingers and 1 thumb configuration +_LEFT_HAND_JOINT_NAMES = [ + "left_hand_thumb_0_joint", # Thumb base (yaw axis) + "left_hand_thumb_1_joint", # Thumb middle (pitch axis) + "left_hand_thumb_2_joint", # Thumb tip + "left_hand_index_0_joint", # Index finger proximal + "left_hand_index_1_joint", # Index finger distal + "left_hand_middle_0_joint", # Middle finger proximal + "left_hand_middle_1_joint", # Middle finger distal +] + +_RIGHT_HAND_JOINT_NAMES = [ + "right_hand_thumb_0_joint", # Thumb base (yaw axis) + "right_hand_thumb_1_joint", # Thumb middle (pitch axis) + "right_hand_thumb_2_joint", # Thumb tip + "right_hand_index_0_joint", # Index finger proximal + "right_hand_index_1_joint", # Index finger distal + "right_hand_middle_0_joint", # Middle finger proximal + "right_hand_middle_1_joint", # Middle finger distal +] + + +class G1TriHandDexRetargeting: + """A class for hand retargeting with G1. + + Handles retargeting of OpenXRhand tracking data to G1 robot hand joint angles. + """ + + def __init__( + self, + hand_joint_names: list[str], + right_hand_config_filename: str = "g1_hand_right_dexpilot.yml", + left_hand_config_filename: str = "g1_hand_left_dexpilot.yml", + left_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_left_hand.urdf", # noqa: E501 + right_hand_urdf_path: str = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_dexpilot_asset/G1_right_hand.urdf", # noqa: E501 + ): + """Initialize the hand retargeting. + + Args: + hand_joint_names: Names of hand joints in the robot model + right_hand_config_filename: Config file for right hand retargeting + left_hand_config_filename: Config file for left hand retargeting + """ + data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/")) + config_dir = os.path.join(data_dir, "configs/dex-retargeting") + + # Download urdf files from aws + local_left_urdf_path = retrieve_file_path(left_hand_urdf_path, force_download=True) + local_right_urdf_path = retrieve_file_path(right_hand_urdf_path, force_download=True) + + left_config_path = os.path.join(config_dir, left_hand_config_filename) + right_config_path = os.path.join(config_dir, right_hand_config_filename) + + # Update the YAML files with the correct URDF paths + self._update_yaml_with_urdf_path(left_config_path, local_left_urdf_path) + self._update_yaml_with_urdf_path(right_config_path, local_right_urdf_path) + + self._dex_left_hand = RetargetingConfig.load_from_file(left_config_path).build() + self._dex_right_hand = RetargetingConfig.load_from_file(right_config_path).build() + + self.left_dof_names = self._dex_left_hand.optimizer.robot.dof_joint_names + self.right_dof_names = self._dex_right_hand.optimizer.robot.dof_joint_names + self.dof_names = self.left_dof_names + self.right_dof_names + self.isaac_lab_hand_joint_names = hand_joint_names + + logger.info("[G1DexRetargeter] init done.") + + def _update_yaml_with_urdf_path(self, yaml_path: str, urdf_path: str): + """Update YAML file with the correct URDF path. + + Args: + yaml_path: Path to the YAML configuration file + urdf_path: Path to the URDF file to use + """ + try: + # Read the YAML file + with open(yaml_path) as file: + config = yaml.safe_load(file) + + # Update the URDF path in the configuration + if "retargeting" in config: + config["retargeting"]["urdf_path"] = urdf_path + logger.info(f"Updated URDF path in {yaml_path} to {urdf_path}") + else: + logger.warning(f"Unable to find 'retargeting' section in {yaml_path}") + + # Write the updated configuration back to the file + with open(yaml_path, "w") as file: + yaml.dump(config, file) + + except Exception as e: + logger.error(f"Error updating YAML file {yaml_path}: {e}") + + def convert_hand_joints(self, hand_poses: dict[str, np.ndarray], operator2mano: np.ndarray) -> np.ndarray: + """Prepares the hand joints data for retargeting. + + Args: + hand_poses: Dictionary containing hand pose data with joint positions and rotations + operator2mano: Transformation matrix to convert from operator to MANO frame + + Returns: + Joint positions with shape (21, 3) + """ + joint_position = np.zeros((21, 3)) + hand_joints = list(hand_poses.values()) + for i, joint_index in enumerate(_HAND_JOINTS_INDEX): + joint = hand_joints[joint_index] + joint_position[i] = joint[:3] + + # Convert hand pose to the canonical frame. + joint_position = joint_position - joint_position[0:1, :] + xr_wrist_quat = hand_poses.get("wrist")[3:] + # OpenXR hand uses w,x,y,z order for quaternions but scipy uses x,y,z,w order + wrist_rot = R.from_quat([xr_wrist_quat[1], xr_wrist_quat[2], xr_wrist_quat[3], xr_wrist_quat[0]]).as_matrix() + + return joint_position @ wrist_rot @ operator2mano + + def compute_ref_value(self, joint_position: np.ndarray, indices: np.ndarray, retargeting_type: str) -> np.ndarray: + """Computes reference value for retargeting. + + Args: + joint_position: Joint positions array + indices: Target link indices + retargeting_type: Type of retargeting ("POSITION" or other) + + Returns: + Reference value in cartesian space + """ + if retargeting_type == "POSITION": + return joint_position[indices, :] + else: + origin_indices = indices[0, :] + task_indices = indices[1, :] + ref_value = joint_position[task_indices, :] - joint_position[origin_indices, :] + return ref_value + + def compute_one_hand( + self, hand_joints: dict[str, np.ndarray], retargeting: RetargetingConfig, operator2mano: np.ndarray + ) -> np.ndarray: + """Computes retargeted joint angles for one hand. + + Args: + hand_joints: Dictionary containing hand joint data + retargeting: Retargeting configuration object + operator2mano: Transformation matrix from operator to MANO frame + + Returns: + Retargeted joint angles + """ + joint_pos = self.convert_hand_joints(hand_joints, operator2mano) + ref_value = self.compute_ref_value( + joint_pos, + indices=retargeting.optimizer.target_link_human_indices, + retargeting_type=retargeting.optimizer.retargeting_type, + ) + # Enable gradient calculation and inference mode in case some other script has disabled it + # This is necessary for the retargeting to work since it uses gradient features that + # are not available in inference mode + with torch.enable_grad(): + with torch.inference_mode(False): + return retargeting.retarget(ref_value) + + def get_joint_names(self) -> list[str]: + """Returns list of all joint names.""" + return self.dof_names + + def get_left_joint_names(self) -> list[str]: + """Returns list of left hand joint names.""" + return self.left_dof_names + + def get_right_joint_names(self) -> list[str]: + """Returns list of right hand joint names.""" + return self.right_dof_names + + def get_hand_indices(self, robot) -> np.ndarray: + """Gets indices of hand joints in robot's DOF array. + + Args: + robot: Robot object containing DOF information + + Returns: + Array of joint indices + """ + return np.array([robot.dof_names.index(name) for name in self.dof_names], dtype=np.int64) + + def compute_left(self, left_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for left hand. + + Args: + left_hand_poses: Dictionary of left hand joint poses + + Returns: + Retargeted joint angles for left hand + """ + if left_hand_poses is not None: + left_hand_q = self.compute_one_hand(left_hand_poses, self._dex_left_hand, _OPERATOR2MANO_LEFT) + else: + left_hand_q = np.zeros(len(_LEFT_HAND_JOINT_NAMES)) + return left_hand_q + + def compute_right(self, right_hand_poses: dict[str, np.ndarray]) -> np.ndarray: + """Computes retargeted joints for right hand. + + Args: + right_hand_poses: Dictionary of right hand joint poses + + Returns: + Retargeted joint angles for right hand + """ + if right_hand_poses is not None: + right_hand_q = self.compute_one_hand(right_hand_poses, self._dex_right_hand, _OPERATOR2MANO_RIGHT) + else: + right_hand_q = np.zeros(len(_RIGHT_HAND_JOINT_NAMES)) + return right_hand_q diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_gripper.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_gripper.py new file mode 100644 index 000000000000..e0641c27de53 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_gripper.py @@ -0,0 +1,154 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg + + +class G1TriHandUpperBodyMotionControllerGripperRetargeter(RetargeterBase): + """Retargeter for G1 gripper that outputs a boolean state based on controller trigger input, + concatenated with the retargeted wrist pose. + + Gripper: + - Uses hysteresis to prevent flickering when the trigger is near the threshold. + - Output is 0.0 for open, 1.0 for close. + + Wrist: + - Retargets absolute pose from controller to robot frame. + - Applies a fixed offset rotation for comfort/alignment. + """ + + def __init__(self, cfg: G1TriHandUpperBodyMotionControllerGripperRetargeterCfg): + """Initialize the retargeter. + + Args: + cfg: Configuration for the retargeter. + """ + super().__init__(cfg) + self._cfg = cfg + # Track previous state for hysteresis (left, right) + self._prev_left_state: float = 0.0 + self._prev_right_state: float = 0.0 + + def retarget(self, data: dict) -> torch.Tensor: + """Retarget controller inputs to gripper boolean state and wrist pose. + + Args: + data: Dictionary with MotionControllerTrackingTarget.LEFT/RIGHT keys + Each value is a 2D array: [pose(7), inputs(7)] + + Returns: + Tensor: [left_gripper_state(1), right_gripper_state(1), left_wrist(7), right_wrist(7)] + Wrist format: [x, y, z, qw, qx, qy, qz] + """ + # Get controller data + left_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_LEFT, np.array([])) + right_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_RIGHT, np.array([])) + + # --- Gripper Logic --- + # Extract hand state from controller data with hysteresis + left_hand_state: float = self._extract_hand_state(left_controller_data, self._prev_left_state) + right_hand_state: float = self._extract_hand_state(right_controller_data, self._prev_right_state) + + # Update previous states + self._prev_left_state = left_hand_state + self._prev_right_state = right_hand_state + + gripper_tensor = torch.tensor([left_hand_state, right_hand_state], dtype=torch.float32, device=self._sim_device) + + # --- Wrist Logic --- + # Default wrist poses (position + quaternion [x, y, z, w]) + # Format: [x, y, z, qx, qy, qz, qw] + default_wrist = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + + # Extract poses from controller data + left_wrist = self._extract_wrist_pose(left_controller_data, default_wrist) + right_wrist = self._extract_wrist_pose(right_controller_data, default_wrist) + + # Convert to tensors + left_wrist_tensor = torch.tensor(self._retarget_abs(left_wrist), dtype=torch.float32, device=self._sim_device) + right_wrist_tensor = torch.tensor(self._retarget_abs(right_wrist), dtype=torch.float32, device=self._sim_device) + + # Concatenate: [gripper(2), left_wrist(7), right_wrist(7)] + return torch.cat([gripper_tensor, left_wrist_tensor, right_wrist_tensor]) + + def _extract_hand_state(self, controller_data: np.ndarray, prev_state: float) -> float: + """Extract hand state from controller data with hysteresis. + + Args: + controller_data: 2D array [pose(7), inputs(7)] + prev_state: Previous hand state (0.0 or 1.0) + + Returns: + Hand state as float (0.0 for open, 1.0 for close) + """ + if len(controller_data) <= DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + return 0.0 + + # Extract inputs from second row + inputs = controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + if len(inputs) < len(DeviceBase.MotionControllerInputIndex): + return 0.0 + + # Extract specific inputs using enum + trigger = inputs[DeviceBase.MotionControllerInputIndex.TRIGGER.value] # 0.0 to 1.0 (analog) + + # Apply hysteresis + if prev_state < 0.5: # Currently open + return 1.0 if trigger > self._cfg.threshold_high else 0.0 + else: # Currently closed + return 0.0 if trigger < self._cfg.threshold_low else 1.0 + + def _extract_wrist_pose(self, controller_data: np.ndarray, default_pose: np.ndarray) -> np.ndarray: + """Extract wrist pose from controller data. + + Args: + controller_data: 2D array [pose(7), inputs(7)] + default_pose: Default pose to use if no data + + Returns: + Wrist pose array [x, y, z, w, x, y, z] + """ + if len(controller_data) > DeviceBase.MotionControllerDataRowIndex.POSE.value: + return controller_data[DeviceBase.MotionControllerDataRowIndex.POSE.value] + return default_pose + + def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray: + """Handle absolute pose retargeting for controller wrists.""" + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + # Combined -75° (rather than -90° for wrist comfort) Y rotation + 90° Z rotation + # This is equivalent to (0, -75, 90) in euler angles + combined_quat = torch.tensor([-0.4619, 0.5358, 0.4619, 0.5358], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.MOTION_CONTROLLER] + + +@dataclass +class G1TriHandUpperBodyMotionControllerGripperRetargeterCfg(RetargeterCfg): + """Configuration for the G1 boolean gripper and wrist retargeter.""" + + threshold_high: float = 0.6 # Threshold to close hand + threshold_low: float = 0.4 # Threshold to open hand + retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyMotionControllerGripperRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_retargeter.py new file mode 100644 index 000000000000..b31e4dcca817 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_motion_ctrl_retargeter.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + + +class G1TriHandUpperBodyMotionControllerRetargeter(RetargeterBase): + """Simple retargeter that maps motion controller inputs to G1 hand joints. + + Mapping: + - A button (digital 0/1) → Thumb joints + - Trigger (analog 0-1) → Index finger joints + - Squeeze (analog 0-1) → Middle finger joints + """ + + def __init__(self, cfg: G1TriHandUpperBodyMotionControllerRetargeterCfg): + """Initialize the retargeter.""" + super().__init__(cfg) + self._sim_device = cfg.sim_device + self._hand_joint_names = cfg.hand_joint_names + self._enable_visualization = cfg.enable_visualization + + if cfg.hand_joint_names is None: + raise ValueError("hand_joint_names must be provided") + + # Initialize visualization if enabled + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/g1_controller_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.01, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert controller inputs to robot commands. + + Args: + data: Dictionary with MotionControllerTrackingTarget.LEFT/RIGHT keys + Each value is a 2D array: [pose(7), inputs(7)] + + Returns: + Tensor: [left_wrist(7), right_wrist(7), hand_joints(14)] + hand_joints order: + [ + left_proximal(3), right_proximal(3), left_distal(2), left_thumb_middle(1), + right_distal(2), right_thumb_middle(1), left_thumb_tip(1), right_thumb_tip(1) + ] + """ + + # Get controller data + left_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_LEFT, np.array([])) + right_controller_data = data.get(DeviceBase.TrackingTarget.CONTROLLER_RIGHT, np.array([])) + + # Default wrist poses (position + quaternion xyzw) + default_wrist = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + + # Extract poses from controller data + left_wrist = self._extract_wrist_pose(left_controller_data, default_wrist) + right_wrist = self._extract_wrist_pose(right_controller_data, default_wrist) + + # Map controller inputs to hand joints + left_hand_joints = self._map_to_hand_joints(left_controller_data, is_left=True) + right_hand_joints = self._map_to_hand_joints(right_controller_data, is_left=False) + + # Negate left hand joints for proper mirroring + left_hand_joints = -left_hand_joints + + # Combine joints in the expected order: + # [left_proximal(3), right_proximal(3), left_distal(2), left_thumb_middle(1), + # right_distal(2), right_thumb_middle(1), left_thumb_tip(1), right_thumb_tip(1)] + all_hand_joints = np.array( + [ + left_hand_joints[3], # left_index_proximal + left_hand_joints[5], # left_middle_proximal + left_hand_joints[0], # left_thumb_base + right_hand_joints[3], # right_index_proximal + right_hand_joints[5], # right_middle_proximal + right_hand_joints[0], # right_thumb_base + left_hand_joints[4], # left_index_distal + left_hand_joints[6], # left_middle_distal + left_hand_joints[1], # left_thumb_middle + right_hand_joints[4], # right_index_distal + right_hand_joints[6], # right_middle_distal + right_hand_joints[1], # right_thumb_middle + left_hand_joints[2], # left_thumb_tip + right_hand_joints[2], # right_thumb_tip + ] + ) + + # Convert to tensors + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(all_hand_joints, dtype=torch.float32, device=self._sim_device) + + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.MOTION_CONTROLLER] + + def _extract_wrist_pose(self, controller_data: np.ndarray, default_pose: np.ndarray) -> np.ndarray: + """Extract wrist pose from controller data. + + Args: + controller_data: 2D array [pose(7), inputs(7)] + default_pose: Default pose to use if no data + + Returns: + Wrist pose array [x, y, z, w, x, y, z] + """ + if len(controller_data) > DeviceBase.MotionControllerDataRowIndex.POSE.value: + return controller_data[DeviceBase.MotionControllerDataRowIndex.POSE.value] + return default_pose + + def _map_to_hand_joints(self, controller_data: np.ndarray, is_left: bool) -> np.ndarray: + """Map controller inputs to hand joint angles. + + Args: + controller_data: 2D array [pose(7), inputs(7)] + is_left: True for left hand, False for right hand + + Returns: + Hand joint angles (7 joints per hand) in radians + """ + + # Initialize all joints to zero + hand_joints = np.zeros(7) + + if len(controller_data) <= DeviceBase.MotionControllerDataRowIndex.INPUTS.value: + return hand_joints + + # Extract inputs from second row + inputs = controller_data[DeviceBase.MotionControllerDataRowIndex.INPUTS.value] + + if len(inputs) < len(DeviceBase.MotionControllerInputIndex): + return hand_joints + + # Extract specific inputs using enum + trigger = inputs[DeviceBase.MotionControllerInputIndex.TRIGGER.value] # 0.0 to 1.0 (analog) + squeeze = inputs[DeviceBase.MotionControllerInputIndex.SQUEEZE.value] # 0.0 to 1.0 (analog) + + # Grasping logic: + # If trigger is pressed, we grasp with index and thumb. + # If squeeze is pressed, we grasp with middle and thumb. + # If both are pressed, we grasp with index, middle, and thumb. + # The thumb rotates towards the direction of the pressing finger. + # If both are pressed, the thumb stays in the middle. + + thumb_button = max(trigger, squeeze) + + # Map to G1 hand joints (in radians) + # Thumb joints (3 joints) - controlled by A button (digital) + thumb_angle = -thumb_button # Max 1 radian ≈ 57° + + # Thumb rotation: + # If trigger is pressed, we rotate the thumb toward the index finger. + # If squeeze is pressed, we rotate the thumb toward the middle finger. + # If both are pressed, the thumb stays between the index and middle fingers. + # Trigger pushes toward +0.5, squeeze pushes toward -0.5 + # trigger=1,squeeze=0 → 0.5; trigger=0,squeeze=1 → -0.5; both=1 → 0 + thumb_rotation = 0.5 * trigger - 0.5 * squeeze + + if not is_left: + thumb_rotation = -thumb_rotation + + # These values were found empirically to get a good gripper pose. + + hand_joints[0] = thumb_rotation # thumb_0_joint (base) + hand_joints[1] = thumb_angle * 0.4 # thumb_1_joint (middle) + hand_joints[2] = thumb_angle * 0.7 # thumb_2_joint (tip) + + # Index finger joints (2 joints) - controlled by trigger (analog) + index_angle = trigger * 1.0 # Max 1.0 radians ≈ 57° + hand_joints[3] = index_angle # index_0_joint (proximal) + hand_joints[4] = index_angle # index_1_joint (distal) + + # Middle finger joints (2 joints) - controlled by squeeze (analog) + middle_angle = squeeze * 1.0 # Max 1.0 radians ≈ 57° + hand_joints[5] = middle_angle # middle_0_joint (proximal) + hand_joints[6] = middle_angle # middle_1_joint (distal) + + return hand_joints + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting for controller wrists.""" + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + # Combined -75° (rather than -90° for wrist comfort) Y rotation + 90° Z rotation + # This is equivalent to (0, -75, 90) in euler angles + combined_quat = torch.tensor([-0.4619, 0.5358, 0.4619, 0.5358], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + +@dataclass +class G1TriHandUpperBodyMotionControllerRetargeterCfg(RetargeterCfg): + """Configuration for the G1 Controller Upper Body retargeter.""" + + enable_visualization: bool = False + hand_joint_names: list[str] | None = None # List of robot hand joint names + retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyMotionControllerRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py new file mode 100644 index 000000000000..446154007e1a --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/humanoid/unitree/trihand/g1_upper_body_retargeter.py @@ -0,0 +1,173 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import contextlib +from dataclasses import dataclass + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as PoseUtils +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg + +# This import exception is suppressed because g1_dex_retargeting_utils depends +# on pinocchio which is not available on Windows. +with contextlib.suppress(Exception): + from .g1_dex_retargeting_utils import G1TriHandDexRetargeting + + +class G1TriHandUpperBodyRetargeter(RetargeterBase): + """Retargets OpenXR data to G1 upper body commands. + + This retargeter maps hand tracking data from OpenXR to wrist and hand joint commands for the G1 robot. + It handles both left and right hands, converting poses of the hands in OpenXR format to appropriate wrist poses + and joint angles for the G1 robot's upper body. + """ + + def __init__( + self, + cfg: G1TriHandUpperBodyRetargeterCfg, + ): + """Initialize the G1 upper body retargeter. + + Args: + cfg: Configuration for the retargeter. + """ + super().__init__(cfg) + + # Store device name for runtime retrieval + self._sim_device = cfg.sim_device + self._hand_joint_names = cfg.hand_joint_names + + # Initialize the hands controller + if cfg.hand_joint_names is not None: + self._hands_controller = G1TriHandDexRetargeting(cfg.hand_joint_names) + else: + raise ValueError("hand_joint_names must be provided in configuration") + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints + if self._enable_visualization: + marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/g1_hand_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.005, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) + self._markers = VisualizationMarkers(marker_cfg) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector commands. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + + Returns: + A tensor containing the retargeted commands: + - Left wrist pose (7) + - Right wrist pose (7) + - Hand joint angles (len(hand_joint_names)) + """ + + # Access the left and right hand data using the enum key + left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT] + right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT] + + left_wrist = left_hand_poses.get("wrist") + right_wrist = right_hand_poses.get("wrist") + + # Handle case where wrist data is not available + if left_wrist is None or right_wrist is None: + # Set to default pose if no data available. + # pos=(0,0,0), quat=(0,0,0,1) (x,y,z,w) + default_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + if left_wrist is None: + left_wrist = default_pose + if right_wrist is None: + right_wrist = default_pose + + # Visualization if enabled + if self._enable_visualization: + joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) + joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + + # Compute retargeted hand joints + left_hands_pos = self._hands_controller.compute_left(left_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] + left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + left_retargeted_hand_joints[indexes] = left_hands_pos + left_hand_joints = left_retargeted_hand_joints + + right_hands_pos = self._hands_controller.compute_right(right_hand_poses) + indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] + right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) + right_retargeted_hand_joints[indexes] = right_hands_pos + right_hand_joints = right_retargeted_hand_joints + retargeted_hand_joints = left_hand_joints + right_hand_joints + + # Convert numpy arrays to tensors and store in command buffer + left_wrist_tensor = torch.tensor( + self._retarget_abs(left_wrist, is_left=True), dtype=torch.float32, device=self._sim_device + ) + right_wrist_tensor = torch.tensor( + self._retarget_abs(right_wrist, is_left=False), dtype=torch.float32, device=self._sim_device + ) + hand_joints_tensor = torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) + + # Combine all tensors into a single tensor + return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _retarget_abs(self, wrist: np.ndarray, is_left: bool) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + wrist: Wrist pose data from OpenXR. + is_left: True for the left hand, False for the right hand. + + Returns: + Retargeted wrist pose in USD control frame. + """ + wrist_pos = torch.tensor(wrist[:3], dtype=torch.float32) + wrist_quat = torch.tensor(wrist[3:], dtype=torch.float32) + + if is_left: + # Corresponds to a rotation of (0, 90, 90) in euler angles (x,y,z) + combined_quat = torch.tensor([0, 0.7071, 0, 0.7071], dtype=torch.float32) + else: + # Corresponds to a rotation of (0, -90, -90) in euler angles (x,y,z) + combined_quat = torch.tensor([-0.7071, 0, 0.7071, 0], dtype=torch.float32) + + openxr_pose = PoseUtils.make_pose(wrist_pos, PoseUtils.matrix_from_quat(wrist_quat)) + transform_pose = PoseUtils.make_pose(torch.zeros(3), PoseUtils.matrix_from_quat(combined_quat)) + + result_pose = PoseUtils.pose_in_A_to_pose_in_B(transform_pose, openxr_pose) + pos, rot_mat = PoseUtils.unmake_pose(result_pose) + quat = PoseUtils.quat_from_matrix(rot_mat) + + return np.concatenate([pos.numpy(), quat.numpy()]) + + +@dataclass +class G1TriHandUpperBodyRetargeterCfg(RetargeterCfg): + """Configuration for the G1 Controller Upper Body retargeter.""" + + enable_visualization: bool = False + num_open_xr_hand_joints: int = 100 + hand_joint_names: list[str] | None = None # List of robot hand joint names + retargeter_type: type[RetargeterBase] = G1TriHandUpperBodyRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.py new file mode 100644 index 000000000000..49b6eba4588d --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Franka manipulator retargeting module. + +This module provides functionality for retargeting motion to Franka robots. +""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.pyi b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.pyi new file mode 100644 index 000000000000..756b705c8a55 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/__init__.pyi @@ -0,0 +1,17 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "GripperRetargeter", + "GripperRetargeterCfg", + "Se3AbsRetargeter", + "Se3AbsRetargeterCfg", + "Se3RelRetargeter", + "Se3RelRetargeterCfg", +] + +from .gripper_retargeter import GripperRetargeter, GripperRetargeterCfg +from .se3_abs_retargeter import Se3AbsRetargeter, Se3AbsRetargeterCfg +from .se3_rel_retargeter import Se3RelRetargeter, Se3RelRetargeterCfg diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/gripper_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/gripper_retargeter.py new file mode 100644 index 000000000000..9ae2031b4d81 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/gripper_retargeter.py @@ -0,0 +1,98 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +from dataclasses import dataclass +from typing import Final + +import numpy as np +import torch + +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg + + +class GripperRetargeter(RetargeterBase): + """Retargeter specifically for gripper control based on hand tracking data. + + This retargeter analyzes the distance between thumb and index finger tips to determine + whether the gripper should be open or closed. It includes hysteresis to prevent rapid + toggling between states when the finger distance is near the threshold. + + Features: + - Tracks thumb and index finger distance + - Implements hysteresis for stable gripper control + - Outputs boolean command (True = close gripper, False = open gripper) + """ + + GRIPPER_CLOSE_METERS: Final[float] = 0.03 + GRIPPER_OPEN_METERS: Final[float] = 0.05 + + def __init__( + self, + cfg: GripperRetargeterCfg, + ): + super().__init__(cfg) + """Initialize the gripper retargeter.""" + # Store the hand to track + if cfg.bound_hand not in [DeviceBase.TrackingTarget.HAND_LEFT, DeviceBase.TrackingTarget.HAND_RIGHT]: + raise ValueError( + "bound_hand must be either DeviceBase.TrackingTarget.HAND_LEFT or DeviceBase.TrackingTarget.HAND_RIGHT" + ) + self.bound_hand = cfg.bound_hand + # Initialize gripper state + self._previous_gripper_command = False + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to gripper command. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + The joint names are defined in isaaclab.devices.openxr.common.HAND_JOINT_NAMES + + Returns: + torch.Tensor: Tensor containing a single bool value where True = close gripper, False = open gripper + """ + # Extract key joint poses + hand_data = data[self.bound_hand] + thumb_tip = hand_data["thumb_tip"] + index_tip = hand_data["index_tip"] + + # Calculate gripper command with hysteresis + gripper_command_bool = self._calculate_gripper_command(thumb_tip[:3], index_tip[:3]) + gripper_value = -1.0 if gripper_command_bool else 1.0 + + return torch.tensor([gripper_value], dtype=torch.float32, device=self._sim_device) + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _calculate_gripper_command(self, thumb_pos: np.ndarray, index_pos: np.ndarray) -> bool: + """Calculate gripper command from finger positions with hysteresis. + + Args: + thumb_pos: 3D position of thumb tip + index_pos: 3D position of index tip + + Returns: + bool: Gripper command (True = close, False = open) + """ + distance = np.linalg.norm(thumb_pos - index_pos) + + # Apply hysteresis to prevent rapid switching + if distance > self.GRIPPER_OPEN_METERS: + self._previous_gripper_command = False + elif distance < self.GRIPPER_CLOSE_METERS: + self._previous_gripper_command = True + + return self._previous_gripper_command + + +@dataclass +class GripperRetargeterCfg(RetargeterCfg): + """Configuration for gripper retargeter.""" + + bound_hand: DeviceBase.TrackingTarget = DeviceBase.TrackingTarget.HAND_RIGHT + retargeter_type: type[RetargeterBase] = GripperRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_abs_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_abs_retargeter.py new file mode 100644 index 000000000000..473312b9ed26 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_abs_retargeter.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +from dataclasses import dataclass + +import numpy as np +import torch +from scipy.spatial.transform import Rotation, Slerp + +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers +from isaaclab.markers.config import FRAME_MARKER_CFG + + +class Se3AbsRetargeter(RetargeterBase): + """Retargets OpenXR hand tracking data to end-effector commands using absolute positioning. + + This retargeter maps hand joint poses directly to robot end-effector positions and orientations, + rather than using relative movements. It can either: + - Use the wrist position and orientation + - Use the midpoint between thumb and index finger (pinch position) + + Features: + - Optional constraint to zero out X/Y rotations (keeping only Z-axis rotation) + - Optional visualization of the target end-effector pose + """ + + def __init__( + self, + cfg: Se3AbsRetargeterCfg, + ): + """Initialize the retargeter. + + Args: + bound_hand: The hand to track (DeviceBase.TrackingTarget.HAND_LEFT or DeviceBase.TrackingTarget.HAND_RIGHT) + zero_out_xy_rotation: If True, zero out rotation around x and y axes + use_wrist_rotation: If True, use wrist rotation instead of finger average + use_wrist_position: If True, use wrist position instead of pinch position + enable_visualization: If True, visualize the target pose in the scene + device: The device to place the returned tensor on ('cpu' or 'cuda') + """ + super().__init__(cfg) + if cfg.bound_hand not in [DeviceBase.TrackingTarget.HAND_LEFT, DeviceBase.TrackingTarget.HAND_RIGHT]: + raise ValueError( + "bound_hand must be either DeviceBase.TrackingTarget.HAND_LEFT or DeviceBase.TrackingTarget.HAND_RIGHT" + ) + self.bound_hand = cfg.bound_hand + + self._zero_out_xy_rotation = cfg.zero_out_xy_rotation + self._use_wrist_rotation = cfg.use_wrist_rotation + self._use_wrist_position = cfg.use_wrist_position + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + if cfg.enable_visualization: + frame_marker_cfg = FRAME_MARKER_CFG.copy() + frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + self._goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal")) + self._goal_marker.set_visibility(True) + self._visualization_pos = np.zeros(3) + self._visualization_rot = np.array([0.0, 0.0, 0.0, 1.0]) # xyzw format + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector command. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + The joint names are defined in isaaclab.devices.openxr.common.HAND_JOINT_NAMES + + Returns: + torch.Tensor: 7D tensor containing position (xyz) and orientation (quaternion) + for the robot end-effector + """ + # Extract key joint poses from the bound hand + hand_data = data[self.bound_hand] + thumb_tip = hand_data.get("thumb_tip") + index_tip = hand_data.get("index_tip") + wrist = hand_data.get("wrist") + + ee_command_np = self._retarget_abs(thumb_tip, index_tip, wrist) + + # Convert to torch tensor + ee_command = torch.tensor(ee_command_np, dtype=torch.float32, device=self._sim_device) + + return ee_command + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _retarget_abs(self, thumb_tip: np.ndarray, index_tip: np.ndarray, wrist: np.ndarray) -> np.ndarray: + """Handle absolute pose retargeting. + + Args: + thumb_tip: 7D array containing position (xyz) and orientation (quaternion) + for the thumb tip + index_tip: 7D array containing position (xyz) and orientation (quaternion) + for the index tip + wrist: 7D array containing position (xyz) and orientation (quaternion) + for the wrist + + Returns: + np.ndarray: 7D array containing position (xyz) and orientation (quaternion) + for the robot end-effector + """ + + # Get position + if self._use_wrist_position: + position = wrist[:3] + else: + position = (thumb_tip[:3] + index_tip[:3]) / 2 + + # Get rotation + if self._use_wrist_rotation: + # Scipy expects x,y,z,w + base_rot = Rotation.from_quat(wrist[3:]) + else: + # Average the orientations of thumb and index using SLERP + # Scipy expects x,y,z,w + r0 = Rotation.from_quat(thumb_tip[3:]) + # Scipy expects x,y,z,w + r1 = Rotation.from_quat(index_tip[3:]) + key_times = [0, 1] + slerp = Slerp(key_times, Rotation.concatenate([r0, r1])) + base_rot = slerp([0.5])[0] + + # Apply additional x-axis rotation to align with pinch gesture + final_rot = base_rot * Rotation.from_euler("x", 90, degrees=True) + + if self._zero_out_xy_rotation: + z, y, x = final_rot.as_euler("ZYX") + y = 0.0 # Zero out rotation around y-axis + x = 0.0 # Zero out rotation around x-axis + final_rot = Rotation.from_euler("ZYX", [z, y, x]) * Rotation.from_euler("X", np.pi, degrees=False) + + # We are already in x,y,z,w format + rotation = final_rot.as_quat() + + # Update visualization if enabled + if self._enable_visualization: + self._visualization_pos = position + self._visualization_rot = rotation + self._update_visualization() + + return np.concatenate([position, rotation]) + + def _update_visualization(self): + """Update visualization markers with current pose. + + If visualization is enabled, the target end-effector pose is visualized in the scene. + """ + if self._enable_visualization: + trans = np.array([self._visualization_pos]) + quat = Rotation.from_matrix(self._visualization_rot).as_quat() + rot = np.array([np.array([quat[0], quat[1], quat[2], quat[3]])]) + self._goal_marker.visualize(translations=trans, orientations=rot) + + +@dataclass +class Se3AbsRetargeterCfg(RetargeterCfg): + """Configuration for absolute position retargeter.""" + + zero_out_xy_rotation: bool = True + use_wrist_rotation: bool = False + use_wrist_position: bool = True + enable_visualization: bool = False + bound_hand: DeviceBase.TrackingTarget = DeviceBase.TrackingTarget.HAND_RIGHT + retargeter_type: type[RetargeterBase] = Se3AbsRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_rel_retargeter.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_rel_retargeter.py new file mode 100644 index 000000000000..7b902c39d62a --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/retargeters/manipulator/se3_rel_retargeter.py @@ -0,0 +1,218 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + +from dataclasses import dataclass + +import numpy as np +import torch +from scipy.spatial.transform import Rotation + +from isaaclab.devices.device_base import DeviceBase +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg +from isaaclab.markers import VisualizationMarkers +from isaaclab.markers.config import FRAME_MARKER_CFG + + +class Se3RelRetargeter(RetargeterBase): + """Retargets OpenXR hand tracking data to end-effector commands using relative positioning. + + This retargeter calculates delta poses between consecutive hand joint poses to generate incremental robot movements. + It can either: + - Use the wrist position and orientation + - Use the midpoint between thumb and index finger (pinch position) + + Features: + - Optional constraint to zero out X/Y rotations (keeping only Z-axis rotation) + - Motion smoothing with adjustable parameters + - Optional visualization of the target end-effector pose + """ + + def __init__( + self, + cfg: Se3RelRetargeterCfg, + ): + """Initialize the relative motion retargeter. + + Args: + bound_hand: The hand to track (DeviceBase.TrackingTarget.HAND_LEFT or DeviceBase.TrackingTarget.HAND_RIGHT) + zero_out_xy_rotation: If True, ignore rotations around x and y axes, allowing only z-axis rotation + use_wrist_rotation: If True, use wrist rotation for control instead of averaging finger orientations + use_wrist_position: If True, use wrist position instead of pinch position (midpoint between fingers) + delta_pos_scale_factor: Amplification factor for position changes (higher = larger robot movements) + delta_rot_scale_factor: Amplification factor for rotation changes (higher = larger robot rotations) + alpha_pos: Position smoothing parameter (0-1); higher values track more closely to input, + lower values smooth more + alpha_rot: Rotation smoothing parameter (0-1); higher values track more closely to input, + lower values smooth more + enable_visualization: If True, show a visual marker representing the target end-effector pose + device: The device to place the returned tensor on ('cpu' or 'cuda') + """ + # Store the hand to track + if cfg.bound_hand not in [DeviceBase.TrackingTarget.HAND_LEFT, DeviceBase.TrackingTarget.HAND_RIGHT]: + raise ValueError( + "bound_hand must be either DeviceBase.TrackingTarget.HAND_LEFT or DeviceBase.TrackingTarget.HAND_RIGHT" + ) + super().__init__(cfg) + self.bound_hand = cfg.bound_hand + + self._zero_out_xy_rotation = cfg.zero_out_xy_rotation + self._use_wrist_rotation = cfg.use_wrist_rotation + self._use_wrist_position = cfg.use_wrist_position + self._delta_pos_scale_factor = cfg.delta_pos_scale_factor + self._delta_rot_scale_factor = cfg.delta_rot_scale_factor + self._alpha_pos = cfg.alpha_pos + self._alpha_rot = cfg.alpha_rot + + # Initialize smoothing state + self._smoothed_delta_pos = np.zeros(3) + self._smoothed_delta_rot = np.zeros(3) + + # Define thresholds for small movements + self._position_threshold = 0.001 + self._rotation_threshold = 0.01 + + # Initialize visualization if enabled + self._enable_visualization = cfg.enable_visualization + if cfg.enable_visualization: + frame_marker_cfg = FRAME_MARKER_CFG.copy() + frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + self._goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal")) + self._goal_marker.set_visibility(True) + self._visualization_pos = np.zeros(3) + self._visualization_rot = np.array([0.0, 0.0, 0.0, 1.0]) # xyzw format + + # pose format: [x, y, z, qx, qy, qz, qw] (xyzw quaternion) + self._previous_thumb_tip = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32) + self._previous_index_tip = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32) + self._previous_wrist = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32) + + def retarget(self, data: dict) -> torch.Tensor: + """Convert hand joint poses to robot end-effector command. + + Args: + data: Dictionary mapping tracking targets to joint data dictionaries. + The joint names are defined in isaaclab.devices.openxr.common.HAND_JOINT_NAMES + + Returns: + torch.Tensor: 6D tensor containing position (xyz) and rotation vector (rx,ry,rz) + for the robot end-effector + """ + # Extract key joint poses from the bound hand + hand_data = data[self.bound_hand] + thumb_tip = hand_data.get("thumb_tip") + index_tip = hand_data.get("index_tip") + wrist = hand_data.get("wrist") + + delta_thumb_tip = self._calculate_delta_pose(thumb_tip, self._previous_thumb_tip) + delta_index_tip = self._calculate_delta_pose(index_tip, self._previous_index_tip) + delta_wrist = self._calculate_delta_pose(wrist, self._previous_wrist) + ee_command_np = self._retarget_rel(delta_thumb_tip, delta_index_tip, delta_wrist) + + self._previous_thumb_tip = thumb_tip.copy() + self._previous_index_tip = index_tip.copy() + self._previous_wrist = wrist.copy() + + # Convert to torch tensor + ee_command = torch.tensor(ee_command_np, dtype=torch.float32, device=self._sim_device) + + return ee_command + + def get_requirements(self) -> list[RetargeterBase.Requirement]: + return [RetargeterBase.Requirement.HAND_TRACKING] + + def _calculate_delta_pose(self, joint_pose: np.ndarray, previous_joint_pose: np.ndarray) -> np.ndarray: + """Calculate delta pose from previous joint pose. + + Args: + joint_pose: Current joint pose (position and orientation) + previous_joint_pose: Previous joint pose for the same joint + + Returns: + np.ndarray: 6D array with position delta (xyz) and rotation delta as axis-angle (rx,ry,rz) + """ + delta_pos = joint_pose[:3] - previous_joint_pose[:3] + # Scipy expects x,y,z,w + abs_rotation = Rotation.from_quat(joint_pose[3:7]) + # Scipy expects x,y,z,w + previous_rot = Rotation.from_quat(previous_joint_pose[3:7]) + relative_rotation = abs_rotation * previous_rot.inv() + return np.concatenate([delta_pos, relative_rotation.as_rotvec()]) + + def _retarget_rel(self, thumb_tip: np.ndarray, index_tip: np.ndarray, wrist: np.ndarray) -> np.ndarray: + """Handle relative (delta) pose retargeting. + + Args: + thumb_tip: Delta pose of thumb tip + index_tip: Delta pose of index tip + wrist: Delta pose of wrist + + Returns: + np.ndarray: 6D array with position delta (xyz) and rotation delta (rx,ry,rz) + """ + # Get position + if self._use_wrist_position: + position = wrist[:3] + else: + position = (thumb_tip[:3] + index_tip[:3]) / 2 + + # Get rotation + if self._use_wrist_rotation: + rotation = wrist[3:6] # rx, ry, rz + else: + rotation = (thumb_tip[3:6] + index_tip[3:6]) / 2 + + # Apply zero_out_xy_rotation regardless of rotation source + if self._zero_out_xy_rotation: + rotation[0] = 0 # x-axis + rotation[1] = 0 # y-axis + + # Smooth and scale position + self._smoothed_delta_pos = self._alpha_pos * position + (1 - self._alpha_pos) * self._smoothed_delta_pos + if np.linalg.norm(self._smoothed_delta_pos) < self._position_threshold: + self._smoothed_delta_pos = np.zeros(3) + position = self._smoothed_delta_pos * self._delta_pos_scale_factor + + # Smooth and scale rotation + self._smoothed_delta_rot = self._alpha_rot * rotation + (1 - self._alpha_rot) * self._smoothed_delta_rot + if np.linalg.norm(self._smoothed_delta_rot) < self._rotation_threshold: + self._smoothed_delta_rot = np.zeros(3) + rotation = self._smoothed_delta_rot * self._delta_rot_scale_factor + + # Update visualization if enabled + if self._enable_visualization: + # Convert rotation vector to quaternion and combine with current rotation + delta_quat = Rotation.from_rotvec(rotation).as_quat() # x, y, z, w format + current_rot = Rotation.from_quat([self._visualization_rot[1:], self._visualization_rot[0]]) + self._visualization_rot = Rotation.from_quat(delta_quat) * current_rot + self._visualization_pos = self._visualization_pos + position + # Convert back to x,y,z,w format + self._update_visualization() + + return np.concatenate([position, rotation]) + + def _update_visualization(self): + """Update visualization markers with current pose.""" + if self._enable_visualization: + trans = np.array([self._visualization_pos]) + quat = Rotation.from_matrix(self._visualization_rot).as_quat() + rot = np.array(quat) + self._goal_marker.visualize(translations=trans, orientations=rot) + + +@dataclass +class Se3RelRetargeterCfg(RetargeterCfg): + """Configuration for relative position retargeter.""" + + zero_out_xy_rotation: bool = True + use_wrist_rotation: bool = False + use_wrist_position: bool = True + delta_pos_scale_factor: float = 10.0 + delta_rot_scale_factor: float = 10.0 + alpha_pos: float = 0.5 + alpha_rot: float = 0.5 + enable_visualization: bool = False + bound_hand: DeviceBase.TrackingTarget = DeviceBase.TrackingTarget.HAND_RIGHT + retargeter_type: type[RetargeterBase] = Se3RelRetargeter diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_anchor_utils.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_anchor_utils.py new file mode 100644 index 000000000000..a28bf7e39f8f --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_anchor_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Re-export of XR anchor utilities from their canonical location. + +The canonical definitions live in :mod:`isaaclab_teleop.xr_anchor_utils`. +""" + +from isaaclab_teleop.xr_anchor_utils import * # noqa: F401,F403 diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_cfg.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_cfg.py new file mode 100644 index 000000000000..0bae1caa4621 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/openxr/xr_cfg.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Re-export of XR configuration classes from their canonical location. + +The canonical definitions live in :mod:`isaaclab_teleop.xr_cfg`. +""" + +from isaaclab_teleop.xr_cfg import * # noqa: F401,F403 diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/retargeter_base.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/retargeter_base.py new file mode 100644 index 000000000000..61401cb8f719 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/retargeter_base.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Re-export of legacy retargeter base classes. + +The canonical definitions remain in :mod:`isaaclab.devices.retargeter_base` +because :class:`~isaaclab.devices.DeviceCfg` depends on them for all device +types (keyboard, gamepad, etc.). This module simply re-exports them so that +code under ``isaaclab_teleop.deprecated`` can reference them locally. +""" + +from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg # noqa: F401 + +__all__ = ["RetargeterBase", "RetargeterCfg"] diff --git a/source/isaaclab_teleop/isaaclab_teleop/deprecated/teleop_device_factory.py b/source/isaaclab_teleop/isaaclab_teleop/deprecated/teleop_device_factory.py new file mode 100644 index 000000000000..cb8fe18e8e74 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/deprecated/teleop_device_factory.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory to create teleoperation devices from configuration. + +.. deprecated:: + This module is deprecated. Please use :class:`isaaclab_teleop.IsaacTeleopDevice` + instead of :func:`create_teleop_device`. +""" + +from __future__ import annotations + +import inspect +import logging +import warnings +from collections.abc import Callable +from typing import cast + +from isaaclab.devices import DeviceBase, DeviceCfg +from isaaclab.devices.retargeter_base import RetargeterBase + +# import logger +logger = logging.getLogger(__name__) + + +def create_teleop_device( + device_name: str, devices_cfg: dict[str, DeviceCfg], callbacks: dict[str, Callable] | None = None +) -> DeviceBase: + """Create a teleoperation device based on configuration. + + .. deprecated:: + Use :class:`isaaclab_teleop.IsaacTeleopDevice` instead. + + Args: + device_name: The name of the device to create (must exist in devices_cfg) + devices_cfg: Dictionary of device configurations + callbacks: Optional dictionary of callbacks to register with the device + Keys are the button/gesture names, values are callback functions + + Returns: + The configured teleoperation device + + Raises: + ValueError: If the device name is not found in the configuration + ValueError: If the device configuration type is not supported + """ + warnings.warn( + "create_teleop_device is deprecated. Please use isaaclab_teleop.IsaacTeleopDevice instead.", + DeprecationWarning, + stacklevel=2, + ) + if device_name not in devices_cfg: + raise ValueError(f"Device '{device_name}' not found in teleop device configurations") + + device_cfg = devices_cfg[device_name] + callbacks = callbacks or {} + + # Determine constructor from the configuration itself + device_constructor = getattr(device_cfg, "class_type", None) + if device_constructor is None: + raise ValueError( + f"Device configuration '{device_name}' does not declare class_type. " + "Set cfg.class_type to the concrete DeviceBase subclass." + ) + check_cls = device_constructor._resolve() if hasattr(device_constructor, "_resolve") else device_constructor + if not issubclass(check_cls, DeviceBase): + raise TypeError(f"class_type for '{device_name}' must be a subclass of DeviceBase; got {device_constructor}") + + # Try to create retargeters if they are configured + retargeters = [] + if hasattr(device_cfg, "retargeters") and device_cfg.retargeters is not None: + try: + # Create retargeters based on configuration using per-config retargeter_type + for retargeter_cfg in device_cfg.retargeters: + retargeter_constructor = getattr(retargeter_cfg, "retargeter_type", None) + if retargeter_constructor is None: + raise ValueError( + f"Retargeter configuration {type(retargeter_cfg).__name__} does not declare retargeter_type. " + "Set cfg.retargeter_type to the concrete RetargeterBase subclass." + ) + if not issubclass(retargeter_constructor, RetargeterBase): + raise TypeError( + f"retargeter_type for {type(retargeter_cfg).__name__} must be a subclass of RetargeterBase; got" + f" {retargeter_constructor}" + ) + retargeters.append(retargeter_constructor(retargeter_cfg)) + + except NameError as e: + raise ValueError(f"Failed to create retargeters: {e}") + + # Build constructor kwargs based on signature + constructor_params = inspect.signature(device_constructor).parameters + params: dict = {"cfg": device_cfg} + if "retargeters" in constructor_params: + params["retargeters"] = retargeters + device = cast(DeviceBase, device_constructor(**params)) + + # Register callbacks + for key, callback in callbacks.items(): + device.add_callback(key, callback) + + logging.info(f"Created teleoperation device: {device_name}") + return device diff --git a/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_cfg.py b/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_cfg.py new file mode 100644 index 000000000000..f94a63d57589 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_cfg.py @@ -0,0 +1,161 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration class for IsaacTeleop-based teleoperation.""" + +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import MISSING, field +from pathlib import Path +from typing import TYPE_CHECKING + +from isaaclab.utils import configclass + +from .control_events import TELEOP_CONTROL_CHANNEL_UUID +from .xr_cfg import XrCfg + +_CLOUDXR_ENV_DIR = Path(__file__).resolve().parent + +CLOUDXR_AVP_ENV: str = str(_CLOUDXR_ENV_DIR / "avp-cloudxr.env") +"""Absolute path to the Apple Vision Pro CloudXR ``.env`` profile (``auto-native``).""" + +CLOUDXR_JS_ENV: str = str(_CLOUDXR_ENV_DIR / "cloudxrjs-cloudxr.env") +"""Absolute path to the CloudXR JS (Quest/Pico) ``.env`` profile (``auto-webrtc``).""" + +if TYPE_CHECKING: + from isaacteleop.retargeting_engine.interface import BaseRetargeter, OutputCombiner + from isaacteleop.teleop_session_manager import PluginConfig + + +@configclass +class IsaacTeleopCfg: + """Configuration for IsaacTeleop-based teleoperation. + + This configuration class defines the parameters needed to create a IsaacTeleop + teleoperation session integrated with Isaac Lab environments. + + The pipeline_builder is a callable that constructs the IsaacTeleop retargeting + pipeline. It should return an OutputCombiner with a single "action" output + that contains the flattened action tensor (typically via TensorReorderer). + + If the pipeline builder also produces retargeters that should be exposed in + the tuning UI, the env cfg should call the builder, unpack the results, and + populate both ``pipeline_builder`` and ``retargeters_to_tune`` explicitly. + Both fields must be callables (lambdas / functions) so they survive the + ``deepcopy`` performed by ``@configclass`` on mutable attributes. + + Example: + .. code-block:: python + + def build_pipeline(): + controllers = ControllersSource(name="controllers") + se3 = Se3AbsRetargeter(cfg, name="ee_pose") + # ... connect and flatten with TensorReorderer ... + pipeline = OutputCombiner({"action": reorderer.output("output")}) + return pipeline, [se3] # return retargeters separately + + + pipeline, retargeters = build_pipeline() + teleop_cfg = IsaacTeleopCfg( + xr_cfg=XrCfg(anchor_pos=(0.5, 0.0, 0.5)), + pipeline_builder=lambda: pipeline, + retargeters_to_tune=lambda: retargeters, + ) + """ + + xr_cfg: XrCfg = field(default_factory=XrCfg) + """XR anchor configuration for positioning the user in the simulation. + + This includes anchor position, rotation, and optional dynamic anchoring + to follow a prim (e.g., robot base) during locomotion tasks. + """ + + pipeline_builder: Callable[[], OutputCombiner] = MISSING + """Callable that builds the IsaacTeleop retargeting pipeline. + + The function should return an OutputCombiner with an "action" output + containing the flattened action tensor matching the Isaac Lab action space. + Use TensorReorderer to flatten multiple retargeter outputs into a single array. + + To expose retargeters for the tuning UI, populate :attr:`retargeters_to_tune` + directly when constructing this config rather than encoding them into the + builder's return value. + """ + + plugins: list[PluginConfig] = field(default_factory=list) + """List of IsaacTeleop plugin configurations. + + Plugins can provide additional functionality like synthetic hand tracking + from controller inputs. + """ + + sim_device: str = "cuda:0" + """Torch device string for placing output action tensors.""" + + teleoperation_active_default: bool = False + """Whether teleoperation should be active by default when the session starts. + + When ``False`` (the default), the teleop session remains inactive until a + ``"START"`` command is received from xr_core via the message bus. + """ + + retargeters_to_tune: Callable[[], list[BaseRetargeter]] | None = None + """Optional callable returning retargeters to expose in the tuning UI. + + Must be a callable (e.g. ``lambda: [retargeter1, retargeter2]``) rather + than a plain list because ``@configclass`` deep-copies mutable attributes + and retargeter objects often contain non-picklable C++/SWIG handles. + Wrapping in a callable makes the value opaque to ``deepcopy``. + + When set and the tuning UI is enabled, the returned retargeters will be + displayed in the ``MultiRetargeterTuningUIImGui`` window, allowing + real-time adjustment of their tunable parameters. Only retargeters that + have a ``ParameterState`` (i.e. tunable parameters) will appear. + + If ``None``, the tuning UI will not be opened. + """ + + control_channel_uuid: bytes | None = TELEOP_CONTROL_CHANNEL_UUID + """16-byte UUID for the teleop control message channel. + + Defaults to :data:`~isaaclab_teleop.TELEOP_CONTROL_CHANNEL_UUID` + (``uuid5(NAMESPACE_DNS, "teleop_command")``), which is the well-known + channel both the Isaac Lab server and CloudXR JS client use to + exchange start/stop/reset commands. + + When set, a ``teleop_control_pipeline`` is created automatically + using :class:`~isaaclab_teleop.teleop_message_processor.TeleopMessageProcessor` + and :class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager`. + The remote client sends UTF-8 control commands over the OpenXR opaque + data channel identified by this UUID, and the results are exposed via + :func:`~isaaclab_teleop.poll_control_events`. + + Set to ``None`` to disable the control channel entirely. + """ + + target_frame_prim_path: str | None = None + """Optional USD prim path whose world frame becomes the target coordinate + frame for all output poses. + + When set, the device automatically reads this prim's world transform each + frame and uses its inverse as the ``target_T_world`` rebase matrix in + :meth:`~isaaclab_teleop.IsaacTeleopDevice.advance`. An explicit + ``target_T_world`` argument to :meth:`~isaaclab_teleop.IsaacTeleopDevice.advance` + takes precedence over this config. + + Typical usage: set to the robot base link prim path so that an IK + controller receives end-effector poses in the robot's base frame. + + Example:: + + IsaacTeleopCfg( + target_frame_prim_path="/World/envs/env_0/Robot/base_link", + ... + ) + """ + + app_name: str = "IsaacLabTeleop" + """Application name for the IsaacTeleop session.""" diff --git a/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_device.py b/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_device.py new file mode 100644 index 000000000000..3f8c565a7e21 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/isaac_teleop_device.py @@ -0,0 +1,464 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""IsaacTeleop-based teleoperation device for Isaac Lab.""" + +from __future__ import annotations + +import logging +from collections.abc import Callable +from typing import TYPE_CHECKING + +import numpy as np +import torch + +from .command_handler import CommandHandler +from .control_events import ControlEvents +from .isaac_teleop_cfg import IsaacTeleopCfg +from .session_lifecycle import TeleopSessionLifecycle +from .xr_anchor_manager import XrAnchorManager + +if TYPE_CHECKING: + from .session_lifecycle import SupportsDLPack + +logger = logging.getLogger(__name__) + + +class IsaacTeleopDevice: + """A IsaacTeleop-based teleoperation device for Isaac Lab. + + This device provides an interface between IsaacTeleop's retargeting pipeline + and Isaac Lab environments. It composes three focused collaborators: + + * :class:`XrAnchorManager` -- XR anchor prim setup, synchronization, + and coordinate-frame transform computation. + * :class:`TeleopSessionLifecycle` -- pipeline building, OpenXR handle + acquisition, session creation/destruction, and action-tensor extraction. + * :class:`CommandHandler` -- callback registration for START / STOP / RESET + commands, bridged from the pipeline-based control events. + + Together they manage: + + 1. XR anchor configuration and synchronization + 2. IsaacTeleop session lifecycle + 3. Action tensor generation from the retargeting pipeline + + The device uses IsaacTeleop's TensorReorderer to flatten pipeline outputs + into a single action tensor matching the environment's action space. + + Frame rebasing: + By default, all output poses are expressed in the simulation world + frame. When an application needs poses in a different frame (e.g. + robot base link for IK), there are two options: + + * **Config-driven** (recommended): set + :attr:`~IsaacTeleopCfg.target_frame_prim_path` to the USD prim + whose frame the output should be expressed in. The device reads + the prim's world transform each frame and applies the rebase + automatically. + * **Explicit**: pass a ``target_T_world`` matrix directly to + :meth:`advance`. + + In both cases the device composes + ``target_T_world @ world_T_anchor`` before feeding the matrix into + the retargeting pipeline, so all resulting poses are expressed in the + target frame. + + Teleop commands: + The device supports callbacks for START, STOP, and RESET commands + that can be triggered via the message-channel control pipeline or + registered directly via :meth:`add_callback`. + + Example: + .. code-block:: python + + cfg = IsaacTeleopCfg( + pipeline_builder=my_pipeline_builder, + sim_device="cuda:0", + ) + + # Poses in world frame (default) + with IsaacTeleopDevice(cfg) as device: + while running: + action = device.advance() + env.step(action.repeat(num_envs, 1)) + + # Config-driven rebase into robot base frame + cfg.target_frame_prim_path = "/World/Robot/base_link" + with IsaacTeleopDevice(cfg) as device: + while running: + action = device.advance() + env.step(action.repeat(num_envs, 1)) + + # Explicit rebase into robot base frame + with IsaacTeleopDevice(cfg) as device: + while running: + robot_T_world = get_robot_base_transform() + action = device.advance(target_T_world=robot_T_world) + env.step(action.repeat(num_envs, 1)) + """ + + def __init__( + self, + cfg: IsaacTeleopCfg, + cloudxr_env_file: str | None = None, + auto_launch_cloudxr: bool = True, + ): + """Initialize the IsaacTeleop device. + + Args: + cfg: Configuration object for IsaacTeleop settings. + cloudxr_env_file: Optional path to a CloudXR ``.env`` file. + When provided and *auto_launch_cloudxr* is ``True``, the + CloudXR runtime is launched automatically during session + start. When ``None``, no CloudXR runtime is launched. + auto_launch_cloudxr: Whether to auto-launch the CloudXR runtime + when *cloudxr_env_file* is set. Ignored when + *cloudxr_env_file* is ``None``. + """ + self._cfg = cfg + + self._anchor_manager = XrAnchorManager(cfg.xr_cfg) + self._command_handler = CommandHandler() + self._session_lifecycle = TeleopSessionLifecycle( + cfg, + cloudxr_env_file=cloudxr_env_file, + auto_launch_cloudxr=auto_launch_cloudxr, + ) + + self._prev_right_a_pressed = False + self._prev_control_is_active: bool | None = None + + def __del__(self): + """Clean up resources when the object is destroyed.""" + if hasattr(self, "_command_handler"): + self._command_handler.cleanup() + if hasattr(self, "_anchor_manager"): + self._anchor_manager.cleanup() + + def __str__(self) -> str: + """Returns a string containing information about the IsaacTeleop device.""" + xr_cfg = self._cfg.xr_cfg + msg = f"IsaacTeleop Device: {self.__class__.__name__}\n" + msg += f"\tAnchor Position: {xr_cfg.anchor_pos}\n" + msg += f"\tAnchor Rotation: {xr_cfg.anchor_rot}\n" + if xr_cfg.anchor_prim_path is not None: + msg += f"\tAnchor Prim Path: {xr_cfg.anchor_prim_path} (Dynamic Anchoring)\n" + else: + msg += "\tAnchor Mode: Static (Root Level)\n" + msg += f"\tSim Device: {self._cfg.sim_device}\n" + msg += f"\tApp Name: {self._cfg.app_name}\n" + + msg += "\t----------------------------------------------\n" + msg += "\tAvailable Commands:\n" + callbacks = self._command_handler.callbacks + start_avail = "START" in callbacks + stop_avail = "STOP" in callbacks + reset_avail = "RESET" in callbacks + msg += f"\t\tStart Teleoperation: {'registered' if start_avail else 'not registered'}\n" + msg += f"\t\tStop Teleoperation: {'registered' if stop_avail else 'not registered'}\n" + msg += f"\t\tReset Environment: {'registered' if reset_avail else 'not registered'}\n" + + return msg + + def __enter__(self) -> IsaacTeleopDevice: + """Enter the context manager and prepare the IsaacTeleop session. + + Builds the retargeting pipeline and attempts to acquire OpenXR handles + from Kit's XR bridge extension. If the handles are not yet available + (e.g. the user has not clicked "Start AR"), session creation is deferred + and will be retried automatically on each :meth:`advance` call. + + Returns: + Self for context manager protocol. + """ + self._session_lifecycle.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the context manager and clean up the IsaacTeleop session.""" + self._anchor_manager.cleanup() + self._session_lifecycle.stop(exc_type, exc_val, exc_tb) + return False + + def reset(self) -> None: + """Reset the device state. + + Resets the XR anchor synchronizer and schedules a + ``reset`` :class:`~isaacteleop.retargeting_engine.interface.execution_events.ExecutionEvents` + for the next pipeline step so that all retargeters reinitialize + their cross-step state. + """ + self._anchor_manager.reset() + self._session_lifecycle.request_reset() + + @property + def last_control_events(self) -> ControlEvents: + """Control events from the most recent :meth:`advance`. + + Returns a :class:`ControlEvents` derived from the teleop control + pipeline. When no control channel is configured, returns a + default (no-op) :class:`ControlEvents`. + """ + return self._session_lifecycle.last_control_events + + def add_callback(self, key: str, func: Callable) -> None: + """Add a callback function for teleop commands. + + Args: + key: The command type to bind to. Valid values are "START", "STOP", "RESET", and "R". + func: The function to call when the command is received. Should take no arguments. + """ + self._command_handler.add_callback(key, func) + + def advance(self, target_T_world: np.ndarray | torch.Tensor | SupportsDLPack | None = None) -> torch.Tensor | None: + """Process current device state and return control commands. + + If the IsaacTeleop session has not been started yet (because the OpenXR + handles were not available at ``__enter__`` time), this method will + attempt to start it on each call. Once the user clicks "Start AR" and + the handles become available, the session is created transparently. + + Args: + target_T_world: Optional 4x4 transform matrix that rebases all + output poses into an arbitrary target coordinate frame. When + provided, the matrix sent to the retargeting pipeline becomes + ``target_T_world @ world_T_anchor`` instead of just + ``world_T_anchor``, so all resulting poses are expressed in + the target frame rather than the simulation world frame. + + Typical use case: pass ``robot_base_T_world`` so that an IK + controller receives end-effector poses in the robot's base + link frame. + + Accepts any object supporting the DLPack buffer protocol + (``__dlpack__``), including :class:`numpy.ndarray`, + :class:`torch.Tensor`, and ``wp.array``. + + When ``None`` and + :attr:`~IsaacTeleopCfg.target_frame_prim_path` is set, the + transform is computed automatically by reading the prim's + world matrix from Fabric and inverting it. + + Returns: + A flattened action :class:`torch.Tensor` ready for the Isaac Lab + environment, or ``None`` if the session has not started yet + (e.g. still waiting for the user to start AR). + + Raises: + RuntimeError: If called outside of a context manager. + """ + # Auto-compute target_T_world from config if not explicitly provided + if target_T_world is None and self._cfg.target_frame_prim_path is not None: + target_T_world = self._get_target_frame_T_world() + + # Step the session (handles lazy start and action extraction) + action = self._session_lifecycle.step( + anchor_world_matrix_fn=self._anchor_manager.get_world_matrix, + target_T_world=target_T_world, + ) + + if action is not None: + # Poll controller buttons (e.g. toggle anchor rotation on right 'A' press) + self._poll_buttons() + + self._dispatch_control_callbacks() + + return action + + # ------------------------------------------------------------------ + # Control event -> callback bridge + # ------------------------------------------------------------------ + + def _dispatch_control_callbacks(self) -> None: + """Fire legacy callbacks when control events indicate a state change. + + This bridges the pipeline-based :class:`ControlEvents` with the + callback-based :class:`CommandHandler` so that scripts which registered + callbacks via :meth:`add_callback` still receive dispatches. + + Only fires START/STOP when ``is_active`` transitions between ``True`` + and ``False``; initial transitions from ``None`` are ignored to avoid + spurious callbacks during ``DefaultTeleopStateManager``'s + STOPPED -> PAUSED progression. + """ + from .control_events import _NO_OP_EVENTS + + events = self._session_lifecycle.last_control_events + if events is _NO_OP_EVENTS: + return + if events.should_reset: + self._command_handler.fire("RESET") + self._anchor_manager.reset() + if events.is_active is not None: + if self._prev_control_is_active is not None and events.is_active != self._prev_control_is_active: + self._command_handler.fire("START" if events.is_active else "STOP") + self._prev_control_is_active = events.is_active + + # ------------------------------------------------------------------ + # Target frame transform (config-driven rebase) + # ------------------------------------------------------------------ + + def _get_target_frame_T_world(self) -> np.ndarray | None: + """Read the target-frame prim's world matrix from Fabric and return its inverse. + + Uses USDRT to read the prim's hierarchical world matrix, matching the + pattern used by :class:`XrAnchorSynchronizer` for anchor prim reads. + + Returns: + A (4, 4) float32 :class:`numpy.ndarray` representing the inverse + of the prim's world transform (i.e. ``target_T_world``), or + ``None`` if the prim cannot be read. + """ + try: + import omni.usd + import usdrt + from pxr import UsdUtils + from usdrt import Rt + + stage = omni.usd.get_context().get_stage() + stage_cache = UsdUtils.StageCache.Get() + stage_id = stage_cache.GetId(stage).ToLongInt() + if stage_id < 0: + stage_id = stage_cache.Insert(stage).ToLongInt() + rt_stage = usdrt.Usd.Stage.Attach(stage_id) + if rt_stage is None: + return None + + rt_prim = rt_stage.GetPrimAtPath(self._cfg.target_frame_prim_path) + if not rt_prim.IsValid(): + return None + + rt_xformable = Rt.Xformable(rt_prim) + if not rt_xformable.GetPrim().IsValid(): + return None + + world_matrix_attr = rt_xformable.GetFabricHierarchyWorldMatrixAttr() + if world_matrix_attr is None: + return None + + rt_matrix = world_matrix_attr.Get() + if rt_matrix is None: + return None + + pos = rt_matrix.ExtractTranslation() + rt_quat = rt_matrix.ExtractRotationQuat() + + from scipy.spatial.transform import Rotation + + quat_xyzw = [ + float(rt_quat.GetImaginary()[0]), + float(rt_quat.GetImaginary()[1]), + float(rt_quat.GetImaginary()[2]), + float(rt_quat.GetReal()), + ] + + R = Rotation.from_quat(quat_xyzw).as_matrix().astype(np.float32) + t = np.array([float(pos[0]), float(pos[1]), float(pos[2])], dtype=np.float32) + + inv_mat = np.eye(4, dtype=np.float32) + inv_mat[:3, :3] = R.T + inv_mat[:3, 3] = -(R.T @ t) + return inv_mat + except Exception as e: + logger.warning(f"Failed to read target frame prim '{self._cfg.target_frame_prim_path}': {e}") + return None + + # ------------------------------------------------------------------ + # Controller button polling (glue between session and anchor manager) + # ------------------------------------------------------------------ + + def _poll_buttons(self) -> None: + """Poll controller buttons and trigger actions on rising edges. + + Called once per :meth:`advance` frame, after ``session.step()`` has + executed the pipeline so the controller ``TensorGroup`` is fresh. + + Currently handles: + * Right controller primary button ("A") -- toggles anchor rotation. + """ + from isaacteleop.retargeting_engine.tensor_types import ControllerInputIndex + + right_data = self._session_lifecycle.last_right_controller + if right_data is None or right_data.is_none: + return + + current = float(right_data[ControllerInputIndex.PRIMARY_CLICK]) > 0.5 + if current and not self._prev_right_a_pressed: + self._anchor_manager.toggle_anchor_rotation() + self._prev_right_a_pressed = current + + +def _enable_teleop_bridge() -> None: + """Enable the XR teleop bridge extension and configure carb settings. + + Must be called after the Omniverse AppLauncher has started. + """ + import carb.settings + import omni.kit.app + + carb.settings.get_settings().set("/persistent/xr/openxr/disableInputBindings", True) + carb.settings.get_settings().set('/xr/openxr/components/"isaacsim.kit.xr.teleop.bridge"/enabled', True) + ext_manager = omni.kit.app.get_app().get_extension_manager() + ext_manager.set_extension_enabled_immediate("isaacsim.kit.xr.teleop.bridge", True) + + +def create_isaac_teleop_device( + cfg: IsaacTeleopCfg, + sim_device: str | None = None, + callbacks: dict[str, Callable] | None = None, + cloudxr_env_file: str | None = None, + auto_launch_cloudxr: bool = True, +) -> IsaacTeleopDevice: + """Create an :class:`IsaacTeleopDevice` with required Omniverse extension setup. + + This helper centralises the boilerplate that every script must execute + before constructing an :class:`IsaacTeleopDevice`: + + 1. Disable default OpenXR input bindings (prevents conflicts). + 2. Enable the ``isaacsim.kit.xr.teleop.bridge`` extension. + 3. Optionally override :attr:`IsaacTeleopCfg.sim_device` so action tensors + land on the same device the caller uses for the simulation. + + Note: + When *sim_device* is provided, ``cfg.sim_device`` is mutated in place + before the device is constructed. + + Args: + cfg: IsaacTeleop configuration. + sim_device: If provided, overrides ``cfg.sim_device`` so action tensors + are placed on the requested torch device (e.g. ``"cuda:0"``). + callbacks: Optional mapping of command keys (e.g. ``"START"``, ``"STOP"``, + ``"RESET"``) to callables registered on the device. + cloudxr_env_file: Optional path to a CloudXR ``.env`` file. When + provided and *auto_launch_cloudxr* is ``True``, the CloudXR + runtime and WSS proxy are launched automatically during session + start. When ``None``, no CloudXR runtime is launched. + auto_launch_cloudxr: Whether to auto-launch the CloudXR runtime + when *cloudxr_env_file* is set. Set to ``False`` to skip the + launch (e.g. when running the runtime externally). Ignored + when *cloudxr_env_file* is ``None``. + + Returns: + A fully configured :class:`IsaacTeleopDevice` ready for use in a + ``with`` block. + """ + _enable_teleop_bridge() + + if sim_device is not None: + cfg.sim_device = sim_device + + logger.info("Using IsaacTeleop stack for teleoperation") + device = IsaacTeleopDevice( + cfg, + cloudxr_env_file=cloudxr_env_file, + auto_launch_cloudxr=auto_launch_cloudxr, + ) + + if callbacks is not None: + for key, func in callbacks.items(): + device.add_callback(key, func) + + return device diff --git a/source/isaaclab_teleop/isaaclab_teleop/session_lifecycle.py b/source/isaaclab_teleop/isaaclab_teleop/session_lifecycle.py new file mode 100644 index 000000000000..fa5f36f658e0 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/session_lifecycle.py @@ -0,0 +1,819 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""IsaacTeleop session lifecycle management.""" + +from __future__ import annotations + +import logging +import os +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, Protocol + +import numpy as np +import torch + +if TYPE_CHECKING: + from isaacteleop.cloudxr import CloudXRLauncher + from isaacteleop.oxr import OpenXRSessionHandles + from isaacteleop.retargeting_engine.interface.execution_events import ExecutionEvents + from isaacteleop.retargeting_engine_ui import MultiRetargeterTuningUIImGui + from isaacteleop.teleop_session_manager import TeleopSession + +from .control_events import _NO_OP_EVENTS, ControlEvents +from .isaac_teleop_cfg import IsaacTeleopCfg +from .teleop_message_processor import TeleopMessageProcessor + + +class SupportsDLPack(Protocol): + """Duck type for objects supporting the DLPack buffer protocol. + + Satisfied by :class:`torch.Tensor`, :class:`numpy.ndarray` (>= 1.22), + ``wp.array``, CuPy arrays, JAX arrays, and other frameworks. + """ + + def __dlpack__(self, *, stream: Any = ...) -> Any: ... + + def __dlpack_device__(self) -> tuple[int, int]: ... + + +logger = logging.getLogger(__name__) + +_DLDEVICE_CPU = 1 + + +def _to_numpy_4x4(mat: np.ndarray | torch.Tensor | SupportsDLPack) -> np.ndarray: + """Convert a (4, 4) transform to a float32 numpy array. + + Prefers the DLPack buffer protocol (``__dlpack__``) for zero-copy + interop with torch, warp, cupy, jax, and other frameworks. + + Args: + mat: A (4, 4) transform matrix. + + Returns: + A (4, 4) float32 :class:`numpy.ndarray`. + """ + if isinstance(mat, np.ndarray): + return np.asarray(mat, dtype=np.float32) + if isinstance(mat, torch.Tensor): + return np.asarray(np.from_dlpack(mat.detach().cpu()), dtype=np.float32) + if hasattr(mat, "__dlpack_device__"): + device_type, _ = mat.__dlpack_device__() + if device_type == _DLDEVICE_CPU: + return np.asarray(np.from_dlpack(mat), dtype=np.float32) + # Non-CPU DLPack source (e.g. CUDA wp.array): .numpy() typically + # handles the device-to-host transfer internally. + if hasattr(mat, "numpy"): + return np.asarray(mat.numpy(), dtype=np.float32) # type: ignore[union-attr] + raise TypeError(f"Cannot convert non-CPU DLPack array of type {type(mat).__name__} to numpy") + if hasattr(mat, "numpy"): + return np.asarray(mat.numpy(), dtype=np.float32) # type: ignore[union-attr] + return np.asarray(mat, dtype=np.float32) + + +def _execution_events_to_control(ee: ExecutionEvents) -> ControlEvents: + """Map TeleopCore :class:`ExecutionEvents` to the script-facing :class:`ControlEvents`.""" + from isaacteleop.retargeting_engine.interface.execution_events import ExecutionState + + if ee.execution_state == ExecutionState.RUNNING: + is_active: bool | None = True + elif ee.execution_state in (ExecutionState.PAUSED, ExecutionState.STOPPED): + is_active = False + else: + is_active = None + return ControlEvents(is_active=is_active, should_reset=ee.reset) + + +class TeleopSessionLifecycle: + """Manages the IsaacTeleop session lifecycle. + + This class is responsible for: + + 1. Building the retargeting pipeline from configuration + 2. Adding a parallel ``ControllersSource`` for button-state access + 3. Building the optional ``teleop_control_pipeline`` for headset-driven + start/stop/reset via a message channel + 4. Acquiring OpenXR handles from Kit's XR bridge extension + 5. Creating, entering, and exiting the ``TeleopSession`` + 6. Building external inputs for pipeline leaf nodes (e.g. world-to-anchor transform) + 7. Stepping the session and extracting the flattened action tensor + 8. Managing the optional retargeting tuning UI + """ + + WORLD_T_ANCHOR_INPUT_NAME = "world_T_anchor" + """Well-known name for the ValueInput node that receives the + world-to-XR-anchor 4x4 transform matrix.""" + + _CONTROLLER_RIGHT_KEY = "_controller_right" + """Internal pipeline output key for the right controller ``TensorGroup``.""" + + def __init__( + self, + cfg: IsaacTeleopCfg, + cloudxr_env_file: str | None = None, + auto_launch_cloudxr: bool = True, + ): + """Initialize the session lifecycle manager. + + Args: + cfg: Configuration for IsaacTeleop settings. + cloudxr_env_file: Optional path to a CloudXR ``.env`` file. + When provided, the CloudXR runtime is launched automatically + during :meth:`start` (unless *auto_launch_cloudxr* is + ``False``). When ``None``, no CloudXR runtime is launched. + auto_launch_cloudxr: Whether to auto-launch the CloudXR runtime + when *cloudxr_env_file* is set. Ignored when + *cloudxr_env_file* is ``None``. + """ + self._cfg = cfg + self._device = torch.device(cfg.sim_device) + self._cloudxr_env_file = cloudxr_env_file + self._auto_launch_cloudxr = auto_launch_cloudxr + + # Session state (populated during start) + self._session: TeleopSession | None = None + self._pipeline = None + self._teleop_control_pipeline = None + self._message_processor: TeleopMessageProcessor | None = None + self._last_right_controller = None + self._session_start_deferred_logged = False + # Fallback for host-initiated resets when no control pipeline is configured + self._pending_reset = False + + # CloudXR runtime launcher (created in start if configured, stopped in stop) + self._cloudxr_launcher: CloudXRLauncher | None = None + + # Retargeting tuning UI (created in start, closed in stop) + self._retargeting_ui_ctx: MultiRetargeterTuningUIImGui | None = None + self._retargeting_ui = None + + try: + # Importing bridge also performs polyfill of missing omni.kit.xr.system.openxr functions. + import isaacsim.kit.xr.teleop.bridge as bridge + + subscribe_required_extensions = getattr(bridge, "subscribe_required_extensions", None) + if callable(subscribe_required_extensions): + self._required_extensions_subscription = subscribe_required_extensions( + self._on_request_required_extensions + ) + else: + logger.info( + "isaacsim.kit.xr.teleop.bridge.subscribe_required_extensions not available; " + "skipping required extensions subscription" + ) + except (ImportError, ModuleNotFoundError): + logger.info("isaacsim.kit.xr.teleop.bridge not available; IsaacTeleop will create its own OpenXR session") + + try: + import carb.settings + + # Subscribe to the setting (may not fire when Kit closes; see pre-shutdown below) + self._xr_enabled_subscription = carb.settings.get_settings().subscribe_to_node_change_events( + "/xr/enabled", + self._on_xr_enabled_changed, + ) + except (ImportError, ModuleNotFoundError): + logger.info("carb.settings not available; IsaacTeleop will not be able to detect XR enabled state") + + try: + import omni.kit.app + from carb.eventdispatcher import get_eventdispatcher + + # Subscribe to Kit pre-shutdown so we tear down our session before XRCore + # tears down the OpenXR instance/session (XRCore uses order=0; lowest runs first). + # The /xr/enabled setting often does not fire on close, so this is required. + self._pre_shutdown_subscription = get_eventdispatcher().observe_event( + event_name=omni.kit.app.GLOBAL_EVENT_PRE_SHUTDOWN, + on_event=self._on_pre_shutdown, + observer_name="IsaacTeleop session lifecycle", + order=-100, + ) + except (ImportError, ModuleNotFoundError): + logger.info("omni.kit.app/carb.eventdispatcher not available; IsaacTeleop will not clean up on Kit close") + + @property + def is_active(self) -> bool: + """Whether the teleop session is currently running.""" + return self._session is not None + + @property + def pipeline(self): + """The retargeting pipeline, or ``None`` if not yet built.""" + return self._pipeline + + @property + def last_right_controller(self): + """Right controller ``TensorGroup`` from the most recent step, or ``None``. + + The ``TensorGroup`` follows the ``ControllerInput`` schema. Button + fields can be read by index (e.g. index 4 = ``primary_click``, + index 11 = ``is_active``). + """ + return self._last_right_controller + + @property + def has_control_channel(self) -> bool: + """Whether a message-channel-based control pipeline is configured.""" + return self._message_processor is not None + + @property + def last_control_events(self) -> ControlEvents: + """Control events from the most recent :meth:`step`. + + When a ``teleop_control_pipeline`` is configured, derives + :class:`ControlEvents` from + ``session.last_context.execution_events``. Otherwise returns a + default (no-op) :class:`ControlEvents`. + """ + if self._message_processor is None: + return _NO_OP_EVENTS + if self._session is None: + return _NO_OP_EVENTS + ctx = self._session.last_context + if ctx is None: + return _NO_OP_EVENTS + return _execution_events_to_control(ctx.execution_events) + + def request_reset(self) -> None: + """Schedule a reset for the next pipeline step. + + When a control pipeline is configured, the reset flows through + :meth:`TeleopMessageProcessor.inject_reset` so + :class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager` + processes it normally. Otherwise falls back to an + ``execution_events`` override on the next :meth:`step` call. + + If the control channel already processed a reset this frame, + this method is a no-op to avoid a redundant second reset pulse. + """ + if self.last_control_events.should_reset: + return + if self._message_processor is not None: + self._message_processor.inject_reset() + else: + self._pending_reset = True + + # ------------------------------------------------------------------ + # Lifecycle: start / stop + # ------------------------------------------------------------------ + + def start(self) -> None: + """Build the pipeline and attempt to start the session. + + If a CloudXR env file was provided and auto-launch is enabled, + the CloudXR runtime and WSS proxy are launched first. + + Builds the retargeting pipeline, wraps it with a parallel + ``ControllersSource`` for button-state access, builds the optional + ``teleop_control_pipeline`` for message-channel control, attempts + to acquire OpenXR handles, and opens the retargeting tuning UI if + retargeters are configured. + + If the OpenXR handles are not yet available (e.g. user hasn't clicked + "Start AR"), session creation is deferred and will be retried on each + :meth:`step` call. + """ + if self._cloudxr_env_file is not None: + self._ensure_cloudxr_runtime() + + from isaacteleop.retargeting_engine.deviceio_source_nodes import ControllersSource + from isaacteleop.retargeting_engine.interface import OutputCombiner + + user_pipeline = self._cfg.pipeline_builder() + self._session_start_deferred_logged = False + self._last_right_controller = None + + button_controllers = ControllersSource("_button_controllers") + pipeline_outputs: dict[str, Any] = { + "action": user_pipeline.output("action"), + self._CONTROLLER_RIGHT_KEY: button_controllers.output(ControllersSource.RIGHT), + } + self._pipeline = OutputCombiner(pipeline_outputs) + + # Build optional teleop_control_pipeline for message-channel control + self._teleop_control_pipeline = None + self._message_processor = None + if self._cfg.control_channel_uuid is not None: + self._teleop_control_pipeline, self._message_processor = self._build_control_pipeline( + self._cfg.control_channel_uuid + ) + + # Try to start the session now; it may be deferred + self._try_start_session() + + # Open the retargeting tuning UI and keep it alive until stop() + retargeters = self._cfg.retargeters_to_tune() if self._cfg.retargeters_to_tune else [] + if retargeters: + from isaacteleop.retargeting_engine_ui import MultiRetargeterTuningUIImGui + + print("Opening Retargeting UI...") + self._retargeting_ui_ctx = MultiRetargeterTuningUIImGui(retargeters, title="Hand Retargeting Tuning") + self._retargeting_ui = self._retargeting_ui_ctx.__enter__() + + def stop(self, exc_type=None, exc_val=None, exc_tb=None) -> None: + """Shut down the session and clean up resources. + + Closes the retargeting tuning UI and exits the ``TeleopSession`` + context manager. If the underlying OpenXR session was already torn + down externally (e.g. "Stop AR"), cleanup errors are suppressed. + + Args: + exc_type: Exception type (for context manager protocol). + exc_val: Exception value. + exc_tb: Exception traceback. + """ + # Close the retargeting tuning UI first + if self._retargeting_ui_ctx is not None: + self._retargeting_ui_ctx.__exit__(exc_type, exc_val, exc_tb) + self._retargeting_ui_ctx = None + self._retargeting_ui = None + + if self._session is not None: + try: + self._session.__exit__(exc_type, exc_val, exc_tb) + except Exception as e: + # The OpenXR session may have already been torn down externally + # (e.g. user clicked "Stop AR"), so destroying spaces/action + # sets will fail with XR_ERROR_HANDLE_INVALID. This is + # expected and safe to suppress. + logger.debug(f"Suppressed error during IsaacTeleop session cleanup: {e}") + self._session = None + + # Always clear pipeline state (session may never have been created if + # OpenXR handles were never available). + self._pipeline = None + self._teleop_control_pipeline = None + self._message_processor = None + + if self._cloudxr_launcher is not None: + try: + self._cloudxr_launcher.stop() + except RuntimeError: + logger.warning("CloudXR runtime process could not be terminated; handle retained for atexit cleanup") + else: + self._cloudxr_launcher = None + logger.info("CloudXR runtime stopped") + + logger.info("IsaacTeleop session ended") + + # ------------------------------------------------------------------ + # Control pipeline construction + # ------------------------------------------------------------------ + + @staticmethod + def _build_control_pipeline(channel_uuid: bytes) -> tuple[Any, TeleopMessageProcessor]: + """Build a ``teleop_control_pipeline`` from a message channel UUID. + + Wires ``MessageChannelSource`` -> :class:`TeleopMessageProcessor` + -> :class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager`. + + Args: + channel_uuid: 16-byte UUID for the OpenXR opaque data channel. + + Returns: + A ``(teleop_control_pipeline, message_processor)`` tuple. + """ + from isaacteleop.retargeting_engine.deviceio_source_nodes import message_channel_config + from isaacteleop.teleop_session_manager import DefaultTeleopStateManager + + source, _sink = message_channel_config( + name="_teleop_control", + channel_uuid=channel_uuid, + ) + + processor = TeleopMessageProcessor(name="_teleop_msg_processor") + processor_graph = processor.connect({processor.INPUT_MESSAGES: source.output("messages_tracked")}) + + state_manager = DefaultTeleopStateManager(name="_teleop_state") + teleop_control_pipeline = state_manager.connect( + { + state_manager.INPUT_KILL: processor_graph.output("kill"), + state_manager.INPUT_RUN_TOGGLE: processor_graph.output("run_toggle"), + state_manager.INPUT_RESET: processor_graph.output("reset"), + } + ) + + return teleop_control_pipeline, processor + + # ------------------------------------------------------------------ + # Extension / XR lifecycle callbacks + # ------------------------------------------------------------------ + + def _on_request_required_extensions(self) -> list[str]: + """Callback for required extensions subscription. + + Inspects both the main pipeline and the ``teleop_control_pipeline`` + (if configured) so that extensions required by the control channel + (e.g. ``XR_NV_opaque_data_channel``) are included. + + Returns: + A list of required extensions. + """ + from isaacteleop.teleop_session_manager.helpers import get_required_oxr_extensions_from_pipeline + + required_extensions: list[str] = [] + if self._pipeline is not None: + required_extensions.extend(get_required_oxr_extensions_from_pipeline(self._pipeline)) + if self._teleop_control_pipeline is not None: + required_extensions.extend(get_required_oxr_extensions_from_pipeline(self._teleop_control_pipeline)) + + required_extensions = sorted(set(required_extensions)) + logger.info(f"Required extensions: {required_extensions}") + return required_extensions + + def _on_xr_enabled_changed(self, item, event_type): + import carb.settings + + enabled = carb.settings.get_settings().get("/xr/enabled") + logger.info(f"XR enabled changed to: {enabled}") + + if not enabled: + self._teardown_dead_session() + + def _on_pre_shutdown(self, _event): + """Called when Kit is closing; tear down the session but leave the + pipeline intact so the main loop can exit via its own control flow + (``simulation_app.is_running()`` will go ``False``). + + Full resource cleanup happens later when the context manager's + ``__exit__`` calls :meth:`stop`. + """ + logger.info("Shutting down IsaacTeleop session due to Kit close") + self._pre_shutdown_subscription = None + self._teardown_dead_session() + + # ------------------------------------------------------------------ + # Deferred session creation + # ------------------------------------------------------------------ + + def try_start_session(self) -> bool: + """Public wrapper for deferred session creation. + + Returns: + ``True`` if the session is running, ``False`` if still deferred. + """ + return self._try_start_session() + + def _try_start_session(self) -> bool: + """Attempt to create and start the IsaacTeleop session. + + Tries to acquire OpenXR handles from Kit's XR bridge. If the + handles are available, creates and enters the ``TeleopSession``. + If the handles are not yet complete — either because the XR session + has not started or because the bridge component has not finished + registering — session creation is deferred and will be retried on + the next :meth:`step` call. + + Returns: + ``True`` if the session was successfully started (or was already + running), ``False`` if session creation was deferred. + """ + if self._session is not None: + return True + + self._ensure_xr_ar_profile_enabled() + + from isaacteleop.oxr import OpenXRSessionHandles + from isaacteleop.teleop_session_manager import TeleopSession, TeleopSessionConfig + + oxr_handles = self._acquire_kit_oxr_handles(OpenXRSessionHandles) + + if oxr_handles is None: + if not self._session_start_deferred_logged: + if self._kit_xr_session_is_active(): + logger.info( + "Kit XR session active but bridge handles incomplete; IsaacTeleop session creation deferred" + ) + else: + logger.info( + "OpenXR handles not yet available (waiting for XR session); " + "IsaacTeleop session creation deferred" + ) + self._session_start_deferred_logged = True + return False + + session_config = TeleopSessionConfig( + app_name=self._cfg.app_name, + trackers=[], + pipeline=self._pipeline, + teleop_control_pipeline=self._teleop_control_pipeline, + plugins=self._cfg.plugins, + oxr_handles=oxr_handles, + ) + + # Create and enter the TeleopSession + self._session = TeleopSession(session_config) + self._session.__enter__() + + logger.info(f"IsaacTeleop session started: {self._cfg.app_name}") + return True + + # ------------------------------------------------------------------ + # Stepping + # ------------------------------------------------------------------ + + def step( + self, + anchor_world_matrix_fn: Callable[[], np.ndarray] | None = None, + target_T_world: np.ndarray | torch.Tensor | SupportsDLPack | None = None, + ) -> torch.Tensor | None: + """Execute one step of the teleop session and return the action tensor. + + If the session has not been started yet (because OpenXR handles were + not available), this method will attempt to start it. Once the user + clicks "Start AR" and the handles become available, the session is + created transparently. + + If the underlying OpenXR session is torn down externally (e.g. the + user clicks "Stop AR"), the error is caught, the session is cleaned + up, and ``None`` is returned so the caller can continue rendering + while waiting for a potential restart. + + Args: + anchor_world_matrix_fn: Optional callable returning the (4, 4) + world-to-anchor transform. Used to build external inputs + for ``ValueInput`` leaf nodes in the pipeline. + target_T_world: Optional (4, 4) transform matrix that rebases + pipeline poses into a target coordinate frame. When provided, + the anchor matrix is left-multiplied by this transform + (``target_T_world @ world_T_anchor``) so all output poses + are expressed in the target frame. Accepts any object + supporting the DLPack buffer protocol (``__dlpack__``), + including :class:`numpy.ndarray`, :class:`torch.Tensor`, + and ``wp.array``. + + Returns: + A flattened action :class:`torch.Tensor` ready for the Isaac Lab + environment, or ``None`` if the session has not started yet + or the XR session was torn down externally. + + Raises: + RuntimeError: If called before :meth:`start`. + """ + if self._pipeline is None: + raise RuntimeError("TeleopSessionLifecycle.start() must be called before step()") + + # Lazily start the session when OpenXR handles become available + if self._session is None: + if not self._try_start_session(): + return None + + # Build external inputs (e.g. world-to-anchor transform) if the + # pipeline contains ValueInput leaf nodes. + external_inputs = self._build_external_inputs(anchor_world_matrix_fn, target_T_world) + + # When no control pipeline is configured, host-initiated resets use + # the execution_events override as a fallback path. + execution_events = None + if self._pending_reset: + from isaacteleop.retargeting_engine.interface.execution_events import ExecutionEvents, ExecutionState + + execution_events = ExecutionEvents(reset=True, execution_state=ExecutionState.RUNNING) + self._pending_reset = False + + # Execute one step of the teleop session. + # If the underlying OpenXR session was destroyed externally (e.g. + # user clicked "Stop AR"), the step call will fail. We catch the + # error, tear down the dead session, and return None so the caller + # can continue rendering (or wait for the session to restart). + assert self._session is not None # guaranteed by _try_start_session above + try: + result = self._session.step( + external_inputs=external_inputs, + execution_events=execution_events, + ) + except Exception as e: + logger.warning(f"IsaacTeleop session step failed (XR session likely torn down): {e}") + self._teardown_dead_session() + return None + + # Store the right controller TensorGroup for button polling + self._last_right_controller = result.get(self._CONTROLLER_RIGHT_KEY) + + # Extract the flattened action array (DLPack-compatible) from + # TensorReorderer and move to the simulation device. + action_array = result["action"][0] + action = torch.from_dlpack(action_array).to( # type: ignore[attr-defined] + dtype=torch.float32, device=self._device + ) + + return action + + # ------------------------------------------------------------------ + # Dead session teardown + # ------------------------------------------------------------------ + + def _teardown_dead_session(self) -> None: + """Clean up a session whose underlying OpenXR handles are no longer valid. + + This is called when :meth:`step` detects that the XR session was + destroyed externally (e.g. user clicked "Stop AR"). The + ``TeleopSession`` is exited with error suppression (since its XR + resources are already gone), and the internal state is reset so that + the deferred-start logic in :meth:`step` can re-acquire handles if + the user restarts AR. + """ + if self._session is not None: + try: + self._session.__exit__(None, None, None) + except Exception as e: + logger.debug(f"Suppressed error tearing down dead session: {e}") + self._session = None + self._session_start_deferred_logged = False + logger.info("IsaacTeleop session torn down after external XR shutdown") + + # ------------------------------------------------------------------ + # External input building + # ------------------------------------------------------------------ + + def _build_external_inputs( + self, + anchor_world_matrix_fn: Callable[[], np.ndarray] | None, + target_T_world: np.ndarray | torch.Tensor | SupportsDLPack | None = None, + ) -> dict | None: + """Build external inputs for non-DeviceIO leaf nodes in the pipeline. + + Checks whether the active ``TeleopSession`` has external (non-DeviceIO) + leaf nodes and, for each recognized leaf, constructs the corresponding + ``TensorGroup`` data. + + When *target_T_world* is provided, the anchor matrix is left-multiplied + by the rebase transform so that all pipeline poses are expressed in + the target coordinate frame: + ``target_T_world @ world_T_anchor = target_T_anchor``. + + Args: + anchor_world_matrix_fn: Callable returning the (4, 4) + world-to-anchor transform matrix. + target_T_world: Optional (4, 4) rebase transform. See + :meth:`step` for details. + + Returns: + A dict suitable for ``TeleopSession.step(external_inputs=...)``, + or ``None`` when no external inputs are required. + """ + if self._session is None or not self._session.has_external_inputs(): + return None + + from isaacteleop.retargeting_engine.interface import TensorGroup, ValueInput + from isaacteleop.retargeting_engine.tensor_types import TransformMatrix + + ext_specs = self._session.get_external_input_specs() + external_inputs: dict = {} + + for leaf_name in ext_specs: + if leaf_name == self.WORLD_T_ANCHOR_INPUT_NAME: + if anchor_world_matrix_fn is not None: + anchor_matrix = anchor_world_matrix_fn() + else: + anchor_matrix = np.eye(4, dtype=np.float32) + if target_T_world is not None: + anchor_matrix = _to_numpy_4x4(target_T_world) @ anchor_matrix + xform_tg = TensorGroup(TransformMatrix()) + xform_tg[0] = anchor_matrix + external_inputs[leaf_name] = {ValueInput.VALUE: xform_tg} + else: + logger.warning( + f"Unrecognized external leaf node '{leaf_name}' in pipeline; " + "IsaacTeleopDevice does not know how to provide its inputs" + ) + + return external_inputs if external_inputs else None + + # ------------------------------------------------------------------ + # CloudXR runtime auto-launch + # ------------------------------------------------------------------ + + def _ensure_cloudxr_runtime(self) -> None: + """Launch the CloudXR runtime and WSS proxy if configured. + + Uses :class:`~isaacteleop.cloudxr.CloudXRLauncher` to set up the + environment, spawn the native runtime process, and start the WSS + TLS proxy in a background thread. The launcher is stored in + ``self._cloudxr_launcher`` and shut down in :meth:`stop`. + + Auto-launch is skipped when ``auto_launch_cloudxr`` is ``False`` + or the ``ISAACLAB_CXR_SKIP_AUTOLAUNCH=1`` environment variable is + set (the env var takes precedence). + """ + if self._cloudxr_launcher is not None: + return + + if os.environ.get("ISAACLAB_CXR_SKIP_AUTOLAUNCH", "").strip() == "1": + logger.info("CloudXR auto-launch skipped (ISAACLAB_CXR_SKIP_AUTOLAUNCH=1)") + return + + if not self._auto_launch_cloudxr: + logger.info("CloudXR auto-launch disabled (auto_launch_cloudxr=False)") + return + + from pathlib import Path + + from isaacteleop.cloudxr import CloudXRLauncher as _CloudXRLauncher + + self._cloudxr_launcher = _CloudXRLauncher( + install_dir=str(Path.home() / ".cloudxr"), + env_config=self._cloudxr_env_file, + accept_eula=False, + ) + logger.info("CloudXR runtime auto-launched") + + # ------------------------------------------------------------------ + # OpenXR handle acquisition + # ------------------------------------------------------------------ + + _xr_ar_profile_enabled = False + + @classmethod + def _ensure_xr_ar_profile_enabled(cls) -> None: + """Enable the XR AR profile via carb.settings when running headless. + + In headless mode the ``xr.profile.ar.enabled`` setting is intentionally + omitted from the ``.kit`` file so that all extensions — including + ``isaacsim.kit.xr.teleop.bridge`` and its ``BridgeComponent`` — can + load and register with Kit's XR system *before* the OpenXR instance is + created. This method sets the flag from Python once extensions are + loaded. Kit's XR system picks up the change on the next event-loop + tick, which is why handle acquisition may be deferred by one frame. + + Headless mode is detected via the ``/isaaclab/xr/auto_start`` carb + setting which the :class:`~isaaclab.app.AppLauncher` stores after + resolving the headless state (covers both explicit ``--headless`` and + implicit headless when no Kit visualizer is requested). In + non-headless mode this is a no-op because Kit's profile system manages + AR activation through the UI. + """ + if cls._xr_ar_profile_enabled: + return + cls._xr_ar_profile_enabled = True + try: + import carb.settings + + settings = carb.settings.get_settings() + + if not settings.get("/isaaclab/xr/auto_start"): + return + + if not settings.get("/xr/profile/ar/enabled"): + settings.set("/xr/profile/ar/enabled", True) + logger.info("Enabled /xr/profile/ar/enabled via carb.settings") + except (ImportError, AttributeError): + pass + + @staticmethod + def _kit_xr_session_is_active() -> bool: + """Check whether Kit's XR system has an active OpenXR session. + + Used to provide a more specific log message when deferring session + creation: "waiting for XR session" vs "bridge handles incomplete". + + Returns: + ``True`` if Kit reports non-zero instance **and** session handles. + """ + try: + import omni.kit.xr.system.openxr as openxr + + return bool(openxr.get_instance_handle() and openxr.get_session_handle()) + except (ImportError, ModuleNotFoundError, AttributeError): + return False + + @staticmethod + def _acquire_kit_oxr_handles(handles_cls: type[OpenXRSessionHandles]) -> OpenXRSessionHandles | None: + """Acquire OpenXR session handles from Kit's XR bridge extension. + + Imports ``omni.kit.xr.system.openxr`` and reads the four raw handle + values (XrInstance, XrSession, XrSpace, xrGetInstanceProcAddr) that Kit's + OpenXR system exposes. The handles are returned as an + ``OpenXRSessionHandles`` instance ready for ``DeviceIOSession.run()``. + + Args: + handles_cls: The ``OpenXRSessionHandles`` class (passed in to avoid + a module-level import of ``isaacteleop.oxr``). + + Returns: + An ``OpenXRSessionHandles`` instance, or ``None`` if the bridge + extension is not available or any handle is missing. + """ + try: + import omni.kit.xr.system.openxr as openxr + except (ImportError, ModuleNotFoundError): + logger.info("omni.kit.xr.system.openxr not available; IsaacTeleop will create its own OpenXR session") + return None + + instance = openxr.get_instance_handle() + session = openxr.get_session_handle() + space = openxr.get_stage_space_handle() + proc_addr = openxr.get_instance_proc_addr() + + if not all((instance, session, space, proc_addr)): + logger.debug( + "Kit XR bridge returned incomplete handles " + f"(instance={instance}, session={session}, space={space}, proc_addr={proc_addr})" + ) + return None + + logger.info("Acquired OpenXR handles from Kit XR bridge") + return handles_cls(instance, session, space, proc_addr) diff --git a/source/isaaclab_teleop/isaaclab_teleop/teleop_message_processor.py b/source/isaaclab_teleop/isaaclab_teleop/teleop_message_processor.py new file mode 100644 index 000000000000..1844925c4d0c --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/teleop_message_processor.py @@ -0,0 +1,232 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Message-channel payload parser for TeleopCore's teleop_control_pipeline. + +Provides :class:`TeleopMessageProcessor`, a lightweight +:class:`~isaacteleop.retargeting_engine.interface.BaseRetargeter` that +converts message-channel payloads into boolean pulse signals suitable for +:class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager`. +""" + +from __future__ import annotations + +import json +import re +from typing import TYPE_CHECKING + +from isaacteleop.retargeting_engine.interface import BaseRetargeter, RetargeterIOType + +if TYPE_CHECKING: + from isaacteleop.retargeting_engine.interface.retargeter_core_types import ComputeContext, RetargeterIO + +_COMMAND_PATTERNS: list[tuple[re.Pattern[str], str]] = [ + (re.compile(r"\breset\b", re.IGNORECASE), "reset"), + (re.compile(r"\bstop\b", re.IGNORECASE), "stop"), + (re.compile(r"\bstart\b", re.IGNORECASE), "start"), +] +"""Ordered patterns for classifying a command string. + +``reset`` is checked first so that a hypothetical payload containing +both "reset" and "start" is treated as a reset (the more destructive +operation wins). ``stop`` precedes ``start`` for the same reason. +""" + +# Shadow states mirroring DefaultTeleopStateManager's ExecutionState. +_STOPPED = "stopped" +_PAUSED = "paused" +_RUNNING = "running" + +# DefaultTeleopStateManager cycles states on run_toggle rising edges: +# STOPPED -> PAUSED -> RUNNING -> PAUSED -> RUNNING -> ... +# To map imperative "start" (= go to RUNNING) and "stop" (= go to PAUSED) +# we emit the right number of toggle edges based on predicted state. +_START_TOGGLE_SEQUENCES: dict[str, list[bool]] = { + _STOPPED: [True, False, True], # 2 edges: STOPPED -> PAUSED -> RUNNING + _PAUSED: [True], # 1 edge: PAUSED -> RUNNING + _RUNNING: [], # already running +} +_STOP_TOGGLE_SEQUENCES: dict[str, list[bool]] = { + _RUNNING: [True], # 1 edge: RUNNING -> PAUSED + _PAUSED: [], # already paused + _STOPPED: [], # already stopped +} +# Shadow state advances on each rising edge (True after False). +_TOGGLE_TRANSITIONS: dict[str, str] = { + _STOPPED: _PAUSED, + _PAUSED: _RUNNING, + _RUNNING: _PAUSED, +} + + +class TeleopMessageProcessor(BaseRetargeter): + """Parse message-channel payloads into boolean control signals. + + Consumes the ``messages_tracked`` output of a + :class:`~isaacteleop.retargeting_engine.deviceio_source_nodes.MessageChannelSource` + and produces three boolean pulse outputs that drive + :class:`~isaacteleop.teleop_session_manager.DefaultTeleopStateManager`: + + * ``run_toggle`` -- pulsed ``True`` on rising edges; the number of + edges depends on the target state (e.g. ``"start"`` from STOPPED + emits two edges over three frames: STOPPED -> PAUSED -> RUNNING). + * ``kill`` -- always ``False`` (reserved for fail-safe; ``"stop"`` + uses ``run_toggle`` to reach PAUSED instead of STOPPED). + * ``reset`` -- pulsed ``True`` for one frame on ``"reset"``. + + The processor maintains a *shadow state* that mirrors + ``DefaultTeleopStateManager``'s internal state so it can emit the + correct toggle sequence for imperative commands. + + Payload formats supported: + + 1. **JSON (Quest client format)**:: + + {"type": "teleop_command", "message": {"command": "start teleop"}} + + 2. **Plain text (fallback)**: raw UTF-8 string matched by word boundary + (``"start"``, ``"stop"``, ``"reset"``). + + Host-initiated resets (e.g. environment success) are injected via + :meth:`inject_reset`, which sets the ``reset`` output ``True`` on the + next compute call without requiring a message-channel payload. + """ + + INPUT_MESSAGES = "messages_tracked" + + def __init__(self, name: str) -> None: + self._inject_reset_pending = False + self._shadow_state = _STOPPED + self._run_toggle_queue: list[bool] = [] + self._prev_toggle_output = False + super().__init__(name=name) + + def inject_reset(self) -> None: + """Schedule a reset pulse on the next pipeline step. + + The ``reset`` output will be ``True`` for exactly one frame, then + automatically cleared. + """ + self._inject_reset_pending = True + + def _make_toggle_sequence(self, base_sequence: list[bool]) -> list[bool]: + """Prepend a ``False`` frame if needed to guarantee a clean rising edge. + + ``DefaultTeleopStateManager`` uses edge detection + (``pressed and not prev_pressed``), so emitting ``True`` when the + previous output was already ``True`` would not trigger a state + transition. This method prepends ``False`` when necessary. + """ + if not base_sequence: + return [] + seq = list(base_sequence) + if self._prev_toggle_output: + seq.insert(0, False) + return seq + + def input_spec(self) -> RetargeterIOType: + from isaacteleop.retargeting_engine.deviceio_source_nodes.deviceio_tensor_types import ( + MessageChannelMessagesTrackedGroup, + ) + + return {self.INPUT_MESSAGES: MessageChannelMessagesTrackedGroup()} + + def output_spec(self) -> RetargeterIOType: + from isaacteleop.teleop_session_manager.teleop_state_manager_types import bool_signal + + return { + "run_toggle": bool_signal("run_toggle"), + "kill": bool_signal("kill"), + "reset": bool_signal("reset"), + } + + def _compute_fn( + self, + inputs: RetargeterIO, + outputs: RetargeterIO, + context: ComputeContext, + ) -> None: + del context + + reset = self._inject_reset_pending + self._inject_reset_pending = False + + # Parse incoming messages and enqueue toggle sequences. + messages_tracked = inputs[self.INPUT_MESSAGES][0] + data = getattr(messages_tracked, "data", None) + if data: + for message in data: + payload = getattr(message, "payload", None) + if payload is None: + continue + try: + text = bytes(payload).decode("utf-8") + except (UnicodeDecodeError, TypeError): + continue + + command = _extract_command(text) + if command is None: + continue + + kind = _classify_command(command) + if kind == "start" and not self._run_toggle_queue: + self._run_toggle_queue = self._make_toggle_sequence(_START_TOGGLE_SEQUENCES[self._shadow_state]) + elif kind == "stop" and not self._run_toggle_queue: + self._run_toggle_queue = self._make_toggle_sequence(_STOP_TOGGLE_SEQUENCES[self._shadow_state]) + elif kind == "reset": + reset = True + + # Drain the toggle queue (one value per frame). + if self._run_toggle_queue: + run_toggle = self._run_toggle_queue.pop(0) + else: + run_toggle = False + + # Advance shadow state on rising edges (matches DefaultTeleopStateManager's + # edge detection: ``pressed and not prev_pressed``). + if run_toggle and not self._prev_toggle_output: + self._shadow_state = _TOGGLE_TRANSITIONS[self._shadow_state] + self._prev_toggle_output = run_toggle + + outputs["run_toggle"][0] = run_toggle + outputs["kill"][0] = False + outputs["reset"][0] = reset + + +def _classify_command(text: str) -> str | None: + """Return ``"start"``, ``"stop"``, ``"reset"``, or ``None``. + + Uses word-boundary matching so that e.g. ``"stop_and_restart"`` + matches ``"stop"`` (not ``"start"``). + """ + for pattern, label in _COMMAND_PATTERNS: + if pattern.search(text): + return label + return None + + +def _extract_command(text: str) -> str | None: + """Extract the command string from a JSON or plain-text payload. + + Tries JSON parsing first (Quest client format) and falls back to the + raw text for plain-string payloads. Non-string JSON scalars (numbers, + arrays, booleans) are discarded. + """ + try: + obj = json.loads(text) + except (json.JSONDecodeError, TypeError): + return text + + if not isinstance(obj, dict): + return None + if obj.get("type") != "teleop_command": + return None + + msg = obj.get("message") + if isinstance(msg, dict): + return msg.get("command", "") + if isinstance(msg, str): + return msg + return None diff --git a/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_manager.py b/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_manager.py new file mode 100644 index 000000000000..d39cdcbe84ad --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_manager.py @@ -0,0 +1,202 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""XR anchor management for IsaacTeleop-based teleoperation.""" + +from __future__ import annotations + +import contextlib +import logging + +import numpy as np +from scipy.spatial.transform import Rotation + +import carb + +from .xr_anchor_utils import XrAnchorSynchronizer +from .xr_cfg import XrCfg + +# Import XR components with fallback for testing +XRCore = None +XRCoreEventType = None +with contextlib.suppress(ModuleNotFoundError): + from omni.kit.xr.core import XRCore, XRCoreEventType + +from isaaclab.sim.utils.prims import create_prim as _create_prim + +logger = logging.getLogger(__name__) + + +class XrAnchorManager: + """Manages XR anchor prim creation, synchronization, and world transform computation. + + This class is responsible for: + + 1. Creating the XR anchor prim in the USD stage + 2. Configuring carb settings for XR rendering + 3. Managing the :class:`XrAnchorSynchronizer` that keeps the anchor + aligned with a reference prim (for dynamic anchoring) + 4. Computing the 4x4 world transform matrix that converts OpenXR + local-space poses into the Isaac Lab world frame + """ + + # Basis-change rotation from OpenXR (Y-up) to USD/Isaac Lab (Z-up). + _OXR_TO_USD_ROTATION: np.ndarray = np.array( + [ + [1.0, 0.0, 0.0], + [0.0, 0.0, -1.0], + [0.0, 1.0, 0.0], + ], + dtype=np.float64, + ) + + def __init__(self, xr_cfg: XrCfg): + """Initialize the XR anchor manager. + + Creates the anchor prim, configures carb XR settings, and sets up + the optional anchor synchronizer for dynamic anchoring. + + Args: + xr_cfg: XR configuration specifying anchor position, rotation, + and optional dynamic anchoring prim path. + """ + self._xr_cfg = xr_cfg + self._xr_core = XRCore.get_singleton() if XRCore is not None else None + self._xr_pre_sync_update_subscription = None + + # Resolve the headset anchor path + if self._xr_cfg.anchor_prim_path is not None: + anchor_path = self._xr_cfg.anchor_prim_path + if anchor_path.endswith("/"): + anchor_path = anchor_path[:-1] + self._xr_anchor_headset_path = f"{anchor_path}/XRAnchor" + else: + self._xr_anchor_headset_path = "/World/XRAnchor" + + # Create the XR anchor prim in USD. + # XrCfg.anchor_rot is xyzw; create_prim orientation expects xyzw. + x, y, z, w = self._xr_cfg.anchor_rot + try: + pos = np.asarray(self._xr_cfg.anchor_pos, dtype=np.float64) + quat_xyzw = np.asarray([x, y, z, w], dtype=np.float64) + _create_prim(self._xr_anchor_headset_path, prim_type="Xform", position=pos, orientation=quat_xyzw) + except Exception as e: + logger.warning(f"Failed to create XR anchor prim: {e}") + + # Configure carb settings for XR rendering + if hasattr(carb, "settings"): + carb.settings.get_settings().set_float("/persistent/xr/render/nearPlane", self._xr_cfg.near_plane) + carb.settings.get_settings().set_string("/persistent/xr/anchorMode", "custom anchor") + carb.settings.get_settings().set_string("/xrstage/customAnchor", self._xr_anchor_headset_path) + + self._anchor_sync: XrAnchorSynchronizer | None = None + if self._xr_core is not None: + try: + self._anchor_sync = XrAnchorSynchronizer( + xr_core=self._xr_core, + xr_cfg=self._xr_cfg, + xr_anchor_headset_path=self._xr_anchor_headset_path, + ) + # Subscribe to pre_sync_update to keep anchor in sync each frame. + # Capture the synchronizer in a local to satisfy type narrowing. + if XRCoreEventType is not None: + assert self._anchor_sync is not None # guaranteed by the lines above + anchor_sync = self._anchor_sync + self._xr_pre_sync_update_subscription = ( + self._xr_core.get_message_bus().create_subscription_to_pop_by_type( + XRCoreEventType.pre_sync_update, + lambda _, _sync=anchor_sync: _sync.sync_headset_to_anchor(), + name="isaaclab_teleop_xr_pre_sync_update", + ) + ) + except Exception as e: + logger.warning(f"Failed to initialize anchor synchronizer: {e}") + + @property + def xr_core(self): + """The XRCore singleton, or ``None`` if XR is not available.""" + return self._xr_core + + @property + def anchor_headset_path(self) -> str: + """The USD path of the XR anchor prim.""" + return self._xr_anchor_headset_path + + def get_world_matrix(self) -> np.ndarray: + """Build the combined 4x4 transform from OpenXR local space to Isaac Lab world. + + This matrix performs two operations on every pose that comes out of + IsaacTeleop's DeviceIO pipeline: + + 1. **Axis conversion** -- rotates from the OpenXR coordinate convention + (Y-up, +X right, +Z back) to the Isaac Lab convention + (Z-up, +X forward, +Y left). + 2. **World offset** -- translates and rotates from the XR anchor's + local frame into the Isaac Lab world frame using the anchor + position and orientation. + + The returned matrix is ``world_T_anchor @ oxr_to_usd`` so that + ``ControllerTransform`` can apply ``p_world = R @ p_oxr + t`` in a + single operation. + + Strategy: + * When the :class:`XrAnchorSynchronizer` is available (typical + runtime), reads the cached transform that was written to the XR + core via ``set_world_transform_matrix`` during the most recent + ``pre_sync_update``. This works for both dynamic anchoring + (``anchor_prim_path`` set) and static anchoring. + * Falls back to raw ``anchor_pos`` / ``anchor_rot`` config values + only when the XR core is unavailable (e.g. unit tests). + + Returns: + A (4, 4) float32 numpy array. + """ + if self._anchor_sync is not None: + xform = self._anchor_sync.get_world_transform() + if xform is not None: + pos, quat_xyzw = xform + return self._build_matrix(pos, quat_xyzw) + + # Fallback when XR core is unavailable (e.g. unit tests). + return self._build_matrix(self._xr_cfg.anchor_pos, self._xr_cfg.anchor_rot) + + def _build_matrix(self, pos, quat_xyzw) -> np.ndarray: + """Assemble world_T_anchor @ oxr_to_usd as a single 4x4 matrix. + + The anchor rotation/translation places the XR origin in the Isaac Lab + world. The axis-conversion rotation (``_OXR_TO_USD_ROTATION``) is + composed on the right so that it is applied *first* to the raw OpenXR + pose before the anchor transform maps it into the world. + """ + r_anchor = Rotation.from_quat( + [ + float(quat_xyzw[0]), + float(quat_xyzw[1]), + float(quat_xyzw[2]), + float(quat_xyzw[3]), + ] + ).as_matrix() + + # Combined rotation: R_anchor @ R_oxr_to_usd + r_combined = r_anchor @ self._OXR_TO_USD_ROTATION + + mat = np.eye(4, dtype=np.float32) + mat[:3, :3] = r_combined + mat[:3, 3] = [float(pos[0]), float(pos[1]), float(pos[2])] + return mat + + def reset(self) -> None: + """Reset the anchor synchronizer state.""" + if self._anchor_sync is not None: + self._anchor_sync.reset() + + def toggle_anchor_rotation(self) -> None: + """Toggle anchor rotation following on the synchronizer.""" + if self._anchor_sync is not None: + self._anchor_sync.toggle_anchor_rotation() + + def cleanup(self) -> None: + """Release event subscriptions.""" + self._xr_pre_sync_update_subscription = None diff --git a/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_utils.py b/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_utils.py new file mode 100644 index 000000000000..196de3ce56b9 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/xr_anchor_utils.py @@ -0,0 +1,226 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for synchronizing XR anchor pose with a reference prim and XR config.""" + +from __future__ import annotations + +import contextlib +import logging +import math +from typing import Any + +import numpy as np + +logger = logging.getLogger(__name__) + +from isaaclab.sim import SimulationContext +from isaaclab.sim.utils.stage import get_current_stage, get_current_stage_id + +from .xr_cfg import XrAnchorRotationMode + +with contextlib.suppress(ModuleNotFoundError): + import usdrt + from pxr import Gf as pxrGf + from usdrt import Rt + + +class XrAnchorSynchronizer: + """Keeps the XR anchor prim aligned with a reference prim according to XR config.""" + + def __init__(self, xr_core: Any, xr_cfg: Any, xr_anchor_headset_path: str): + self._xr_core = xr_core + self._xr_cfg = xr_cfg + self._xr_anchor_headset_path = xr_anchor_headset_path + + self.__anchor_prim_initial_quat = None + self.__anchor_prim_initial_height = None + self.__smoothed_anchor_quat = None + self.__last_anchor_quat = None + self.__anchor_rotation_enabled = True + + # Cached anchor world transform (pos, quat_xyzw) set by sync_headset_to_anchor(). + # Reading back from the prim hierarchy is unreliable when the anchor is a child of a + # physics-driven prim (e.g. the pelvis) because Fabric computes the hierarchy world + # matrix using the physics-updated parent while the local xform was decomposed against + # the USD-level parent, which can diverge. + self.__cached_world_pos: np.ndarray | None = None + self.__cached_world_quat_xyzw: np.ndarray | None = None + + # Resolve USD layer identifier of the anchor for updates + try: + stage = get_current_stage() + xr_anchor_headset_prim = stage.GetPrimAtPath(self._xr_anchor_headset_path) + prim_stack = xr_anchor_headset_prim.GetPrimStack() if xr_anchor_headset_prim is not None else None + self.__anchor_headset_layer_identifier = prim_stack[0].layer.identifier if prim_stack else None + except Exception: + self.__anchor_headset_layer_identifier = None + + def reset(self): + self.__anchor_prim_initial_quat = None + self.__anchor_prim_initial_height = None + self.__smoothed_anchor_quat = None + self.__last_anchor_quat = None + self.__anchor_rotation_enabled = True + self.__cached_world_pos = None + self.__cached_world_quat_xyzw = None + self.sync_headset_to_anchor() + + def toggle_anchor_rotation(self): + self.__anchor_rotation_enabled = not self.__anchor_rotation_enabled + logger.info(f"XR: Toggling anchor rotation: {self.__anchor_rotation_enabled}") + + def get_world_transform(self) -> tuple[np.ndarray, np.ndarray] | None: + """Return the anchor world transform. + + Returns the cached world transform that was computed by the most recent + call to :meth:`sync_headset_to_anchor`. Using the cached value avoids + a Fabric/USD layer mismatch: when the XR anchor prim is a child of a + physics-driven prim (e.g. the robot pelvis), reading + ``GetFabricHierarchyWorldMatrixAttr`` would compose the Fabric-side + parent transform (updated by physics) with a local xform that was + decomposed against the USD-side parent (which can lag behind), + producing an incorrect world matrix that drifts as the robot moves. + + Returns: + A ``(position, quat_xyzw)`` tuple of numpy float64 arrays, + or ``None`` if :meth:`sync_headset_to_anchor` has not run yet. + """ + if self.__cached_world_pos is not None and self.__cached_world_quat_xyzw is not None: + return self.__cached_world_pos, self.__cached_world_quat_xyzw + return None + + def sync_headset_to_anchor(self): + """Sync XR anchor pose in USD for both dynamic and static anchoring. + + For **dynamic** anchoring (``anchor_prim_path`` is set), the reference + prim's world position is read from Fabric and ``anchor_pos`` is added + as an offset. For **static** anchoring (no prim path), ``anchor_pos`` + is used directly as the world position. + + In both cases the function calls ``set_world_transform_matrix`` on the + XR core so that the rendering anchor and the pipeline's + ``world_T_anchor`` matrix are guaranteed to agree, and caches the + world transform for :meth:`get_world_transform`. + """ + try: + if self._xr_cfg.anchor_prim_path is not None: + stage_id = get_current_stage_id() + rt_stage = usdrt.Usd.Stage.Attach(stage_id) + if rt_stage is None: + return + + rt_prim = rt_stage.GetPrimAtPath(self._xr_cfg.anchor_prim_path) + if rt_prim is None: + return + + rt_xformable = Rt.Xformable(rt_prim) + if rt_xformable is None: + return + + world_matrix_attr = rt_xformable.GetFabricHierarchyWorldMatrixAttr() + if world_matrix_attr is None: + return + + rt_matrix = world_matrix_attr.Get() + if rt_matrix is None: + return + rt_pos = rt_matrix.ExtractTranslation() + + if self.__anchor_prim_initial_quat is None: + self.__anchor_prim_initial_quat = rt_matrix.ExtractRotationQuat() + + if getattr(self._xr_cfg, "fixed_anchor_height", False): + if self.__anchor_prim_initial_height is None: + self.__anchor_prim_initial_height = rt_pos[2] + rt_pos[2] = self.__anchor_prim_initial_height + + pxr_anchor_pos = pxrGf.Vec3d(*rt_pos) + pxrGf.Vec3d(*self._xr_cfg.anchor_pos) + else: + rt_matrix = None + pxr_anchor_pos = pxrGf.Vec3d(*self._xr_cfg.anchor_pos) + + x, y, z, w = self._xr_cfg.anchor_rot + pxr_cfg_quat = pxrGf.Quatd(w, pxrGf.Vec3d(x, y, z)) + + pxr_anchor_quat = pxr_cfg_quat + + if rt_matrix is not None: + if self._xr_cfg.anchor_rotation_mode in ( + XrAnchorRotationMode.FOLLOW_PRIM, + XrAnchorRotationMode.FOLLOW_PRIM_SMOOTHED, + ): + rt_prim_quat = rt_matrix.ExtractRotationQuat() + rt_delta_quat = rt_prim_quat * self.__anchor_prim_initial_quat.GetInverse() + pxr_delta_quat = pxrGf.Quatd(rt_delta_quat.GetReal(), pxrGf.Vec3d(*rt_delta_quat.GetImaginary())) + + # yaw-only about Z (right-handed, Z-up) + wq = pxr_delta_quat.GetReal() + ix, iy, iz = pxr_delta_quat.GetImaginary() + yaw = math.atan2(2.0 * (wq * iz + ix * iy), 1.0 - 2.0 * (iy * iy + iz * iz)) + cy = math.cos(yaw * 0.5) + sy = math.sin(yaw * 0.5) + pxr_delta_yaw_only_quat = pxrGf.Quatd(cy, pxrGf.Vec3d(0.0, 0.0, sy)) + pxr_anchor_quat = pxr_delta_yaw_only_quat * pxr_cfg_quat + + if self._xr_cfg.anchor_rotation_mode == XrAnchorRotationMode.FOLLOW_PRIM_SMOOTHED: + if self.__smoothed_anchor_quat is None: + self.__smoothed_anchor_quat = pxr_anchor_quat + else: + dt = SimulationContext.instance().get_rendering_dt() + alpha = 1.0 - math.exp(-dt / max(self._xr_cfg.anchor_rotation_smoothing_time, 1e-6)) + alpha = min(1.0, max(0.05, alpha)) + self.__smoothed_anchor_quat = pxrGf.Slerp( + alpha, self.__smoothed_anchor_quat, pxr_anchor_quat + ) + pxr_anchor_quat = self.__smoothed_anchor_quat + + elif self._xr_cfg.anchor_rotation_mode == XrAnchorRotationMode.CUSTOM: + if self._xr_cfg.anchor_rotation_custom_func is not None: + rt_prim_quat = rt_matrix.ExtractRotationQuat() + anchor_prim_pose = np.array( + [ + rt_pos[0], + rt_pos[1], + rt_pos[2], + rt_prim_quat.GetImaginary()[0], + rt_prim_quat.GetImaginary()[1], + rt_prim_quat.GetImaginary()[2], + rt_prim_quat.GetReal(), + ], + dtype=np.float64, + ) + prev_head = getattr(self, "_previous_headpose", np.zeros(7, dtype=np.float64)) + np_array_quat = self._xr_cfg.anchor_rotation_custom_func(prev_head, anchor_prim_pose) + x, y, z, w = np_array_quat + pxr_anchor_quat = pxrGf.Quatd(w, pxrGf.Vec3d(x, y, z)) + + pxr_mat = pxrGf.Matrix4d() + pxr_mat.SetTranslateOnly(pxr_anchor_pos) + + if self.__anchor_rotation_enabled: + pxr_final_quat = pxr_anchor_quat + self.__last_anchor_quat = pxr_anchor_quat + else: + if self.__last_anchor_quat is None: + self.__last_anchor_quat = pxr_anchor_quat + pxr_final_quat = self.__last_anchor_quat + self.__smoothed_anchor_quat = self.__last_anchor_quat + + pxr_mat.SetRotateOnly(pxr_final_quat) + + self.__cached_world_pos = np.array( + [pxr_anchor_pos[0], pxr_anchor_pos[1], pxr_anchor_pos[2]], dtype=np.float64 + ) + fq_img = pxr_final_quat.GetImaginary() + self.__cached_world_quat_xyzw = np.array( + [fq_img[0], fq_img[1], fq_img[2], pxr_final_quat.GetReal()], dtype=np.float64 + ) + + self._xr_core.set_world_transform_matrix( + self._xr_anchor_headset_path, pxr_mat, self.__anchor_headset_layer_identifier + ) + except Exception as e: + logger.warning(f"XR: Anchor sync failed: {e}") diff --git a/source/isaaclab_teleop/isaaclab_teleop/xr_cfg.py b/source/isaaclab_teleop/isaaclab_teleop/xr_cfg.py new file mode 100644 index 000000000000..7382f8570252 --- /dev/null +++ b/source/isaaclab_teleop/isaaclab_teleop/xr_cfg.py @@ -0,0 +1,150 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ignore private usage of variables warning +# pyright: reportPrivateUsage=none + +from __future__ import annotations + +import enum +from collections.abc import Callable + +import numpy as np + +from isaaclab.utils import configclass + + +class XrAnchorRotationMode(enum.Enum): + """Enumeration for XR anchor rotation modes.""" + + FIXED = "fixed" + """Fixed rotation mode: sets rotation once and doesn't change it.""" + + FOLLOW_PRIM = "follow_prim" + """Follow prim rotation mode: rotation follows prim's rotation.""" + + FOLLOW_PRIM_SMOOTHED = "follow_prim_smoothed" + """Follow prim rotation mode with smooth interpolation: rotation smoothly follows prim's rotation using slerp.""" + + CUSTOM = "custom_rotation" + """Custom rotation mode: user provided function to calculate the rotation.""" + + +@configclass +class XrCfg: + """Configuration for viewing and interacting with the environment through an XR device.""" + + anchor_pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Specifies the position (in m) of the simulation when viewed in an XR device. + + Specifically: this position will appear at the origin of the XR device's local coordinate frame. + """ + + anchor_rot: tuple[float, float, float, float] = (0.0, 0.0, 0.0, 1.0) + """Specifies the rotation (as a quaternion xyzw) of the simulation when viewed in an XR device. + + Specifically: this rotation will determine how the simulation is rotated with respect to the + origin of the XR device's local coordinate frame. + + This quantity is only effective if :attr:`xr_anchor_pos` is set. + """ + + anchor_prim_path: str | None = None + """Specifies the prim path to attach the XR anchor to for dynamic positioning. + + When set, the XR anchor will be attached to the specified prim (e.g., robot root prim), + allowing the XR camera to move with the prim. This is particularly useful for locomotion + robot teleoperation where the robot moves and the XR camera should follow it. + + If None, the anchor will use the static :attr:`anchor_pos` and :attr:`anchor_rot` values. + """ + + anchor_rotation_mode: XrAnchorRotationMode = XrAnchorRotationMode.FIXED + """Specifies how the XR anchor rotation should behave when attached to a prim. + + The available modes are: + - :attr:`XrAnchorRotationMode.FIXED`: Sets rotation once to anchor_rot value + - :attr:`XrAnchorRotationMode.FOLLOW_PRIM`: Rotation follows prim's rotation + - :attr:`XrAnchorRotationMode.FOLLOW_PRIM_SMOOTHED`: Rotation smoothly follows prim's rotation using slerp + - :attr:`XrAnchorRotationMode.CUSTOM`: user provided function to calculate the rotation + """ + + anchor_rotation_smoothing_time: float = 1.0 + """Wall-clock time constant (seconds) for rotation smoothing in FOLLOW_PRIM_SMOOTHED mode. + + This time constant is applied using wall-clock delta time between frames (not physics dt). + Smaller values (e.g., 0.1) result in faster/snappier response but less smoothing. + Larger values (e.g., 0.75–2.0) result in slower/smoother response but more lag. + Typical useful range: 0.3 – 1.5 seconds depending on runtime frame-rate and comfort. + """ + + anchor_rotation_custom_func: Callable[[np.ndarray, np.ndarray], np.ndarray] = lambda headpose, primpose: np.array( + [0, 0, 0, 1], dtype=np.float64 + ) + """Specifies the function to calculate the rotation of the XR anchor when anchor_rotation_mode is CUSTOM. + + Args: + headpose: Previous head pose as numpy array [x, y, z, w, x, y, z] (position + quaternion) + pose: Anchor prim pose as numpy array [x, y, z, w, x, y, z] (position + quaternion) + + Returns: + np.ndarray: Quaternion as numpy array [w, x, y, z] + """ + + near_plane: float = 0.15 + """Specifies the near plane distance for the XR device. + + This value determines the closest distance at which objects will be rendered in the XR device. + """ + + fixed_anchor_height: bool = True + """Specifies if the anchor height should be fixed. + + If True, the anchor height will be fixed to the initial height of the anchor prim. + """ + + +from typing import Any + + +def remove_camera_configs(env_cfg: Any) -> Any: + """Removes cameras from environments when using XR devices. + + Having additional cameras cause operation performance issues. This function scans the environment + configuration for camera objects and removes them, along with any associated + observation terms that reference these cameras. + + Args: + env_cfg: The environment configuration to modify. + + Returns: + The modified environment configuration with cameras removed. + """ + + import logging + + # import logger + logger = logging.getLogger(__name__) + + from isaaclab.managers import SceneEntityCfg + from isaaclab.sensors import CameraCfg + + for attr_name in dir(env_cfg.scene): + attr = getattr(env_cfg.scene, attr_name) + if isinstance(attr, CameraCfg): + delattr(env_cfg.scene, attr_name) + logger.info(f"Removed camera config: {attr_name}") + + # Remove any ObsTerms for the camera + if hasattr(env_cfg.observations, "policy"): + for obs_name in dir(env_cfg.observations.policy): + obsterm = getattr(env_cfg.observations.policy, obs_name) + if hasattr(obsterm, "params") and obsterm.params: + for param_value in obsterm.params.values(): + if isinstance(param_value, SceneEntityCfg) and param_value.name == attr_name: + delattr(env_cfg.observations.policy, obs_name) + logger.info(f"Removed camera observation term: {obs_name}") + break + return env_cfg diff --git a/source/isaaclab_teleop/pyproject.toml b/source/isaaclab_teleop/pyproject.toml new file mode 100644 index 000000000000..31dce8d230ec --- /dev/null +++ b/source/isaaclab_teleop/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools<82.0.0", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_teleop/setup.py b/source/isaaclab_teleop/setup.py new file mode 100644 index 000000000000..7ecec8340abb --- /dev/null +++ b/source/isaaclab_teleop/setup.py @@ -0,0 +1,48 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_teleop' python package.""" + +import os + +import toml +from setuptools import find_packages, setup + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +SUPPORTED_ARCHS = "platform_machine in 'x86_64,AMD64'" + +# Minimum dependencies required prior to installation +INSTALL_REQUIRES = [ + # IsaacTeleop is only available on Linux x86_64 + f"isaacteleop[retargeters,ui,cloudxr]~=1.2.0 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS})", + # required by isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1_t2_dex_retargeting_utils + f"dex-retargeting==0.5.0 ; platform_system == 'Linux' and ({SUPPORTED_ARCHS})", +] + +# Installation operation +setup( + name="isaaclab_teleop", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + include_package_data=True, + package_data={"": ["*.pyi", "*.env"]}, + python_requires=">=3.12", + install_requires=INSTALL_REQUIRES, + packages=find_packages(), + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_teleop/test/test_cloudxr_lifecycle.py b/source/isaaclab_teleop/test/test_cloudxr_lifecycle.py new file mode 100644 index 000000000000..43131f70cfc3 --- /dev/null +++ b/source/isaaclab_teleop/test/test_cloudxr_lifecycle.py @@ -0,0 +1,341 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none + +"""Tests for CloudXR runtime auto-launch lifecycle. + +These tests exercise the CloudXR auto-launch logic in +:class:`~isaaclab_teleop.session_lifecycle.TeleopSessionLifecycle` without +requiring the Omniverse/Isaac Sim stack or a real CloudXR installation. + +All heavy dependencies (isaacteleop, carb, omni.kit, isaacsim) are +stubbed out via ``sys.modules`` and ``unittest.mock`` so these tests run +in a plain Python environment. +""" + +from __future__ import annotations + +import os +import sys +from pathlib import Path +from types import ModuleType +from unittest.mock import MagicMock, patch + +import pytest + +# --------------------------------------------------------------------------- +# Stub out isaacteleop and Kit modules before any isaaclab_teleop imports. +# TeleopSessionLifecycle.__init__ tries to import several Kit/XR modules; +# we inject stubs so the constructor can complete without Omniverse. +# --------------------------------------------------------------------------- + +_MODULES_TO_STUB = [ + "isaacteleop", + "isaacteleop.cloudxr", + "isaacteleop.oxr", + "isaacteleop.retargeting_engine", + "isaacteleop.retargeting_engine.interface", + "isaacteleop.retargeting_engine.interface.execution_events", + "isaacteleop.retargeting_engine.interface.retargeter_core_types", + "isaacteleop.retargeting_engine.interface.tensor_group_type", + "isaacteleop.retargeting_engine.deviceio_source_nodes", + "isaacteleop.retargeting_engine.deviceio_source_nodes.deviceio_tensor_types", + "isaacteleop.retargeting_engine_ui", + "isaacteleop.teleop_session_manager", + "isaacteleop.teleop_session_manager.teleop_state_manager_retargeter", + "isaacteleop.teleop_session_manager.teleop_state_manager_types", + "isaacsim", + "isaacsim.kit", + "isaacsim.kit.xr", + "isaacsim.kit.xr.teleop", + "isaacsim.kit.xr.teleop.bridge", + "carb", + "carb.settings", + "carb.eventdispatcher", + "omni", + "omni.kit", + "omni.kit.app", + "omni.kit.xr", + "omni.kit.xr.system", + "omni.kit.xr.system.openxr", +] + +_stubs_installed: dict[str, ModuleType | MagicMock] = {} + + +def _install_stubs(): + """Insert MagicMock modules for all heavy dependencies.""" + for name in _MODULES_TO_STUB: + if name not in sys.modules: + _stubs_installed[name] = MagicMock() + sys.modules[name] = _stubs_installed[name] + + +_install_stubs() + +from isaaclab_teleop.isaac_teleop_cfg import ( # noqa: E402 + CLOUDXR_AVP_ENV, + CLOUDXR_JS_ENV, + IsaacTeleopCfg, +) +from isaaclab_teleop.session_lifecycle import TeleopSessionLifecycle # noqa: E402 + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_cfg() -> IsaacTeleopCfg: + """Build a minimal IsaacTeleopCfg with a dummy pipeline_builder.""" + return IsaacTeleopCfg( + pipeline_builder=lambda: MagicMock(), + control_channel_uuid=None, + ) + + +def _make_lifecycle( + cloudxr_env_file: str | None = None, + auto_launch_cloudxr: bool = True, +) -> TeleopSessionLifecycle: + """Create a TeleopSessionLifecycle with Kit dependencies safely stubbed.""" + cfg = _make_cfg() + return TeleopSessionLifecycle( + cfg, + cloudxr_env_file=cloudxr_env_file, + auto_launch_cloudxr=auto_launch_cloudxr, + ) + + +# ============================================================================ +# Shipped .env profile paths +# ============================================================================ + + +class TestEnvProfilePaths: + """Tests for the shipped .env profile path constants.""" + + def test_avp_env_is_absolute_path(self): + assert os.path.isabs(CLOUDXR_AVP_ENV) + + def test_js_env_is_absolute_path(self): + assert os.path.isabs(CLOUDXR_JS_ENV) + + def test_avp_env_file_exists(self): + assert Path(CLOUDXR_AVP_ENV).is_file(), f"Missing: {CLOUDXR_AVP_ENV}" + + def test_js_env_file_exists(self): + assert Path(CLOUDXR_JS_ENV).is_file(), f"Missing: {CLOUDXR_JS_ENV}" + + def test_avp_env_filename(self): + assert Path(CLOUDXR_AVP_ENV).name == "avp-cloudxr.env" + + def test_js_env_filename(self): + assert Path(CLOUDXR_JS_ENV).name == "cloudxrjs-cloudxr.env" + + def test_profiles_are_in_same_directory(self): + assert Path(CLOUDXR_AVP_ENV).parent == Path(CLOUDXR_JS_ENV).parent + + +# ============================================================================ +# _ensure_cloudxr_runtime +# ============================================================================ + + +class TestEnsureCloudXRRuntime: + """Tests for the ``_ensure_cloudxr_runtime`` method on TeleopSessionLifecycle.""" + + def test_skip_when_env_var_set(self): + """ISAACLAB_CXR_SKIP_AUTOLAUNCH=1 skips the launch entirely.""" + lifecycle = _make_lifecycle(cloudxr_env_file="/tmp/test.env") + + with patch.dict(os.environ, {"ISAACLAB_CXR_SKIP_AUTOLAUNCH": "1"}): + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is None + + def test_skip_when_env_var_set_with_whitespace(self): + """Whitespace around the env var value is stripped before comparison.""" + lifecycle = _make_lifecycle(cloudxr_env_file="/tmp/test.env") + + with patch.dict(os.environ, {"ISAACLAB_CXR_SKIP_AUTOLAUNCH": " 1 "}): + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is None + + def test_no_skip_when_env_var_zero(self): + """ISAACLAB_CXR_SKIP_AUTOLAUNCH=0 does NOT skip the launch.""" + mock_cls = MagicMock() + fake_module = MagicMock() + fake_module.CloudXRLauncher = mock_cls + + lifecycle = _make_lifecycle(cloudxr_env_file="/tmp/test.env") + + with ( + patch.dict(os.environ, {"ISAACLAB_CXR_SKIP_AUTOLAUNCH": "0"}), + patch.dict(sys.modules, {"isaacteleop.cloudxr": fake_module}), + ): + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is not None + + def test_skip_when_auto_launch_false(self): + """auto_launch_cloudxr=False skips the launch.""" + lifecycle = _make_lifecycle( + cloudxr_env_file="/tmp/test.env", + auto_launch_cloudxr=False, + ) + + with patch.dict(os.environ, {}, clear=False): + os.environ.pop("ISAACLAB_CXR_SKIP_AUTOLAUNCH", None) + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is None + + def test_idempotency(self): + """Calling _ensure_cloudxr_runtime twice does not create a second launcher.""" + sentinel = MagicMock() + lifecycle = _make_lifecycle(cloudxr_env_file="/tmp/test.env") + lifecycle._cloudxr_launcher = sentinel + + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is sentinel + + def test_launches_with_correct_args(self): + """CloudXRLauncher is constructed with hardcoded install_dir/accept_eula and the env file.""" + mock_cls = MagicMock() + lifecycle = _make_lifecycle(cloudxr_env_file="/etc/cxr.env") + + fake_module = MagicMock() + fake_module.CloudXRLauncher = mock_cls + + with ( + patch.dict(os.environ, {}, clear=False), + patch.dict(sys.modules, {"isaacteleop.cloudxr": fake_module}), + ): + os.environ.pop("ISAACLAB_CXR_SKIP_AUTOLAUNCH", None) + lifecycle._ensure_cloudxr_runtime() + + mock_cls.assert_called_once_with( + install_dir=str(Path.home() / ".cloudxr"), + env_config="/etc/cxr.env", + accept_eula=False, + ) + assert lifecycle._cloudxr_launcher is mock_cls.return_value + + def test_env_var_takes_precedence_over_auto_launch(self): + """ISAACLAB_CXR_SKIP_AUTOLAUNCH=1 overrides auto_launch_cloudxr=True.""" + lifecycle = _make_lifecycle( + cloudxr_env_file="/tmp/test.env", + auto_launch_cloudxr=True, + ) + + with patch.dict(os.environ, {"ISAACLAB_CXR_SKIP_AUTOLAUNCH": "1"}): + lifecycle._ensure_cloudxr_runtime() + + assert lifecycle._cloudxr_launcher is None + + +# ============================================================================ +# Lifecycle start/stop integration with CloudXR +# ============================================================================ + + +class TestLifecycleCloudXRIntegration: + """Tests for CloudXR launch/shutdown within the start()/stop() lifecycle.""" + + def _make_started_lifecycle(self) -> tuple[TeleopSessionLifecycle, MagicMock]: + """Build a lifecycle whose start() has been called with a mock launcher.""" + mock_cls = MagicMock() + lifecycle = _make_lifecycle(cloudxr_env_file="/tmp/test.env") + + fake_cxr_module = MagicMock() + fake_cxr_module.CloudXRLauncher = mock_cls + + fake_teleop_modules = { + "isaacteleop.cloudxr": fake_cxr_module, + "isaacteleop.retargeting_engine.deviceio_source_nodes": MagicMock(), + "isaacteleop.retargeting_engine.interface": MagicMock(), + } + + with ( + patch.dict(os.environ, {}, clear=False), + patch.dict(sys.modules, fake_teleop_modules), + ): + os.environ.pop("ISAACLAB_CXR_SKIP_AUTOLAUNCH", None) + lifecycle.start() + + return lifecycle, mock_cls.return_value + + def test_start_launches_runtime(self): + """start() invokes _ensure_cloudxr_runtime when cloudxr_env_file is set.""" + lifecycle, mock_launcher = self._make_started_lifecycle() + assert lifecycle._cloudxr_launcher is mock_launcher + + def test_stop_calls_launcher_stop(self): + """stop() calls CloudXRLauncher.stop().""" + lifecycle, mock_launcher = self._make_started_lifecycle() + lifecycle.stop() + mock_launcher.stop.assert_called_once() + + def test_stop_clears_launcher_on_success(self): + """After a successful stop(), _cloudxr_launcher is set to None.""" + lifecycle, mock_launcher = self._make_started_lifecycle() + lifecycle.stop() + assert lifecycle._cloudxr_launcher is None + + def test_stop_retains_launcher_on_runtime_error(self): + """When CloudXRLauncher.stop() raises RuntimeError, the launcher is retained for atexit.""" + lifecycle, mock_launcher = self._make_started_lifecycle() + mock_launcher.stop.side_effect = RuntimeError("process not found") + + lifecycle.stop() + + assert lifecycle._cloudxr_launcher is mock_launcher + + def test_start_without_cloudxr_env_file(self): + """start() works normally when no cloudxr_env_file is provided.""" + lifecycle = _make_lifecycle(cloudxr_env_file=None) + + fake_teleop_modules = { + "isaacteleop.retargeting_engine.deviceio_source_nodes": MagicMock(), + "isaacteleop.retargeting_engine.interface": MagicMock(), + } + + with patch.dict(sys.modules, fake_teleop_modules): + lifecycle.start() + + assert lifecycle._cloudxr_launcher is None + + def test_start_with_auto_launch_disabled(self): + """start() skips CloudXR launch when auto_launch_cloudxr=False.""" + lifecycle = _make_lifecycle( + cloudxr_env_file="/tmp/test.env", + auto_launch_cloudxr=False, + ) + + fake_teleop_modules = { + "isaacteleop.retargeting_engine.deviceio_source_nodes": MagicMock(), + "isaacteleop.retargeting_engine.interface": MagicMock(), + } + + with ( + patch.dict(os.environ, {}, clear=False), + patch.dict(sys.modules, fake_teleop_modules), + ): + os.environ.pop("ISAACLAB_CXR_SKIP_AUTOLAUNCH", None) + lifecycle.start() + + assert lifecycle._cloudxr_launcher is None + + def test_stop_without_cloudxr_env_file(self): + """stop() does not error when no CloudXR launcher was created.""" + lifecycle = _make_lifecycle(cloudxr_env_file=None) + lifecycle.stop() + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/isaaclab_teleop/test/test_control_events.py b/source/isaaclab_teleop/test/test_control_events.py new file mode 100644 index 000000000000..8bc05d3f957c --- /dev/null +++ b/source/isaaclab_teleop/test/test_control_events.py @@ -0,0 +1,586 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none + +"""Tests for TeleopMessageProcessor, _classify_command, _extract_command, +and poll_control_events. + +These tests exercise pure logic (no Omniverse/Isaac Sim stack required). +The message processor is tested by calling its ``_compute_fn`` method +directly with fake pipeline I/O, mirroring how TeleopCore's +``teleop_control_pipeline`` mechanism invokes it. +""" + +from __future__ import annotations + +import dataclasses +import json +import sys +from types import ModuleType +from unittest.mock import MagicMock + +import pytest + +# --------------------------------------------------------------------------- +# Stub out isaacteleop modules before any isaaclab_teleop imports so the +# tests can run in a plain Python environment without Omniverse. +# --------------------------------------------------------------------------- + +_MODULES_TO_STUB = [ + "isaacteleop", + "isaacteleop.deviceio", + "isaacteleop.deviceio_trackers", + "isaacteleop.retargeting_engine", + "isaacteleop.retargeting_engine.deviceio_source_nodes", + "isaacteleop.retargeting_engine.deviceio_source_nodes.deviceio_tensor_types", + "isaacteleop.retargeting_engine.interface", + "isaacteleop.retargeting_engine.interface.retargeter_core_types", + "isaacteleop.retargeting_engine.interface.tensor_group_type", + "isaacteleop.retargeting_engine_ui", + "isaacteleop.schema", + "isaacteleop.teleop_session_manager", + "isaacteleop.teleop_session_manager.teleop_state_manager_retargeter", + "isaacteleop.teleop_session_manager.teleop_state_manager_types", +] + +_stubs: dict[str, ModuleType | MagicMock] = {} + + +def _install_stubs(): + for name in _MODULES_TO_STUB: + if name not in sys.modules: + _stubs[name] = MagicMock() + sys.modules[name] = _stubs[name] + + from enum import Enum + + class ExecutionState(str, Enum): + UNKNOWN = "unknown" + STOPPED = "stopped" + PAUSED = "paused" + RUNNING = "running" + + @dataclasses.dataclass + class ExecutionEvents: + reset: bool = False + execution_state: ExecutionState = ExecutionState.UNKNOWN + + ee_mod = sys.modules["isaacteleop.retargeting_engine.interface.execution_events"] = ModuleType( + "isaacteleop.retargeting_engine.interface.execution_events" + ) + ee_mod.ExecutionState = ExecutionState # type: ignore[attr-defined] + ee_mod.ExecutionEvents = ExecutionEvents # type: ignore[attr-defined] + + iface = sys.modules["isaacteleop.retargeting_engine.interface"] + iface.ExecutionState = ExecutionState # type: ignore[attr-defined] + iface.ExecutionEvents = ExecutionEvents # type: ignore[attr-defined] + iface.RetargeterIOType = dict # type: ignore[attr-defined] + + class FakeBaseRetargeter: + def __init__(self, name: str) -> None: + self.name = name + + iface.BaseRetargeter = FakeBaseRetargeter # type: ignore[attr-defined] + + tsm_types = sys.modules["isaacteleop.teleop_session_manager.teleop_state_manager_types"] + tsm_types.bool_signal = MagicMock # type: ignore[attr-defined] + + dt_mod = sys.modules["isaacteleop.retargeting_engine.deviceio_source_nodes.deviceio_tensor_types"] + dt_mod.MessageChannelMessagesTrackedGroup = MagicMock # type: ignore[attr-defined] + + +_install_stubs() + +from isaaclab_teleop.control_events import ControlEvents, poll_control_events # noqa: E402 +from isaaclab_teleop.teleop_message_processor import ( # noqa: E402 + TeleopMessageProcessor, + _classify_command, + _extract_command, +) + +# --------------------------------------------------------------------------- +# Test doubles for MessageChannelMessagesTrackedT +# --------------------------------------------------------------------------- + + +@dataclasses.dataclass +class _FakePayload: + payload: bytes + + +@dataclasses.dataclass +class _FakeTracked: + data: list[_FakePayload] | None = None + + +def _tracked(*payloads: bytes) -> _FakeTracked: + """Build a lightweight stand-in for ``MessageChannelMessagesTrackedT``.""" + return _FakeTracked(data=[_FakePayload(p) for p in payloads]) + + +def _empty_tracked() -> _FakeTracked: + return _FakeTracked(data=[]) + + +def _null_tracked() -> _FakeTracked: + return _FakeTracked(data=None) + + +def _make_inputs(messages_tracked): + """Build a fake RetargeterIO dict for the processor.""" + tg = MagicMock() + tg.__getitem__ = MagicMock(return_value=messages_tracked) + return {TeleopMessageProcessor.INPUT_MESSAGES: tg} + + +class _FakeOutputSlot: + """Captures ``outputs["key"][0] = value`` assignments.""" + + def __init__(self): + self.value = None + + def __setitem__(self, idx, val): + self.value = val + + def __getitem__(self, idx): + return self.value + + +def _make_outputs(): + """Build a fake outputs dict with capturable slots.""" + return {"run_toggle": _FakeOutputSlot(), "kill": _FakeOutputSlot(), "reset": _FakeOutputSlot()} + + +def _step(proc, messages_tracked) -> dict: + """Run the processor's _compute_fn and return captured outputs.""" + inputs = _make_inputs(messages_tracked) + outputs = _make_outputs() + proc._compute_fn(inputs, outputs, context=None) + return {k: v.value for k, v in outputs.items()} + + +# =========================================================================== +# TeleopMessageProcessor: basic command parsing +# =========================================================================== + + +class TestStartCommand: + def test_start_sets_run_toggle(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"start")) + assert result["run_toggle"] is True + assert result["kill"] is False + assert result["reset"] is False + + def test_start_does_not_set_reset(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"start")) + assert result["reset"] is False + + +class TestStopCommand: + def test_stop_from_stopped_is_noop(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"stop")) + assert result["run_toggle"] is False + assert result["kill"] is False + + +class TestResetCommand: + def test_reset_sets_reset_flag(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"reset")) + assert result["reset"] is True + assert result["run_toggle"] is False + assert result["kill"] is False + + +class TestResetPulseBehaviour: + def test_reset_clears_on_next_step(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"reset")) + assert result["reset"] is True + + result = _step(proc, _empty_tracked()) + assert result["reset"] is False + + +class TestKillAlwaysFalse: + def test_kill_is_always_false(self): + proc = TeleopMessageProcessor(name="test") + for payload in [b"start", b"stop", b"reset", b"hello"]: + result = _step(proc, _tracked(payload)) + assert result["kill"] is False + + +# =========================================================================== +# Shadow state and toggle sequences +# =========================================================================== + + +class TestStartFromStopped: + """``start`` from STOPPED needs 2 toggle edges over 3 frames.""" + + def test_full_sequence_reaches_running(self): + proc = TeleopMessageProcessor(name="test") + # Frame 0: "start" received, first toggle edge queued + r0 = _step(proc, _tracked(b"start")) + assert r0["run_toggle"] is True # edge 1: STOPPED -> PAUSED + + # Frame 1: queue drains False (prev resets) + r1 = _step(proc, _empty_tracked()) + assert r1["run_toggle"] is False + + # Frame 2: queue drains True (second edge) + r2 = _step(proc, _empty_tracked()) + assert r2["run_toggle"] is True # edge 2: PAUSED -> RUNNING + + # Frame 3: queue empty, back to idle + r3 = _step(proc, _empty_tracked()) + assert r3["run_toggle"] is False + + def test_shadow_state_is_running_after_sequence(self): + proc = TeleopMessageProcessor(name="test") + _step(proc, _tracked(b"start")) + _step(proc, _empty_tracked()) + _step(proc, _empty_tracked()) + assert proc._shadow_state == "running" + + +class TestStartFromPaused: + """``start`` from PAUSED needs 1 toggle edge.""" + + def test_single_edge_reaches_running(self): + proc = TeleopMessageProcessor(name="test") + # Drive to RUNNING: start sequence plays 3 frames + _step(proc, _tracked(b"start")) + _step(proc, _empty_tracked()) + _step(proc, _empty_tracked()) + assert proc._shadow_state == "running" + + # Stop to reach PAUSED (prev_toggle is True from start sequence, + # so a False is prepended before the toggle edge) + _step(proc, _tracked(b"stop")) # drains False (prepended) + r_stop_edge = _step(proc, _empty_tracked()) # drains True (edge) + assert r_stop_edge["run_toggle"] is True + assert proc._shadow_state == "paused" + + # Start from PAUSED: prev_toggle is True, so False prepended + _step(proc, _tracked(b"start")) # drains False (prepended) + r_start_edge = _step(proc, _empty_tracked()) # drains True (edge) + assert r_start_edge["run_toggle"] is True + assert proc._shadow_state == "running" + + +class TestStartFromRunning: + """``start`` when already RUNNING is a no-op.""" + + def test_start_from_running_noop(self): + proc = TeleopMessageProcessor(name="test") + _step(proc, _tracked(b"start")) + _step(proc, _empty_tracked()) + _step(proc, _empty_tracked()) + assert proc._shadow_state == "running" + + result = _step(proc, _tracked(b"start")) + assert result["run_toggle"] is False + + +class TestStopFromRunning: + """``stop`` from RUNNING uses one toggle edge to reach PAUSED.""" + + def test_stop_pauses(self): + proc = TeleopMessageProcessor(name="test") + _step(proc, _tracked(b"start")) + _step(proc, _empty_tracked()) + _step(proc, _empty_tracked()) + assert proc._shadow_state == "running" + + # prev_toggle is True, so stop prepends False before the edge + r0 = _step(proc, _tracked(b"stop")) + assert r0["run_toggle"] is False # prepended False + r1 = _step(proc, _empty_tracked()) + assert r1["run_toggle"] is True # edge: RUNNING -> PAUSED + assert proc._shadow_state == "paused" + + +class TestStopFromPaused: + """``stop`` when already PAUSED is a no-op.""" + + def test_stop_from_paused_noop(self): + proc = TeleopMessageProcessor(name="test") + _step(proc, _tracked(b"start")) + _step(proc, _empty_tracked()) + _step(proc, _empty_tracked()) + # Stop to PAUSED + _step(proc, _tracked(b"stop")) + _step(proc, _empty_tracked()) + assert proc._shadow_state == "paused" + + result = _step(proc, _tracked(b"stop")) + assert result["run_toggle"] is False + + +class TestCommandDuringToggleSequence: + """Commands received while a toggle sequence is in progress are ignored.""" + + def test_second_start_during_sequence_ignored(self): + proc = TeleopMessageProcessor(name="test") + _step(proc, _tracked(b"start")) # starts the 3-frame sequence + # Second start during the sequence should not restart it + r1 = _step(proc, _tracked(b"start")) + assert r1["run_toggle"] is False # draining the False from queue + + r2 = _step(proc, _empty_tracked()) + assert r2["run_toggle"] is True # second edge fires normally + + +# =========================================================================== +# inject_reset +# =========================================================================== + + +class TestInjectReset: + def test_inject_reset_produces_pulse(self): + proc = TeleopMessageProcessor(name="test") + proc.inject_reset() + result = _step(proc, _empty_tracked()) + assert result["reset"] is True + + def test_inject_reset_clears_after_one_step(self): + proc = TeleopMessageProcessor(name="test") + proc.inject_reset() + _step(proc, _empty_tracked()) + result = _step(proc, _empty_tracked()) + assert result["reset"] is False + + def test_inject_reset_combines_with_message_reset(self): + proc = TeleopMessageProcessor(name="test") + proc.inject_reset() + result = _step(proc, _tracked(b"reset")) + assert result["reset"] is True + + def test_inject_reset_independent_of_toggle(self): + proc = TeleopMessageProcessor(name="test") + proc.inject_reset() + result = _step(proc, _tracked(b"start")) + assert result["run_toggle"] is True + assert result["reset"] is True + + +# =========================================================================== +# Word boundary matching +# =========================================================================== + + +class TestWordBoundaryMatching: + @pytest.mark.parametrize("payload", [b"teleop start", b"xr start session", b"start now"]) + def test_start_word(self, payload: bytes): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(payload)) + assert result["run_toggle"] is True + + @pytest.mark.parametrize("payload", [b"teleop reset", b"env reset"]) + def test_reset_word(self, payload: bytes): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(payload)) + assert result["reset"] is True + + +class TestAmbiguousPayloads: + def test_reset_wins_over_start(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"reset and start")) + assert result["reset"] is True + assert result["run_toggle"] is False + + +# =========================================================================== +# Empty, null, and malformed batches +# =========================================================================== + + +class TestEmptyAndNullBatches: + def test_empty_data_list(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _empty_tracked()) + assert result["run_toggle"] is False + assert result["kill"] is False + assert result["reset"] is False + + def test_null_data(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _null_tracked()) + assert result["run_toggle"] is False + + def test_none_input(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, None) + assert result["run_toggle"] is False + + +class TestMultipleMessagesInBatch: + def test_start_then_reset_in_one_batch(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"start", b"reset")) + assert result["run_toggle"] is True + assert result["reset"] is True + + +class TestMalformedPayloads: + def test_invalid_utf8(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(b"\xff\xfe")) + assert result["run_toggle"] is False + assert result["kill"] is False + assert result["reset"] is False + + def test_none_payload(self): + proc = TeleopMessageProcessor(name="test") + tracked = _FakeTracked(data=[_FakePayload(payload=None)]) # type: ignore[arg-type] + result = _step(proc, tracked) + assert result["run_toggle"] is False + + +# =========================================================================== +# JSON format tests (Quest client sends JSON teleop_command messages) +# =========================================================================== + + +def _json_command(command: str) -> bytes: + """Build a Quest-style JSON teleop_command payload.""" + return json.dumps({"type": "teleop_command", "message": {"command": command}}).encode("utf-8") + + +class TestJsonFormat: + def test_json_start_teleop(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(_json_command("start teleop"))) + assert result["run_toggle"] is True + + def test_json_stop_teleop_from_stopped_noop(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(_json_command("stop teleop"))) + assert result["run_toggle"] is False + + def test_json_reset_teleop(self): + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(_json_command("reset teleop"))) + assert result["reset"] is True + + def test_json_wrong_type_ignored(self): + payload = json.dumps({"type": "other_event", "message": {"command": "start"}}).encode("utf-8") + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(payload)) + assert result["run_toggle"] is False + + def test_json_message_as_string(self): + payload = json.dumps({"type": "teleop_command", "message": "start teleop"}).encode("utf-8") + proc = TeleopMessageProcessor(name="test") + result = _step(proc, _tracked(payload)) + assert result["run_toggle"] is True + + +# =========================================================================== +# _extract_command unit tests +# =========================================================================== + + +class TestExtractCommand: + def test_plain_text(self): + assert _extract_command("start teleop") == "start teleop" + + def test_json_teleop_command(self): + text = json.dumps({"type": "teleop_command", "message": {"command": "stop"}}) + assert _extract_command(text) == "stop" + + def test_json_wrong_type(self): + text = json.dumps({"type": "other", "message": {"command": "start"}}) + assert _extract_command(text) is None + + def test_json_no_message_key(self): + text = json.dumps({"type": "teleop_command"}) + assert _extract_command(text) is None + + def test_json_non_dict_value_returns_none(self): + assert _extract_command("42") is None + assert _extract_command("[1, 2, 3]") is None + assert _extract_command("true") is None + + +# =========================================================================== +# _classify_command unit tests +# =========================================================================== + + +class TestClassifyCommand: + def test_exact_words(self): + assert _classify_command("start") == "start" + assert _classify_command("stop") == "stop" + assert _classify_command("reset") == "reset" + + def test_word_boundary_prevents_false_match(self): + assert _classify_command("upstart") is None + assert _classify_command("nonstop") is None + assert _classify_command("unreset") is None + + def test_reset_beats_start(self): + assert _classify_command("reset and start") == "reset" + + def test_stop_beats_start(self): + assert _classify_command("stop and start") == "stop" + + def test_unrecognized_text(self): + assert _classify_command("hello world") is None + + def test_case_insensitive(self): + assert _classify_command("START") == "start" + assert _classify_command("Stop Teleop") == "stop" + assert _classify_command("RESET NOW") == "reset" + + +# =========================================================================== +# poll_control_events tests +# =========================================================================== + + +class TestPollControlEvents: + def test_plain_object_returns_default(self): + result = poll_control_events(object()) + assert result.is_active is None + assert result.should_reset is False + + def test_device_with_control_events(self): + class FakeDevice: + @property + def last_control_events(self): + return ControlEvents(is_active=True, should_reset=True) + + result = poll_control_events(FakeDevice()) + assert result.is_active is True + assert result.should_reset is True + + def test_device_with_none_events(self): + class FakeDevice: + last_control_events = None + + result = poll_control_events(FakeDevice()) + assert result.is_active is None + assert result.should_reset is False + + def test_duck_typed_snapshot(self): + class FakeSnapshot: + is_active = False + should_reset = True + + class FakeDevice: + @property + def last_control_events(self): + return FakeSnapshot() + + result = poll_control_events(FakeDevice()) + assert result.is_active is False + assert result.should_reset is True diff --git a/source/isaaclab_teleop/test/test_openxr_device_constructors.py b/source/isaaclab_teleop/test/test_openxr_device_constructors.py new file mode 100644 index 000000000000..ddc4ce275864 --- /dev/null +++ b/source/isaaclab_teleop/test/test_openxr_device_constructors.py @@ -0,0 +1,248 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import importlib +from typing import cast + +import pytest +from isaaclab_teleop.deprecated.openxr import OpenXRDevice, OpenXRDeviceCfg, XrCfg +from isaaclab_teleop.deprecated.openxr.retargeters import GripperRetargeterCfg, Se3AbsRetargeterCfg + +# Import teleop device factory for testing +from isaaclab_teleop.deprecated.teleop_device_factory import create_teleop_device + +# Import device classes to test +from isaaclab.devices import ( + DeviceCfg, + Se3Keyboard, + Se3KeyboardCfg, +) + + +@pytest.fixture +def mock_environment(mocker): + """Set up common mock objects for tests.""" + carb_mock = mocker.MagicMock() + omni_mock = mocker.MagicMock() + appwindow_mock = mocker.MagicMock() + keyboard_mock = mocker.MagicMock() + gamepad_mock = mocker.MagicMock() + input_mock = mocker.MagicMock() + settings_mock = mocker.MagicMock() + hid_mock = mocker.MagicMock() + device_mock = mocker.MagicMock() + + omni_mock.appwindow.get_default_app_window.return_value = appwindow_mock + appwindow_mock.get_keyboard.return_value = keyboard_mock + appwindow_mock.get_gamepad.return_value = gamepad_mock + carb_mock.input.acquire_input_interface.return_value = input_mock + carb_mock.settings.get_settings.return_value = settings_mock + + carb_mock.input.KeyboardEventType.KEY_PRESS = 1 + carb_mock.input.KeyboardEventType.KEY_RELEASE = 2 + + events_mock = mocker.MagicMock() + events_mock.type_from_string.return_value = 0 + carb_mock.events = events_mock + + hid_mock.enumerate.return_value = [{"product_string": "SpaceMouse Compact", "vendor_id": 123, "product_id": 456}] + hid_mock.device.return_value = device_mock + + message_bus_mock = mocker.MagicMock() + singleton_mock = mocker.MagicMock() + omni_mock.kit.xr.core.XRCore.get_singleton.return_value = singleton_mock + singleton_mock.get_message_bus.return_value = message_bus_mock + omni_mock.kit.xr.core.XRPoseValidityFlags.POSITION_VALID = 1 + omni_mock.kit.xr.core.XRPoseValidityFlags.ORIENTATION_VALID = 2 + + websockets_mock = mocker.MagicMock() + websocket_mock = mocker.MagicMock() + websockets_mock.connect.return_value.__aenter__.return_value = websocket_mock + + return { + "carb": carb_mock, + "omni": omni_mock, + "appwindow": appwindow_mock, + "keyboard": keyboard_mock, + "gamepad": gamepad_mock, + "input": input_mock, + "settings": settings_mock, + "hid": hid_mock, + "device": device_mock, + "websockets": websockets_mock, + "websocket": websocket_mock, + } + + +""" +Test OpenXR devices. +""" + + +def test_openxr_constructors(mock_environment, mocker): + """Test constructor for OpenXRDevice.""" + xr_cfg = XrCfg( + anchor_pos=(1.0, 2.0, 3.0), + anchor_rot=(0.0, 0.1, 0.2, 0.3), + near_plane=0.2, + ) + config = OpenXRDeviceCfg(xr_cfg=xr_cfg) + + mock_controller_retargeter = mocker.MagicMock() + mock_head_retargeter = mocker.MagicMock() + retargeters = [mock_controller_retargeter, mock_head_retargeter] + + device_mod = importlib.import_module("isaaclab_teleop.deprecated.openxr.openxr_device") + mocker.patch.dict( + "sys.modules", + { + "carb": mock_environment["carb"], + "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, + }, + ) + mocker.patch.object(device_mod, "carb", mock_environment["carb"]) + mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) + mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) + + mock_stage = mocker.MagicMock() + mock_prim = mocker.MagicMock() + mock_prim.IsValid.return_value = False + mock_stage.GetPrimAtPath.return_value = mock_prim + mocker.patch.object(device_mod, "sim_utils", mocker.MagicMock()) + device_mod.sim_utils.get_current_stage.return_value = mock_stage + device_mod.sim_utils.create_prim.return_value = None + + device = OpenXRDevice(config) + assert device._xr_cfg == xr_cfg + + device = OpenXRDevice(cfg=config, retargeters=retargeters) + assert device._retargeters == retargeters + + device = OpenXRDevice(cfg=config, retargeters=retargeters) + assert device._xr_cfg == xr_cfg + assert device._retargeters == retargeters + + device.reset() + + +""" +Test teleop device factory. +""" + + +def test_create_teleop_device_basic(mock_environment, mocker): + """Test creating devices using the teleop device factory.""" + keyboard_cfg = Se3KeyboardCfg(pos_sensitivity=0.8, rot_sensitivity=1.2) + devices_cfg: dict[str, DeviceCfg] = {"test_keyboard": keyboard_cfg} + + device_mod = importlib.import_module("isaaclab.devices.keyboard.se3_keyboard") + mocker.patch.dict("sys.modules", {"carb": mock_environment["carb"], "omni": mock_environment["omni"]}) + mocker.patch.object(device_mod, "carb", mock_environment["carb"]) + mocker.patch.object(device_mod, "omni", mock_environment["omni"]) + + device = create_teleop_device("test_keyboard", devices_cfg) + + assert isinstance(device, Se3Keyboard) + assert device.pos_sensitivity == 0.8 + assert device.rot_sensitivity == 1.2 + + +def test_create_teleop_device_with_callbacks(mock_environment, mocker): + """Test creating device with callbacks.""" + xr_cfg = XrCfg(anchor_pos=(0.0, 0.0, 0.0), anchor_rot=(0.0, 0.0, 0.0, 1.0), near_plane=0.15) + openxr_cfg = OpenXRDeviceCfg(xr_cfg=xr_cfg) + devices_cfg: dict[str, DeviceCfg] = {"test_xr": openxr_cfg} + + button_a_callback = mocker.MagicMock() + button_b_callback = mocker.MagicMock() + callbacks = {"button_a": button_a_callback, "button_b": button_b_callback} + + device_mod = importlib.import_module("isaaclab_teleop.deprecated.openxr.openxr_device") + mocker.patch.dict( + "sys.modules", + { + "carb": mock_environment["carb"], + "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, + }, + ) + mocker.patch.object(device_mod, "carb", mock_environment["carb"]) + mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) + mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) + + mock_stage = mocker.MagicMock() + mock_prim = mocker.MagicMock() + mock_prim.IsValid.return_value = False + mock_stage.GetPrimAtPath.return_value = mock_prim + mocker.patch.object(device_mod, "sim_utils", mocker.MagicMock()) + device_mod.sim_utils.get_current_stage.return_value = mock_stage + device_mod.sim_utils.create_prim.return_value = None + + device = create_teleop_device("test_xr", devices_cfg, callbacks) + + assert isinstance(device, OpenXRDevice) + assert set(device._additional_callbacks.keys()) == {"button_a", "button_b"} + + +def test_create_teleop_device_with_retargeters(mock_environment, mocker): + """Test creating device with retargeters.""" + retargeter_cfg1 = Se3AbsRetargeterCfg() + retargeter_cfg2 = GripperRetargeterCfg() + + xr_cfg = XrCfg() + device_cfg = OpenXRDeviceCfg(xr_cfg=xr_cfg, retargeters=[retargeter_cfg1, retargeter_cfg2]) + devices_cfg: dict[str, DeviceCfg] = {"test_xr": device_cfg} + + device_mod = importlib.import_module("isaaclab_teleop.deprecated.openxr.openxr_device") + mocker.patch.dict( + "sys.modules", + { + "carb": mock_environment["carb"], + "omni.kit.xr.core": mock_environment["omni"].kit.xr.core, + }, + ) + mocker.patch.object(device_mod, "carb", mock_environment["carb"]) + mocker.patch.object(device_mod, "XRCore", mock_environment["omni"].kit.xr.core.XRCore) + mocker.patch.object(device_mod, "XRPoseValidityFlags", mock_environment["omni"].kit.xr.core.XRPoseValidityFlags) + + mock_stage = mocker.MagicMock() + mock_prim = mocker.MagicMock() + mock_prim.IsValid.return_value = False + mock_stage.GetPrimAtPath.return_value = mock_prim + mocker.patch.object(device_mod, "sim_utils", mocker.MagicMock()) + device_mod.sim_utils.get_current_stage.return_value = mock_stage + device_mod.sim_utils.create_prim.return_value = None + + device = create_teleop_device("test_xr", devices_cfg) + + assert len(device._retargeters) == 2 + + +def test_create_teleop_device_device_not_found(): + """Test error when device name is not found in configuration.""" + devices_cfg: dict[str, DeviceCfg] = {"keyboard": Se3KeyboardCfg()} + + with pytest.raises(ValueError, match="Device 'gamepad' not found"): + create_teleop_device("gamepad", devices_cfg) + + +def test_create_teleop_device_unsupported_config(): + """Test error when device configuration type is not supported.""" + + class UnsupportedCfg: + pass + + devices_cfg: dict[str, DeviceCfg] = cast(dict[str, DeviceCfg], {"unsupported": UnsupportedCfg()}) + + with pytest.raises(ValueError, match="does not declare class_type"): + create_teleop_device("unsupported", devices_cfg) diff --git a/source/isaaclab/test/devices/test_oxr_device.py b/source/isaaclab_teleop/test/test_oxr_device.py similarity index 77% rename from source/isaaclab/test/devices/test_oxr_device.py rename to source/isaaclab_teleop/test/test_oxr_device.py index 6402d8e0c187..be2a604a6489 100644 --- a/source/isaaclab/test/devices/test_oxr_device.py +++ b/source/isaaclab_teleop/test/test_oxr_device.py @@ -22,13 +22,11 @@ import numpy as np import pytest import torch +from isaaclab_teleop.deprecated.openxr import OpenXRDevice, OpenXRDeviceCfg, XrCfg import carb -import omni.usd -from isaacsim.core.prims import XFormPrim -from isaaclab.devices import OpenXRDevice, OpenXRDeviceCfg -from isaaclab.devices.openxr import XrCfg +import isaaclab.sim as sim_utils from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg from isaaclab.scene import InteractiveSceneCfg @@ -141,7 +139,7 @@ def get_input_device_mock(device_path): head_mock.get_virtual_world_pose.return_value = pose_matrix_mock # Patch the modules - device_mod = importlib.import_module("isaaclab.devices.openxr.openxr_device") + device_mod = importlib.import_module("isaaclab_teleop.deprecated.openxr.openxr_device") mocker.patch.object(device_mod, "XRCore", xr_core_mock) mocker.patch.object(device_mod, "XRPoseValidityFlags", xr_pose_validity_flags_mock) @@ -160,7 +158,7 @@ def get_input_device_mock(device_path): def empty_env(): """Fixture to create and cleanup an empty environment.""" # Create a new stage - omni.usd.get_context().new_stage() + sim_utils.create_new_stage() # Create environment with config env_cfg = EmptyEnvCfg() env = ManagerBasedEnv(cfg=env_cfg) @@ -175,21 +173,23 @@ def empty_env(): def test_xr_anchor(empty_env, mock_xrcore): """Test XR anchor creation and configuration.""" env, env_cfg = empty_env - env_cfg.xr = XrCfg(anchor_pos=(1, 2, 3), anchor_rot=(0, 1, 0, 0)) + # Use xyzw format for quaternion: identity is (0, 0, 0, 1) + env_cfg.xr = XrCfg(anchor_pos=(1, 2, 3), anchor_rot=(0, 0, 0, 1)) device = OpenXRDevice(OpenXRDeviceCfg(xr_cfg=env_cfg.xr)) # Check that the xr anchor prim is created with the correct pose - xr_anchor_prim = XFormPrim("/World/XRAnchor") - assert xr_anchor_prim.is_valid() + xr_anchor_view = sim_utils.FrameView("/World/XRAnchor") + assert xr_anchor_view.count == 1 - position, orientation = xr_anchor_prim.get_world_poses() - np.testing.assert_almost_equal(position.tolist(), [[1, 2, 3]]) - np.testing.assert_almost_equal(orientation.tolist(), [[0, 1, 0, 0]]) + position, orientation = xr_anchor_view.get_world_poses() + np.testing.assert_almost_equal(position.torch.numpy(), [[1, 2, 3]]) + # FrameView returns quaternion in xyzw format, identity is [0, 0, 0, 1] + np.testing.assert_almost_equal(orientation.torch.numpy(), [[0, 0, 0, 1]]) # Check that xr anchor mode and custom anchor are set correctly - assert carb.settings.get_settings().get("/persistent/xr/profile/ar/anchorMode") == "custom anchor" - assert carb.settings.get_settings().get("/xrstage/profile/ar/customAnchor") == "/World/XRAnchor" + assert carb.settings.get_settings().get("/persistent/xr/anchorMode") == "custom anchor" + assert carb.settings.get_settings().get("/xrstage/customAnchor") == "/World/XRAnchor" device.reset() @@ -202,16 +202,16 @@ def test_xr_anchor_default(empty_env, mock_xrcore): device = OpenXRDevice(OpenXRDeviceCfg()) # Check that the xr anchor prim is created with the correct default pose - xr_anchor_prim = XFormPrim("/World/XRAnchor") - assert xr_anchor_prim.is_valid() + xr_anchor_view = sim_utils.FrameView("/World/XRAnchor") + assert xr_anchor_view.count == 1 - position, orientation = xr_anchor_prim.get_world_poses() - np.testing.assert_almost_equal(position.tolist(), [[0, 0, 0]]) - np.testing.assert_almost_equal(orientation.tolist(), [[1, 0, 0, 0]]) + position, orientation = xr_anchor_view.get_world_poses() + np.testing.assert_almost_equal(position.torch.numpy().tolist(), [[0, 0, 0]]) + np.testing.assert_almost_equal(orientation.torch.numpy().tolist(), [[0, 0, 0, 1]]) # Check that xr anchor mode and custom anchor are set correctly - assert carb.settings.get_settings().get("/persistent/xr/profile/ar/anchorMode") == "custom anchor" - assert carb.settings.get_settings().get("/xrstage/profile/ar/customAnchor") == "/World/XRAnchor" + assert carb.settings.get_settings().get("/persistent/xr/anchorMode") == "custom anchor" + assert carb.settings.get_settings().get("/xrstage/customAnchor") == "/World/XRAnchor" device.reset() @@ -225,16 +225,16 @@ def test_xr_anchor_multiple_devices(empty_env, mock_xrcore): device_2 = OpenXRDevice(OpenXRDeviceCfg()) # Check that the xr anchor prim is created with the correct default pose - xr_anchor_prim = XFormPrim("/World/XRAnchor") - assert xr_anchor_prim.is_valid() + xr_anchor_view = sim_utils.FrameView("/World/XRAnchor") + assert xr_anchor_view.count == 1 - position, orientation = xr_anchor_prim.get_world_poses() - np.testing.assert_almost_equal(position.tolist(), [[0, 0, 0]]) - np.testing.assert_almost_equal(orientation.tolist(), [[1, 0, 0, 0]]) + position, orientation = xr_anchor_view.get_world_poses() + np.testing.assert_almost_equal(position.torch.numpy().tolist(), [[0, 0, 0]]) + np.testing.assert_almost_equal(orientation.torch.numpy().tolist(), [[0, 0, 0, 1]]) # Check that xr anchor mode and custom anchor are set correctly - assert carb.settings.get_settings().get("/persistent/xr/profile/ar/anchorMode") == "custom anchor" - assert carb.settings.get_settings().get("/xrstage/profile/ar/customAnchor") == "/World/XRAnchor" + assert carb.settings.get_settings().get("/persistent/xr/anchorMode") == "custom anchor" + assert carb.settings.get_settings().get("/xrstage/customAnchor") == "/World/XRAnchor" device_1.reset() device_2.reset() @@ -265,12 +265,12 @@ def test_get_raw_data(empty_env, mock_xrcore): # Check that joint pose format is correct palm_pose = left_hand["palm"] - assert len(palm_pose) == 7 # [x, y, z, qw, qx, qy, qz] + assert len(palm_pose) == 7 # [x, y, z, qx, qy, qz, qw] np.testing.assert_almost_equal(palm_pose[:3], [0.1, 0.2, 0.3]) # Position - np.testing.assert_almost_equal(palm_pose[3:], [0.9, 0.1, 0.2, 0.3]) # Orientation + np.testing.assert_almost_equal(palm_pose[3:], [0.1, 0.2, 0.3, 0.9]) # Orientation # Check head pose head_pose = raw_data[DeviceBase.TrackingTarget.HEAD] - assert len(head_pose) == 7 + assert len(head_pose) == 7 # [x, y, z, qx, qy, qz, qw] np.testing.assert_almost_equal(head_pose[:3], [0.1, 0.2, 0.3]) # Position - np.testing.assert_almost_equal(head_pose[3:], [0.9, 0.1, 0.2, 0.3]) # Orientation + np.testing.assert_almost_equal(head_pose[3:], [0.1, 0.2, 0.3, 0.9]) # Orientation diff --git a/source/isaaclab/test/devices/test_retargeters.py b/source/isaaclab_teleop/test/test_retargeters.py similarity index 73% rename from source/isaaclab/test/devices/test_retargeters.py rename to source/isaaclab_teleop/test/test_retargeters.py index c080c4a43d9c..c7ad6a196fda 100644 --- a/source/isaaclab/test/devices/test_retargeters.py +++ b/source/isaaclab_teleop/test/test_retargeters.py @@ -36,45 +36,55 @@ # Import after mocking -from isaaclab.devices.device_base import DeviceBase -from isaaclab.devices.openxr.retargeters.humanoid.unitree.g1_lower_body_standing import ( +from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.g1_lower_body_standing import ( G1LowerBodyStandingRetargeter, G1LowerBodyStandingRetargeterCfg, ) -from isaaclab.devices.openxr.retargeters.humanoid.unitree.g1_motion_controller_locomotion import ( +from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.g1_motion_controller_locomotion import ( G1LowerBodyStandingMotionControllerRetargeter, G1LowerBodyStandingMotionControllerRetargeterCfg, ) -from isaaclab.devices.openxr.retargeters.manipulator.gripper_retargeter import GripperRetargeter, GripperRetargeterCfg -from isaaclab.devices.openxr.retargeters.manipulator.se3_abs_retargeter import Se3AbsRetargeter, Se3AbsRetargeterCfg -from isaaclab.devices.openxr.retargeters.manipulator.se3_rel_retargeter import Se3RelRetargeter, Se3RelRetargeterCfg +from isaaclab_teleop.deprecated.openxr.retargeters.manipulator.gripper_retargeter import ( + GripperRetargeter, + GripperRetargeterCfg, +) +from isaaclab_teleop.deprecated.openxr.retargeters.manipulator.se3_abs_retargeter import ( + Se3AbsRetargeter, + Se3AbsRetargeterCfg, +) +from isaaclab_teleop.deprecated.openxr.retargeters.manipulator.se3_rel_retargeter import ( + Se3RelRetargeter, + Se3RelRetargeterCfg, +) + +from isaaclab.devices.device_base import DeviceBase # Mock dex retargeting utils with patch.dict( sys.modules, { - "isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_dex_retargeting_utils": MagicMock(), - "isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1_t2_dex_retargeting_utils": MagicMock(), - "isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_dex_retargeting_utils": MagicMock(), + "isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.inspire.g1_dex_retargeting_utils": MagicMock(), + "isaaclab_teleop.deprecated.openxr.retargeters.humanoid.fourier.gr1_t2_dex_retargeting_utils": MagicMock(), + "isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.trihand.g1_dex_retargeting_utils": MagicMock(), }, ): - from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import ( + from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import ( GR1T2Retargeter, GR1T2RetargeterCfg, ) - from isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_upper_body_retargeter import ( + from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.inspire.g1_upper_body_retargeter import ( UnitreeG1Retargeter, UnitreeG1RetargeterCfg, ) - from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_motion_ctrl_gripper import ( + from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_motion_ctrl_gripper import ( # noqa: E501 G1TriHandUpperBodyMotionControllerGripperRetargeter, G1TriHandUpperBodyMotionControllerGripperRetargeterCfg, ) - from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_motion_ctrl_retargeter import ( + from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_motion_ctrl_retargeter import ( # noqa: E501 G1TriHandUpperBodyMotionControllerRetargeter, G1TriHandUpperBodyMotionControllerRetargeterCfg, ) - from isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( + from isaaclab_teleop.deprecated.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter import ( G1TriHandUpperBodyRetargeter, G1TriHandUpperBodyRetargeterCfg, ) @@ -89,9 +99,9 @@ def setUp(self): def test_retarget_defaults(self): # Mock input data - wrist_pose = np.array([0.1, 0.2, 0.3, 1.0, 0.0, 0.0, 0.0]) - thumb_tip_pose = np.array([0.15, 0.25, 0.35, 1.0, 0.0, 0.0, 0.0]) - index_tip_pose = np.array([0.15, 0.20, 0.35, 1.0, 0.0, 0.0, 0.0]) + wrist_pose = np.array([0.1, 0.2, 0.3, 0.0, 0.0, 0.0, 1.0]) + thumb_tip_pose = np.array([0.15, 0.25, 0.35, 0.0, 0.0, 0.0, 1.0]) + index_tip_pose = np.array([0.15, 0.20, 0.35, 0.0, 0.0, 0.0, 1.0]) data = { DeviceBase.TrackingTarget.HAND_RIGHT: { @@ -106,15 +116,15 @@ def test_retarget_defaults(self): self.assertIsInstance(result, torch.Tensor) self.assertEqual(result.shape, (7,)) np.testing.assert_allclose(result[:3].numpy(), wrist_pose[:3], rtol=1e-5) - self.assertAlmostEqual(torch.norm(result[3:]).item(), 1.0, places=4) + self.assertAlmostEqual(torch.linalg.norm(result[3:]).item(), 1.0, places=4) def test_pinch_position(self): self.cfg.use_wrist_position = False retargeter = Se3AbsRetargeter(self.cfg) - wrist_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - thumb_tip_pose = np.array([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - index_tip_pose = np.array([3.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + thumb_tip_pose = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + index_tip_pose = np.array([3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data = { DeviceBase.TrackingTarget.HAND_RIGHT: { @@ -143,9 +153,9 @@ def setUp(self): self.retargeter = Se3RelRetargeter(self.cfg) def test_retarget_movement(self): - wrist_pose_1 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - thumb_tip_pose_1 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - index_tip_pose_1 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose_1 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + thumb_tip_pose_1 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + index_tip_pose_1 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data_1 = { DeviceBase.TrackingTarget.HAND_LEFT: { @@ -157,9 +167,9 @@ def test_retarget_movement(self): _ = self.retargeter.retarget(data_1) - wrist_pose_2 = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - thumb_tip_pose_2 = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - index_tip_pose_2 = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose_2 = np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + thumb_tip_pose_2 = np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + index_tip_pose_2 = np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data_2 = { DeviceBase.TrackingTarget.HAND_LEFT: { @@ -182,8 +192,8 @@ def setUp(self): def test_gripper_logic(self): data_open = { DeviceBase.TrackingTarget.HAND_RIGHT: { - "thumb_tip": np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), - "index_tip": np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), + "thumb_tip": np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), + "index_tip": np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), } } result = self.retargeter.retarget(data_open) @@ -191,8 +201,8 @@ def test_gripper_logic(self): data_close = { DeviceBase.TrackingTarget.HAND_RIGHT: { - "thumb_tip": np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), - "index_tip": np.array([0.02, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), + "thumb_tip": np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), + "index_tip": np.array([0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), } } result = self.retargeter.retarget(data_close) @@ -208,23 +218,25 @@ def test_retarget(self): class TestUnitreeG1Retargeter(unittest.TestCase): - @patch( - "isaaclab.devices.openxr.retargeters.humanoid.unitree.inspire.g1_upper_body_retargeter.UnitreeG1DexRetargeting" - ) - def test_retarget(self, mock_dex_retargeting_cls): - mock_dex_retargeting = mock_dex_retargeting_cls.return_value + def test_retarget(self): + cfg = UnitreeG1RetargeterCfg( + enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"] + ) + retargeter = UnitreeG1Retargeter(cfg) + + # Replace _hands_controller with a configured mock. + # NOTE: We cannot use @patch on the module-level class because lazy_export() + # in parent __init__.py files replaces the module __dict__, so the class's + # __globals__ (old dict) diverges from the module attribute (new dict). + mock_dex_retargeting = MagicMock() mock_dex_retargeting.get_joint_names.return_value = ["joint1", "joint2"] mock_dex_retargeting.get_left_joint_names.return_value = ["joint1"] mock_dex_retargeting.get_right_joint_names.return_value = ["joint2"] mock_dex_retargeting.compute_left.return_value = np.array([0.1]) mock_dex_retargeting.compute_right.return_value = np.array([0.2]) + retargeter._hands_controller = mock_dex_retargeting - cfg = UnitreeG1RetargeterCfg( - enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"] - ) - retargeter = UnitreeG1Retargeter(cfg) - - wrist_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data = { DeviceBase.TrackingTarget.HAND_LEFT: {"wrist": wrist_pose}, DeviceBase.TrackingTarget.HAND_RIGHT: {"wrist": wrist_pose}, @@ -235,19 +247,20 @@ def test_retarget(self, mock_dex_retargeting_cls): class TestGR1T2Retargeter(unittest.TestCase): - @patch("isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter.GR1TR2DexRetargeting") - def test_retarget(self, mock_dex_retargeting_cls): - mock_dex_retargeting = mock_dex_retargeting_cls.return_value + def test_retarget(self): + cfg = GR1T2RetargeterCfg(enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"]) + retargeter = GR1T2Retargeter(cfg) + + # Replace _hands_controller with a configured mock (see TestUnitreeG1Retargeter note). + mock_dex_retargeting = MagicMock() mock_dex_retargeting.get_joint_names.return_value = ["joint1", "joint2"] mock_dex_retargeting.get_left_joint_names.return_value = ["joint1"] mock_dex_retargeting.get_right_joint_names.return_value = ["joint2"] mock_dex_retargeting.compute_left.return_value = np.array([0.1]) mock_dex_retargeting.compute_right.return_value = np.array([0.2]) + retargeter._hands_controller = mock_dex_retargeting - cfg = GR1T2RetargeterCfg(enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"]) - retargeter = GR1T2Retargeter(cfg) - - wrist_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data = { DeviceBase.TrackingTarget.HAND_LEFT: {"wrist": wrist_pose}, DeviceBase.TrackingTarget.HAND_RIGHT: {"wrist": wrist_pose}, @@ -297,7 +310,7 @@ def test_retarget(self): ) retargeter = G1TriHandUpperBodyMotionControllerGripperRetargeter(cfg) - pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) inputs_trigger_high = np.zeros(7) inputs_trigger_high[2] = 0.8 # Trigger @@ -325,7 +338,7 @@ def test_retarget(self): ) retargeter = G1TriHandUpperBodyMotionControllerRetargeter(cfg) - pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) inputs = np.zeros(7) data = { @@ -339,23 +352,22 @@ def test_retarget(self): class TestG1TriHandUpperBodyRetargeter(unittest.TestCase): - @patch( - "isaaclab.devices.openxr.retargeters.humanoid.unitree.trihand.g1_upper_body_retargeter.G1TriHandDexRetargeting" - ) - def test_retarget(self, mock_dex_retargeting_cls): - mock_dex_retargeting = mock_dex_retargeting_cls.return_value + def test_retarget(self): + cfg = G1TriHandUpperBodyRetargeterCfg( + enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"] + ) + retargeter = G1TriHandUpperBodyRetargeter(cfg) + + # Replace _hands_controller with a configured mock (see TestUnitreeG1Retargeter note). + mock_dex_retargeting = MagicMock() mock_dex_retargeting.get_joint_names.return_value = ["joint1", "joint2"] mock_dex_retargeting.get_left_joint_names.return_value = ["joint1"] mock_dex_retargeting.get_right_joint_names.return_value = ["joint2"] mock_dex_retargeting.compute_left.return_value = np.array([0.1]) mock_dex_retargeting.compute_right.return_value = np.array([0.2]) + retargeter._hands_controller = mock_dex_retargeting - cfg = G1TriHandUpperBodyRetargeterCfg( - enable_visualization=False, sim_device="cpu", hand_joint_names=["joint1", "joint2"] - ) - retargeter = G1TriHandUpperBodyRetargeter(cfg) - - wrist_pose = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + wrist_pose = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) data = { DeviceBase.TrackingTarget.HAND_LEFT: {"wrist": wrist_pose}, DeviceBase.TrackingTarget.HAND_RIGHT: {"wrist": wrist_pose}, diff --git a/source/isaaclab_teleop/test/test_target_frame_rebase.py b/source/isaaclab_teleop/test/test_target_frame_rebase.py new file mode 100644 index 000000000000..044b5da7d305 --- /dev/null +++ b/source/isaaclab_teleop/test/test_target_frame_rebase.py @@ -0,0 +1,264 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none + +"""Tests for the target-frame rebase logic, _to_numpy_4x4 helper, and config-driven auto-selection. + +These tests exercise pure math (no Omniverse/Isaac Sim stack required). +""" + +from __future__ import annotations + +import numpy as np +import pytest +import torch +from isaaclab_teleop.session_lifecycle import _to_numpy_4x4 + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def identity_4x4() -> np.ndarray: + return np.eye(4, dtype=np.float32) + + +@pytest.fixture +def translation_matrix() -> np.ndarray: + """A pure translation of (1, 2, 3).""" + mat = np.eye(4, dtype=np.float32) + mat[:3, 3] = [1.0, 2.0, 3.0] + return mat + + +@pytest.fixture +def rotation_90z_matrix() -> np.ndarray: + """90-degree rotation about Z axis.""" + mat = np.eye(4, dtype=np.float32) + mat[0, 0] = 0.0 + mat[0, 1] = -1.0 + mat[1, 0] = 1.0 + mat[1, 1] = 0.0 + return mat + + +# --------------------------------------------------------------------------- +# _to_numpy_4x4 conversion tests +# --------------------------------------------------------------------------- + + +class TestToNumpy4x4: + def test_from_ndarray_float32(self, identity_4x4: np.ndarray): + result = _to_numpy_4x4(identity_4x4) + assert isinstance(result, np.ndarray) + assert result.dtype == np.float32 + np.testing.assert_array_equal(result, identity_4x4) + + def test_from_ndarray_float64_casts(self): + mat = np.eye(4, dtype=np.float64) + result = _to_numpy_4x4(mat) + assert result.dtype == np.float32 + np.testing.assert_array_almost_equal(result, np.eye(4, dtype=np.float32)) + + def test_from_torch_cpu(self, translation_matrix: np.ndarray): + tensor = torch.from_numpy(translation_matrix.copy()) + result = _to_numpy_4x4(tensor) + assert isinstance(result, np.ndarray) + assert result.dtype == np.float32 + np.testing.assert_array_almost_equal(result, translation_matrix) + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") + def test_from_torch_gpu(self, translation_matrix: np.ndarray): + tensor = torch.from_numpy(translation_matrix.copy()).cuda() + result = _to_numpy_4x4(tensor) + assert isinstance(result, np.ndarray) + assert result.dtype == np.float32 + np.testing.assert_array_almost_equal(result, translation_matrix) + + def test_from_duck_typed_numpy(self, rotation_90z_matrix: np.ndarray): + """Simulates a wp.array or similar object with a .numpy() method.""" + + class FakeWarpArray: + def __init__(self, data: np.ndarray): + self._data = data + + def numpy(self) -> np.ndarray: + return self._data + + fake = FakeWarpArray(rotation_90z_matrix.copy()) + result = _to_numpy_4x4(fake) + assert isinstance(result, np.ndarray) + assert result.dtype == np.float32 + np.testing.assert_array_almost_equal(result, rotation_90z_matrix) + + def test_from_list(self): + data = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] + result = _to_numpy_4x4(data) + assert isinstance(result, np.ndarray) + assert result.dtype == np.float32 + np.testing.assert_array_equal(result, np.eye(4, dtype=np.float32)) + + +# --------------------------------------------------------------------------- +# Matrix multiplication (rebase) tests +# --------------------------------------------------------------------------- + + +class TestRebaseMultiplication: + def test_rebase_identity_is_noop(self, translation_matrix: np.ndarray): + """target_T_world = I should leave anchor_matrix unchanged.""" + identity = np.eye(4, dtype=np.float32) + result = _to_numpy_4x4(identity) @ translation_matrix + np.testing.assert_array_almost_equal(result, translation_matrix) + + def test_rebase_translation(self, identity_4x4: np.ndarray): + """Rebasing by a pure translation offsets the origin.""" + target_T_world = np.eye(4, dtype=np.float32) + target_T_world[:3, 3] = [10.0, 20.0, 30.0] + + world_T_anchor = np.eye(4, dtype=np.float32) + world_T_anchor[:3, 3] = [1.0, 2.0, 3.0] + + result = _to_numpy_4x4(target_T_world) @ world_T_anchor + np.testing.assert_array_almost_equal(result[:3, 3], [11.0, 22.0, 33.0]) + + def test_rebase_rotation(self, rotation_90z_matrix: np.ndarray): + """A 90-deg Z rotation rebase should rotate the anchor translation.""" + world_T_anchor = np.eye(4, dtype=np.float32) + world_T_anchor[:3, 3] = [1.0, 0.0, 0.0] + + result = _to_numpy_4x4(rotation_90z_matrix) @ world_T_anchor + + # After 90-deg Z rotation: (1,0,0) -> (0,1,0) + np.testing.assert_array_almost_equal(result[:3, 3], [0.0, 1.0, 0.0]) + # Rotation part should match the 90-deg Z rotation + np.testing.assert_array_almost_equal(result[:3, :3], rotation_90z_matrix[:3, :3]) + + def test_rebase_with_torch_tensor(self, translation_matrix: np.ndarray): + """target_T_world as a torch.Tensor should work identically.""" + target_T_world = torch.eye(4, dtype=torch.float32) + target_T_world[:3, 3] = torch.tensor([5.0, 5.0, 5.0]) + + result = _to_numpy_4x4(target_T_world) @ translation_matrix + + expected = np.eye(4, dtype=np.float32) + expected[:3, 3] = [6.0, 7.0, 8.0] + np.testing.assert_array_almost_equal(result, expected) + + def test_none_target_leaves_anchor_unchanged(self, translation_matrix: np.ndarray): + """When target_T_world is None, the calling code should skip multiplication.""" + target_T_world = None + anchor_matrix = translation_matrix.copy() + + if target_T_world is not None: + anchor_matrix = _to_numpy_4x4(target_T_world) @ anchor_matrix + + np.testing.assert_array_equal(anchor_matrix, translation_matrix) + + def test_inverse_rebase_recovers_identity(self): + """target_T_world = inv(world_T_anchor) should yield identity.""" + world_T_anchor = np.array( + [ + [0.0, -1.0, 0.0, 3.0], + [1.0, 0.0, 0.0, -1.0], + [0.0, 0.0, 1.0, 2.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=np.float32, + ) + target_T_world = np.linalg.inv(world_T_anchor).astype(np.float32) + + result = _to_numpy_4x4(target_T_world) @ world_T_anchor + np.testing.assert_array_almost_equal(result, np.eye(4, dtype=np.float32), decimal=5) + + +# --------------------------------------------------------------------------- +# Config-driven auto-selection tests +# --------------------------------------------------------------------------- + + +def _simulate_advance_selection( + target_T_world: np.ndarray | None, + target_frame_prim_path: str | None, + auto_read_result: np.ndarray | None = None, +) -> tuple[np.ndarray | None, bool]: + """Replicate the auto-selection logic from IsaacTeleopDevice.advance(). + + Returns the target_T_world that would be passed to step(), and whether + _get_target_frame_T_world would have been called. + """ + auto_read_called = False + + def fake_get_target_frame_T_world(): + nonlocal auto_read_called + auto_read_called = True + return auto_read_result + + if target_T_world is None and target_frame_prim_path is not None: + target_T_world = fake_get_target_frame_T_world() + + return target_T_world, auto_read_called + + +class TestConfigDrivenAutoSelection: + """Tests for the advance() auto-selection logic between explicit target_T_world + and config-driven target_frame_prim_path. + + These tests replicate the branching logic from advance() without importing + the full IsaacTeleopDevice (which requires Isaac Sim runtime dependencies). + """ + + def test_no_config_no_explicit_passes_none(self): + """When neither config nor explicit target_T_world is set, step() receives None.""" + result, called = _simulate_advance_selection(target_T_world=None, target_frame_prim_path=None) + assert result is None + assert not called + + def test_explicit_target_is_passed_through(self): + """An explicit target_T_world should be passed directly to step().""" + explicit = np.eye(4, dtype=np.float32) + explicit[:3, 3] = [1.0, 2.0, 3.0] + + result, called = _simulate_advance_selection(target_T_world=explicit, target_frame_prim_path=None) + np.testing.assert_array_equal(result, explicit) + assert not called + + def test_config_prim_triggers_auto_read(self): + """When target_frame_prim_path is set, _get_target_frame_T_world is called.""" + auto_matrix = np.eye(4, dtype=np.float32) + auto_matrix[:3, 3] = [9.0, 8.0, 7.0] + + result, called = _simulate_advance_selection( + target_T_world=None, + target_frame_prim_path="/World/Robot/base_link", + auto_read_result=auto_matrix, + ) + assert called + np.testing.assert_array_equal(result, auto_matrix) + + def test_explicit_overrides_config(self): + """An explicit target_T_world takes precedence over config prim path.""" + explicit = np.eye(4, dtype=np.float32) + explicit[:3, 3] = [42.0, 0.0, 0.0] + + result, called = _simulate_advance_selection( + target_T_world=explicit, + target_frame_prim_path="/World/Robot/base_link", + auto_read_result=np.eye(4, dtype=np.float32), + ) + assert not called + np.testing.assert_array_equal(result, explicit) + + def test_config_prim_returns_none_passes_none(self): + """If the prim read fails (returns None), step() receives None.""" + result, called = _simulate_advance_selection( + target_T_world=None, + target_frame_prim_path="/World/Robot/base_link", + auto_read_result=None, + ) + assert called + assert result is None diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/__init__.py b/source/isaaclab_visualizers/isaaclab_visualizers/__init__.py new file mode 100644 index 000000000000..186afae0b156 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Visualizer backends for Isaac Lab. + +Visualizers are loaded lazily by type (kit, newton, rerun, viser) via the factory in +isaaclab.visualizers. Import a specific backend only when needed: + + from isaaclab_visualizers.kit import KitVisualizer, KitVisualizerCfg + from isaaclab_visualizers.newton import NewtonVisualizer, NewtonVisualizerCfg + from isaaclab_visualizers.rerun import RerunVisualizer, RerunVisualizerCfg + from isaaclab_visualizers.viser import ViserVisualizer, ViserVisualizerCfg +""" diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/kit/__init__.py b/source/isaaclab_visualizers/isaaclab_visualizers/kit/__init__.py new file mode 100644 index 000000000000..69070ad341f1 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/kit/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit visualizer backend (Isaac Sim viewport). + +This package keeps imports lazy so configuration-only imports avoid pulling the +full runtime backend before it is needed. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .kit_visualizer_cfg import KitVisualizerCfg + +if TYPE_CHECKING: + from .kit_visualizer import KitVisualizer + +__all__ = ["KitVisualizer", "KitVisualizerCfg"] + + +def __getattr__(name: str): + if name == "KitVisualizer": + from .kit_visualizer import KitVisualizer + + return KitVisualizer + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualization_markers.py b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualization_markers.py new file mode 100644 index 000000000000..c9f55b413e03 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualization_markers.py @@ -0,0 +1,221 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit/USD implementation for :class:`VisualizationMarkers`. + +This backend represents markers as :class:`UsdGeom.PointInstancer` prims in the +USD stage. Marker prototypes are created as child prims of the point instancer +and are instanced efficiently through prototype indices. + +.. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html +""" + +from __future__ import annotations + +import logging + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.markers.visualization_markers_cfg import VisualizationMarkersCfg +from isaaclab.utils.version import has_kit + +logger = logging.getLogger(__name__) + + +class KitVisualizationMarkers: + """USD PointInstancer backend for visualization markers. + + This class wraps around the `UsdGeom.PointInstancer`_ for efficient + handling of objects in the USD stage by instancing the created marker + prototype prims. + + A marker prototype prim is a reusable template prim used for defining + variations of objects in the scene. For example, a sphere prim can be used + as a marker prototype prim to create multiple sphere prims at different + locations. The marker prim path is resolved using the marker name from the + :attr:`VisualizationMarkersCfg.markers` dictionary. + + .. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html + """ + + def __init__(self, cfg: VisualizationMarkersCfg, visible: bool = True): + """Initialize the USD point instancer and register marker prototypes. + + When this backend is initialized, the :class:`UsdGeom.PointInstancer` + is created in the stage and the marker prims are registered into it. + + .. note:: + If a prim already exists at the requested path, the next free path + is used for the :class:`UsdGeom.PointInstancer` prim. + """ + self.cfg = cfg + self.stage = sim_utils.get_current_stage() + # Resolve the next free prim path before creating the point instancer. + self.prim_path = sim_utils.get_next_free_prim_path(cfg.prim_path) + self._is_visible = visible + self._count = len(cfg.markers) + + from pxr import Gf, UsdGeom # noqa: PLC0415 + + self._instancer_manager = UsdGeom.PointInstancer.Define(self.stage, self.prim_path) + self._add_markers_prototypes(self.cfg.markers) + # Note: We need to do this the first time to initialize the instancer. + # Otherwise, the instancer is not fully "created" and USD instance + # queries such as GetInstanceIndices() can fail. + self._instancer_manager.GetProtoIndicesAttr().Set(list(range(len(self.cfg.markers)))) + self._instancer_manager.GetPositionsAttr().Set([Gf.Vec3f(0.0)] * len(self.cfg.markers)) + self.set_visibility(visible) + + @property + def count(self) -> int: + return self._count + + def set_visibility(self, visible: bool) -> None: + """Set USD PointInstancer visibility. + + The method does this through the USD API. + """ + from pxr import UsdGeom # noqa: PLC0415 + + self._is_visible = visible + imageable = UsdGeom.Imageable(self._instancer_manager) + if visible: + imageable.MakeVisible() + else: + imageable.MakeInvisible() + + def is_visible(self) -> bool: + """Return USD PointInstancer visibility.""" + from pxr import UsdGeom # noqa: PLC0415 + + return self._instancer_manager.GetVisibilityAttr().Get() != UsdGeom.Tokens.invisible + + def visualize( + self, + translations: torch.Tensor | None, + orientations: torch.Tensor | None, + scales: torch.Tensor | None, + marker_indices: torch.Tensor | None, + ) -> None: + """Write marker transforms to USD PointInstancer attributes. + + Args: + translations: Translations w.r.t. parent prim frame. Shape is + (M, 3). + orientations: Quaternion orientations (x, y, z, w) w.r.t. parent + prim frame. Shape is (M, 4). + scales: Scale applied before any rotation is applied. Shape is + (M, 3). + marker_indices: Decides which marker prototype to visualize. Shape + is (M). + """ + from pxr import Vt # noqa: PLC0415 + + num_markers = 0 + if translations is not None: + translations_np = translations.detach().cpu().numpy() + # Apply translations. + self._instancer_manager.GetPositionsAttr().Set(Vt.Vec3fArray.FromNumpy(translations_np)) + num_markers = translations_np.shape[0] + if orientations is not None: + orientations_np = orientations.detach().cpu().numpy() + # Apply orientations. USD expects quaternion data in xyzw format. + self._instancer_manager.GetOrientationsAttr().Set(Vt.QuathArray.FromNumpy(orientations_np)) + num_markers = orientations_np.shape[0] + if scales is not None: + scales_np = scales.detach().cpu().numpy() + # Apply scales. + self._instancer_manager.GetScalesAttr().Set(Vt.Vec3fArray.FromNumpy(scales_np)) + num_markers = scales_np.shape[0] + if marker_indices is not None or num_markers != self._count: + if marker_indices is not None: + marker_indices_np = marker_indices.detach().cpu().numpy() + # Apply prototype indices. + self._instancer_manager.GetProtoIndicesAttr().Set(Vt.IntArray.FromNumpy(marker_indices_np)) + num_markers = marker_indices_np.shape[0] + elif num_markers != 0: + # Set all markers to the first prototype when the marker count + # changes and explicit marker indices are not provided. + self._instancer_manager.GetProtoIndicesAttr().Set([0] * num_markers) + if num_markers != 0: + self._count = num_markers + + def _add_markers_prototypes(self, markers_cfg: dict[str, sim_utils.SpawnerCfg]) -> None: + """Add marker prototypes to the scene and register them with the point instancer.""" + # Add markers based on config. + for name, cfg in markers_cfg.items(): + # Resolve prim path from the marker name. + marker_prim_path = f"{self.prim_path}/{name}" + # Create a child prim for the marker. + marker_prim = cfg.func(prim_path=marker_prim_path, cfg=cfg) + # Make the asset uninstanceable in case it is already instanced. + # Point instancer defines its own prototypes, so already-instanced + # assets cannot be used directly. + self._process_prototype_prim(marker_prim) + # Add child reference to point instancer. + self._instancer_manager.GetPrototypesRel().AddTarget(marker_prim_path) + + # Check that all prototypes were loaded. + prototypes = self._instancer_manager.GetPrototypesRel().GetTargets() + if len(prototypes) != len(markers_cfg): + raise RuntimeError( + f"Failed to load all the prototypes. Expected: {len(markers_cfg)}. Received: {len(prototypes)}." + ) + + def _process_prototype_prim(self, prim) -> None: + """Process a prim and its descendants to make them suitable for prototypes. + + Point instancer defines its own prototypes, so if an asset is already + instanced, this does not work. This function checks if the prim and its + descendants are instanced. If so, it makes the respective prim + uninstanceable by disabling instancing on the prim. + + Additionally, it makes the prim invisible to secondary rays. This is + useful when marker prims should not appear in camera images. + + Args: + prim: The prim to process. + """ + from pxr import Sdf, UsdGeom, UsdPhysics # noqa: PLC0415 + + if not prim.IsValid(): + raise ValueError(f"Prim at path '{prim.GetPrimAtPath()}' is not valid.") + + # Iterate over all prims under the marker prim path. + all_prims = [prim] + while len(all_prims) > 0: + child_prim = all_prims.pop(0) + # Remove physics from marker prototypes because they are only for + # visualization. + if child_prim.HasAPI(UsdPhysics.ArticulationRootAPI): + child_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI) + child_prim.RemoveAppliedSchema("PhysxArticulationAPI") + if child_prim.HasAPI(UsdPhysics.RigidBodyAPI): + child_prim.RemoveAPI(UsdPhysics.RigidBodyAPI) + child_prim.RemoveAppliedSchema("PhysxRigidBodyAPI") + if child_prim.IsA(UsdPhysics.Joint): + child_prim.GetAttribute("physics:jointEnabled").Set(False) + # Point instancer defines its own instancing, so nested instances + # must be made uninstanceable. + if child_prim.IsInstance(): + child_prim.SetInstanceable(False) + # Make renderable prims invisible to secondary rays such as depth + # images. + if child_prim.IsA(UsdGeom.Gprim): + sim_utils.change_prim_property( + prop_path=f"{child_prim.GetPrimPath().pathString}.primvars:invisibleToSecondaryRays", + value=True, + stage=prim.GetStage(), + type_to_create_if_not_exist=Sdf.ValueTypeNames.Bool, + ) + all_prims += child_prim.GetChildren() + + # Remove any remaining physics on the markers because they are only for + # visualization. + if has_kit(): + import omni.physx.scripts.utils as physx_utils # noqa: PLC0415 + + physx_utils.removeRigidBodySubtree(prim) diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer.py b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer.py new file mode 100644 index 000000000000..701a21c79984 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer.py @@ -0,0 +1,482 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Kit-based visualizer using Isaac Sim viewport.""" + +from __future__ import annotations + +import asyncio +import logging +from typing import TYPE_CHECKING + +from pxr import Gf, Usd, UsdGeom, Vt + +from isaaclab.app.settings_manager import get_settings_manager +from isaaclab.visualizers.base_visualizer import BaseVisualizer + +from isaaclab_visualizers.newton_adapter import resolve_visible_env_indices + +from .kit_visualizer_cfg import KitVisualizerCfg + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from isaaclab.physics import BaseSceneDataProvider + +_DEFAULT_VIEWPORT_NAME = "Visualizer Viewport" + + +class KitVisualizer(BaseVisualizer): + """Kit visualizer using Isaac Sim viewport.""" + + def __init__(self, cfg: KitVisualizerCfg): + """Initialize Kit visualizer state. + + Args: + cfg: Kit visualizer configuration. + """ + super().__init__(cfg) + self.cfg: KitVisualizerCfg = cfg + + self._simulation_app = None + self._viewport_window = None + self._viewport_api = None + self._is_initialized = False + self._sim_time = 0.0 + self._step_counter = 0 + self._hidden_env_visibilities: dict[str, str] = {} + # PointInstancer prim path -> (had authored invisibleIds, previous value) for partial viz restore. + self._point_instancer_invisible_ids_backup: dict[str, tuple[bool, object]] = {} + self._runtime_headless = bool(cfg.headless) + # USD path for the viewport's active camera, refreshed after setup (used by CI/tests). + self._controlled_camera_path: str | None = None + + # ---- Lifecycle ------------------------------------------------------------------------ + + def initialize(self, scene_data_provider: BaseSceneDataProvider) -> None: + """Initialize viewport resources and bind scene data provider. + + Args: + scene_data_provider: Scene data provider used by the visualizer. + """ + if self._is_initialized: + logger.debug("[KitVisualizer] initialize() called while already initialized.") + return + + if scene_data_provider is None: + raise RuntimeError("[KitVisualizer] Requires a scene_data_provider.") + self._scene_data_provider = scene_data_provider + usd_stage = scene_data_provider.get_usd_stage() + if usd_stage is None: + raise RuntimeError("[KitVisualizer] USD stage not available from scene_data_provider.") + metadata = scene_data_provider.get_metadata() + + self._ensure_simulation_app() + self._setup_viewport() + + self._env_ids = self._compute_visualized_env_ids() + num_envs_meta = int(metadata.get("num_envs", 0)) + self._resolved_visible_env_ids = resolve_visible_env_indices( + self._env_ids, self.cfg.max_visible_envs, num_envs_meta + ) + if self._resolved_visible_env_ids is not None: + logger.warning( + "[KitVisualizer] Partial visualization in Kit uses visibility only; unselected env prims are hidden." + ) + self._apply_env_visibility(usd_stage, metadata, self._resolved_visible_env_ids) + num_visualized_envs = ( + len(self._resolved_visible_env_ids) if self._resolved_visible_env_ids is not None else num_envs_meta + ) + self._log_initialization_table( + logger=logger, + title="KitVisualizer Configuration", + rows=[ + ("eye", self.cfg.eye), + ("lookat", self.cfg.lookat), + ("cam_source", self.cfg.cam_source), + ("max_visible_envs", self.cfg.max_visible_envs), + ("num_visualized_envs", num_visualized_envs), + ("create_viewport", self.cfg.create_viewport), + ("headless", self._runtime_headless), + ], + ) + + self._is_initialized = True + + def step(self, dt: float) -> None: + """Advance visualizer/UI updates for one simulation step. + + Args: + dt: Simulation time-step in seconds. + """ + if not self._is_initialized: + return + self._sim_time += dt + self._step_counter += 1 + try: + import omni.kit.app + + app = omni.kit.app.get_app() + if app is not None and app.is_running(): + # Keep app pumping for viewport/UI updates only; physics is owned by SimulationContext. + # Disable playSimulations around app.update() so Kit does not advance its own physics here. + settings = get_settings_manager() + settings.set_bool("/app/player/playSimulations", False) + app.update() + settings.set_bool("/app/player/playSimulations", True) + except (ImportError, AttributeError) as exc: + logger.debug("[KitVisualizer] App update skipped: %s", exc) + # Markers (VisualizationMarkers) are often created or resized to num_envs only after the first + # simulation / debug-vis step; re-apply PointInstancer invisibleIds each step when partial viz is on. + self._refresh_partial_viz_point_instancers_if_needed() + + def close(self) -> None: + """Close viewport resources and restore temporary state.""" + if not self._is_initialized: + return + self._restore_env_visibility() + self._simulation_app = None + self._viewport_window = None + self._viewport_api = None + self._is_initialized = False + self._is_closed = True + + # ---- Capabilities --------------------------------------------------------------------- + + def is_running(self) -> bool: + """Return whether Kit app/runtime is still running. + + Returns: + ``True`` when the visualizer can continue stepping, otherwise ``False``. + """ + if self._simulation_app is not None: + return self._simulation_app.is_running() + try: + import omni.kit.app + + app = omni.kit.app.get_app() + return app is not None and app.is_running() + except (ImportError, AttributeError): + return False + + def is_training_paused(self) -> bool: + """Return whether simulation play flag is paused in Kit settings.""" + try: + settings = get_settings_manager() + play_flag = settings.get("/app/player/playSimulations") + return play_flag is False + except Exception: + return False + + def supports_markers(self) -> bool: + """Kit viewport supports marker visualization through Omni UI rendering.""" + return bool(self.cfg.enable_markers) + + def supports_live_plots(self) -> bool: + """Kit backend can host live plot widgets via viewport UI panels.""" + return True + + def requires_forward_before_step(self) -> bool: + """OV viewport relies on refreshed kinematic state before render.""" + return True + + def pumps_app_update(self) -> bool: + """KitVisualizer calls app.update() in step(), so render() should not do it again.""" + return True + + def set_camera_view( + self, eye: tuple[float, float, float] | list[float], target: tuple[float, float, float] | list[float] + ) -> None: + """Set active viewport camera eye/target. + + When :attr:`self.cfg.cam_source` is ``"cfg"``, this is a no-op: the pose comes only from + :attr:`self.cfg.eye` / :attr:`self.cfg.lookat` (applied in :meth:`_setup_viewport`). Otherwise + :class:`~isaaclab.sim.simulation_context.SimulationContext` and :class:`ViewportCameraController` + would overwrite that pose with :class:`~isaaclab.envs.common.ViewerCfg`-driven views. + + Args: + eye: Camera eye position. + target: Camera look-at target. + """ + if self.cfg.cam_source == "cfg": + return + if not self._is_initialized: + logger.debug("[KitVisualizer] set_camera_view() ignored because visualizer is not initialized.") + return + self._set_viewport_camera(tuple(eye), tuple(target)) + + # ---- Viewport + camera ---------------------------------------------------------------- + + def _ensure_simulation_app(self) -> None: + """Ensure a running Isaac Sim app is available and cache runtime mode.""" + import omni.kit.app + + app = omni.kit.app.get_app() + if app is None or not app.is_running(): + raise RuntimeError("[KitVisualizer] Isaac Sim app is not running.") + + try: + from isaacsim import SimulationApp + + sim_app = None + if hasattr(SimulationApp, "_instance") and SimulationApp._instance is not None: + sim_app = SimulationApp._instance + elif hasattr(SimulationApp, "instance") and callable(SimulationApp.instance): + sim_app = SimulationApp.instance() + + if sim_app is not None: + self._simulation_app = sim_app + self._runtime_headless = bool(self.cfg.headless or self._simulation_app.config.get("headless", False)) + if self._runtime_headless: + logger.warning("[KitVisualizer] Running in headless mode. Viewport may not display.") + except ImportError: + pass + + def _setup_viewport(self) -> None: + """Create/resolve viewport and configure initial camera.""" + import omni.kit.viewport.utility as vp_utils + from omni.ui import DockPosition + + if self._runtime_headless: + # Headless: no viewport window; apply cfg pose to the default perspective camera path. + self._viewport_window = None + self._viewport_api = None + if self.cfg.cam_source == "prim_path": + logger.warning( + "[KitVisualizer] cam_source='prim_path' has limited support in headless mode; " + "using eye/lookat from cfg instead." + ) + self._apply_cfg_camera_pose_if_configured() + self._refresh_controlled_camera_path() + return + + effective_viewport_name = ( + self.cfg.viewport_name if self.cfg.viewport_name is not None else _DEFAULT_VIEWPORT_NAME + ) + + if self.cfg.create_viewport: + if not str(effective_viewport_name).strip(): + raise RuntimeError( + "[KitVisualizer] viewport_name must be a non-empty string when create_viewport=True." + ) + dock_position_name = self.cfg.dock_position.upper() + dock_position_map = { + "LEFT": DockPosition.LEFT, + "RIGHT": DockPosition.RIGHT, + "BOTTOM": DockPosition.BOTTOM, + "SAME": DockPosition.SAME, + } + dock_pos = dock_position_map.get(dock_position_name, DockPosition.SAME) + + self._viewport_window = vp_utils.create_viewport_window( + name=effective_viewport_name, + width=self.cfg.window_width, + height=self.cfg.window_height, + position_x=50, + position_y=50, + docked=True, + ) + + asyncio.ensure_future(self._dock_viewport_async(effective_viewport_name, dock_pos)) + else: + self._viewport_window = vp_utils.get_active_viewport_window() + + if self._viewport_window is None: + logger.warning("[KitVisualizer] No active viewport window found.") + self._viewport_api = None + self._refresh_controlled_camera_path() + return + self._viewport_api = self._viewport_window.viewport_api + if self.cfg.cam_source == "prim_path": + if not self._set_active_camera_path(self.cfg.cam_prim_path): + raise RuntimeError( + "[KitVisualizer] cam_source='prim_path' requires a valid cam_prim_path. " + f"Camera prim not found: '{self.cfg.cam_prim_path}'." + ) + else: + self._apply_cfg_camera_pose_if_configured() + self._refresh_controlled_camera_path() + + def _refresh_controlled_camera_path(self) -> None: + """Cache :attr:`_controlled_camera_path` from the active viewport (or default persp).""" + if self._viewport_api is not None: + path = self._viewport_api.get_active_camera() + self._controlled_camera_path = path if path else "/OmniverseKit_Persp" + else: + self._controlled_camera_path = "/OmniverseKit_Persp" + + async def _dock_viewport_async(self, viewport_name: str, dock_position) -> None: + """Dock a created viewport window relative to main viewport.""" + import omni.kit.app + import omni.ui + + viewport_window = None + for _ in range(10): + viewport_window = omni.ui.Workspace.get_window(viewport_name) + if viewport_window: + break + await omni.kit.app.get_app().next_update_async() + + if not viewport_window: + logger.warning(f"[KitVisualizer] Could not find viewport window '{viewport_name}'.") + return + + main_viewport = omni.ui.Workspace.get_window("Viewport") + if not main_viewport: + for alt_name in ["/OmniverseKit/Viewport", "Viewport Next"]: + main_viewport = omni.ui.Workspace.get_window(alt_name) + if main_viewport: + break + + if main_viewport and main_viewport != viewport_window: + viewport_window.dock_in(main_viewport, dock_position, 0.5) + await omni.kit.app.get_app().next_update_async() + viewport_window.focus() + viewport_window.visible = True + await omni.kit.app.get_app().next_update_async() + viewport_window.focus() + + def _set_viewport_camera(self, position: tuple[float, float, float], target: tuple[float, float, float]) -> None: + """Apply eye/target camera view to the active viewport.""" + if self._viewport_api is None: + return + + try: + from omni.kit.viewport.utility.camera_state import ViewportCameraState + except ImportError as exc: + logger.warning("[KitVisualizer] Viewport camera update skipped: %s", exc) + return + + camera_path = self._viewport_api.get_active_camera() + if not camera_path: + camera_path = "/OmniverseKit_Persp" + + camera_state = ViewportCameraState(camera_path, self._viewport_api) + camera_state.set_position_world(Gf.Vec3d(float(position[0]), float(position[1]), float(position[2])), True) + camera_state.set_target_world(Gf.Vec3d(float(target[0]), float(target[1]), float(target[2])), True) + + def _apply_cfg_camera_pose_if_configured(self) -> None: + """Apply configured camera pose from eye/lookat.""" + self._set_viewport_camera(self.cfg.eye, self.cfg.lookat) + + def _set_active_camera_path(self, camera_path: str) -> bool: + """Set active camera path for viewport if the prim exists. + + Returns: + ``True`` if camera was set, otherwise ``False``. + """ + if self._viewport_api is None: + return False + usd_stage = self._scene_data_provider.get_usd_stage() if self._scene_data_provider else None + if usd_stage is None: + return False + camera_prim = usd_stage.GetPrimAtPath(camera_path) + if not camera_prim.IsValid(): + return False + self._viewport_api.set_active_camera(camera_path) + return True + + def _apply_env_visibility(self, usd_stage, metadata: dict, visible_env_ids: list[int]) -> None: + """Hide environments not listed in ``visible_env_ids`` (cosmetic partial visualization).""" + num_envs = int(metadata.get("num_envs", 0)) + if num_envs <= 0: + return + visible = set(visible_env_ids) + for env_id in range(num_envs): + if env_id in visible: + continue + env_path = f"/World/envs/env_{env_id}" + prim = usd_stage.GetPrimAtPath(env_path) + if not prim.IsValid(): + continue + imageable = UsdGeom.Imageable(prim) + if not imageable: + continue + attr = imageable.GetVisibilityAttr() + prev = attr.Get() + if env_path not in self._hidden_env_visibilities and prev: + self._hidden_env_visibilities[env_path] = prev + attr.Set(UsdGeom.Tokens.invisible) + + self._apply_visual_point_instancer_visibility(usd_stage, num_envs, visible) + + def _refresh_partial_viz_point_instancers_if_needed(self) -> None: + """Re-apply ``invisibleIds`` for env-scaled `/Visuals` instancers (handles lazy marker creation).""" + if self._resolved_visible_env_ids is None or self._scene_data_provider is None: + return + usd_stage = self._scene_data_provider.get_usd_stage() + if usd_stage is None: + return + num_envs = int(self._scene_data_provider.get_metadata().get("num_envs", 0)) + if num_envs <= 0: + return + self._apply_visual_point_instancer_visibility(usd_stage, num_envs, set(self._resolved_visible_env_ids)) + + def _apply_visual_point_instancer_visibility(self, usd_stage, num_envs: int, visible_env_ids: set[int]) -> None: + """Set ``PointInstancer.invisibleIds`` for per-env `/Visuals` markers (e.g. velocity arrows).""" + hidden = [i for i in range(num_envs) if i not in visible_env_ids] + vt_hidden = Vt.Int64Array([int(i) for i in hidden]) + for root_path in ("/Visuals", "/World/Visuals"): + root_prim = usd_stage.GetPrimAtPath(root_path) + if not root_prim.IsValid(): + continue + for prim in Usd.PrimRange(root_prim): + if not prim.IsA(UsdGeom.PointInstancer): + continue + pi = UsdGeom.PointInstancer(prim) + n = self._point_instancer_instance_count(pi) + if n is None or n != num_envs: + continue + path_str = prim.GetPath().pathString + inv_attr = pi.GetInvisibleIdsAttr() + # Record original authorship/value once per instancer for :meth:`_restore_env_visibility`. + if path_str not in self._point_instancer_invisible_ids_backup: + was_authored = inv_attr.HasAuthoredValue() + prev = inv_attr.Get() if was_authored else None + self._point_instancer_invisible_ids_backup[path_str] = (was_authored, prev) + inv_attr.Set(vt_hidden) + + @staticmethod + def _point_instancer_instance_count(pi: UsdGeom.PointInstancer) -> int | None: + """Return instance count from the first authored per-instance array, if any.""" + for attr in ( + pi.GetPositionsAttr(), + pi.GetScalesAttr(), + pi.GetOrientationsAttr(), + pi.GetProtoIndicesAttr(), + ): + if not attr.HasAuthoredValue(): + continue + val = attr.Get() + if val is None: + continue + return len(val) + return None + + def _restore_env_visibility(self) -> None: + """Restore environment visibilities and PointInstancer ``invisibleIds`` from partial viz.""" + usd_stage = self._scene_data_provider.get_usd_stage() if self._scene_data_provider else None + if usd_stage is None: + return + for env_path, prev in self._hidden_env_visibilities.items(): + prim = usd_stage.GetPrimAtPath(env_path) + if not prim.IsValid(): + continue + imageable = UsdGeom.Imageable(prim) + if not imageable: + continue + imageable.GetVisibilityAttr().Set(prev) + self._hidden_env_visibilities.clear() + + for path_str, (was_authored, prev) in self._point_instancer_invisible_ids_backup.items(): + prim = usd_stage.GetPrimAtPath(path_str) + if not prim.IsValid() or not prim.IsA(UsdGeom.PointInstancer): + continue + inv_attr = UsdGeom.PointInstancer(prim).GetInvisibleIdsAttr() + if not was_authored: + inv_attr.Clear() + else: + inv_attr.Set(prev) + self._point_instancer_invisible_ids_backup.clear() diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer_cfg.py b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer_cfg.py new file mode 100644 index 000000000000..342be3fc2c6f --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/kit/kit_visualizer_cfg.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Kit-based visualizer.""" + +from __future__ import annotations + +from isaaclab.utils import configclass +from isaaclab.visualizers.visualizer_cfg import VisualizerCfg + + +@configclass +class KitVisualizerCfg(VisualizerCfg): + """Configuration for Kit visualizer using Isaac Sim viewport.""" + + visualizer_type: str = "kit" + """Type identifier for Kit visualizer.""" + + viewport_name: str | None = None + """Name for a new viewport window when :attr:`create_viewport` is ``True``. + + If ``None``, a default name (``"Visualizer Viewport"``) is used. + """ + + create_viewport: bool = False + """If ``True``, create a new viewport window; if ``False``, use the active viewport window.""" + + headless: bool = False + """Run without creating viewport windows when supported by the app.""" + + dock_position: str = "SAME" + """Dock position for a new viewport. Options: 'LEFT', 'RIGHT', 'BOTTOM', 'SAME'.""" + + window_width: int = 1280 + """Viewport width in pixels (when :attr:`create_viewport` is ``True``).""" + + window_height: int = 720 + """Viewport height in pixels (when :attr:`create_viewport` is ``True``).""" diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/newton/__init__.py b/source/isaaclab_visualizers/isaaclab_visualizers/newton/__init__.py new file mode 100644 index 000000000000..b83c42420d86 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/newton/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton OpenGL visualizer backend. + +This package keeps imports lazy so configuration-only imports do not pull in +the heavy viewer/runtime stack before Isaac Sim has finished bootstrapping. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .newton_visualizer_cfg import NewtonVisualizerCfg + +if TYPE_CHECKING: + from .newton_visualizer import NewtonVisualizer + +__all__ = ["NewtonVisualizer", "NewtonVisualizerCfg"] + + +def __getattr__(name: str): + if name == "NewtonVisualizer": + from .newton_visualizer import NewtonVisualizer + + return NewtonVisualizer + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualization_markers.py b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualization_markers.py new file mode 100644 index 000000000000..9d222ed71a61 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualization_markers.py @@ -0,0 +1,531 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton-family implementation for :class:`VisualizationMarkers`.""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import Any, Literal + +import numpy as np +import torch +import warp as wp +from newton import Axis, Mesh + +import isaaclab.sim as sim_utils +from isaaclab.markers.visualization_markers_cfg import VisualizationMarkersCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR +from isaaclab.utils.math import quat_apply + +logger = logging.getLogger(__name__) + +_OMNIPBR_DEFAULTS = { + "diffuse_color_constant": (0.2, 0.2, 0.2), + "diffuse_tint": (1.0, 1.0, 1.0), +} +_UNBOUND_DEFAULT_FALLBACK_GRAY = (0.18, 0.18, 0.18) +_DEX_CUBE_TEXTURE_URL = f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/Materials/dex_cube_mod.png" + + +@dataclass(frozen=True) +class _NewtonMarkerSpec: + renderer: Literal["mesh", "frame", "none"] + mesh_type: Literal["arrow", "box", "textured_box", "sphere", "cylinder", "capsule", "cone"] | None = None + mesh_params: dict[str, float | tuple[float, float, float]] | None = None + scale: tuple[float, float, float] | None = None + color: tuple[float, float, float] | None = None + texture: Any | None = None + + +@dataclass(frozen=True) +class _MeshData: + vertices: np.ndarray + indices: np.ndarray + normals: np.ndarray + uvs: np.ndarray + + +def render_newton_visualization_markers(viewer, visible_env_ids: list[int] | None, num_envs: int) -> None: + """Render all active Newton visualization marker groups into a Newton-family viewer.""" + sim = sim_utils.SimulationContext.instance() + if sim is None: + return + + for marker in sim.vis_marker_registry.get_groups().values(): + if isinstance(marker, NewtonVisualizationMarkers): + marker.render(viewer, visible_env_ids=visible_env_ids, num_envs=num_envs) + + +class NewtonVisualizationMarkers: + """Newton-family backend for visualization markers.""" + + def __init__(self, cfg: VisualizationMarkersCfg, visible: bool = True): + self.cfg = cfg + self.group_id = f"{cfg.prim_path}::{id(self)}" + self.visible = visible + self.translations: torch.Tensor | None = None + self.orientations: torch.Tensor | None = None + self.scales: torch.Tensor | None = None + self.marker_indices: torch.Tensor | None = None + self.count = len(cfg.markers) + self._registered_meshes: set[tuple[int, str]] = set() + self._warned_unsupported: set[str] = set() + + sim = sim_utils.SimulationContext.instance() + if sim is not None: + sim.vis_marker_registry.set_group(self.group_id, self) + + def close(self) -> None: + """Remove marker backend from the simulation marker registry.""" + sim = sim_utils.SimulationContext.instance() + if sim is not None: + sim.vis_marker_registry.remove_group(self.group_id) + + def infer_device(self) -> torch.device: + """Infer the device from current marker state.""" + for value in (self.translations, self.orientations, self.scales, self.marker_indices): + if value is not None: + return value.device + return torch.device("cpu") + + def set_visibility(self, visible: bool) -> None: + """Set marker visibility.""" + self.visible = visible + + def is_visible(self) -> bool: + """Return whether this marker group is visible.""" + return self.visible + + def visualize( + self, + translations: torch.Tensor | None, + orientations: torch.Tensor | None, + scales: torch.Tensor | None, + marker_indices: torch.Tensor | None, + ) -> None: + """Update marker state consumed by Newton-family visualizers.""" + if translations is not None: + self.translations = translations.detach() + self.count = translations.shape[0] + if orientations is not None: + self.orientations = orientations.detach() + self.count = orientations.shape[0] + if scales is not None: + self.scales = scales.detach() + self.count = scales.shape[0] + if marker_indices is not None: + self.marker_indices = marker_indices.detach().to(dtype=torch.int32) + self.count = marker_indices.shape[0] + elif self.count != 0: + self.marker_indices = torch.zeros(self.count, dtype=torch.int32, device=self.infer_device()) + + def render(self, viewer, visible_env_ids: list[int] | None, num_envs: int) -> None: + """Render marker state to a Newton viewer.""" + state = _filter_marker_state(self, visible_env_ids=visible_env_ids, num_envs=num_envs) + if state["count"] == 0: + for name, marker_cfg in self.cfg.markers.items(): + self._hide_batch(viewer, name, _resolve_newton_marker_cfg(name, marker_cfg, self.cfg)) + return + + translations = state["translations"] + if translations is None: + return + orientations = state["orientations"] + if orientations is None: + orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]], device=translations.device).repeat(state["count"], 1) + scales = state["scales"] + if scales is None: + scales = torch.ones((state["count"], 3), dtype=torch.float32, device=translations.device) + marker_indices = state["marker_indices"] + if marker_indices is None: + marker_indices = torch.zeros(state["count"], dtype=torch.int64, device=translations.device) + + for proto_index, (name, marker_cfg) in enumerate(self.cfg.markers.items()): + newton_cfg = _resolve_newton_marker_cfg(name, marker_cfg, self.cfg) + batch_name = f"{self.group_id}/{name}" + selected = marker_indices == proto_index + if not state["visible"] or int(selected.sum().item()) == 0: + self._hide_batch(viewer, name, newton_cfg) + continue + + if newton_cfg.renderer == "none": + unsupported_key = f"{self.group_id}:{name}" + if unsupported_key not in self._warned_unsupported: + logger.warning( + "[NewtonVisualizationMarkers] Unsupported marker prototype '%s' in group '%s'; skipping.", + name, + self.group_id, + ) + self._warned_unsupported.add(unsupported_key) + continue + + selected_translations = translations[selected] + selected_orientations = orientations[selected] + default_scale = newton_cfg.scale or _extract_scale_hint(marker_cfg) + selected_scales = scales[selected] * torch.tensor( + default_scale, dtype=torch.float32, device=scales.device + ).unsqueeze(0) + + if newton_cfg.renderer == "mesh": + mesh_name = f"{self.group_id}/meshes/{name}" + self._ensure_mesh_registered(viewer, mesh_name, newton_cfg) + color = newton_cfg.color or _extract_color(marker_cfg) + colors = torch.tensor(color, dtype=torch.float32, device=scales.device).repeat( + selected_scales.shape[0], 1 + ) + materials = torch.zeros((selected_scales.shape[0], 4), dtype=torch.float32, device=scales.device) + if newton_cfg.texture is not None: + # ViewerGL gates texture sampling with material.w. Rerun and + # Viser ignore this flag but consume the mesh texture. + materials[:, 3] = 1.0 + xforms = torch.cat((selected_translations, selected_orientations), dim=1).detach().cpu().numpy() + viewer.log_instances( + batch_name, + mesh_name, + wp.array(xforms.astype(np.float32), dtype=wp.transform), + wp.array(selected_scales.detach().cpu().numpy().astype(np.float32), dtype=wp.vec3), + wp.array(colors.detach().cpu().numpy().astype(np.float32), dtype=wp.vec3), + wp.array(materials.detach().cpu().numpy().astype(np.float32), dtype=wp.vec4), + hidden=False, + ) + elif newton_cfg.renderer == "frame": + starts, ends, colors = _build_frame_lines(selected_translations, selected_orientations, selected_scales) + width = max(float(selected_scales.mean().item()) * 0.05, 0.0025) + viewer.log_lines( + batch_name, + wp.array(starts.detach().cpu().numpy().astype(np.float32), dtype=wp.vec3), + wp.array(ends.detach().cpu().numpy().astype(np.float32), dtype=wp.vec3), + wp.array(colors.detach().cpu().numpy().astype(np.float32), dtype=wp.vec3), + width=width, + hidden=False, + ) + + def _hide_batch(self, viewer, name: str, newton_cfg: _NewtonMarkerSpec) -> None: + batch_name = f"{self.group_id}/{name}" + if newton_cfg.renderer == "mesh" and newton_cfg.mesh_type is not None: + mesh_name = f"{self.group_id}/meshes/{name}" + self._ensure_mesh_registered(viewer, mesh_name, newton_cfg) + viewer.log_instances(batch_name, mesh_name, None, None, None, None, hidden=True) + elif newton_cfg.renderer == "frame": + viewer.log_lines(batch_name, None, None, None, hidden=True) + + def _ensure_mesh_registered(self, viewer, mesh_name: str, newton_cfg: _NewtonMarkerSpec) -> None: + # The marker backend is shared by all Newton-family visualizers. Mesh + # registration is viewer-local, so the same marker mesh must be logged + # once per viewer (for example, once for Rerun and once for Viser). + registered_key = (id(viewer), mesh_name) + if registered_key in self._registered_meshes or newton_cfg.mesh_type is None: + return + mesh = _create_mesh(newton_cfg) + viewer.log_mesh( + mesh_name, + wp.array(mesh.vertices.astype(np.float32), dtype=wp.vec3), + wp.array(mesh.indices.astype(np.int32), dtype=wp.int32), + normals=wp.array(mesh.normals.astype(np.float32), dtype=wp.vec3) if mesh.normals.size else None, + uvs=wp.array(mesh.uvs.astype(np.float32), dtype=wp.vec2) if mesh.uvs.size else None, + texture=newton_cfg.texture, + hidden=True, + ) + self._registered_meshes.add(registered_key) + + +def _resolve_newton_marker_cfg(name: str, marker_cfg: object, cfg: VisualizationMarkersCfg) -> _NewtonMarkerSpec: + del name, cfg + return _infer_newton_marker_cfg(marker_cfg) + + +def _infer_newton_marker_cfg(marker_cfg: object) -> _NewtonMarkerSpec: + cfg_type = type(marker_cfg).__name__ + + if cfg_type == "SphereCfg": + return _NewtonMarkerSpec(renderer="mesh", mesh_type="sphere", mesh_params={"radius": float(marker_cfg.radius)}) + if cfg_type == "CuboidCfg": + return _NewtonMarkerSpec( + renderer="mesh", mesh_type="box", mesh_params={"size": tuple(float(v) for v in marker_cfg.size)} + ) + if cfg_type == "CylinderCfg": + return _NewtonMarkerSpec( + renderer="mesh", + mesh_type="cylinder", + mesh_params={"radius": float(marker_cfg.radius), "height": float(marker_cfg.height)}, + ) + if cfg_type == "CapsuleCfg": + return _NewtonMarkerSpec( + renderer="mesh", + mesh_type="capsule", + mesh_params={"radius": float(marker_cfg.radius), "height": float(marker_cfg.height)}, + ) + if cfg_type == "ConeCfg": + return _NewtonMarkerSpec( + renderer="mesh", + mesh_type="cone", + mesh_params={"radius": float(marker_cfg.radius), "height": float(marker_cfg.height)}, + ) + + if cfg_type == "UsdFileCfg": + usd_path = str(marker_cfg.usd_path).lower() + default_scale = _extract_scale_hint(marker_cfg) + if usd_path.endswith("arrow_x.usd"): + return _NewtonMarkerSpec( + renderer="mesh", + mesh_type="arrow", + mesh_params={"base_radius": 0.08, "base_height": 0.7, "cap_radius": 0.16, "cap_height": 0.3}, + scale=(default_scale[0], default_scale[1] * 2.5, default_scale[2] * 2.5), + ) + if usd_path.endswith("frame_prim.usd"): + return _NewtonMarkerSpec(renderer="frame", scale=default_scale) + if "dexcube" in usd_path or "dex_cube" in usd_path: + # TODO: Remove this specialized DexCube mesh code when general + # UsdFileCfg-to-Newton mesh conversion is supported. + # DexCube USDs are roughly 6 cm wide. Keep scale separate so task + # configs such as scale=(1.2, 1.2, 1.2) still apply naturally. + return _NewtonMarkerSpec( + renderer="mesh", + mesh_type="textured_box", + mesh_params={"size": (0.06, 0.06, 0.06)}, + color=(1.0, 1.0, 1.0), + texture=_DEX_CUBE_TEXTURE_URL, + ) + + # TODO: Add generic UsdFileCfg -> Newton mesh extraction for mesh-backed USD marker assets. + # For now, only common marker USDs are mapped to lightweight Newton-native fallbacks. + + return _NewtonMarkerSpec(renderer="none") + + +def _create_mesh(newton_cfg: _NewtonMarkerSpec): + mesh_params = newton_cfg.mesh_params or {} + if newton_cfg.mesh_type == "arrow": + return Mesh.create_arrow( + float(mesh_params["base_radius"]), + float(mesh_params["base_height"]), + cap_radius=float(mesh_params["cap_radius"]), + cap_height=float(mesh_params["cap_height"]), + up_axis=Axis.X, + ) + if newton_cfg.mesh_type == "box": + size = mesh_params["size"] + return Mesh.create_box(float(size[0]) * 0.5, float(size[1]) * 0.5, float(size[2]) * 0.5) + if newton_cfg.mesh_type == "textured_box": + return _create_textured_box_mesh(mesh_params["size"]) + if newton_cfg.mesh_type == "sphere": + return Mesh.create_sphere(radius=float(mesh_params["radius"])) + if newton_cfg.mesh_type == "cylinder": + return Mesh.create_cylinder( + float(mesh_params["radius"]), + float(mesh_params["height"]) * 0.5, + up_axis=Axis.Z, + ) + if newton_cfg.mesh_type == "capsule": + return Mesh.create_capsule( + float(mesh_params["radius"]), + float(mesh_params["height"]) * 0.5, + up_axis=Axis.Z, + ) + if newton_cfg.mesh_type == "cone": + return Mesh.create_cone( + float(mesh_params["radius"]), + float(mesh_params["height"]) * 0.5, + up_axis=Axis.Z, + ) + raise ValueError(f"Unsupported Newton mesh type: {newton_cfg.mesh_type}") + + +def _create_textured_box_mesh(size: tuple[float, float, float]) -> _MeshData: + # TODO: Remove this specialized DexCube mesh code when general + # UsdFileCfg-to-Newton mesh conversion is supported. + half = np.asarray(size, dtype=np.float32) * 0.5 + usd_vertices = np.asarray( + [ + (-1.0, -1.0, 1.0), + (-1.0, 1.0, 1.0), + (-1.0, 1.0, -1.0), + (-1.0, -1.0, -1.0), + (-1.0, -1.0, -1.0), + (-1.0, 1.0, -1.0), + (1.0, 1.0, -1.0), + (1.0, -1.0, -1.0), + (1.0, -1.0, -1.0), + (1.0, 1.0, -1.0), + (1.0, 1.0, 1.0), + (1.0, -1.0, 1.0), + (1.0, -1.0, 1.0), + (1.0, 1.0, 1.0), + (-1.0, 1.0, 1.0), + (-1.0, -1.0, 1.0), + (-1.0, -1.0, -1.0), + (1.0, -1.0, -1.0), + (1.0, -1.0, 1.0), + (-1.0, -1.0, 1.0), + (1.0, 1.0, -1.0), + (-1.0, 1.0, -1.0), + (-1.0, 1.0, 1.0), + (1.0, 1.0, 1.0), + ], + dtype=np.float32, + ) + uvs = np.asarray( + [ + (1.0, 0.333333), + (1.0, 0.666667), + (0.5, 0.666667), + (0.5, 0.333333), + (0.5, 0.666667), + (0.5, 1.0), + (0.0, 1.0), + (0.0, 0.666667), + (0.5, 0.333333), + (0.5, 0.666667), + (0.0, 0.666667), + (0.0, 0.333333), + (1.0, 0.0), + (1.0, 0.333333), + (0.5, 0.333333), + (0.5, 0.0), + (0.5, 0.0), + (0.5, 0.333333), + (0.0, 0.333333), + (0.0, 0.0), + (1.0, 0.666667), + (1.0, 1.0), + (0.5, 1.0), + (0.5, 0.666667), + ], + dtype=np.float32, + ) + indices: list[int] = [] + for base in range(0, 24, 4): + indices.extend([base, base + 1, base + 2, base, base + 2, base + 3]) + return _MeshData( + vertices=usd_vertices * half, + indices=np.asarray(indices, dtype=np.int32), + normals=np.zeros((0, 3), dtype=np.float32), + uvs=uvs, + ) + + +def _filter_marker_state( + marker: NewtonVisualizationMarkers, + visible_env_ids: list[int] | None, + num_envs: int, +) -> dict[str, Any]: + if visible_env_ids is None or marker.count == 0 or num_envs <= 0 or marker.count % num_envs != 0: + return { + "visible": marker.visible, + "translations": marker.translations, + "orientations": marker.orientations, + "scales": marker.scales, + "marker_indices": marker.marker_indices, + "count": marker.count, + } + + keep: list[int] = [] + repeat_count = marker.count // num_envs + for block_idx in range(repeat_count): + base = block_idx * num_envs + for env_id in visible_env_ids: + idx = base + env_id + if idx < marker.count: + keep.append(idx) + + if len(keep) == marker.count: + return { + "visible": marker.visible, + "translations": marker.translations, + "orientations": marker.orientations, + "scales": marker.scales, + "marker_indices": marker.marker_indices, + "count": marker.count, + } + + index = torch.tensor(keep, dtype=torch.long, device=marker.infer_device()) + return { + "visible": marker.visible, + "translations": marker.translations.index_select(0, index) if marker.translations is not None else None, + "orientations": marker.orientations.index_select(0, index) if marker.orientations is not None else None, + "scales": marker.scales.index_select(0, index) if marker.scales is not None else None, + "marker_indices": marker.marker_indices.index_select(0, index) if marker.marker_indices is not None else None, + "count": len(keep), + } + + +def _extract_scale_hint(marker_cfg: object) -> tuple[float, float, float]: + scale = marker_cfg.scale if type(marker_cfg).__name__ == "UsdFileCfg" else None + if scale is None: + return (1.0, 1.0, 1.0) + return tuple(float(v) for v in scale) + + +def _extract_color(marker_cfg: object) -> tuple[float, float, float]: + material_cfg = marker_cfg.visual_material + if material_cfg is None: + return _UNBOUND_DEFAULT_FALLBACK_GRAY + + if color := _extract_omnipbr_like_color(material_cfg): + return color + + material_type = type(material_cfg).__name__ + if material_type == "PreviewSurfaceCfg": + return _extract_rgb(material_cfg.diffuse_color) or _UNBOUND_DEFAULT_FALLBACK_GRAY + if material_type == "GlassMdlCfg": + return _extract_rgb(material_cfg.glass_color) or _UNBOUND_DEFAULT_FALLBACK_GRAY + + return _UNBOUND_DEFAULT_FALLBACK_GRAY + + +def _extract_omnipbr_like_color(material_cfg: object) -> tuple[float, float, float] | None: + material_type = type(material_cfg).__name__ + if material_type == "MdlFileCfg": + if not str(material_cfg.mdl_path).lower().endswith("omnipbr.mdl"): + return None + brightness = material_cfg.albedo_brightness + if brightness is not None: + diffuse_constant = (float(brightness), float(brightness), float(brightness)) + else: + diffuse_constant = _OMNIPBR_DEFAULTS["diffuse_color_constant"] + diffuse_tint = _OMNIPBR_DEFAULTS["diffuse_tint"] + else: + return None + + return ( + diffuse_constant[0] * diffuse_tint[0], + diffuse_constant[1] * diffuse_tint[1], + diffuse_constant[2] * diffuse_tint[2], + ) + + +def _extract_rgb(value: Any) -> tuple[float, float, float] | None: + if value is None: + return None + try: + rgb = tuple(float(v) for v in value) + except TypeError: + return None + if len(rgb) < 3: + return None + return (rgb[0], rgb[1], rgb[2]) + + +def _build_frame_lines( + translations: torch.Tensor, + orientations: torch.Tensor, + scales: torch.Tensor, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + unit_axes = ( + torch.eye(3, dtype=torch.float32, device=translations.device).unsqueeze(0).repeat(translations.shape[0], 1, 1) + ) + scaled_axes = unit_axes * scales.unsqueeze(1) + repeated_quats = orientations.unsqueeze(1).repeat(1, 3, 1).reshape(-1, 4) + rotated_axes = quat_apply(repeated_quats, scaled_axes.reshape(-1, 3)).reshape(-1, 3, 3) + starts = translations.unsqueeze(1).repeat(1, 3, 1).reshape(-1, 3) + ends = (translations.unsqueeze(1) + rotated_axes).reshape(-1, 3) + colors = torch.tensor( + [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.35, 1.0]], + dtype=torch.float32, + device=translations.device, + ).repeat(translations.shape[0], 1) + return starts, ends, colors diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer.py b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer.py new file mode 100644 index 000000000000..b548a3e5f4f3 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer.py @@ -0,0 +1,504 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton OpenGL Visualizer implementation.""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +import numpy as np +import warp as wp +from newton.viewer import ViewerGL + +from isaaclab.visualizers.base_visualizer import BaseVisualizer + +from isaaclab_visualizers.newton_adapter import apply_viewer_visible_worlds, resolve_visible_env_indices + +from .newton_visualization_markers import render_newton_visualization_markers +from .newton_visualizer_cfg import NewtonVisualizerCfg + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from isaaclab.physics import BaseSceneDataProvider + + +class NewtonViewerGL(ViewerGL): + """Wrapper around Newton's ViewerGL with training/rendering pause controls.""" + + def __init__( + self, + *args, + metadata: dict | None = None, + update_frequency: int = 1, + **kwargs, + ): + """Initialize Newton viewer wrapper state. + + Args: + *args: Positional arguments forwarded to ``ViewerGL``. + metadata: Optional metadata shown in viewer panels. + update_frequency: Viewer refresh cadence in simulation frames. + **kwargs: Keyword arguments forwarded to ``ViewerGL``. + """ + super().__init__(*args, **kwargs) + self._paused_training = False + self._paused_rendering = False + self._metadata = metadata or {} + self._fallback_draw_controls = False + self._update_frequency = update_frequency + self._color_edit3_prefers_sequence: bool | None = None + + try: + self.register_ui_callback(self._render_training_controls, position="side") + except AttributeError: + self._fallback_draw_controls = True + + def is_training_paused(self) -> bool: + """Return whether simulation is paused by viewer controls.""" + return self._paused_training + + def is_rendering_paused(self) -> bool: + """Return whether rendering is paused by viewer controls.""" + return self._paused_rendering + + def _render_training_controls(self, imgui): + """Render Isaac Lab-specific control widgets in the Newton viewer UI.""" + imgui.separator() + imgui.text("IsaacLab Controls") + + pause_label = "Resume Simulation" if self._paused_training else "Pause Simulation" + if imgui.button(pause_label): + self._paused_training = not self._paused_training + + rendering_label = "Resume Rendering" if self._paused_rendering else "Pause Rendering" + if imgui.button(rendering_label): + self._paused_rendering = not self._paused_rendering + self._paused = self._paused_rendering + + imgui.text("Visualizer Update Frequency") + current_frequency = self._update_frequency + changed, new_frequency = imgui.slider_int( + "##VisualizerUpdateFreq", current_frequency, 1, 20, f"Every {current_frequency} frames" + ) + if changed: + self._update_frequency = new_frequency + + if imgui.is_item_hovered(): + imgui.set_tooltip( + "Controls visualizer update frequency\nlower values -> more responsive visualizer but slower" + " training\nhigher values -> less responsive visualizer but faster training" + ) + + def on_key_press(self, symbol, modifiers): + """Forward key presses unless UI is currently capturing input.""" + if self.ui.is_capturing(): + return + super().on_key_press(symbol, modifiers) + + def _render_ui(self): + """Render default UI and fallback control window when callback hooks are unavailable.""" + if not self._fallback_draw_controls: + return super()._render_ui() + + super()._render_ui() + imgui = self.ui.imgui + from contextlib import suppress + + with suppress(Exception): + imgui.set_next_window_pos(imgui.ImVec2(320, 10)) + + flags = 0 + if imgui.begin("Simulation Controls", flags=flags): + self._render_training_controls(imgui) + imgui.end() + return None + + def _coerce_color3(self, color) -> tuple[float, float, float]: + """Normalize color values from imgui/renderer into an RGB tuple.""" + if hasattr(color, "x") and hasattr(color, "y") and hasattr(color, "z"): + return (float(color.x), float(color.y), float(color.z)) + return (float(color[0]), float(color[1]), float(color[2])) + + def _color_edit3_compat(self, imgui, label: str, color): + """ + # Handle imgui.color_edit3 API differences between bindings. + # Some require vector-like objects, others require a Sequence[float]. + # This method tries both approaches, caching the one that works to avoid repeated exceptions. + # NOTE: This is a compatibility workaround, perhaps we can address the issue more directly. + """ + color_tuple = self._coerce_color3(color) + sequence_color = [color_tuple[0], color_tuple[1], color_tuple[2]] + if self._color_edit3_prefers_sequence is not True: + try: + imvec4 = imgui.ImVec4(sequence_color[0], sequence_color[1], sequence_color[2], 1.0) + changed, edited = imgui.color_edit3(label, imvec4) + self._color_edit3_prefers_sequence = False + return changed, self._coerce_color3(edited) + except Exception: + self._color_edit3_prefers_sequence = True + + try: + changed, edited = imgui.color_edit3(label, sequence_color) + return changed, self._coerce_color3(edited) + except Exception as exc: + logger.debug("[NewtonVisualizer] color_edit3 failed for '%s': %s", label, exc) + return False, color_tuple + + def _render_left_panel(self): + """Override the left panel to remove the base pause checkbox.""" + import newton as nt + + imgui = self.ui.imgui + + io = self.ui.io + imgui.set_next_window_pos(imgui.ImVec2(10, 10)) + imgui.set_next_window_size(imgui.ImVec2(300, io.display_size[1] - 20)) + + flags = imgui.WindowFlags_.no_resize.value + + if imgui.begin(f"Newton Viewer v{nt.__version__}", flags=flags): + imgui.separator() + + header_flags = 0 + + imgui.set_next_item_open(True, imgui.Cond_.appearing) + if imgui.collapsing_header("IsaacLab Options"): + for callback in self._ui_callbacks["side"]: + callback(self.ui.imgui) + + if self.model is not None: + imgui.set_next_item_open(True, imgui.Cond_.appearing) + if imgui.collapsing_header("Model Information", flags=header_flags): + imgui.separator() + num_envs = self._metadata.get("num_envs", 0) + imgui.text(f"Environments: {num_envs}") + axis_names = ["X", "Y", "Z"] + imgui.text(f"Up Axis: {axis_names[self.model.up_axis]}") + gravity = wp.to_torch(self.model.gravity)[0] + gravity_text = f"Gravity: ({gravity[0]:.2f}, {gravity[1]:.2f}, {gravity[2]:.2f})" + imgui.text(gravity_text) + + imgui.set_next_item_open(True, imgui.Cond_.appearing) + if imgui.collapsing_header("Visualization", flags=header_flags): + imgui.separator() + + show_joints = self.show_joints + changed, self.show_joints = imgui.checkbox("Show Joints", show_joints) + + show_contacts = self.show_contacts + changed, self.show_contacts = imgui.checkbox("Show Contacts", show_contacts) + + show_collision = self.show_collision + changed, self.show_collision = imgui.checkbox("Show Collision", show_collision) + + show_springs = self.show_springs + changed, self.show_springs = imgui.checkbox("Show Springs", show_springs) + + show_inertia_boxes = self.show_inertia_boxes + changed, self.show_inertia_boxes = imgui.checkbox("Show Inertia Boxes", show_inertia_boxes) + + show_com = self.show_com + changed, self.show_com = imgui.checkbox("Show Center of Mass", show_com) + + imgui.set_next_item_open(True, imgui.Cond_.appearing) + if imgui.collapsing_header("Rendering Options"): + imgui.separator() + + changed, self.renderer.draw_sky = imgui.checkbox("Sky", self.renderer.draw_sky) + changed, self.renderer.draw_shadows = imgui.checkbox("Shadows", self.renderer.draw_shadows) + changed, self.renderer.draw_wireframe = imgui.checkbox("Wireframe", self.renderer.draw_wireframe) + + try: + changed, self.renderer._light_color = self._color_edit3_compat( + imgui, "Light Color", self.renderer._light_color + ) + changed, self.renderer.sky_upper = self._color_edit3_compat( + imgui, "Upper Sky Color", self.renderer.sky_upper + ) + changed, self.renderer.sky_lower = self._color_edit3_compat( + imgui, "Lower Sky Color", self.renderer.sky_lower + ) + except Exception as exc: + logger.debug("[NewtonVisualizer] Rendering color controls failed: %s", exc) + + imgui.set_next_item_open(True, imgui.Cond_.appearing) + if imgui.collapsing_header("Camera"): + imgui.separator() + + pos = self.camera.pos + pos_text = f"Position: ({pos[0]:.2f}, {pos[1]:.2f}, {pos[2]:.2f})" + imgui.text(pos_text) + imgui.text(f"FOV: {self.camera.fov:.1f}°") + imgui.text(f"Yaw: {self.camera.yaw:.1f}°") + imgui.text(f"Pitch: {self.camera.pitch:.1f}°") + + imgui.separator() + imgui.text("WASD - Forward/Left/Back/Right") + imgui.text("QE - Down/Up") + imgui.text("Left Click - Look around") + imgui.text("Scroll - Zoom") + imgui.text("H - Toggle UI") + imgui.text("ESC - Exit") + + imgui.end() + return + + +class NewtonVisualizer(BaseVisualizer): + """Newton OpenGL visualizer for Isaac Lab.""" + + def __init__(self, cfg: NewtonVisualizerCfg): + """Initialize Newton visualizer state. + + Args: + cfg: Newton visualizer configuration. + """ + super().__init__(cfg) + self.cfg: NewtonVisualizerCfg = cfg + self._viewer: NewtonViewerGL | None = None + self._sim_time = 0.0 + self._step_counter = 0 + self._model = None + self._state = None + self._update_frequency = cfg.update_frequency + self._scene_data_provider = None + self._last_camera_pose: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None + self._headless_no_viewer = False + self._resolved_visible_env_ids: list[int] | None = None + + def initialize(self, scene_data_provider: BaseSceneDataProvider) -> None: + """Initialize viewer resources and bind scene data provider. + + Args: + scene_data_provider: Scene data provider used to fetch model/state data. + """ + if self._is_initialized: + logger.debug("[NewtonVisualizer] initialize() called while already initialized.") + return + if scene_data_provider is None: + raise RuntimeError("Newton visualizer requires a scene_data_provider.") + + self._scene_data_provider = scene_data_provider + metadata = scene_data_provider.get_metadata() + num_envs = int(metadata.get("num_envs", 0)) + self._env_ids = self._compute_visualized_env_ids() + self._model = scene_data_provider.get_newton_model() + self._state = scene_data_provider.get_newton_state() + + # Use pyglet's EGL headless backend when requested. Must run before the first + # ``pyglet.window`` import so ``Window`` resolves to :class:`~pyglet.window.headless.HeadlessWindow`. + if self.cfg.headless: + import pyglet + + pyglet.options["headless"] = True + + self._viewer = NewtonViewerGL( + width=self.cfg.window_width, + height=self.cfg.window_height, + headless=self.cfg.headless, + metadata=metadata, + update_frequency=self.cfg.update_frequency, + ) + + if self._viewer is not None: + self._viewer.set_model(self._model) + apply_viewer_visible_worlds( + self._viewer, + env_ids=self._env_ids, + max_visible_envs=self.cfg.max_visible_envs, + num_envs=num_envs, + ) + self._viewer.set_world_offsets((0.0, 0.0, 0.0)) + initial_pose = self._resolve_initial_camera_pose() + self._apply_camera_pose(initial_pose) + self._viewer.up_axis = 2 # Z-up + + self._viewer.scaling = 1.0 + self._viewer._paused = False + + self._viewer.show_joints = self.cfg.show_joints + self._viewer.show_contacts = self.cfg.show_contacts + self._viewer.show_collision = self.cfg.show_collision + self._viewer.show_springs = self.cfg.show_springs + self._viewer.show_inertia_boxes = self.cfg.show_inertia_boxes + self._viewer.show_com = self.cfg.show_com + + self._viewer.renderer.draw_shadows = self.cfg.enable_shadows + self._viewer.renderer.draw_sky = self.cfg.enable_sky + self._viewer.renderer.draw_wireframe = self.cfg.enable_wireframe + + # Accept list/tuple/array-like config colors and provide a stable tuple for nanobind conversion. + self._viewer.renderer.sky_upper = self._viewer._coerce_color3(self.cfg.sky_upper_color) + self._viewer.renderer.sky_lower = self._viewer._coerce_color3(self.cfg.sky_lower_color) + self._viewer.renderer._light_color = self._viewer._coerce_color3(self.cfg.light_color) + + self._resolved_visible_env_ids = resolve_visible_env_indices(self._env_ids, self.cfg.max_visible_envs, num_envs) + num_visualized_envs = ( + len(self._resolved_visible_env_ids) if self._resolved_visible_env_ids is not None else num_envs + ) + self._log_initialization_table( + logger=logger, + title="NewtonVisualizer Configuration", + rows=[ + ( + "eye", + tuple(float(x) for x in self._viewer.camera.pos) if self._viewer is not None else self.cfg.eye, + ), + ("lookat", self._last_camera_pose[1] if self._last_camera_pose else self.cfg.lookat), + ("cam_source", self.cfg.cam_source), + ("num_visualized_envs", num_visualized_envs), + ("headless", self.cfg.headless), + ], + ) + self._is_initialized = True + + def step(self, dt: float) -> None: + """Advance visualization by one simulation step. + + Args: + dt: Simulation time-step in seconds. + """ + if not self._is_initialized or self._is_closed: + return + + self._sim_time += dt + self._step_counter += 1 + + if self._viewer is None: + if self._scene_data_provider is not None: + self._state = self._scene_data_provider.get_newton_state() + return + + if self.cfg.cam_source == "prim_path": + self._update_camera_from_usd_path() + + self._state = self._scene_data_provider.get_newton_state() + num_envs = int(self._scene_data_provider.get_metadata().get("num_envs", 0)) + + contacts = None + if self._viewer.show_contacts: + contacts_data = self._scene_data_provider.get_contacts() + if isinstance(contacts_data, dict): + contacts = contacts_data.get("contacts", contacts_data) + else: + contacts = contacts_data + + update_frequency = self._viewer._update_frequency if self._viewer else self._update_frequency + if self._step_counter % update_frequency != 0: + return + + try: + if not self._viewer.is_paused(): + self._viewer.begin_frame(self._sim_time) + if self._state is not None: + body_q = getattr(self._state, "body_q", None) + if hasattr(body_q, "shape") and body_q.shape[0] == 0: + self._viewer.end_frame() + return + self._viewer.log_state(self._state) + if contacts is not None and hasattr(self._viewer, "log_contacts"): + try: + self._viewer.log_contacts(contacts, self._state) + except RuntimeError as exc: + logger.debug(f"[NewtonVisualizer] Failed to log contacts: {exc}") + if self.cfg.enable_markers: + render_newton_visualization_markers( + self._viewer, self._resolved_visible_env_ids, num_envs=num_envs + ) + self._viewer.end_frame() + else: + self._viewer._update() + except Exception as exc: + logger.debug("[NewtonVisualizer] Viewer update failed: %s", exc) + + def close(self) -> None: + """Release viewer resources.""" + if self._is_closed: + return + if self._viewer is not None: + self._viewer = None + self._is_closed = True + + def is_running(self) -> bool: + """Return whether the visualizer should continue stepping. + + Returns: + ``True`` while the visualizer is active, otherwise ``False``. + """ + if not self._is_initialized or self._is_closed: + return False + if self._headless_no_viewer and self._viewer is None: + return True + if self._viewer is None: + return False + return self._viewer.is_running() + + def _resolve_initial_camera_pose(self) -> tuple[tuple[float, float, float], tuple[float, float, float]]: + """Resolve initial camera pose from config or USD camera path. + + Returns: + Camera eye and target tuples. + """ + if self.cfg.cam_source == "prim_path": + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is not None: + return pose + raise RuntimeError( + "[NewtonVisualizer] cam_source='prim_path' requires a resolvable camera prim path, " + f"but no camera pose was found for '{self.cfg.cam_prim_path}'." + ) + return self._resolve_cfg_camera_pose("NewtonVisualizer") + + def _apply_camera_pose(self, pose: tuple[tuple[float, float, float], tuple[float, float, float]]) -> None: + """Apply camera eye/target pose to the Newton viewer. + + Args: + pose: Camera eye and target tuples. + """ + if self._viewer is None: + return + cam_pos, cam_target = pose + self._viewer.camera.pos = wp.vec3(*cam_pos) + cam_pos_np = np.array(cam_pos, dtype=np.float32) + cam_target_np = np.array(cam_target, dtype=np.float32) + direction = cam_target_np - cam_pos_np + yaw = np.degrees(np.arctan2(direction[1], direction[0])) + horizontal_dist = np.sqrt(direction[0] ** 2 + direction[1] ** 2) + pitch = np.degrees(np.arctan2(direction[2], horizontal_dist)) + self._viewer.camera.yaw = float(yaw) + self._viewer.camera.pitch = float(pitch) + self._last_camera_pose = (cam_pos, cam_target) + + def _update_camera_from_usd_path(self) -> None: + """Refresh camera pose from configured USD camera path when it changes.""" + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is None: + return + if self._last_camera_pose == pose: + return + self._apply_camera_pose(pose) + + def supports_markers(self) -> bool: + """Newton OpenGL viewer supports Isaac Lab markers through viewer-side meshes and lines.""" + return bool(self.cfg.enable_markers) + + def supports_live_plots(self) -> bool: + """Newton OpenGL viewer does not provide live-plot panels.""" + return False + + def is_training_paused(self) -> bool: + """Return whether training is paused from viewer controls.""" + if not self._is_initialized or self._viewer is None: + return False + return self._viewer.is_training_paused() + + def is_rendering_paused(self) -> bool: + """Return whether rendering is paused from viewer controls.""" + if not self._is_initialized or self._viewer is None: + return False + return self._viewer.is_rendering_paused() diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer_cfg.py b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer_cfg.py new file mode 100644 index 000000000000..711e86e03b31 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/newton/newton_visualizer_cfg.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton OpenGL Visualizer.""" + +from isaaclab.utils import configclass +from isaaclab.visualizers.visualizer_cfg import VisualizerCfg + + +@configclass +class NewtonVisualizerCfg(VisualizerCfg): + """Configuration for Newton OpenGL visualizer.""" + + visualizer_type: str = "newton" + """Type identifier for Newton visualizer.""" + + window_width: int = 1920 + """Window width in pixels.""" + + window_height: int = 1080 + """Window height in pixels.""" + + headless: bool = False + """Run the Newton viewer without requiring a display server.""" + + update_frequency: int = 1 + """Visualizer update frequency (updates every N frames).""" + + show_joints: bool = False + """Show joint visualization.""" + + show_contacts: bool = False + """Show contact visualization.""" + + show_collision: bool = False + """Show collision visualization.""" + + show_springs: bool = False + """Show spring visualization.""" + + show_inertia_boxes: bool = False + """Show inertia box visualization.""" + + show_com: bool = False + """Show center of mass visualization.""" + + enable_shadows: bool = True + """Enable shadow rendering.""" + + enable_sky: bool = True + """Enable sky rendering.""" + + enable_wireframe: bool = False + """Enable wireframe rendering.""" + + sky_upper_color: tuple[float, float, float] = (0.2, 0.4, 0.6) + """Sky upper color RGB [0,1].""" + + sky_lower_color: tuple[float, float, float] = (0.5, 0.6, 0.7) + """Sky lower color RGB [0,1].""" + + light_color: tuple[float, float, float] = (1.0, 1.0, 1.0) + """Light color RGB [0,1].""" diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/newton_adapter.py b/source/isaaclab_visualizers/isaaclab_visualizers/newton_adapter.py new file mode 100644 index 000000000000..6bc3d5a2b4f1 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/newton_adapter.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Shared helpers for viewer env selection (Newton viewers and Kit partial USD visibility).""" + +from __future__ import annotations + + +def resolve_visible_env_indices( + env_ids: list[int] | None, + max_visible_envs: int | None, + num_envs: int, +) -> list[int] | None: + """Resolve which env indices stay visible (same rules as :func:`apply_viewer_visible_worlds`). + + * Cap-only path (``env_ids`` is ``None``): contiguous ``0 .. min(cap, num_envs) - 1`` when ``max_visible_envs`` + is set; otherwise ``None`` (viewer shows all worlds). (Random cap-only selection is applied earlier by + turning it into explicit ``env_ids``.) + * Explicit path (``env_ids`` is a list): if ``max_visible_envs`` is set, keep only the first *cap* indices + (truncate from the end); if ``None``, use the full list. + + Returns: + Selected indices, or ``None`` when all environments should be visible (cap-only, no limit). + """ + if env_ids is not None: + out = list(env_ids) + if max_visible_envs is not None: + out = out[: max(0, int(max_visible_envs))] + return out + if max_visible_envs is not None and num_envs > 0: + n = min(int(max_visible_envs), num_envs) + return list(range(n)) + return None + + +def apply_viewer_visible_worlds( + viewer, + *, + env_ids: list[int] | None, + max_visible_envs: int | None, + num_envs: int, +) -> None: + """Select which simulation worlds are visualized; no-op if the viewer does not support it. + + Prefer this over ``set_model(..., max_worlds=...)`` (deprecated in Newton). + + Args: + viewer: Newton viewer (ViewerGL, ViewerRerun, ViewerViser, etc.). + env_ids: Env indices from ``visible_env_indices`` (after validation), or ``None`` for the cap-only + contiguous path (see ``VisualizerCfg``). + max_visible_envs: When ``env_ids`` is ``None``, caps the contiguous count; otherwise truncates the list to + the first *N* indices. + num_envs: Total environment count from scene metadata. + """ + if not hasattr(viewer, "set_visible_worlds"): + return + resolved = resolve_visible_env_indices(env_ids, max_visible_envs, num_envs) + if resolved is None: + viewer.set_visible_worlds(None) + else: + viewer.set_visible_worlds(resolved) diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/rerun/__init__.py b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/__init__.py new file mode 100644 index 000000000000..d3f16652a561 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rerun visualizer backend. + +This package keeps imports lazy so configuration-only imports avoid pulling the +full runtime backend before it is needed. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .rerun_visualizer_cfg import RerunVisualizerCfg + +if TYPE_CHECKING: + from .rerun_visualizer import RerunVisualizer + +__all__ = ["RerunVisualizer", "RerunVisualizerCfg"] + + +def __getattr__(name: str): + if name == "RerunVisualizer": + from .rerun_visualizer import RerunVisualizer + + return RerunVisualizer + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer.py b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer.py new file mode 100644 index 000000000000..5600ad2ee9c4 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer.py @@ -0,0 +1,354 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Rerun visualizer implementation for Isaac Lab.""" + +from __future__ import annotations + +import atexit +import logging +import socket +import webbrowser +from typing import TYPE_CHECKING +from urllib.parse import quote + +import rerun as rr +import rerun.blueprint as rrb +from newton.viewer import ViewerRerun + +from isaaclab.visualizers.base_visualizer import BaseVisualizer + +from isaaclab_visualizers.newton.newton_visualization_markers import render_newton_visualization_markers +from isaaclab_visualizers.newton_adapter import apply_viewer_visible_worlds, resolve_visible_env_indices + +from .rerun_visualizer_cfg import RerunVisualizerCfg + +if TYPE_CHECKING: + from isaaclab.physics import BaseSceneDataProvider + +logger = logging.getLogger(__name__) + + +def _is_port_free(port: int, host: str = "127.0.0.1") -> bool: + """Return whether a TCP port can be bound on host.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + sock.bind((host, int(port))) + return True + except OSError: + return False + + +def _is_port_open(port: int, host: str = "127.0.0.1") -> bool: + """Return whether a TCP port is currently accepting connections.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.settimeout(0.2) + return sock.connect_ex((host, int(port))) == 0 + + +def _normalize_host(addr: str) -> str: + """Normalize bind host to loopback-friendly address for client URLs.""" + if addr in ("0.0.0.0", "127.0.0.1", "localhost"): + return "127.0.0.1" + return addr + + +def _ensure_rerun_server(app_id: str, bind_address: str, grpc_port: int, web_port: int) -> tuple[str, bool]: + """Resolve rerun endpoint and whether viewer should start web/grpc server.""" + del app_id + connect_host = _normalize_host(bind_address) + expected_uri = f"rerun+http://{connect_host}:{int(grpc_port)}/proxy" + + if _is_port_open(grpc_port, host=connect_host): + # Reuse existing endpoint; do not create a new server here. + return expected_uri, False + + if not _is_port_free(web_port, host=connect_host): + raise RuntimeError(f"Rerun web port {web_port} is in use. Free the port or choose a different `web_port`.") + + # No existing gRPC server: NewtonViewerRerun should start and own it. + return expected_uri, True + + +def _open_rerun_web_viewer(host: str, web_port: int, connect_to: str) -> None: + """Open rerun web UI and prefill endpoint connection URL.""" + url = _rerun_web_viewer_url(host, web_port, connect_to) + try: + if not webbrowser.open_new_tab(url): + logger.info("[RerunVisualizer] Could not auto-open browser tab. Open manually: %s", url) + except Exception: + logger.info("[RerunVisualizer] Could not auto-open browser tab. Open manually: %s", url) + + +def _rerun_web_viewer_url(host: str, web_port: int, connect_to: str) -> str: + """Return rerun web UI URL with prefilled endpoint.""" + return f"http://{host}:{int(web_port)}/?url={quote(connect_to, safe='')}" + + +class NewtonViewerRerun(ViewerRerun): + """Wrapper around Newton's ViewerRerun with rendering pause controls.""" + + def __init__(self, *args, **kwargs): + """Initialize viewer wrapper and Isaac Lab pause state.""" + super().__init__(*args, **kwargs) + self._paused_rendering = False + + def is_rendering_paused(self) -> bool: + """Return whether rendering is paused by viewer controls.""" + return self._paused_rendering + + def _render_ui(self): + """Extend base UI with Isaac Lab rendering pause toggle.""" + super()._render_ui() + + if not self._has_imgui: + return + + imgui = self._imgui + if not imgui: + return + + if imgui.collapsing_header("IsaacLab Controls"): + if imgui.button("Pause Rendering" if not self._paused_rendering else "Resume Rendering"): + self._paused_rendering = not self._paused_rendering + + +class RerunVisualizer(BaseVisualizer): + """Rerun visualizer for Isaac Lab.""" + + def __init__(self, cfg: RerunVisualizerCfg): + """Initialize Rerun visualizer state. + + Args: + cfg: Rerun visualizer configuration. + """ + super().__init__(cfg) + self.cfg: RerunVisualizerCfg = cfg + self._viewer: NewtonViewerRerun | None = None + self._sim_time = 0.0 + self._step_counter = 0 + self._model = None + self._state = None + self._scene_data_provider = None + self._last_camera_pose: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None + self._resolved_visible_env_ids: list[int] | None = None + + def initialize(self, scene_data_provider: BaseSceneDataProvider) -> None: + """Initialize rerun viewer and bind scene data provider. + + Args: + scene_data_provider: Scene data provider used to fetch model/state data. + """ + if self._is_initialized: + return + if scene_data_provider is None: + raise RuntimeError("Rerun visualizer requires a scene_data_provider.") + + self._scene_data_provider = scene_data_provider + metadata = scene_data_provider.get_metadata() + num_envs = int(metadata.get("num_envs", 0)) + self._env_ids = self._compute_visualized_env_ids() + self._model = scene_data_provider.get_newton_model() + self._state = scene_data_provider.get_newton_state() + + grpc_port = int(self.cfg.grpc_port) + web_port = int(self.cfg.web_port) + bind_address = self.cfg.bind_address or "0.0.0.0" + rerun_address, start_server_in_viewer = _ensure_rerun_server( + app_id=self.cfg.app_id, + bind_address=bind_address, + grpc_port=grpc_port, + web_port=web_port, + ) + if not start_server_in_viewer: + logger.info("[RerunVisualizer] Reusing existing rerun server at %s.", rerun_address) + + viewer_address = None if start_server_in_viewer else rerun_address + self._viewer = NewtonViewerRerun( + app_id=self.cfg.app_id, + address=viewer_address, + serve_web_viewer=start_server_in_viewer, + web_port=web_port, + grpc_port=grpc_port, + keep_historical_data=self.cfg.keep_historical_data, + keep_scalar_history=self.cfg.keep_scalar_history, + record_to_rrd=self.cfg.record_to_rrd, + ) + if start_server_in_viewer: + rerun_address = getattr(self._viewer, "_grpc_server_uri", rerun_address) + viewer_host = _normalize_host(bind_address) + viewer_url = _rerun_web_viewer_url(viewer_host, web_port, rerun_address) + if self.cfg.open_browser and not start_server_in_viewer: + _open_rerun_web_viewer(viewer_host, web_port, rerun_address) + self._viewer.set_model(self._model) + apply_viewer_visible_worlds( + self._viewer, + env_ids=self._env_ids, + max_visible_envs=self.cfg.max_visible_envs, + num_envs=num_envs, + ) + # Preserve simulation world positions (env_spacing) rather than adding viewer-side offsets. + self._viewer.set_world_offsets((0.0, 0.0, 0.0)) + initial_pose = self._resolve_initial_camera_pose() + self._apply_camera_pose(initial_pose) + self._viewer.up_axis = 2 + self._viewer.scaling = 1.0 + self._viewer._paused = False + + self._resolved_visible_env_ids = resolve_visible_env_indices(self._env_ids, self.cfg.max_visible_envs, num_envs) + num_visualized_envs = ( + len(self._resolved_visible_env_ids) if self._resolved_visible_env_ids is not None else num_envs + ) + self._log_initialization_table( + logger=logger, + title="RerunVisualizer Configuration", + rows=[ + ("eye", self.cfg.eye), + ("lookat", self.cfg.lookat), + ("cam_source", self.cfg.cam_source), + ("num_visualized_envs", num_visualized_envs), + ("endpoint", f"http://{viewer_host}:{web_port}"), + ("viewer_url", viewer_url), + ("bind_address", bind_address), + ("grpc_port", grpc_port), + ("web_port", web_port), + ("open_browser", self.cfg.open_browser), + ("record_to_rrd", self.cfg.record_to_rrd or ""), + ], + ) + + self._is_initialized = True + atexit.register(self.close) + + def step(self, dt: float) -> None: + """Advance visualization by one simulation step. + + Args: + dt: Simulation time-step in seconds. + """ + if not self._is_initialized or self._is_closed or self._viewer is None: + return + + self._sim_time += dt + self._step_counter += 1 + + if self.cfg.cam_source == "prim_path": + self._update_camera_from_usd_path() + + self._state = self._scene_data_provider.get_newton_state() + num_envs = int(self._scene_data_provider.get_metadata().get("num_envs", 0)) + + if not self._viewer.is_paused(): + self._viewer.begin_frame(self._sim_time) + try: + if self._state is not None: + body_q = getattr(self._state, "body_q", None) + if hasattr(body_q, "shape") and body_q.shape[0] == 0: + return + self._viewer.log_state(self._state) + if self.cfg.enable_markers: + render_newton_visualization_markers( + self._viewer, self._resolved_visible_env_ids, num_envs=num_envs + ) + finally: + self._viewer.end_frame() + + def close(self) -> None: + """Close viewer/session resources.""" + if self._is_closed: + return + + if self._viewer is not None: + try: + self._viewer.close() + except Exception as exc: + logger.warning("[RerunVisualizer] Failed while closing viewer: %s", exc) + finally: + self._viewer = None + + try: + rr.disconnect() + except Exception as exc: + logger.warning("[RerunVisualizer] Failed while disconnecting rerun: %s", exc) + self._is_closed = True + + def is_running(self) -> bool: + """Return whether the visualizer should continue stepping. + + Returns: + ``True`` while the visualizer is active, otherwise ``False``. + """ + if not self._is_initialized or self._is_closed: + return False + if self._viewer is None: + return False + return self._viewer.is_running() + + def _resolve_initial_camera_pose(self) -> tuple[tuple[float, float, float], tuple[float, float, float]]: + """Resolve initial camera pose from config or USD camera path.""" + if self.cfg.cam_source == "prim_path": + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is not None: + return pose + raise RuntimeError( + "[RerunVisualizer] cam_source='prim_path' requires a resolvable camera prim path, " + f"but no camera pose was found for '{self.cfg.cam_prim_path}'." + ) + return self._resolve_cfg_camera_pose("RerunVisualizer") + + def _apply_camera_pose(self, pose: tuple[tuple[float, float, float], tuple[float, float, float]]) -> None: + """Apply camera pose to rerun's 3D view controls. + + Args: + pose: Camera eye and target tuples. + """ + if self._viewer is None: + return + cam_pos, cam_target = pose + rr.send_blueprint( + rrb.Blueprint( + rrb.Spatial3DView( + name="3D View", + origin="/", + eye_controls=rrb.EyeControls3D( + position=cam_pos, + look_target=cam_target, + ), + ), + collapse_panels=True, + ) + ) + self._last_camera_pose = (cam_pos, cam_target) + + def _update_camera_from_usd_path(self) -> None: + """Refresh camera pose from configured USD camera path when it changes.""" + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is None: + return + if self._last_camera_pose == pose: + return + self._apply_camera_pose(pose) + + def supports_markers(self) -> bool: + """Rerun backend supports Isaac Lab markers through Newton viewer primitives.""" + return bool(self.cfg.enable_markers) + + def supports_live_plots(self) -> bool: + """Rerun backend currently does not expose Isaac Lab live-plot widgets.""" + return False + + def is_training_paused(self) -> bool: + """Return whether training is paused. + + Rerun viewer exposes rendering pause only. + """ + return False + + def is_rendering_paused(self) -> bool: + """Return whether rendering is paused from viewer controls.""" + if not self._is_initialized or self._viewer is None: + return False + return self._viewer.is_rendering_paused() diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer_cfg.py b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer_cfg.py new file mode 100644 index 000000000000..780b346f802b --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/rerun/rerun_visualizer_cfg.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the Rerun visualizer.""" + +from __future__ import annotations + +from isaaclab.utils import configclass +from isaaclab.visualizers.visualizer_cfg import VisualizerCfg + + +@configclass +class RerunVisualizerCfg(VisualizerCfg): + """Configuration for Rerun visualizer (web-based visualization).""" + + visualizer_type: str = "rerun" + """Type identifier for Rerun visualizer.""" + + app_id: str = "isaaclab-simulation" + """Application identifier shown in viewer title.""" + + web_port: int = 9090 + """Port of the local rerun web viewer which is launched in the browser.""" + + grpc_port: int = 9876 + """Port of the rerun gRPC server (used when serving web viewer externally).""" + + bind_address: str | None = "0.0.0.0" + """Host used for endpoint formatting and reuse checks. + + Notes: + - If an existing rerun server is reachable on ``grpc_port``, it is reused. + - New server startup is managed by ``newton.viewer.ViewerRerun`` via the rerun Python SDK. + - Local browser links normalize common loopback/wildcard hosts to ``127.0.0.1``. + """ + + open_browser: bool = True + """Whether to attempt opening the rerun web viewer URL in a browser.""" + + keep_historical_data: bool = False + """Keep transform history for time scrubbing (False = constant memory for training).""" + + keep_scalar_history: bool = False + """Keep scalar/plot history in timeline.""" + + record_to_rrd: str | None = None + """Path to save .rrd recording file. None = no recording.""" diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/viser/__init__.py b/source/isaaclab_visualizers/isaaclab_visualizers/viser/__init__.py new file mode 100644 index 000000000000..1ede53812ea4 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/viser/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Viser visualizer backend. + +This package keeps imports lazy so configuration-only imports avoid pulling the +full runtime backend before it is needed. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .viser_visualizer_cfg import ViserVisualizerCfg + +if TYPE_CHECKING: + from .viser_visualizer import ViserVisualizer + +__all__ = ["ViserVisualizer", "ViserVisualizerCfg"] + + +def __getattr__(name: str): + if name == "ViserVisualizer": + from .viser_visualizer import ViserVisualizer + + return ViserVisualizer + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer.py b/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer.py new file mode 100644 index 000000000000..44a0e92c1d70 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer.py @@ -0,0 +1,375 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Viser-based visualizer using Newton's ViewerViser.""" + +from __future__ import annotations + +import contextlib +import io +import logging +import os +import webbrowser +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from newton.viewer import ViewerViser + +from isaaclab.visualizers.base_visualizer import BaseVisualizer + +from isaaclab_visualizers.newton.newton_visualization_markers import render_newton_visualization_markers +from isaaclab_visualizers.newton_adapter import apply_viewer_visible_worlds, resolve_visible_env_indices + +from .viser_visualizer_cfg import ViserVisualizerCfg + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from isaaclab.physics import BaseSceneDataProvider + + +def _disable_viser_runtime_client_rebuild_if_bundled() -> None: + """Skip viser's runtime frontend rebuild when a bundled build is present.""" + try: + import viser + import viser._client_autobuild as client_autobuild + except Exception: + return + + client_root = Path(viser.__file__).resolve().parent / "client" + has_bundled_build = (client_root / "build" / "index.html").exists() + if not has_bundled_build: + return + + client_autobuild.ensure_client_is_built = lambda: None + + +@contextlib.contextmanager +def _suppress_viser_startup_logs(enabled: bool): + """Temporarily quiet noisy viser/websockets startup output.""" + if not enabled: + yield + return + + websockets_logger = logging.getLogger("websockets.server") + previous_level = websockets_logger.level + websockets_logger.setLevel(logging.WARNING) + try: + with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(io.StringIO()): + yield + finally: + websockets_logger.setLevel(previous_level) + + +def _open_viser_web_viewer(port: int) -> None: + """Open the local viser web UI in a browser.""" + url = _viser_web_viewer_url(port) + try: + if not webbrowser.open_new_tab(url): + logger.info("[ViserVisualizer] Could not auto-open browser tab. Open manually: %s", url) + except Exception: + logger.info("[ViserVisualizer] Could not auto-open browser tab. Open manually: %s", url) + + +def _viser_web_viewer_url(port: int) -> str: + """Return local viser web UI URL.""" + return f"http://localhost:{int(port)}" + + +class NewtonViewerViser(ViewerViser): + """Isaac Lab wrapper for Newton's ViewerViser.""" + + def __init__( + self, + port: int = 8080, + label: str | None = None, + verbose: bool = True, + share: bool = False, + record_to_viser: str | None = None, + metadata: dict | None = None, + ): + """Initialize Newton-backed viser viewer wrapper. + + Args: + port: HTTP port for viser server. + label: Optional viewer label. + verbose: Whether to keep verbose startup output enabled. + share: Whether to enable sharing/tunneling. + record_to_viser: Optional recording destination. + metadata: Optional metadata attached to the viewer. + """ + _disable_viser_runtime_client_rebuild_if_bundled() + super().__init__( + port=port, + label=label, + verbose=verbose, + share=share, + record_to_viser=record_to_viser, + ) + self._metadata = metadata or {} + + +class ViserVisualizer(BaseVisualizer): + """Viser web-based visualizer backed by Newton's ViewerViser.""" + + def __init__(self, cfg: ViserVisualizerCfg): + """Initialize Viser visualizer state. + + Args: + cfg: Viser visualizer configuration. + """ + super().__init__(cfg) + self.cfg: ViserVisualizerCfg = cfg + self._viewer: NewtonViewerViser | None = None + self._model: Any | None = None + self._state = None + self._sim_time = 0.0 + self._scene_data_provider = None + self._active_record_path: str | None = None + self._last_camera_pose: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None + self._pending_camera_pose: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None + self._resolved_visible_env_ids: list[int] | None = None + self._warned_marker_render_failure = False + + def initialize(self, scene_data_provider: BaseSceneDataProvider) -> None: + """Initialize viewer resources and bind scene data provider. + + Args: + scene_data_provider: Scene data provider used to fetch model/state data. + """ + if self._is_initialized: + logger.debug("[ViserVisualizer] initialize() called while already initialized.") + return + if scene_data_provider is None: + raise RuntimeError("Viser visualizer requires a scene_data_provider.") + + self._scene_data_provider = scene_data_provider + metadata = scene_data_provider.get_metadata() + self._env_ids = self._compute_visualized_env_ids() + self._model = scene_data_provider.get_newton_model() + self._state = scene_data_provider.get_newton_state() + + self._active_record_path = self.cfg.record_to_viser + self._create_viewer(record_to_viser=self.cfg.record_to_viser, metadata=metadata) + num_envs_meta = int(metadata.get("num_envs", 0)) + self._resolved_visible_env_ids = resolve_visible_env_indices( + self._env_ids, self.cfg.max_visible_envs, num_envs_meta + ) + num_visualized_envs = ( + len(self._resolved_visible_env_ids) if self._resolved_visible_env_ids is not None else num_envs_meta + ) + viewer_url = _viser_web_viewer_url(self.cfg.port) + self._log_initialization_table( + logger=logger, + title="ViserVisualizer Configuration", + rows=[ + ("eye", self.cfg.eye), + ("lookat", self.cfg.lookat), + ("cam_source", self.cfg.cam_source), + ("num_visualized_envs", num_visualized_envs), + ("port", self.cfg.port), + ("viewer_url", viewer_url), + ("record_to_viser", self.cfg.record_to_viser or ""), + ], + ) + self._is_initialized = True + + def step(self, dt: float) -> None: + """Advance visualization by one simulation step. + + Args: + dt: Simulation time-step in seconds. + """ + if not self._is_initialized or self._viewer is None or self._scene_data_provider is None: + return + + if self.cfg.cam_source == "prim_path": + self._update_camera_from_usd_path() + self._apply_pending_camera_pose() + + self._state = self._scene_data_provider.get_newton_state() + num_envs = int(self._scene_data_provider.get_metadata().get("num_envs", 0)) + self._sim_time += dt + self._viewer.begin_frame(self._sim_time) + try: + self._viewer.log_state(self._state) + if self.cfg.enable_markers: + self._render_markers(num_envs) + finally: + self._viewer.end_frame() + + def _render_markers(self, num_envs: int) -> None: + """Render marker overlays without letting them interrupt Viser body updates.""" + try: + render_newton_visualization_markers(self._viewer, self._resolved_visible_env_ids, num_envs=num_envs) + except Exception as exc: + if not self._warned_marker_render_failure: + logger.warning("[ViserVisualizer] Marker rendering failed; continuing body updates: %s", exc) + self._warned_marker_render_failure = True + else: + logger.debug("[ViserVisualizer] Marker rendering failed: %s", exc) + + def close(self) -> None: + """Close viewer resources and finalize optional recording.""" + if not self._is_initialized: + return + try: + self._close_viewer(finalize_viser=bool(self.cfg.record_to_viser)) + except Exception as exc: + logger.warning("[ViserVisualizer] Error during close: %s", exc) + + self._viewer = None + self._is_initialized = False + self._is_closed = True + self._active_record_path = None + self._pending_camera_pose = None + + def is_running(self) -> bool: + """Return whether the visualizer should continue stepping. + + Returns: + ``True`` while the visualizer is active, otherwise ``False``. + """ + if not self._is_initialized or self._is_closed: + return False + if self._viewer is None: + return False + return self._viewer.is_running() + + def is_training_paused(self) -> bool: + """Return whether training is paused. + + Viser backend does not currently expose a training pause control. + """ + return False + + def supports_markers(self) -> bool: + """Viser backend supports Isaac Lab markers through Newton viewer primitives.""" + return bool(self.cfg.enable_markers) + + def supports_live_plots(self) -> bool: + """Viser backend currently does not expose Isaac Lab live-plot widgets.""" + return False + + def _create_viewer(self, record_to_viser: str | None, metadata: dict | None = None) -> None: + """Create Newton-backed Viser viewer and apply initial camera. + + Args: + record_to_viser: Optional output path for viser recording. + metadata: Optional metadata passed to viewer. + """ + if self._model is None: + raise RuntimeError("Viser visualizer requires a Newton model.") + + with _suppress_viser_startup_logs(enabled=not self.cfg.verbose): + self._viewer = NewtonViewerViser( + port=self.cfg.port, + label=self.cfg.label, + verbose=self.cfg.verbose, + share=self.cfg.share, + record_to_viser=record_to_viser, + metadata=metadata or {}, + ) + num_envs = int((metadata or {}).get("num_envs", 0)) + self._viewer.set_model(self._model) + apply_viewer_visible_worlds( + self._viewer, + env_ids=self._env_ids, + max_visible_envs=self.cfg.max_visible_envs, + num_envs=num_envs, + ) + # Preserve simulation world positions (env_spacing) rather than adding viewer-side offsets. + self._viewer.set_world_offsets((0.0, 0.0, 0.0)) + if self.cfg.open_browser: + _open_viser_web_viewer(self.cfg.port) + initial_pose = self._resolve_initial_camera_pose() + self._set_viser_camera_view(initial_pose) + self._sim_time = 0.0 + + def _close_viewer(self, finalize_viser: bool = False) -> None: + """Close viewer and log recording output when requested.""" + if self._viewer is None: + return + self._viewer.close() + if finalize_viser and self._active_record_path: + if os.path.exists(self._active_record_path): + size = os.path.getsize(self._active_record_path) + logger.info("[ViserVisualizer] Recording saved: %s (%s bytes)", self._active_record_path, size) + else: + logger.warning("[ViserVisualizer] Recording file not found: %s", self._active_record_path) + self._viewer = None + + def _resolve_initial_camera_pose(self) -> tuple[tuple[float, float, float], tuple[float, float, float]]: + """Resolve initial camera pose from config or USD camera path.""" + if self.cfg.cam_source == "prim_path": + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is not None: + return pose + raise RuntimeError( + "[ViserVisualizer] cam_source='prim_path' requires a resolvable camera prim path, " + f"but no camera pose was found for '{self.cfg.cam_prim_path}'." + ) + return self._resolve_cfg_camera_pose("ViserVisualizer") + + def _try_apply_viser_camera_view(self, pose: tuple[tuple[float, float, float], tuple[float, float, float]]) -> bool: + """Try applying camera pose to active viser clients. + + Returns: + ``True`` if at least one client camera was updated, otherwise ``False``. + """ + if self._viewer is None: + return False + server = getattr(self._viewer, "_server", None) + get_clients = getattr(server, "get_clients", None) if server is not None else None + if not callable(get_clients): + return False + + try: + clients = get_clients() + except Exception: + return False + + client_iterable = clients.values() if isinstance(clients, dict) else clients + cam_pos, cam_target = pose + applied = False + for client in client_iterable: + camera = getattr(client, "camera", None) + if camera is None: + continue + try: + if hasattr(camera, "position"): + camera.position = cam_pos + applied = True + if hasattr(camera, "look_at"): + camera.look_at = cam_target + applied = True + except Exception: + continue + return applied + + def _set_viser_camera_view(self, pose: tuple[tuple[float, float, float], tuple[float, float, float]]) -> None: + """Apply or defer camera pose update depending on client readiness.""" + if self._try_apply_viser_camera_view(pose): + self._last_camera_pose = pose + self._pending_camera_pose = None + else: + self._pending_camera_pose = pose + + def _apply_pending_camera_pose(self) -> None: + """Apply deferred camera pose once client cameras are available.""" + if self._pending_camera_pose is None: + return + if self._try_apply_viser_camera_view(self._pending_camera_pose): + self._last_camera_pose = self._pending_camera_pose + self._pending_camera_pose = None + + def _update_camera_from_usd_path(self) -> None: + """Refresh camera pose from configured USD camera path when it changes.""" + pose = self._resolve_camera_pose_from_usd_path(self.cfg.cam_prim_path) + if pose is None: + return + if self._last_camera_pose == pose or self._pending_camera_pose == pose: + return + self._set_viser_camera_view(pose) diff --git a/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer_cfg.py b/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer_cfg.py new file mode 100644 index 000000000000..f3f2aa39b0c2 --- /dev/null +++ b/source/isaaclab_visualizers/isaaclab_visualizers/viser/viser_visualizer_cfg.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the Viser visualizer.""" + +from __future__ import annotations + +from isaaclab.utils import configclass +from isaaclab.visualizers.visualizer_cfg import VisualizerCfg + + +@configclass +class ViserVisualizerCfg(VisualizerCfg): + """Configuration for Viser visualizer (web-based visualization).""" + + visualizer_type: str = "viser" + """Type identifier for Viser visualizer.""" + + port: int = 8080 + """Port of the local viser web server.""" + + open_browser: bool = True + """Whether to attempt opening the viser web viewer URL in a browser.""" + + label: str | None = "Isaac Lab Simulation" + """Optional label shown in the viewer page title.""" + + verbose: bool = True + """Whether to print viewer server startup information.""" + + share: bool = False + """Whether to request a public share URL from viser.""" + + record_to_viser: str | None = None + """Path to save a .viser recording file. None = no recording.""" diff --git a/source/isaaclab_visualizers/setup.py b/source/isaaclab_visualizers/setup.py new file mode 100644 index 000000000000..fc120619787b --- /dev/null +++ b/source/isaaclab_visualizers/setup.py @@ -0,0 +1,57 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_visualizers' python package.""" + +from setuptools import setup + +# Base requirements shared across visualizer backends. +INSTALL_REQUIRES = [ + "isaaclab", + "numpy", +] + +EXTRAS_REQUIRE = { + "kit": [], + "newton": [ + "warp-lang", + "newton @ git+https://github.com/newton-physics/newton.git@a27277ed49d6f307b8a1e4c394be7e1d14965a62", + "PyOpenGL-accelerate", + "imgui-bundle>=1.92.5", + ], + "rerun": [ + "newton @ git+https://github.com/newton-physics/newton.git@a27277ed49d6f307b8a1e4c394be7e1d14965a62", + "rerun-sdk>=0.29.0", + ], + "viser": [ + "newton @ git+https://github.com/newton-physics/newton.git@a27277ed49d6f307b8a1e4c394be7e1d14965a62", + "viser>=1.0.16", + ], +} + +EXTRAS_REQUIRE["all"] = sorted({dep for group in EXTRAS_REQUIRE.values() for dep in group}) + +setup( + name="isaaclab_visualizers", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url="https://github.com/isaac-sim/IsaacLab", + version="0.1.0", + description="Visualizer backends for Isaac Lab (Kit, Newton, Rerun, Viser).", + keywords=["robotics", "simulation", "visualization"], + license="BSD-3-Clause", + include_package_data=True, + package_data={"": ["*.pyi"]}, + python_requires=">=3.12", + install_requires=INSTALL_REQUIRES, + extras_require=EXTRAS_REQUIRE, + packages=["isaaclab_visualizers"], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_visualizers/test/__init__.py b/source/isaaclab_visualizers/test/__init__.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/source/isaaclab_visualizers/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab_visualizers/test/test_newton_adapter.py b/source/isaaclab_visualizers/test/test_newton_adapter.py new file mode 100644 index 000000000000..3c020a8d10ee --- /dev/null +++ b/source/isaaclab_visualizers/test/test_newton_adapter.py @@ -0,0 +1,49 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for viewer env resolution helpers.""" + +from __future__ import annotations + +from isaaclab_visualizers.newton_adapter import apply_viewer_visible_worlds, resolve_visible_env_indices + + +def test_resolve_visible_env_indices_truncates_explicit_list(): + assert resolve_visible_env_indices([1, 3, 5], 2, 10) == [1, 3] + assert resolve_visible_env_indices([1, 3], 1, 10) == [1] + + +def test_resolve_visible_env_indices_explicit_full_list_when_no_cap(): + assert resolve_visible_env_indices([1, 3], None, 10) == [1, 3] + + +def test_resolve_visible_env_indices_cap_when_no_filter(): + # When _compute_visualized_env_ids is None, cap is max_visible_envs. + assert resolve_visible_env_indices(None, 3, 10) == [0, 1, 2] + + +def test_resolve_visible_env_indices_all_when_no_cap(): + assert resolve_visible_env_indices(None, None, 10) is None + + +def test_resolve_visible_env_indices_num_envs_zero_falls_through_like_newton(): + assert resolve_visible_env_indices(None, 5, 0) is None + + +def test_apply_viewer_visible_worlds_delegates_to_resolved(): + calls: list = [] + + class _V: + def set_visible_worlds(self, worlds): + calls.append(worlds) + + apply_viewer_visible_worlds(_V(), env_ids=None, max_visible_envs=2, num_envs=5) + assert calls == [[0, 1]] + + apply_viewer_visible_worlds(_V(), env_ids=[2], max_visible_envs=99, num_envs=5) + assert calls[-1] == [2] + + apply_viewer_visible_worlds(_V(), env_ids=None, max_visible_envs=None, num_envs=3) + assert calls[-1] is None diff --git a/source/isaaclab_visualizers/test/test_visualizer_cartpole_integration.py b/source/isaaclab_visualizers/test/test_visualizer_cartpole_integration.py new file mode 100644 index 000000000000..42d1368dcebf --- /dev/null +++ b/source/isaaclab_visualizers/test/test_visualizer_cartpole_integration.py @@ -0,0 +1,608 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Integration tests: cartpole env + per-backend visualizers (Kit Replicator, tiled camera, GL, Rerun, Viser). + +Visualizer packages use ``logging.getLogger(__name__)``, so loggers are named like +``isaaclab_visualizers.kit.kit_visualizer`` and ``isaaclab.visualizers.base_visualizer``. +:class:`~isaaclab.sim.simulation_context.SimulationContext` uses +``logging.getLogger(__name__)`` → ``isaaclab.sim.simulation_context``. + +We filter :class:`~pytest.LogCaptureFixture` records with :data:`_VIS_LOGGER_PREFIXES` +so only those namespaces count (not Omniverse, PhysX, or unrelated warnings). + +Set :data:`ASSERT_VISUALIZER_WARNINGS` to ``True`` locally or in CI if you want tests to +fail on WARNING-level records from those loggers; by default only ERROR+ fails. +""" + +from __future__ import annotations + +# Pyglet must use HeadlessWindow (EGL) before ``pyglet.window`` is imported so Newton +# ViewerGL can construct without an X11 display (matches ``headless=True`` on NewtonVisualizerCfg). +import pyglet + +pyglet.options["headless"] = True + +from isaaclab.app import AppLauncher + +# launch Kit app +simulation_app = AppLauncher(headless=True, enable_cameras=True).app + +import contextlib +import copy +import logging +import socket + +import numpy as np +import pytest +import torch +import warp as wp +from isaaclab_visualizers.kit import KitVisualizer, KitVisualizerCfg +from isaaclab_visualizers.newton import NewtonVisualizer, NewtonVisualizerCfg +from isaaclab_visualizers.rerun import RerunVisualizer, RerunVisualizerCfg +from isaaclab_visualizers.viser import ViserVisualizer, ViserVisualizerCfg + +import isaaclab.sim as sim_utils +from isaaclab.sim import SimulationContext + +from isaaclab_tasks.direct.cartpole.cartpole_camera_env import CartpoleCameraEnv +from isaaclab_tasks.direct.cartpole.cartpole_camera_presets_env_cfg import CartpoleCameraPresetsEnvCfg +from isaaclab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpolePhysicsCfg + +# When True, tests also fail on WARNING-level records from visualizer-related loggers. +ASSERT_VISUALIZER_WARNINGS = False + +_MAX_NON_BLACK_STEPS = 8 +"""Steps for tiled camera / Rerun / Viser smoke tests (early exit ok when non-black).""" + +_CARTPOLE_INTEGRATION_NUM_ENVS = 1 +"""Vectorized env count for cartpole + visualizer integration tests.""" + +_CARTPOLE_INTEGRATION_VISUALIZER_EYE: tuple[float, float, float] = (3.0, 3.0, 3.0) +"""Passed to :class:`~isaaclab.visualizers.visualizer_cfg.VisualizerCfg` subclasses (``eye``).""" + +_CARTPOLE_INTEGRATION_VISUALIZER_LOOKAT: tuple[float, float, float] = (-4.0, -4.0, 0.0) +"""Passed to visualizer cfgs (``lookat``); also applied to :class:`~isaaclab.envs.common.ViewerCfg` for the env.""" + +# Resolution overrides for this test module (cartpole preset defaults: tiled camera 100×100; Kit helper was 320×240). +_CARTPOLE_KIT_INTEGRATION_RENDER_RESOLUTION: tuple[int, int] = (600, 600) +"""Kit: Replicator ``render_product`` (width, height) for viewport RGB in the motion check.""" + +_CARTPOLE_NEWTON_INTEGRATION_WINDOW_SIZE: tuple[int, int] = (600, 600) +"""Newton: ``NewtonVisualizerCfg`` framebuffer (window_width × window_height) for ``get_frame()``.""" + +_CARTPOLE_TILED_CAMERA_INTEGRATION_WH: tuple[int, int] = (600, 600) +"""Tiled camera per-env tile width/height (preset default is 100×100); keeps ``observation_space`` consistent.""" + +_VIS_FRAME_TEST_STEPS = 60 +"""Steps for Kit / Newton frame capture: no early exit.""" + +# Motion check compares the 2nd vs last captured frame (e.g. 2nd vs 60th when *_STEPS* is 60). +_MOTION_FRAME_EARLY_IDX = 1 +"""0-based index of the *early* frame (2nd capture).""" + +_MOTION_FRAME_LATE_IDX = _VIS_FRAME_TEST_STEPS - 1 +"""0-based index of the *late* frame (e.g. 60th capture when :data:`_VIS_FRAME_TEST_STEPS` is 60).""" + +# Early vs late frame motion: void background stays similar; only count *strongly* differing pixels. +_FRAME_MOTION_CHANNEL_DIFF_THRESHOLD = 50 +"""A pixel counts as differing if max(|ΔR|, |ΔG|, |ΔB|) >= this (0–255 space).""" + +_FRAME_MOTION_MIN_DIFFERING_PIXELS = 100 +"""Minimum number of such pixels between early and late frames (stale/frozen viz should be near zero).""" + +_VIS_LOGGER_PREFIXES = ( + "isaaclab.visualizers", + "isaaclab_visualizers", + "isaaclab.sim.simulation_context", +) + + +def _logger_name_matches_visualizer_scope(logger_name: str) -> bool: + """Return True if *logger_name* is a visualizer / SimulationContext visualizer path.""" + return any(logger_name.startswith(prefix) for prefix in _VIS_LOGGER_PREFIXES) + + +def _assert_no_visualizer_log_issues(caplog: pytest.LogCaptureFixture, *, fail_on_warnings: bool | None = None) -> None: + """Fail if captured records include ERROR/CRITICAL (always) or WARNING (if *fail_on_warnings*). + + *fail_on_warnings* defaults to :data:`ASSERT_VISUALIZER_WARNINGS`. + """ + if fail_on_warnings is None: + fail_on_warnings = ASSERT_VISUALIZER_WARNINGS + + error_logs = [ + r for r in caplog.records if r.levelno >= logging.ERROR and _logger_name_matches_visualizer_scope(r.name) + ] + assert not error_logs, "Visualizer-related error logs: " + "; ".join( + f"{r.name}: {r.getMessage()}" for r in error_logs + ) + + if fail_on_warnings: + warning_logs = [ + r for r in caplog.records if r.levelno == logging.WARNING and _logger_name_matches_visualizer_scope(r.name) + ] + assert not warning_logs, "Visualizer-related warning logs: " + "; ".join( + f"{r.name}: {r.getMessage()}" for r in warning_logs + ) + + +def _configure_sim_for_visualizer_test(env: CartpoleCameraEnv) -> None: + """Settings used by the previous smoke tests; keep RTX sensors enabled for camera paths.""" + env.sim.set_setting("/isaaclab/render/rtx_sensors", True) + env.sim._app_control_on_stop_handle = None # type: ignore[attr-defined] + + +def _find_free_tcp_port(host: str = "127.0.0.1") -> int: + """Ask OS for a currently free local TCP port.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.bind((host, 0)) + return int(sock.getsockname()[1]) + + +def _allocate_rerun_test_ports(host: str = "127.0.0.1") -> tuple[int, int]: + """Allocate distinct free ports for rerun web and gRPC endpoints.""" + grpc_port = _find_free_tcp_port(host) + web_port = _find_free_tcp_port(host) + while web_port == grpc_port: + web_port = _find_free_tcp_port(host) + return web_port, grpc_port + + +def _cartpole_integration_visualizer_camera_kwargs() -> dict[str, tuple[float, float, float]]: + """Eye/lookat for all :class:`~isaaclab.visualizers.visualizer_cfg.VisualizerCfg` subclasses in these tests.""" + return { + "eye": _CARTPOLE_INTEGRATION_VISUALIZER_EYE, + "lookat": _CARTPOLE_INTEGRATION_VISUALIZER_LOOKAT, + } + + +def _get_visualizer_cfg(visualizer_kind: str): + """Return (visualizer_cfg, expected_visualizer_cls) for the given visualizer kind.""" + cam = _cartpole_integration_visualizer_camera_kwargs() + if visualizer_kind == "newton": + __import__("newton") + nw, nh = _CARTPOLE_NEWTON_INTEGRATION_WINDOW_SIZE + return ( + NewtonVisualizerCfg( + headless=True, + window_width=nw, + window_height=nh, + randomly_sample_visible_envs=False, + **cam, + ), + NewtonVisualizer, + ) + if visualizer_kind == "viser": + __import__("newton") + __import__("viser") + port = _find_free_tcp_port(host="127.0.0.1") + return ( + ViserVisualizerCfg(open_browser=False, port=port, randomly_sample_visible_envs=False, **cam), + ViserVisualizer, + ) + if visualizer_kind == "rerun": + __import__("newton") + __import__("rerun") + web_port, grpc_port = _allocate_rerun_test_ports(host="127.0.0.1") + return ( + RerunVisualizerCfg( + bind_address="127.0.0.1", + open_browser=False, + web_port=web_port, + grpc_port=grpc_port, + randomly_sample_visible_envs=False, + **cam, + ), + RerunVisualizer, + ) + return KitVisualizerCfg(randomly_sample_visible_envs=False, **cam), KitVisualizer + + +def _get_physics_cfg(backend_kind: str): + """Return physics config and expected backend substring for the given backend kind.""" + if backend_kind == "physx": + __import__("isaaclab_physx") + preset = CartpolePhysicsCfg() + physics_cfg = getattr(preset, "physx", None) + if physics_cfg is None: + from isaaclab_physx.physics import PhysxCfg + + physics_cfg = PhysxCfg() + return physics_cfg, "physx" + if backend_kind == "newton": + __import__("newton") + __import__("isaaclab_newton") + preset = CartpolePhysicsCfg() + physics_cfg = getattr(preset, "newton_mjwarp", None) + if physics_cfg is None: + from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg + + physics_cfg = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=5, + nconmax=3, + cone="pyramidal", + impratio=1, + integrator="implicitfast", + ), + num_substeps=1, + debug_mode=False, + use_cuda_graph=True, + ) + return physics_cfg, "newton" + raise ValueError(f"Unknown backend: {backend_kind!r}") + + +def _assert_non_black_tensor(image_tensor: torch.Tensor, *, min_nonzero_pixels: int = 1) -> None: + """Assert camera-like tensor contains non-black pixels.""" + assert isinstance(image_tensor, torch.Tensor), f"Expected torch.Tensor, got {type(image_tensor)!r}" + assert image_tensor.numel() > 0, "Image tensor is empty." + finite_tensor = torch.where(torch.isfinite(image_tensor), image_tensor, torch.zeros_like(image_tensor)) + if finite_tensor.dtype.is_floating_point: + nonzero = torch.count_nonzero(torch.abs(finite_tensor) > 1e-6).item() + else: + nonzero = torch.count_nonzero(finite_tensor > 0).item() + assert nonzero >= min_nonzero_pixels, "Rendered frame appears black (no non-zero pixels)." + + +def _frame_to_numpy(frame) -> np.ndarray: + """Convert viewer ``get_frame()`` output (numpy, torch, or Warp array) to host ``numpy.ndarray``. + + ``np.asarray(wp.array)`` is unsafe: NumPy can trigger Warp indexing that raises at dimension edges. + """ + if isinstance(frame, np.ndarray): + return frame + if isinstance(frame, torch.Tensor): + return frame.detach().cpu().numpy() + if isinstance(frame, wp.array): + return wp.to_torch(frame).detach().cpu().numpy() + return np.asarray(frame) + + +def _assert_non_black_frame_array(frame) -> None: + """Assert viewer-captured frame has visible, non-black content.""" + frame_arr = _frame_to_numpy(frame) + assert frame_arr.size > 0, "Viewer returned an empty frame." + if frame_arr.ndim == 2: + color = frame_arr + else: + assert frame_arr.shape[-1] >= 3, f"Expected at least 3 channels, got shape {frame_arr.shape}." + color = frame_arr[..., :3] + finite = np.where(np.isfinite(color), color, 0) + assert np.count_nonzero(finite) > 0, "Viewer frame appears fully black." + + +def _frame_rgb_255_space(frame) -> np.ndarray: + """Return HxWx3 float in ~0–255 space for per-channel differencing.""" + arr = _frame_to_numpy(frame) + if arr.ndim == 2: + rgb = np.stack([arr, arr, arr], axis=-1) + else: + rgb = arr[..., :3] + rgb = np.asarray(rgb, dtype=np.float64) + # Normalized HDR buffers: scale so threshold matches (0,255) semantics. + if rgb.size > 0 and float(np.nanmax(rgb)) <= 1.0 + 1e-6: + rgb = rgb * 255.0 + return rgb + + +def _count_significantly_differing_pixels( + frame_a, + frame_b, + *, + channel_diff_threshold: float = _FRAME_MOTION_CHANNEL_DIFF_THRESHOLD, +) -> int: + """Count pixels where max(|ΔR|, |ΔG|, |ΔB|) >= *channel_diff_threshold* (0–255 space).""" + a = _frame_rgb_255_space(frame_a) + b = _frame_rgb_255_space(frame_b) + assert a.shape == b.shape, f"Frame shape mismatch for motion check: {a.shape} vs {b.shape}." + per_pixel_max = np.max(np.abs(a - b), axis=-1) + return int(np.count_nonzero(per_pixel_max >= channel_diff_threshold)) + + +def _assert_early_and_late_motion_frames_differ( + frames: list, + *, + channel_diff_threshold: float = _FRAME_MOTION_CHANNEL_DIFF_THRESHOLD, + min_differing_pixels: int = _FRAME_MOTION_MIN_DIFFERING_PIXELS, +) -> None: + """Fail if early vs late frames lack enough strongly differing pixels (stale/frozen bodies). + + Compares :data:`_MOTION_FRAME_EARLY_IDX` vs :data:`_MOTION_FRAME_LATE_IDX` (e.g. 2nd vs 60th capture). + + Voids/background stay near-identical; we only count pixels that change by at least + *channel_diff_threshold* on some channel (0–255). + """ + assert len(frames) >= _VIS_FRAME_TEST_STEPS, ( + f"Need at least {_VIS_FRAME_TEST_STEPS} frames for motion check, got {len(frames)}." + ) + i_early = _MOTION_FRAME_EARLY_IDX + i_late = _MOTION_FRAME_LATE_IDX + early_1 = i_early + 1 + late_1 = i_late + 1 + n_diff = _count_significantly_differing_pixels( + frames[i_early], frames[i_late], channel_diff_threshold=channel_diff_threshold + ) + assert n_diff >= min_differing_pixels, ( + f"Viewport captures #{early_1} and #{late_1} have too few strongly differing pixels " + f"({n_diff} < {min_differing_pixels}; threshold per channel={channel_diff_threshold} in 0–255 space). " + "Possible frozen or stale robot visualization." + ) + + +def _step_until_non_black_camera(env, actions: torch.Tensor, *, max_steps: int = _MAX_NON_BLACK_STEPS) -> None: + """Step env until the env's tiled camera RGB tensor is non-black, bounded by *max_steps*.""" + last_rgb = None + for _ in range(max_steps): + env.step(action=actions) + rgb = env._tiled_camera.data.output.get("rgb") + if rgb is None: + rgb = env._tiled_camera.data.output[env.cfg.tiled_camera.data_types[0]] + last_rgb = rgb + try: + _assert_non_black_tensor(rgb) + return + except AssertionError: + continue + _assert_non_black_tensor(last_rgb) + + +def _run_newton_viewer_frame_motion_test( + viewer, + *, + step_hook, + physics_kind: str, + viz_kind: str = "newton", +) -> None: + """Exactly ``_VIS_FRAME_TEST_STEPS`` sim steps; last frame non-black; early vs late motion check.""" + frames: list = [] + for _ in range(_VIS_FRAME_TEST_STEPS): + step_hook() + frames.append(viewer.get_frame()) + _assert_non_black_frame_array(frames[-1]) + _assert_early_and_late_motion_frames_differ(frames) + + +def _step_env_without_frame_check(env, actions: torch.Tensor, *, max_steps: int = _MAX_NON_BLACK_STEPS) -> None: + """Step the env to exercise visualizers that do not implement ``get_frame`` (e.g. Rerun, Viser).""" + for _ in range(max_steps): + env.step(action=actions) + + +def _build_rgb_annotator_for_camera( + camera_path: str, + *, + resolution: tuple[int, int] | None = None, +): + """Create CPU RGB annotator attached to a camera render product.""" + import omni.replicator.core as rep + + if resolution is None: + resolution = _CARTPOLE_KIT_INTEGRATION_RENDER_RESOLUTION + render_product = rep.create.render_product(camera_path, resolution=resolution) + annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu") + annotator.attach([render_product]) + return annotator, render_product + + +def _annotator_rgb_to_numpy(rgb_data) -> np.ndarray: + """Convert replicator annotator output to HxWx3 uint8 numpy array.""" + rgb_array = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape) + if rgb_array.size == 0: + return np.zeros((1, 1, 3), dtype=np.uint8) + return rgb_array[:, :, :3] + + +def _run_kit_viewport_frame_motion_test( + env, + kit_visualizer: KitVisualizer, + *, + physics_kind: str, + viz_kind: str = "kit", +) -> None: + """Exactly ``_VIS_FRAME_TEST_STEPS`` env steps; last Replicator frame non-black; early vs late motion check.""" + camera_path = getattr(kit_visualizer, "_controlled_camera_path", None) + assert camera_path, "Kit visualizer does not expose a controlled viewport camera path." + + annotator = None + render_product = None + try: + annotator, render_product = _build_rgb_annotator_for_camera(camera_path) + actions = torch.zeros((env.num_envs, env.action_space.shape[-1]), device=env.device) + frames: list = [] + for _ in range(_VIS_FRAME_TEST_STEPS): + env.step(action=actions) + rgb_data = annotator.get_data() + frames.append(_annotator_rgb_to_numpy(rgb_data)) + _assert_non_black_frame_array(frames[-1]) + _assert_early_and_late_motion_frames_differ(frames) + finally: + if annotator is not None and render_product is not None: + with contextlib.suppress(Exception): + annotator.detach([render_product]) + + +def _make_cartpole_camera_env(visualizer_kind: str, backend_kind: str) -> CartpoleCameraEnv: + """Create cartpole camera env configured with selected visualizer and physics backend.""" + env_cfg_root = CartpoleCameraPresetsEnvCfg() + env_cfg = getattr(env_cfg_root, "default", None) + if env_cfg is None: + env_cfg = getattr(type(env_cfg_root), "default", None) + if env_cfg is None: + raise RuntimeError( + "CartpoleCameraPresetsEnvCfg does not expose a 'default' preset config. " + f"Available attributes: {sorted(vars(env_cfg_root).keys())}" + ) + env_cfg = copy.deepcopy(env_cfg) + env_cfg.scene.num_envs = _CARTPOLE_INTEGRATION_NUM_ENVS + env_cfg.viewer.eye = _CARTPOLE_INTEGRATION_VISUALIZER_EYE + env_cfg.viewer.lookat = _CARTPOLE_INTEGRATION_VISUALIZER_LOOKAT + tw, th = _CARTPOLE_TILED_CAMERA_INTEGRATION_WH + env_cfg.tiled_camera.width = tw + env_cfg.tiled_camera.height = th + if isinstance(env_cfg.observation_space, list) and len(env_cfg.observation_space) >= 3: + env_cfg.observation_space = [th, tw, env_cfg.observation_space[2]] + env_cfg.seed = None + env_cfg.sim.physics, _ = _get_physics_cfg(backend_kind) + visualizer_cfg, _ = _get_visualizer_cfg(visualizer_kind) + env_cfg.sim.visualizer_cfgs = visualizer_cfg + return CartpoleCameraEnv(env_cfg) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize( + "backend_kind", + [ + # xfail: Kit visualizer + PhysX only (Newton backend uses skip below — separate CUDA issue). + pytest.param( + "physx", + marks=pytest.mark.xfail( + reason=("Kit visualizer + PhysX: TODO remove xfail when stale Fabric transforms bug in Kit is fixed"), + strict=False, + ), + ), + pytest.param( + "newton", + marks=pytest.mark.skip( + reason=( + "TODO: Kit visualizer + Newton physics + Isaac RTX tiled camera can hit CUDA illegal access " + "or bad GPU state. Repro: rl_games train Isaac-Cartpole-Camera-Presets-Direct-v0 " + "--enable_cameras presets=newton_mjwarp --viz kit. Re-enable when fixed." + ) + ), + ), + ], +) +def test_cartpole_kit_visualizer_replicator_viewport_rgb_motion( + backend_kind: str, caplog: pytest.LogCaptureFixture +) -> None: + """Kit + cartpole: Replicator RGB on viewport camera; last frame non-black; early vs late frame differ; logs.""" + env = None + try: + sim_utils.create_new_stage() + env = _make_cartpole_camera_env(visualizer_kind="kit", backend_kind=backend_kind) + _configure_sim_for_visualizer_test(env) + with caplog.at_level(logging.WARNING): + env.reset() + kit_visualizers = [viz for viz in env.sim.visualizers if isinstance(viz, KitVisualizer)] + assert kit_visualizers, "Expected an initialized Kit visualizer." + _run_kit_viewport_frame_motion_test(env, kit_visualizers[0], physics_kind=backend_kind) + _assert_no_visualizer_log_issues(caplog) + finally: + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("backend_kind", ["physx", "newton"]) +def test_cartpole_newton_visualizer_tiled_camera_rgb_non_black( + backend_kind: str, caplog: pytest.LogCaptureFixture +) -> None: + """Newton visualizer + cartpole: env tiled-camera RGB becomes non-black within a few steps; clean logs.""" + env = None + try: + sim_utils.create_new_stage() + env = _make_cartpole_camera_env(visualizer_kind="newton", backend_kind=backend_kind) + _configure_sim_for_visualizer_test(env) + with caplog.at_level(logging.WARNING): + env.reset() + actions = torch.zeros((env.num_envs, env.action_space.shape[-1]), device=env.device) + _step_until_non_black_camera(env, actions, max_steps=_MAX_NON_BLACK_STEPS) + _assert_no_visualizer_log_issues(caplog) + finally: + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("backend_kind", ["physx", "newton"]) +def test_cartpole_newton_visualizer_viewergl_rgb_motion(backend_kind: str, caplog: pytest.LogCaptureFixture) -> None: + """Newton GL (``ViewerGL.get_frame``): full motion steps, last frame non-black; early vs late differ; logs.""" + env = None + try: + sim_utils.create_new_stage() + env = _make_cartpole_camera_env(visualizer_kind="newton", backend_kind=backend_kind) + _configure_sim_for_visualizer_test(env) + with caplog.at_level(logging.WARNING): + env.reset() + actions = torch.zeros((env.num_envs, env.action_space.shape[-1]), device=env.device) + newton_visualizers = [viz for viz in env.sim.visualizers if isinstance(viz, NewtonVisualizer)] + assert newton_visualizers, "Expected an initialized Newton visualizer." + viewer = getattr(newton_visualizers[0], "_viewer", None) + assert viewer is not None, "Newton viewer was not created." + + def _step_env() -> None: + env.step(action=actions) + + _run_newton_viewer_frame_motion_test(viewer, step_hook=_step_env, physics_kind=backend_kind) + _assert_no_visualizer_log_issues(caplog) + finally: + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("backend_kind", ["physx", "newton"]) +def test_cartpole_rerun_visualizer_smoke_steps_and_logs(backend_kind: str, caplog: pytest.LogCaptureFixture) -> None: + """Rerun + cartpole: visualizer and viewer initialize; env steps exercise the pipeline; clean logs. + + Rerun does not expose a per-frame RGB API like ``get_frame``, so we do not assert pixel content. + """ + env = None + try: + sim_utils.create_new_stage() + env = _make_cartpole_camera_env(visualizer_kind="rerun", backend_kind=backend_kind) + _configure_sim_for_visualizer_test(env) + with caplog.at_level(logging.WARNING): + env.reset() + actions = torch.zeros((env.num_envs, env.action_space.shape[-1]), device=env.device) + rerun_visualizers = [viz for viz in env.sim.visualizers if isinstance(viz, RerunVisualizer)] + assert rerun_visualizers, "Expected an initialized Rerun visualizer." + assert getattr(rerun_visualizers[0], "_viewer", None) is not None, "Rerun viewer was not created." + _step_env_without_frame_check(env, actions, max_steps=_MAX_NON_BLACK_STEPS) + _assert_no_visualizer_log_issues(caplog) + finally: + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize("backend_kind", ["physx", "newton"]) +def test_cartpole_viser_visualizer_smoke_steps_and_logs(backend_kind: str, caplog: pytest.LogCaptureFixture) -> None: + """Viser + cartpole: visualizer and viewer initialize; env steps exercise the pipeline; clean logs. + + No per-frame RGB assertion (Viser does not mirror the Newton ``get_frame`` path used elsewhere). + """ + env = None + try: + sim_utils.create_new_stage() + env = _make_cartpole_camera_env(visualizer_kind="viser", backend_kind=backend_kind) + _configure_sim_for_visualizer_test(env) + with caplog.at_level(logging.WARNING): + env.reset() + actions = torch.zeros((env.num_envs, env.action_space.shape[-1]), device=env.device) + viser_visualizers = [viz for viz in env.sim.visualizers if isinstance(viz, ViserVisualizer)] + assert viser_visualizers, "Expected an initialized Viser visualizer." + assert getattr(viser_visualizers[0], "_viewer", None) is not None, "Viser viewer was not created." + _step_env_without_frame_check(env, actions, max_steps=_MAX_NON_BLACK_STEPS) + _assert_no_visualizer_log_issues(caplog) + finally: + if env is not None: + env.close() + else: + SimulationContext.clear_instance() + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/tools/changelog/cli.py b/tools/changelog/cli.py new file mode 100644 index 000000000000..16ad551b4ca3 --- /dev/null +++ b/tools/changelog/cli.py @@ -0,0 +1,1009 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Manage changelog fragments — single entry point with two subcommands. + +Each PR drops a fragment under ``source//changelog.d/.rst``. +The slug is any short, unique name — the contributor's branch name (with +``/`` replaced by ``-``) is the recommended default. The file mirrors +the RST that will appear in the changelog — one or more section headings +(``Added``, ``Changed``, ``Deprecated``, ``Removed``, ``Fixed``) each +underlined with ``^``. The **filename suffix** declares the bump tier: + +- ``.rst`` — patch bump. +- ``.minor.rst`` — minor bump. +- ``.major.rst`` — major bump. +- ``.skip`` — no entry, no bump. + +When a batch compiles together, the highest declared bump wins for the +package (one ``.major.rst`` anywhere → major). + +Subcommands: + + check PR gate. Verifies every modified package has a valid fragment. + compile Roll accumulated fragments into ``CHANGELOG.rst`` and bump + ``extension.toml``. Run by the nightly workflow + (``.github/workflows/nightly-changelog.yml``) on a cron and + by maintainers manually when cutting a release. + +Usage: + + # ── check ───────────────────────────────────────────────────── + # CI invocation on every pull_request: + cli.py check + + # ── compile ─────────────────────────────────────────────────── + # Normal release-time invocation — bump every managed package + # from accumulated fragments, write entries, delete fragments: + cli.py compile --all + + # Preview only (no writes, no deletes): + cli.py compile --all --dry-run + + # Pin one package to a specific version (single-package only — + # each managed package has its own version trajectory): + cli.py compile --package isaaclab --version 4.7.0 + + # Preview against a worked example without touching real packages: + cli.py compile --package isaaclab --dry-run \\ + --fragments-dir tools/changelog/test/integration/02_minor_bump/fragments + +For big version jumps (e.g. ``2.1`` → ``4.7``) edit +``source//config/extension.toml`` and prepend a manual entry to +``source//docs/CHANGELOG.rst``. The compiler is for fragment-driven +incremental bumps, not for jumps. +""" + +from __future__ import annotations + +import argparse +import re +import subprocess +import sys +from dataclasses import dataclass, field +from datetime import date +from functools import cached_property +from pathlib import Path +from typing import ClassVar + +# Walk three levels up: tools/changelog/cli.py -> tools/changelog/ -> tools/ -> repo root. +REPO_ROOT = Path(__file__).parent.parent.parent +PACKAGES_ROOT = REPO_ROOT / "source" + +# Recognised fragment filename patterns. ```` is any short identifier +# the contributor chose — typically their branch name with ``/`` replaced by +# ``-``. The slug must not contain ``.`` (reserved for the tier suffix) or +# ``/`` (path separator), but otherwise mirrors what git allows in a ref name. +# These regexes live at module level because Fragment, FragmentBatch, and +# PRDiff all match against them — they are the wire-format contract between +# contributors and the gate. +FRAGMENT_RE = re.compile(r"^(?P[^./][^./]*)(?:\.(?Pminor|major))?\.rst$") +SKIP_RE = re.compile(r"^(?P[^./][^./]*)\.skip$") + + +def _display_path(p: Path) -> str: + """Pretty-print a Path. Strips ``REPO_ROOT`` if ``p`` is inside the repo, + falls back to the absolute path otherwise (``--fragments-dir`` may + legitimately point at an external directory like ``/tmp/...``). + + Lives at module level because both :class:`Package` (writing on-disk + paths) and :class:`FragmentBatch` (warning about external fragment + paths) use it. + """ + try: + return str(p.relative_to(REPO_ROOT)) + except ValueError: + return str(p) + + +# --------------------------------------------------------------------------- +# Domain objects +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class Version: + """A semver-style version string ``X.Y.Z`` (optionally suffixed with ``.devN``). + + Models a version as a value object: immutable, comparable by its text, + knows how to produce a bumped successor. PEP 440 ``.devN`` suffixes + are tolerated on the way *in* (stripped before bumping) but never + written back out — :meth:`bumped` always returns a clean ``X.Y.Z``. + + Construction validates the format up front so that an invalid + ``--version`` flag from the CLI fails fast instead of silently writing + a malformed entry to ``CHANGELOG.rst``. + """ + + # ``X.Y.Z`` with an optional PEP 440 ``.devN`` suffix. The suffix is + # tolerated on the way *in* (e.g. when reading a stale dev version out + # of an existing ``extension.toml``) but :meth:`bumped` always strips + # it before producing a successor. + _SEMVER_RE: ClassVar[re.Pattern[str]] = re.compile(r"^\d+\.\d+\.\d+(\.dev\d+)?$") + + text: str + + def __post_init__(self) -> None: + if not self._SEMVER_RE.match(self.text): + raise ValueError(f"Invalid version {self.text!r}; expected X.Y.Z (optionally suffixed with .devN)") + + def bumped(self, tier: str) -> Version: + """Return a new Version one tier ahead of this one. + + ``tier`` is ``'major'``, ``'minor'``, or ``'patch'``. Major zeros + the minor and patch components; minor zeros patch. Any ``.devN`` + suffix on the current version is stripped before bumping. + """ + # __post_init__ guarantees the format, so this split is safe. + parts = self.text.split(".dev")[0].split(".") + if tier == "major": + return Version(f"{int(parts[0]) + 1}.0.0") + if tier == "minor": + return Version(f"{parts[0]}.{int(parts[1]) + 1}.0") + return Version(f"{parts[0]}.{parts[1]}.{int(parts[2]) + 1}") + + def __str__(self) -> str: + return self.text + + +@dataclass(frozen=True) +class Fragment: + """One fragment file in a package's ``changelog.d/`` (or an examples dir). + + A :class:`Fragment` instance is just a path plus methods that interpret + it as a changelog fragment. ``.gitkeep`` and ``*.skip`` files should + not be wrapped — only files matching :data:`FRAGMENT_RE`. + """ + + path: Path + + @property + def name(self) -> str: + return self.path.name + + @cached_property + def _match(self) -> re.Match[str] | None: + return FRAGMENT_RE.match(self.name) + + @property + def is_valid_filename(self) -> bool: + return self._match is not None + + @property + def bump(self) -> str: + """Bump tier declared by the filename suffix (defaults to ``'patch'``).""" + if self._match and self._match.group("bump"): + return self._match.group("bump") + return "patch" + + def parse(self) -> dict[str, list[str]]: + """Return ``{section: [lines]}`` from this fragment's content. + + Lines are kept as-is (including trailing newlines) so the compiled + output is byte-for-byte identical to what the contributor wrote. A + section heading is a non-empty line followed by ``^`` underline of + equal-or-greater length. + """ + text = self.path.read_text(encoding="utf-8") + lines = text.splitlines(keepends=True) + sections: dict[str, list[str]] = {} + current: str | None = None + buf: list[str] = [] + + i = 0 + while i < len(lines): + raw = lines[i] + stripped = raw.rstrip("\n") + if ( + i + 1 < len(lines) + and stripped + and re.fullmatch(r"\^+", lines[i + 1].rstrip("\n")) + and len(lines[i + 1].rstrip("\n")) >= len(stripped) + ): + if current is not None: + sections[current] = self._strip_trailing_blank(buf) + current = stripped + buf = [] + i += 2 # skip heading + underline + if i < len(lines) and not lines[i].strip(): + i += 1 + continue + if current is not None: + buf.append(raw) + i += 1 + + if current is not None: + sections[current] = self._strip_trailing_blank(buf) + + return sections + + @staticmethod + def _strip_trailing_blank(lines: list[str]) -> list[str]: + """Drop trailing blank lines from a section's raw line buffer.""" + while lines and not lines[-1].strip(): + lines.pop() + return lines + + @staticmethod + def parse_slug(filename: str) -> str | None: + """Return the slug declared by a fragment / skip filename, or ``None``. + + Used by :class:`PRDiff` to detect collisions between an added + fragment's slug and an existing fragment in the same directory, + without needing to materialise a :class:`Fragment` (the diff entry + may not exist on disk yet during a gate run). + """ + m = FRAGMENT_RE.match(filename) or SKIP_RE.match(filename) + return m.group("slug") if m else None + + def merge_time(self) -> int: + """Unix timestamp of the merge commit that introduced this fragment. + + Uses ``git log --diff-filter=A --first-parent`` to follow develop's + first-parent history, so the timestamp reflects when the PR's merge + commit landed (not the feature-branch commit that originally added + the file). Falls back to the file's most recent commit time when + not yet in first-parent history (e.g. local dry-runs on a feature + branch), and ultimately to ``0`` if git is unavailable. + """ + for cmd in ( + ["git", "log", "--diff-filter=A", "--first-parent", "-1", "--format=%ct", "--", str(self.path)], + ["git", "log", "-1", "--format=%ct", "--", str(self.path)], + ): + try: + result = subprocess.run(cmd, capture_output=True, text=True, check=True, cwd=REPO_ROOT) + ts = result.stdout.strip() + if ts: + return int(ts) + except (subprocess.CalledProcessError, ValueError): + continue + return 0 + + def validate(self) -> str | None: + """Return a human-readable error string if malformed, else ``None``. + + Filename rules: must match :data:`FRAGMENT_RE` (``.gitkeep`` and + ``*.skip`` files are filtered out at :meth:`FragmentBatch.from_dir` + level and never reach this method). Content rules (for ``*.rst`` + fragments only): non-empty file with at least one valid section + heading and at least one bullet point. + """ + if not self.is_valid_filename: + return ( + "invalid filename — must be .rst, .minor.rst, " + ".major.rst, or .skip (slug = your branch name " + "with `/` replaced by `-`, no dots)" + ) + if not self.path.exists(): + # Deleted fragments don't need validating (consumed by a previous compile). + return None + text = self.path.read_text(encoding="utf-8") + if not text.strip(): + return "fragment is empty" + sections = self.parse() + if not sections: + return ( + "no recognised section headings (expected one or more of " + "Added / Changed / Deprecated / Removed / Fixed, each underlined " + "with carets ``^`` of equal-or-greater length)" + ) + # Every declared section must carry at least one bullet — otherwise + # the compiled output emits a heading with no body, which is both + # ugly and almost certainly a contributor authoring mistake (typed + # the heading, forgot the bullet). + empty = [s for s, lines in sections.items() if not any(line.lstrip().startswith("*") for line in lines)] + if empty: + return ( + f"section(s) {', '.join(repr(s) for s in empty)} have no bullet entries — " + "use ``* `` to start each entry, or remove the heading" + ) + return None + + +@dataclass(frozen=True) +class FragmentBatch: + """A collection of fragments collected from a directory. + + ``valid`` are :class:`Fragment` instances sorted by merge time + (oldest first). ``invalid`` are paths that don't match any recognised + filename pattern — surfaced so the caller can warn or fail. ``.skip`` + and ``.gitkeep`` files are tolerated but excluded from both lists. + + Holds the pure-data class methods that turn a batch (or a synthetic + list of bumps / sections) into a compiled changelog entry. The + instance methods (:meth:`aggregate_bump`, :meth:`merged_sections`, + :meth:`compile_to_entry`) read the batch's own state; the + underscore-prefixed static methods (:meth:`_aggregate`, etc.) are + the underlying pure transformations and are used directly by tests + that exercise edge cases without a real fragments directory. + """ + + # Canonical ordering of section headings in compiled output. Anything + # not listed here keeps insertion order *after* these. + _SECTION_ORDER: ClassVar[list[str]] = ["Added", "Changed", "Deprecated", "Removed", "Fixed"] + + # Strict ordering of bump tiers (``major`` strictly outranks ``minor`` + # outranks ``patch``). Unrecognised tiers sort below ``patch``. + _BUMP_RANK: ClassVar[dict[str, int]] = {"patch": 0, "minor": 1, "major": 2} + + valid: list[Fragment] + invalid: list[Path] + skip_paths: list[Path] = field(default_factory=list) + + # ---- Construction -------------------------------------------------- + + @classmethod + def from_dir(cls, fragment_dir: Path) -> FragmentBatch: + if not fragment_dir.is_dir(): + return cls([], []) + valid: list[Fragment] = [] + invalid: list[Path] = [] + skips: list[Path] = [] + for p in fragment_dir.iterdir(): + if p.is_dir() or p.name == ".gitkeep": + continue + if SKIP_RE.match(p.name): + skips.append(p) + continue + f = Fragment(p) + if f.is_valid_filename: + valid.append(f) + else: + invalid.append(p) + # Sort by merge time, breaking ties on filename so the compiled output + # is deterministic when fragments share a merge commit (or when none + # are in git history yet — e.g. a local dry-run against test fixtures). + valid.sort(key=lambda f: (f.merge_time(), f.name)) + return cls(valid, invalid, skips) + + # ---- Queries against this batch's state --------------------------- + + def aggregate_bump(self) -> str: + """Highest bump tier declared by fragments that parsed to content. + + Empty fragments (which the compiler warns about and skips) are + excluded so they don't influence the version. Defaults to + ``patch`` if nothing parsed. + """ + return self._aggregate([f.bump for f, _ in self.parsed()]) + + def parsed(self) -> list[tuple[Fragment, dict[str, list[str]]]]: + """Return ``(fragment, sections)`` pairs, dropping fragments that parse empty.""" + return [(f, s) for f, s in ((f, f.parse()) for f in self.valid) if s] + + def merged_sections(self) -> dict[str, list[str]]: + """Cross-fragment merged section map for this batch.""" + return self._merge_sections([s for _, s in self.parsed()]) + + def compile_to_entry( + self, + current_version: Version, + *, + explicit_version: Version | None = None, + ) -> tuple[Version, str, str]: + """Return ``(new_version, bump_label, entry_text)`` for this batch. + + ``new_version`` is either ``explicit_version`` verbatim or the + result of bumping ``current_version`` by the aggregated tier. + ``bump_label`` is a human-readable suffix like ``" (bump: minor)"`` + for log lines (empty when ``explicit_version`` is used). + ``entry_text`` is the rendered RST block ready to prepend to a + ``CHANGELOG.rst``. Pure computation — no I/O. + """ + if explicit_version is not None: + new_version = explicit_version + bump_label = "" + else: + chosen_bump = self.aggregate_bump() + new_version = current_version.bumped(chosen_bump) + bump_label = f" (bump: {chosen_bump})" + entry = self._format_entry(new_version.text, self.merged_sections()) + return new_version, bump_label, entry + + # ---- Cleanup ------------------------------------------------------- + + def delete_all(self) -> tuple[int, int]: + """Delete every consumed fragment + skip file. Returns ``(n_frag, n_skip)``.""" + n_frag = self.delete_valid() + n_skip = self.delete_skips() + return n_frag, n_skip + + def delete_valid(self) -> int: + for f in self.valid: + f.path.unlink() + return len(self.valid) + + def delete_skips(self) -> int: + for p in self.skip_paths: + p.unlink() + return len(self.skip_paths) + + # ---- Pure transformations (the data class methods) ---------------- + # Static so callers and tests can exercise them with synthetic + # primitives — no FragmentBatch instance needed when the question + # is "given these tiers, which wins?" or "how do these dicts merge?" + + @classmethod + def _aggregate(cls, bumps: list[str]) -> str: + """Highest-ranked bump from ``bumps`` (``major > minor > patch``). + + An empty list defaults to ``'patch'``. + """ + if not bumps: + return "patch" + return max(bumps, key=lambda b: cls._BUMP_RANK.get(b, -1)) + + @staticmethod + def _merge_sections(fragments: list[dict[str, list[str]]]) -> dict[str, list[str]]: + """Merge multiple parsed fragments into a single section map. + + Bullets from different fragments that share a section heading are + concatenated directly (no blank line between them) to match the + dominant style in IsaacLab's existing ``CHANGELOG.rst`` files. + """ + merged: dict[str, list[str]] = {} + for frag in fragments: + for section, lines in frag.items(): + if section not in merged: + merged[section] = list(lines) + else: + merged[section].extend(lines) + return merged + + @classmethod + def _format_entry(cls, version: str, sections: dict[str, list[str]]) -> str: + """Return a complete RST version entry, ready to prepend to ``CHANGELOG.rst``. + + Sections appear in :attr:`_SECTION_ORDER` (Added, Changed, + Deprecated, Removed, Fixed). Anything else keeps insertion order + *after* the canonical ones. + """ + today = date.today().strftime("%Y-%m-%d") + heading = f"{version} ({today})" + out = [heading, "~" * len(heading), ""] + + ordered = [s for s in cls._SECTION_ORDER if s in sections] + extras = [s for s in sections if s not in cls._SECTION_ORDER] + + for section in ordered + extras: + out.append(section) + out.append("^" * len(section)) + out.append("") + for line in sections[section]: + out.append(line.rstrip("\n")) + out.append("") + + return "\n".join(out) + "\n" + + +@dataclass(frozen=True) +class Package: + """A source// directory the changelog tool can manage. + + A package is "managed" if it has both a ``config/extension.toml`` (the + version file the compiler bumps) and a ``docs/CHANGELOG.rst`` (the + file the compiler updates). :meth:`discover` returns only managed + packages; instances created directly may not be managed (use + :attr:`is_managed`). + """ + + root: Path + + @property + def name(self) -> str: + return self.root.name + + @property + def changelog_path(self) -> Path: + return self.root / "docs" / "CHANGELOG.rst" + + @property + def toml_path(self) -> Path: + return self.root / "config" / "extension.toml" + + @property + def default_fragment_dir(self) -> Path: + return self.root / "changelog.d" + + @property + def is_managed(self) -> bool: + return self.toml_path.is_file() and self.changelog_path.is_file() + + def current_version(self) -> Version: + for line in self.toml_path.read_text(encoding="utf-8").splitlines(): + m = re.match(r'^version\s*=\s*"([^"]+)"', line) + if m: + return Version(m.group(1)) + raise ValueError(f"No version field found in {self.toml_path}") + + def write_changelog_entry(self, entry: str, *, dry_run: bool) -> None: + text = self.changelog_path.read_text(encoding="utf-8") + m = re.search(r"^Changelog\n-+\s*\n\s*\n", text, re.MULTILINE) + if not m: + raise ValueError(f"Could not locate changelog header in {self.changelog_path}") + updated = text[: m.end()] + entry + "\n" + text[m.end() :] + if dry_run: + print(f"\n{'=' * 60}") + print(f"DRY RUN — would write to {_display_path(self.changelog_path)}") + print(f"{'=' * 60}") + print(entry) + else: + self.changelog_path.write_text(updated, encoding="utf-8") + + def write_version(self, new_version: Version, *, dry_run: bool) -> None: + text = self.toml_path.read_text(encoding="utf-8") + updated = re.sub(r'^version\s*=\s*"[^"]+"', f'version = "{new_version}"', text, flags=re.MULTILINE) + if dry_run: + print(f'DRY RUN — would set version = "{new_version}" in {_display_path(self.toml_path)}') + else: + self.toml_path.write_text(updated, encoding="utf-8") + + @classmethod + def from_name(cls, name: str, packages_root: Path = PACKAGES_ROOT) -> Package: + return cls(packages_root / name) + + @classmethod + def discover(cls, packages_root: Path = PACKAGES_ROOT) -> list[Package]: + """Return all managed packages under ``packages_root``, sorted by name.""" + if not packages_root.is_dir(): + return [] + return sorted( + (cls(child) for child in packages_root.iterdir() if child.is_dir() and cls(child).is_managed), + key=lambda p: p.name, + ) + + def compile( + self, + *, + fragments_dir: Path | None = None, + explicit_version: Version | None = None, + dry_run: bool = False, + ) -> bool: + """Compile fragments for this package. Returns True if any were compiled. + + There are exactly two modes: ``dry_run=True`` previews and writes + nothing; ``dry_run=False`` writes the new entry, bumps the version, + **and** deletes the consumed fragments. There is deliberately no + third "write but keep fragments" mode — leaving fragments in place + after a real compile is a footgun (the next compile would re-emit + them as a duplicate version block). + + Args: + fragments_dir: Read fragments from here instead of + :attr:`default_fragment_dir`. Useful for previewing against + example fixtures. + explicit_version: Pin the new version to this string (skips the + per-fragment bump inference). + dry_run: Preview only — no files are written or deleted. + """ + batch = FragmentBatch.from_dir(self._resolve_fragments_dir(fragments_dir)) + + for p in batch.invalid: + print( + f" WARNING: {_display_path(p)} does not match any recognised fragment " + "pattern (.rst, .minor.rst, .major.rst, .skip) — skipping.", + file=sys.stderr, + ) + + if not batch.valid: + if batch.skip_paths: + n = len(batch.skip_paths) + if dry_run: + print(f" {self.name}: would clean {n} stale skip file(s).") + else: + batch.delete_skips() + print(f" {self.name}: cleaned {n} stale skip file(s).") + else: + print(f" {self.name}: no fragments, skipping.") + return False + + # Apply the same content-validation rules the PR gate uses, so a + # malformed fragment that somehow reached this package (e.g. a + # stale fragment that predates a content-rule tightening, or a + # locally-edited file) doesn't silently produce a half-empty + # version block. Runs every fragment that survived filename + # validation in ``from_dir``. + validation_errors = [(f, err) for f in batch.valid if (err := f.validate()) is not None] + if validation_errors: + for f, err in validation_errors: + print(f" ERROR: {_display_path(f.path)}: {err}", file=sys.stderr) + raise ValueError( + f"{self.name}: {len(validation_errors)} fragment(s) failed content validation; " + "fix or remove them before compiling." + ) + + parsed_pairs = batch.parsed() + if not parsed_pairs: + print(f" {self.name}: all fragments empty after parsing, skipping.") + return False + + new_version, bump_label, entry = batch.compile_to_entry( + self.current_version(), explicit_version=explicit_version + ) + print(f" {self.name}: {len(parsed_pairs)} fragment(s) → version {new_version}{bump_label}") + + if not self.changelog_path.exists(): + # Should never happen with managed packages discovered via + # ``Package.discover()`` — defensive check for callers that + # construct a ``Package`` directly with an unmanaged root. + raise ValueError( + f"{_display_path(self.changelog_path)} does not exist; " + f"package {self.name!r} is not managed (missing CHANGELOG.rst)." + ) + self.write_changelog_entry(entry, dry_run=dry_run) + self.write_version(new_version, dry_run=dry_run) + + if not dry_run: + n_frag, n_skip = batch.delete_all() + msg = f" {self.name}: deleted {n_frag} fragment(s)" + if n_skip: + msg += f" and {n_skip} skip file(s)" + print(msg + ".") + + return True + + def _resolve_fragments_dir(self, override: Path | None) -> Path: + """Pick the directory ``compile`` should read fragments from. + + ``None`` means "use this package's own ``changelog.d/``"; an + absolute path is used as-is; a relative path is resolved against + ``REPO_ROOT`` so callers can pass things like + ``tools/changelog/test/integration/01_patch_bump/fragments`` without + worrying about the cwd. + """ + if override is None: + return self.default_fragment_dir + return override if override.is_absolute() else (REPO_ROOT / override).resolve() + + +@dataclass(frozen=True) +class PRDiff: + """A snapshot of "what this PR changed against its base branch." + + Wraps two views from the same git diff: ``changed`` (any file modified + or added) and ``added`` (the strict subset that's new on this branch). + Tests construct ``PRDiff`` directly with synthetic sets; + :meth:`from_git` runs the real ``git diff`` for production use. + """ + + changed: set[str] + added: set[str] + + @classmethod + def from_git(cls, base_ref: str) -> PRDiff: + """Run ``git diff`` against ``origin/...HEAD`` to populate the diff.""" + + def _diff(extra_args: list[str]) -> set[str]: + result = subprocess.run( + ["git", "diff", "--name-only", *extra_args, f"origin/{base_ref}...HEAD"], + capture_output=True, + text=True, + check=True, + cwd=REPO_ROOT, + ) + return {f for f in result.stdout.splitlines() if f} + + return cls(changed=_diff([]), added=_diff(["--diff-filter=A"])) + + def evaluate( + self, + packages: list[Package], + ) -> tuple[list[str], list[tuple[str, str]]]: + """Apply the PR-gate rules and return ``(missing_packages, invalid_fragments)``. + + Rules: + + 1. **Immutability** — every fragment file in the diff must be in + ``added`` (added on this branch). Modifying or renaming an existing + fragment is rejected with a hint to add a new one instead. + + 2. **Content validity** — every added ``*.rst`` fragment must parse + (recognised section headings + at least one bullet). ``.skip`` and + ``.gitkeep`` are exempt. + + 3. **Slug uniqueness** — within a package's ``changelog.d/``, no two + fragments may share the same slug. If an added fragment's slug + collides with an existing or co-added fragment, fail with a hint + to rename (e.g. append ``-2``). + + 4. **Required fragment per touched package** — for each managed + package the PR touches in ``source/`` (outside ``changelog.d/``), + the PR must *add* at least one valid fragment to that package's + ``changelog.d/``. Chained PRs (parent PR's fragment shows up in + the child's diff) naturally satisfy this — slug uniqueness is + the only constraint that matters. + """ + missing: list[str] = [] + invalid_fragments: list[tuple[str, str]] = [] + + for pkg in packages: + pkg_prefix = f"source/{pkg.name}/" + changelog_dir = f"source/{pkg.name}/changelog.d/" + + source_changed = [f for f in self.changed if f.startswith(pkg_prefix) and not f.startswith(changelog_dir)] + fragment_changes = [f for f in self.changed if f.startswith(changelog_dir)] + + # Map *pre-existing* fragments in the package's changelog.d/ by slug, + # for the uniqueness check below. The CI checkout contains both + # base-branch fragments and the PR's additions side by side, so we + # must explicitly exclude added files — otherwise an added file can + # overwrite the entry for a colliding pre-existing fragment with + # the same slug, hiding the very collision we're trying to detect. + # Skip ``.gitkeep`` and unrecognised filenames — they can't collide. + added_basenames = {Path(f).name for f in self.added if f.startswith(changelog_dir)} + existing_slugs: dict[str, str] = {} + existing_dir = pkg.default_fragment_dir + if existing_dir.is_dir(): + for p in existing_dir.iterdir(): + if p.is_dir() or p.name == ".gitkeep" or p.name in added_basenames: + continue + slug = Fragment.parse_slug(p.name) + if slug is not None: + existing_slugs[slug] = p.name + + added_slugs: dict[str, str] = {} + for f in fragment_changes: + path = Path(f) + if path.name == ".gitkeep": + continue + + # Rule 1: immutability — modifying an existing fragment is forbidden. + if f not in self.added: + invalid_fragments.append( + ( + f, + "fragments are immutable — add a new fragment with a different slug " + "instead of editing an existing one", + ) + ) + continue + + # Rule 2: content validity (only for *.rst, not *.skip). + if not SKIP_RE.match(path.name): + err = Fragment(REPO_ROOT / f).validate() + if err: + invalid_fragments.append((f, err)) + continue + + # Rule 3: slug uniqueness within the package's changelog.d/. + slug = Fragment.parse_slug(path.name) + if slug is None: + # Filename validation already flagged this above for *.rst, + # but a malformed *.skip would slip through. Surface it. + invalid_fragments.append( + (f, "invalid filename — must be .rst, .minor.rst, .major.rst, or .skip") + ) + continue + if slug in existing_slugs and existing_slugs[slug] != path.name: + invalid_fragments.append( + ( + f, + f"slug {slug!r} collides with existing fragment " + f"{existing_slugs[slug]!r} — rename to {slug}-2 (or any unused slug)", + ) + ) + continue + if slug in added_slugs and added_slugs[slug] != path.name: + invalid_fragments.append( + ( + f, + f"slug {slug!r} collides with another added fragment " + f"{added_slugs[slug]!r} — rename one to {slug}-2 (or any unused slug)", + ) + ) + continue + added_slugs[slug] = path.name + + if not source_changed: + continue + + # Rule 4: this PR must add at least one valid fragment for the package. + owned = [ + f + for f in fragment_changes + if f in self.added and (FRAGMENT_RE.match(Path(f).name) or SKIP_RE.match(Path(f).name)) + ] + if not owned: + missing.append(pkg.name) + + return missing, invalid_fragments + + +# --------------------------------------------------------------------------- +# Subcommand handlers +# --------------------------------------------------------------------------- + + +def cmd_compile(args: argparse.Namespace, parser: argparse.ArgumentParser) -> int: + if args.fragments_dir is not None and args.all: + parser.error("--fragments-dir requires --package (it cannot apply to all packages at once)") + if args.version is not None and args.all: + parser.error( + "--version requires --package (each managed package has its own version trajectory; " + "pin one with --package )" + ) + # Validate ``--version`` shape up front so a typo like ``--version 4.7`` + # fails at argument parsing instead of silently writing ``4.7`` into + # ``CHANGELOG.rst`` and ``extension.toml``. + explicit_version: Version | None = None + if args.version is not None: + try: + explicit_version = Version(args.version) + except ValueError as e: + parser.error(f"--version: {e}") + + if args.package: + pkg = Package.from_name(args.package) + if not pkg.root.is_dir(): + parser.error(f"--package {args.package!r}: directory not found at {pkg.root}") + if not pkg.is_managed: + parser.error( + f"--package {args.package!r} is not managed: missing config/extension.toml or " + f"docs/CHANGELOG.rst at {pkg.root}. Run with --all to see the discovered list." + ) + packages = [pkg] + else: + packages = Package.discover() + + any_compiled = False + for pkg in packages: + try: + compiled = pkg.compile( + fragments_dir=args.fragments_dir, + explicit_version=explicit_version, + dry_run=args.dry_run, + ) + except (FileNotFoundError, ValueError) as e: + print(f" ERROR: {e}", file=sys.stderr) + return 1 + any_compiled = any_compiled or compiled + + if not any_compiled: + print("No fragments found in any package.") + return 0 + + +def cmd_check(args: argparse.Namespace, _parser: argparse.ArgumentParser) -> int: + try: + diff = PRDiff.from_git(args.base_ref) + except subprocess.CalledProcessError as e: + print(f"ERROR: git diff failed: {e.stderr}", file=sys.stderr) + return 1 + + missing, invalid_fragments = diff.evaluate(Package.discover()) + + if invalid_fragments: + print("::error::Invalid changelog fragment(s) in this PR:") + for path, reason in invalid_fragments: + print(f" • {path}") + print(f" → {reason}") + print() + + if missing: + print("::error::Missing changelog fragments for the following packages:") + for pkg_name in missing: + print(f" • {pkg_name}") + print(f" → add source/{pkg_name}/changelog.d/.rst (patch bump)") + print(f" → or source/{pkg_name}/changelog.d/.minor.rst (minor bump)") + print(f" → or source/{pkg_name}/changelog.d/.major.rst (major bump)") + print(f" → or source/{pkg_name}/changelog.d/.skip (no entry, no bump)") + print() + print("Slug = your branch name with `/` replaced by `-` (or any short, unique name).") + print() + print("Fragment format (source//changelog.d/[.minor|.major].rst):") + print() + print(" Added") + print(" ^^^^^") + print() + print(" * Added :class:`~pkg.Foo` for feature X.") + print() + print(" Fixed") + print(" ^^^^^") + print() + print(" * Fixed edge case in :meth:`~pkg.Foo.bar`.") + print() + print("See AGENTS.md ## Changelog for full guidance.") + + if invalid_fragments or missing: + return 1 + + print("✓ All modified packages have valid changelog fragments.") + return 0 + + +# --------------------------------------------------------------------------- +# CLI entry point +# --------------------------------------------------------------------------- + + +def _build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + # The module docstring carries the full usage walkthrough — surfacing + # it as the parser description means ``cli.py --help`` shows the same + # guidance someone reading the source would see. + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + sub = parser.add_subparsers(dest="cmd", required=True, metavar="{compile,check}") + + p_compile = sub.add_parser( + "compile", + help="Compile fragments into CHANGELOG.rst (maintainer release-time tool).", + description="Compile accumulated fragments into per-package CHANGELOG.rst entries and bump extension.toml.", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_compile.set_defaults(func=cmd_compile) + + # ── Target: which packages to compile (required, mutually exclusive) ── + target = p_compile.add_argument_group("target", "Which package(s) to compile (required, mutually exclusive)") + target_group = target.add_mutually_exclusive_group(required=True) + target_group.add_argument("--package", metavar="NAME", help="Compile a single package.") + target_group.add_argument("--all", action="store_true", help="Compile all managed packages.") + + # ── Version source: by default inferred from filename suffixes ──────── + version_group = p_compile.add_argument_group( + "version (optional)", + "By default the new version is inferred from the filename suffixes of the consumed fragments.", + ) + version_group.add_argument( + "--version", + metavar="X.Y.Z", + help=( + "Pin the package to an explicit version, skipping the per-fragment bump inference. " + "Requires --package — each managed package has its own version trajectory and " + "applying a single version to all of them would corrupt their independent histories." + ), + ) + + # ── Execution mode: preview vs apply, where to read fragments from ──── + exec_group = p_compile.add_argument_group("execution") + exec_group.add_argument( + "--dry-run", + action="store_true", + help=( + "Preview only — no files are written or deleted. Without this flag, " + "the compile writes the new entry, bumps the version, and deletes " + "the consumed fragments." + ), + ) + exec_group.add_argument( + "--fragments-dir", + type=Path, + default=None, + metavar="PATH", + help=( + "Override the directory to read fragments from " + "(default: source//changelog.d/). " + "Useful for previewing against example fragments without touching real ones. " + "Only valid with --package." + ), + ) + + p_check = sub.add_parser( + "check", + help="Verify each modified package has a valid fragment (PR gate).", + description="Verify each modified package has a valid changelog fragment.", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_check.set_defaults(func=cmd_check) + p_check.add_argument( + "base_ref", + help=( + "Base branch to diff against (e.g. 'main' or 'develop'). " + "The diff is taken against ``origin/...HEAD``." + ), + ) + + return parser + + +def main() -> None: + parser = _build_parser() + args = parser.parse_args() + sys.exit(args.func(args, parser)) + + +if __name__ == "__main__": + main() diff --git a/tools/changelog/pyproject.toml b/tools/changelog/pyproject.toml new file mode 100644 index 000000000000..6033f35f178c --- /dev/null +++ b/tools/changelog/pyproject.toml @@ -0,0 +1,13 @@ +# Scopes pytest to this self-contained tool: the ``[tool.pytest.ini_options]`` +# section makes pytest treat ``tools/changelog/`` as its rootdir, so: +# +# 1. ``pythonpath = ["."]`` adds ``tools/changelog/`` to ``sys.path``, +# making ``import cli`` work from the test files without any shim. +# 2. ``tools/conftest.py`` (a session-takeover hook for the IsaacLab +# source/ test suite) sits *above* rootdir and is therefore not +# loaded — no ``--noconftest`` flag required. +# +# Run with: ``./isaaclab.sh -p -m pytest tools/changelog/`` +[tool.pytest.ini_options] +pythonpath = ["."] +testpaths = ["test"] diff --git a/tools/changelog/test/integration/01_patch_bump/changelog_after.rst b/tools/changelog/test/integration/01_patch_bump/changelog_after.rst new file mode 100644 index 000000000000..199771149d62 --- /dev/null +++ b/tools/changelog/test/integration/01_patch_bump/changelog_after.rst @@ -0,0 +1,25 @@ +Changelog +--------- + +1.2.4 (2026-04-30) +~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Tightened error message in :class:`~example.Foo` when a required argument is missing. + +Fixed +^^^^^ + +* Fixed missing GPU sync in :func:`~example.refresh_buffers` that occasionally returned stale data. +* Fixed off-by-one in :meth:`~example.Foo.bar` when the input list was empty. + + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/01_patch_bump/changelog_before.rst b/tools/changelog/test/integration/01_patch_bump/changelog_before.rst new file mode 100644 index 000000000000..81dc30f240c3 --- /dev/null +++ b/tools/changelog/test/integration/01_patch_bump/changelog_before.rst @@ -0,0 +1,10 @@ +Changelog +--------- + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/01_patch_bump/fragments/asmith-fix-collision-margin.rst b/tools/changelog/test/integration/01_patch_bump/fragments/asmith-fix-collision-margin.rst new file mode 100644 index 000000000000..2f4c9e39a432 --- /dev/null +++ b/tools/changelog/test/integration/01_patch_bump/fragments/asmith-fix-collision-margin.rst @@ -0,0 +1,9 @@ +Fixed +^^^^^ + +* Fixed missing GPU sync in :func:`~example.refresh_buffers` that occasionally returned stale data. + +Changed +^^^^^^^ + +* Tightened error message in :class:`~example.Foo` when a required argument is missing. diff --git a/tools/changelog/test/integration/01_patch_bump/fragments/jdoe-fix-mass-units.rst b/tools/changelog/test/integration/01_patch_bump/fragments/jdoe-fix-mass-units.rst new file mode 100644 index 000000000000..f3a7a66a215a --- /dev/null +++ b/tools/changelog/test/integration/01_patch_bump/fragments/jdoe-fix-mass-units.rst @@ -0,0 +1,4 @@ +Fixed +^^^^^ + +* Fixed off-by-one in :meth:`~example.Foo.bar` when the input list was empty. diff --git a/tools/changelog/test/integration/02_minor_bump/changelog_after.rst b/tools/changelog/test/integration/02_minor_bump/changelog_after.rst new file mode 100644 index 000000000000..bc5a3fee0e35 --- /dev/null +++ b/tools/changelog/test/integration/02_minor_bump/changelog_after.rst @@ -0,0 +1,30 @@ +Changelog +--------- + +1.3.0 (2026-04-30) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.NewSensor` for IMU-based proprioception. +* Added :func:`~example.helper` utility for batched coordinate transforms. + +Changed +^^^^^^^ + +* Documented thread-safety guarantees for :class:`~example.Worker`. + +Fixed +^^^^^ + +* Fixed a NaN propagation in :meth:`~example.Sensor.update`. + + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/02_minor_bump/changelog_before.rst b/tools/changelog/test/integration/02_minor_bump/changelog_before.rst new file mode 100644 index 000000000000..81dc30f240c3 --- /dev/null +++ b/tools/changelog/test/integration/02_minor_bump/changelog_before.rst @@ -0,0 +1,10 @@ +Changelog +--------- + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/02_minor_bump/fragments/asmith-add-multi-asset-spawner.minor.rst b/tools/changelog/test/integration/02_minor_bump/fragments/asmith-add-multi-asset-spawner.minor.rst new file mode 100644 index 000000000000..5f623cf02ad5 --- /dev/null +++ b/tools/changelog/test/integration/02_minor_bump/fragments/asmith-add-multi-asset-spawner.minor.rst @@ -0,0 +1,4 @@ +Added +^^^^^ + +* Added :class:`~example.NewSensor` for IMU-based proprioception. diff --git a/tools/changelog/test/integration/02_minor_bump/fragments/blee-add-camera-output-contract.minor.rst b/tools/changelog/test/integration/02_minor_bump/fragments/blee-add-camera-output-contract.minor.rst new file mode 100644 index 000000000000..a3feda328623 --- /dev/null +++ b/tools/changelog/test/integration/02_minor_bump/fragments/blee-add-camera-output-contract.minor.rst @@ -0,0 +1,9 @@ +Added +^^^^^ + +* Added :func:`~example.helper` utility for batched coordinate transforms. + +Changed +^^^^^^^ + +* Documented thread-safety guarantees for :class:`~example.Worker`. diff --git a/tools/changelog/test/integration/02_minor_bump/fragments/jdoe-fix-rotation-frame.rst b/tools/changelog/test/integration/02_minor_bump/fragments/jdoe-fix-rotation-frame.rst new file mode 100644 index 000000000000..e103861d0d98 --- /dev/null +++ b/tools/changelog/test/integration/02_minor_bump/fragments/jdoe-fix-rotation-frame.rst @@ -0,0 +1,4 @@ +Fixed +^^^^^ + +* Fixed a NaN propagation in :meth:`~example.Sensor.update`. diff --git a/tools/changelog/test/integration/03_major_bump/changelog_after.rst b/tools/changelog/test/integration/03_major_bump/changelog_after.rst new file mode 100644 index 000000000000..cec9e3221263 --- /dev/null +++ b/tools/changelog/test/integration/03_major_bump/changelog_after.rst @@ -0,0 +1,34 @@ +Changelog +--------- + +2.0.0 (2026-04-30) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.AnotherSensor` for proximity detection. + +Changed +^^^^^^^ + +* **Breaking:** :meth:`~example.Foo.bar` now returns a tuple ``(value, error)`` instead of raising. + +Removed +^^^^^^^ + +* Removed deprecated module ``example.old_api`` (use :mod:`~example.api` instead). + +Fixed +^^^^^ + +* Fixed a deadlock in :class:`~example.Pool` under high concurrency. + + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/03_major_bump/changelog_before.rst b/tools/changelog/test/integration/03_major_bump/changelog_before.rst new file mode 100644 index 000000000000..81dc30f240c3 --- /dev/null +++ b/tools/changelog/test/integration/03_major_bump/changelog_before.rst @@ -0,0 +1,10 @@ +Changelog +--------- + +1.2.3 (2026-01-15) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~example.OldThing` for an earlier feature. diff --git a/tools/changelog/test/integration/03_major_bump/fragments/asmith-add-warp-contact-stream.minor.rst b/tools/changelog/test/integration/03_major_bump/fragments/asmith-add-warp-contact-stream.minor.rst new file mode 100644 index 000000000000..864d48ce0cef --- /dev/null +++ b/tools/changelog/test/integration/03_major_bump/fragments/asmith-add-warp-contact-stream.minor.rst @@ -0,0 +1,4 @@ +Added +^^^^^ + +* Added :class:`~example.AnotherSensor` for proximity detection. diff --git a/tools/changelog/test/integration/03_major_bump/fragments/blee-rename-articulation-api.major.rst b/tools/changelog/test/integration/03_major_bump/fragments/blee-rename-articulation-api.major.rst new file mode 100644 index 000000000000..d392bcd339c2 --- /dev/null +++ b/tools/changelog/test/integration/03_major_bump/fragments/blee-rename-articulation-api.major.rst @@ -0,0 +1,9 @@ +Removed +^^^^^^^ + +* Removed deprecated module ``example.old_api`` (use :mod:`~example.api` instead). + +Changed +^^^^^^^ + +* **Breaking:** :meth:`~example.Foo.bar` now returns a tuple ``(value, error)`` instead of raising. diff --git a/tools/changelog/test/integration/03_major_bump/fragments/jdoe-fix-articulation-state.rst b/tools/changelog/test/integration/03_major_bump/fragments/jdoe-fix-articulation-state.rst new file mode 100644 index 000000000000..b184ab45c6e2 --- /dev/null +++ b/tools/changelog/test/integration/03_major_bump/fragments/jdoe-fix-articulation-state.rst @@ -0,0 +1,4 @@ +Fixed +^^^^^ + +* Fixed a deadlock in :class:`~example.Pool` under high concurrency. diff --git a/tools/changelog/test/integration/README.md b/tools/changelog/test/integration/README.md new file mode 100644 index 000000000000..1f55d0438705 --- /dev/null +++ b/tools/changelog/test/integration/README.md @@ -0,0 +1,35 @@ +# Changelog integration fixtures + +End-to-end test fixtures for `tools/changelog/cli.py compile`. Each +subdirectory holds a worked example: input fragments, the starting +`CHANGELOG.rst`, and the expected compiled output. + +`tools/changelog/test/test_integration.py` runs the compiler +against each one and asserts the output matches `changelog_after.rst`. +The fixtures double as human-readable demos — read alongside the PR +description to see how the system handles patch / minor / major bumps +and cross-fragment section merges. + +## Layout + +| Demo | Fragments | Bump | Resulting version | +|---|---|---|---| +| `01_patch_bump/` | 2 × `.rst` | patch | `1.2.3 → 1.2.4` | +| `02_minor_bump/` | 1 × `.rst` + 2 × `.minor.rst` | minor | `1.2.3 → 1.3.0` | +| `03_major_bump/` | 1 × `.rst` + 1 × `.minor.rst` + 1 × `.major.rst` | major | `1.2.3 → 2.0.0` | + +Each demo includes a `changelog_before.rst` (initial state) and a +`changelog_after.rst` (expected post-compile state). The bump tier is the +**max** of every fragment's filename suffix in the batch. + +## Run the compiler against a demo + +```bash +./isaaclab.sh -p tools/changelog/cli.py compile --package isaaclab \ + --fragments-dir tools/changelog/test/integration/02_minor_bump/fragments \ + --dry-run +``` + +`--dry-run` prevents the compile from consuming (deleting) the fixture +fragments. The output should match `02_minor_bump/changelog_after.rst` +modulo today's date. diff --git a/tools/changelog/test/invalid_content/3001.rst b/tools/changelog/test/invalid_content/3001.rst new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/changelog/test/invalid_content/3002.rst b/tools/changelog/test/invalid_content/3002.rst new file mode 100644 index 000000000000..190621a3aaf3 --- /dev/null +++ b/tools/changelog/test/invalid_content/3002.rst @@ -0,0 +1 @@ +Just a free-form note with no section headings. diff --git a/tools/changelog/test/invalid_content/3003.rst b/tools/changelog/test/invalid_content/3003.rst new file mode 100644 index 000000000000..4470915bae8e --- /dev/null +++ b/tools/changelog/test/invalid_content/3003.rst @@ -0,0 +1,2 @@ +Added +^^^^^ diff --git a/tools/changelog/test/invalid_filenames/1234.notabump.rst b/tools/changelog/test/invalid_filenames/1234.notabump.rst new file mode 100644 index 000000000000..f70a86344dc6 --- /dev/null +++ b/tools/changelog/test/invalid_filenames/1234.notabump.rst @@ -0,0 +1,4 @@ +Added +^^^^^ + +* This file has an unrecognised bump tier and should be rejected. diff --git a/tools/changelog/test/invalid_filenames/multi.dot.slug.rst b/tools/changelog/test/invalid_filenames/multi.dot.slug.rst new file mode 100644 index 000000000000..c9ea00434158 --- /dev/null +++ b/tools/changelog/test/invalid_filenames/multi.dot.slug.rst @@ -0,0 +1,4 @@ +Added +^^^^^ + +* This file's slug contains dots (reserved for the tier suffix) and should be rejected. diff --git a/tools/changelog/test/test_bump_suffix.py b/tools/changelog/test/test_bump_suffix.py new file mode 100644 index 000000000000..1c191c6b33d9 --- /dev/null +++ b/tools/changelog/test/test_bump_suffix.py @@ -0,0 +1,135 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Bump-tier inference: filename suffix → bump, and aggregating across a batch. + +These tests use the worked examples under :file:`tools/changelog/examples/` +as fixtures so the same files double as human-readable demos and as +inputs the test suite verifies. +""" + +from __future__ import annotations + +from pathlib import Path + +import cli +import pytest + +EXAMPLES = Path(__file__).parent / "integration" + + +# --------------------------------------------------------------------------- +# Filename → bump tier (one demo per tier, tested separately) +# --------------------------------------------------------------------------- + + +def test_patch_bump_demo_aggregates_to_patch(): + """``examples/01_patch_bump/`` has two ``.rst`` files (no suffix) → patch.""" + batch = cli.FragmentBatch.from_dir(EXAMPLES / "01_patch_bump" / "fragments") + assert batch.invalid == [] + assert {f.name for f in batch.valid} == { + "jdoe-fix-mass-units.rst", + "asmith-fix-collision-margin.rst", + } + assert all(f.bump == "patch" for f in batch.valid) + assert batch.aggregate_bump() == "patch" + + +def test_minor_bump_demo_aggregates_to_minor(): + """``examples/02_minor_bump/`` mixes patch + minor fragments → minor wins.""" + batch = cli.FragmentBatch.from_dir(EXAMPLES / "02_minor_bump" / "fragments") + assert batch.invalid == [] + assert {f.name for f in batch.valid} == { + "jdoe-fix-rotation-frame.rst", + "asmith-add-multi-asset-spawner.minor.rst", + "blee-add-camera-output-contract.minor.rst", + } + bumps = sorted(f.bump for f in batch.valid) + assert bumps == ["minor", "minor", "patch"] + assert batch.aggregate_bump() == "minor" + + +def test_major_bump_demo_aggregates_to_major(): + """``examples/03_major_bump/`` mixes patch + minor + major → major wins.""" + batch = cli.FragmentBatch.from_dir(EXAMPLES / "03_major_bump" / "fragments") + assert batch.invalid == [] + assert {f.name for f in batch.valid} == { + "jdoe-fix-articulation-state.rst", + "asmith-add-warp-contact-stream.minor.rst", + "blee-rename-articulation-api.major.rst", + } + bumps = sorted(f.bump for f in batch.valid) + assert bumps == ["major", "minor", "patch"] + assert batch.aggregate_bump() == "major" + + +# --------------------------------------------------------------------------- +# Pure aggregation logic (no filesystem) +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "bumps,expected", + [ + ([], "patch"), + (["patch"], "patch"), + (["patch", "patch"], "patch"), + (["patch", "minor"], "minor"), + (["minor", "patch", "minor"], "minor"), + (["patch", "minor", "major"], "major"), + (["major", "patch"], "major"), + ], +) +def test_aggregate_bump_logic(bumps, expected): + assert cli.FragmentBatch._aggregate(bumps) == expected + + +# --------------------------------------------------------------------------- +# Filename regex — what the gate and compiler agree to accept +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "name,is_fragment,is_skip", + [ + ("1234.rst", True, False), + ("1234.minor.rst", True, False), + ("1234.major.rst", True, False), + ("1234.skip", False, True), + ("jdoe-fix-bug.rst", True, False), + ("jdoe-add-feature.minor.rst", True, False), + ("jdoe-rename-api.major.rst", True, False), + ("jdoe-ci-only.skip", False, True), + (".gitkeep", False, False), + ("README.md", False, False), + ("1234.patch.rst", False, False), # only minor/major are recognised tiers + ("foo.bar.rst", False, False), # extra dots in slug are reserved for tier suffix + ("1234.minor", False, False), # missing .rst extension + ("1234.rst.bak", False, False), + ], +) +def test_fragment_filename_regexes(name, is_fragment, is_skip): + assert bool(cli.FRAGMENT_RE.match(name)) is is_fragment + assert bool(cli.SKIP_RE.match(name)) is is_skip + + +# --------------------------------------------------------------------------- +# Fragment.parse_slug — derived from filename for collision detection +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "name,expected_slug", + [ + ("1234.rst", "1234"), + ("jdoe-add-feature.minor.rst", "jdoe-add-feature"), + ("blee-rename-api.major.rst", "blee-rename-api"), + ("ci-only.skip", "ci-only"), + ("README.md", None), + (".gitkeep", None), + ], +) +def test_parse_slug_for_filenames(name, expected_slug): + assert cli.Fragment.parse_slug(name) == expected_slug diff --git a/tools/changelog/test/test_format.py b/tools/changelog/test/test_format.py new file mode 100644 index 000000000000..24a249992f7b --- /dev/null +++ b/tools/changelog/test/test_format.py @@ -0,0 +1,94 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""FragmentBatch._merge_sections + ._format_entry + Version.bumped — the rendering pipeline.""" + +from __future__ import annotations + +import cli +import pytest + +# --------------------------------------------------------------------------- +# merge_fragments — collapses bullets across fragments under the same section +# --------------------------------------------------------------------------- + + +def test_merge_fragments_collapses_same_section_across_fragments(): + f1 = {"Added": ["* a1\n"]} + f2 = {"Added": ["* a2\n"], "Fixed": ["* f1\n"]} + merged = cli.FragmentBatch._merge_sections([f1, f2]) + # Bullets from separate fragments concatenate with no blank line in between + # (matching IsaacLab's repo convention, where successive bullets are run-on). + assert merged["Added"] == ["* a1\n", "* a2\n"] + assert merged["Fixed"] == ["* f1\n"] + + +# --------------------------------------------------------------------------- +# format_entry — section ordering + version heading +# --------------------------------------------------------------------------- + + +def test_format_entry_orders_canonical_sections(): + sections = { + "Fixed": ["* f1\n"], + "Added": ["* a1\n"], + "Removed": ["* r1\n"], + } + out = cli.FragmentBatch._format_entry("1.2.4", sections) + # Canonical order is Added, Changed, Deprecated, Removed, Fixed. + a_pos = out.index("Added") + r_pos = out.index("Removed") + f_pos = out.index("Fixed") + assert a_pos < r_pos < f_pos + + +def test_format_entry_includes_version_heading(): + out = cli.FragmentBatch._format_entry("9.9.9", {"Added": ["* x\n"]}) + assert "9.9.9 (" in out + assert "~~~~~~" in out # tilde underline + + +def test_format_entry_unknown_sections_appear_after_canonical(): + sections = {"Performance": ["* p1\n"], "Added": ["* a1\n"]} + out = cli.FragmentBatch._format_entry("1.0.0", sections) + assert out.index("Added") < out.index("Performance") + + +# --------------------------------------------------------------------------- +# bump_version — semver maths +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "current,part,expected", + [ + ("1.2.3", "patch", "1.2.4"), + ("1.2.3", "minor", "1.3.0"), # minor bump zeros patch + ("1.2.3", "major", "2.0.0"), # major bump zeros minor and patch + ("4.6.21", "patch", "4.6.22"), + ("4.6.21.dev20260301", "patch", "4.6.22"), # dev suffix stripped + ], +) +def test_version_bumped(current, part, expected): + assert cli.Version(current).bumped(part).text == expected + assert str(cli.Version(current).bumped(part)) == expected + + +def test_version_bumped_rejects_non_semver(): + # Construction itself rejects malformed input — fail-fast for bad ``--version``. + with pytest.raises(ValueError): + cli.Version("1.2") + with pytest.raises(ValueError): + cli.Version("not-semver") + with pytest.raises(ValueError): + cli.Version("1.2.3.4.5") + + +def test_version_accepts_dev_suffix(): + """PEP 440 ``.devN`` suffixes are tolerated on construction (they appear in + real ``extension.toml`` files between releases) and stripped on bump.""" + v = cli.Version("4.6.21.dev20260301") + assert v.text == "4.6.21.dev20260301" + assert v.bumped("patch").text == "4.6.22" diff --git a/tools/changelog/test/test_integration.py b/tools/changelog/test/test_integration.py new file mode 100644 index 000000000000..0e48e4a874aa --- /dev/null +++ b/tools/changelog/test/test_integration.py @@ -0,0 +1,69 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""End-to-end checks: run the compiler against each worked example and verify +the resulting changelog matches the checked-in :file:`changelog_after.rst`. + +This is what makes the examples *living docs* — if anything in the compile +pipeline drifts, an example's ``changelog_after.rst`` stops matching and +the corresponding test fails immediately. +""" + +from __future__ import annotations + +import re +import shutil +from pathlib import Path + +import cli +import pytest + +EXAMPLES = Path(__file__).parent / "integration" + +# Strip the ``(YYYY-MM-DD)`` suffix from version headings so the fixed example +# files don't drift when the compiler stamps today's date. +_DATE_RE = re.compile(r"\(\d{4}-\d{2}-\d{2}\)") + + +def _normalize(text: str) -> str: + return _DATE_RE.sub("(YYYY-MM-DD)", text) + + +@pytest.mark.parametrize( + "demo,expected_version", + [ + ("01_patch_bump", "1.2.4"), + ("02_minor_bump", "1.3.0"), + ("03_major_bump", "2.0.0"), + ], +) +def test_demo_compile_matches_changelog_after(tmp_path, demo, expected_version): + """Stage a fake package whose CHANGELOG.rst matches the demo's ``before``, + run the compiler against the demo's fragments, and verify the file ends + up byte-equal to the demo's ``after`` (modulo today's date).""" + demo_dir = EXAMPLES / demo + + # Build a minimal package layout the compiler will accept. + pkg_root = tmp_path / "demo_pkg" + (pkg_root / "config").mkdir(parents=True) + (pkg_root / "docs").mkdir(parents=True) + (pkg_root / "config" / "extension.toml").write_text('version = "1.2.3"\n', encoding="utf-8") + shutil.copy(demo_dir / "changelog_before.rst", pkg_root / "docs" / "CHANGELOG.rst") + + # Copy fragments into tmp_path so the compile's auto-clean doesn't + # delete the live checked-in examples directory. + fragments_tmp = tmp_path / "fragments" + shutil.copytree(demo_dir / "fragments", fragments_tmp) + + # Run the compiler against the (copied) fragments. + pkg = cli.Package(pkg_root) + pkg.compile(fragments_dir=fragments_tmp) + + actual = (pkg_root / "docs" / "CHANGELOG.rst").read_text(encoding="utf-8") + expected = (demo_dir / "changelog_after.rst").read_text(encoding="utf-8") + assert _normalize(actual) == _normalize(expected) + + # Version should have bumped exactly as the demo name suggests. + assert str(pkg.current_version()) == expected_version diff --git a/tools/changelog/test/test_parse.py b/tools/changelog/test/test_parse.py new file mode 100644 index 000000000000..fe8123c2f14c --- /dev/null +++ b/tools/changelog/test/test_parse.py @@ -0,0 +1,143 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Fragment.parse + FragmentBatch.from_dir + Package.discover — directory scanning.""" + +from __future__ import annotations + +from pathlib import Path + +import cli + +FIXTURES = Path(__file__).parent + + +def _write(path: Path, body: str) -> Path: + path.write_text(body, encoding="utf-8") + return path + + +# --------------------------------------------------------------------------- +# parse_fragment — section header detection (pure function) +# --------------------------------------------------------------------------- + + +def test_parse_fragment_single_section(tmp_path): + p = _write(tmp_path / "1.rst", "Added\n^^^^^\n\n* Added :class:`~pkg.Foo`.\n") + sections = cli.Fragment(p).parse() + assert list(sections.keys()) == ["Added"] + assert sections["Added"] == ["* Added :class:`~pkg.Foo`.\n"] + + +def test_parse_fragment_multiple_sections_preserves_dict_order(tmp_path): + p = _write(tmp_path / "1.rst", "Added\n^^^^^\n\n* a1\n\nFixed\n^^^^^\n\n* f1\n* f2\n") + sections = cli.Fragment(p).parse() + assert list(sections.keys()) == ["Added", "Fixed"] + assert sections["Added"] == ["* a1\n"] + assert sections["Fixed"] == ["* f1\n", "* f2\n"] + + +def test_parse_fragment_underline_must_be_at_least_heading_length(tmp_path): + """Heading 'Added' (5 chars) needs >=5 carets; '^^' must not match.""" + p = _write(tmp_path / "1.rst", "Added\n^^\n\n* a1\n") + assert cli.Fragment(p).parse() == {} + + +def test_parse_fragment_empty_file(tmp_path): + p = _write(tmp_path / "1.rst", "") + assert cli.Fragment(p).parse() == {} + + +def test_parse_fragment_no_section_headings(tmp_path): + p = _write(tmp_path / "1.rst", "Just a free-form note with no headings.\n") + assert cli.Fragment(p).parse() == {} + + +# --------------------------------------------------------------------------- +# Fragment.parse — same logic, exposed as a method on the wrapper +# --------------------------------------------------------------------------- + + +# --------------------------------------------------------------------------- +# FragmentBatch.from_dir — separates valid filenames from the rest +# --------------------------------------------------------------------------- + + +def test_fragment_batch_flags_invalid_filenames_from_fixture(): + """Files with dotted slugs or unknown bump tiers go in ``invalid``.""" + batch = cli.FragmentBatch.from_dir(FIXTURES / "invalid_filenames") + assert batch.valid == [] + assert {p.name for p in batch.invalid} == {"multi.dot.slug.rst", "1234.notabump.rst"} + + +def test_fragment_batch_missing_directory(tmp_path): + """A non-existent directory is treated as empty, not an error.""" + batch = cli.FragmentBatch.from_dir(tmp_path / "does-not-exist") + assert batch.valid == [] + assert batch.invalid == [] + assert batch.skip_paths == [] + + +def test_fragment_batch_collects_skip_files_separately(tmp_path): + """``.skip`` files are tolerated — exposed via ``skip_paths``, not ``valid``.""" + (tmp_path / "1234.skip").write_text("", encoding="utf-8") + (tmp_path / "1235.rst").write_text("Added\n^^^^^\n\n* x\n", encoding="utf-8") + batch = cli.FragmentBatch.from_dir(tmp_path) + assert {f.name for f in batch.valid} == {"1235.rst"} + assert {p.name for p in batch.skip_paths} == {"1234.skip"} + + +# --------------------------------------------------------------------------- +# Package.discover — a package is "managed" iff it has both +# config/extension.toml and docs/CHANGELOG.rst +# --------------------------------------------------------------------------- + + +def _make_pkg(root: Path, name: str, *, has_ext: bool = True, has_changelog: bool = True) -> None: + pkg = root / name + if has_ext: + (pkg / "config").mkdir(parents=True, exist_ok=True) + (pkg / "config" / "extension.toml").write_text('version = "0.0.0"\n', encoding="utf-8") + if has_changelog: + (pkg / "docs").mkdir(parents=True, exist_ok=True) + (pkg / "docs" / "CHANGELOG.rst").write_text("Changelog\n---------\n\n", encoding="utf-8") + + +def test_package_discover_includes_complete_packages(tmp_path): + _make_pkg(tmp_path, "complete_a") + _make_pkg(tmp_path, "complete_b") + pkgs = cli.Package.discover(tmp_path) + assert [p.name for p in pkgs] == ["complete_a", "complete_b"] + assert all(p.is_managed for p in pkgs) + + +def test_package_discover_excludes_packages_missing_changelog(tmp_path): + _make_pkg(tmp_path, "complete") + _make_pkg(tmp_path, "no_changelog", has_changelog=False) + assert [p.name for p in cli.Package.discover(tmp_path)] == ["complete"] + + +def test_package_discover_excludes_packages_missing_extension_toml(tmp_path): + _make_pkg(tmp_path, "complete") + _make_pkg(tmp_path, "no_extension", has_ext=False) + assert [p.name for p in cli.Package.discover(tmp_path)] == ["complete"] + + +def test_package_discover_returns_sorted_alphabetically(tmp_path): + _make_pkg(tmp_path, "zebra") + _make_pkg(tmp_path, "alpha") + _make_pkg(tmp_path, "mango") + assert [p.name for p in cli.Package.discover(tmp_path)] == ["alpha", "mango", "zebra"] + + +def test_package_discover_missing_root_returns_empty(tmp_path): + assert cli.Package.discover(tmp_path / "does-not-exist") == [] + + +def test_package_is_managed_property(tmp_path): + _make_pkg(tmp_path, "complete") + _make_pkg(tmp_path, "no_changelog", has_changelog=False) + assert cli.Package(tmp_path / "complete").is_managed is True + assert cli.Package(tmp_path / "no_changelog").is_managed is False diff --git a/tools/changelog/test/test_validate.py b/tools/changelog/test/test_validate.py new file mode 100644 index 000000000000..e02e1b95e990 --- /dev/null +++ b/tools/changelog/test/test_validate.py @@ -0,0 +1,323 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Fragment.validate — PR-gate filename + content rules.""" + +from __future__ import annotations + +from pathlib import Path + +import cli +import pytest + +FIXTURES = Path(__file__).parent + + +def _write(path: Path, body: str) -> Path: + path.write_text(body, encoding="utf-8") + return path + + +# --------------------------------------------------------------------------- +# Acceptance — well-formed fragments +# --------------------------------------------------------------------------- + + +def test_validate_accepts_well_formed(tmp_path): + p = _write(tmp_path / "1234.rst", "Added\n^^^^^\n\n* Added X.\n") + assert cli.Fragment(p).validate() is None + + +def test_validate_accepts_minor_suffix(tmp_path): + p = _write(tmp_path / "1234.minor.rst", "Added\n^^^^^\n\n* Added X.\n") + assert cli.Fragment(p).validate() is None + + +def test_validate_accepts_major_suffix(tmp_path): + p = _write(tmp_path / "1234.major.rst", "Removed\n^^^^^^^\n\n* Removed X.\n") + assert cli.Fragment(p).validate() is None + + +# --------------------------------------------------------------------------- +# Rejection — uses checked-in fixtures so the malformed inputs are reviewable +# --------------------------------------------------------------------------- + + +def test_validate_rejects_unknown_filename_from_fixture(): + err = cli.Fragment(FIXTURES / "invalid_filenames" / "multi.dot.slug.rst").validate() + assert err is not None and "invalid filename" in err + + +def test_validate_rejects_unknown_bump_tier_from_fixture(): + err = cli.Fragment(FIXTURES / "invalid_filenames" / "1234.notabump.rst").validate() + assert err is not None and "invalid filename" in err + + +def test_validate_rejects_empty_file_from_fixture(): + err = cli.Fragment(FIXTURES / "invalid_content" / "3001.rst").validate() + assert err is not None and "empty" in err + + +def test_validate_rejects_missing_section_heading_from_fixture(): + err = cli.Fragment(FIXTURES / "invalid_content" / "3002.rst").validate() + assert err is not None and "section" in err.lower() + + +def test_validate_rejects_section_without_bullets_from_fixture(): + err = cli.Fragment(FIXTURES / "invalid_content" / "3003.rst").validate() + assert err is not None and "bullet" in err.lower() + + +# --------------------------------------------------------------------------- +# check_fragments — gate orchestration: immutability, slug uniqueness, and +# the "PR must add at least one fragment per touched package" rule +# --------------------------------------------------------------------------- + + +def _pkg_under(tmp_path: Path, name: str) -> cli.Package: + """Build a managed-looking Package rooted at ``tmp_path/source/``.""" + root = tmp_path / "source" / name + (root / "config").mkdir(parents=True) + (root / "docs").mkdir(parents=True) + (root / "config" / "extension.toml").write_text('version = "0.0.0"\n', encoding="utf-8") + (root / "docs" / "CHANGELOG.rst").write_text("Changelog\n---------\n\n", encoding="utf-8") + return cli.Package(root) + + +def test_check_fragments_immutability_rejects_modified_fragment(tmp_path): + """Modifying an existing fragment is forbidden — must add a new one instead.""" + pkg = _pkg_under(tmp_path, "isaaclab") + changed = {"source/isaaclab/code.py", "source/isaaclab/changelog.d/jdoe-fix-bug.rst"} + added = {"source/isaaclab/code.py"} # fragment exists already; the PR only modified it + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + assert missing == ["isaaclab"] + invalid_map = dict(invalid) + assert "source/isaaclab/changelog.d/jdoe-fix-bug.rst" in invalid_map + assert "immutable" in invalid_map["source/isaaclab/changelog.d/jdoe-fix-bug.rst"] + + +def test_check_fragments_chain_allows_other_pr_fragment(tmp_path): + """A chained PR (B based on A's branch, A still open) sees A's fragment in + its diff. That should pass — both fragments have distinct slugs and B + contributes its own fragment for the touched package.""" + pkg = _pkg_under(tmp_path, "isaaclab") + (pkg.root / "changelog.d").mkdir() + (pkg.root / "changelog.d" / "alice-feature-a.rst").write_text("Fixed\n^^^^^\n\n* x\n", encoding="utf-8") + (pkg.root / "changelog.d" / "bob-feature-b.rst").write_text("Added\n^^^^^\n\n* y\n", encoding="utf-8") + changed = { + "source/isaaclab/code.py", + "source/isaaclab/changelog.d/alice-feature-a.rst", # parent PR's fragment + "source/isaaclab/changelog.d/bob-feature-b.rst", # this PR's own fragment + } + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + assert missing == [] + assert invalid == [] + + +def test_check_fragments_slug_collision_with_existing(tmp_path): + """Adding a fragment whose slug collides with one already in changelog.d/ fails.""" + pkg = _pkg_under(tmp_path, "isaaclab") + (pkg.root / "changelog.d").mkdir() + # Pre-existing fragment on develop with the same slug as the one this PR adds. + (pkg.root / "changelog.d" / "jdoe-fix-bug.rst").write_text("Fixed\n^^^^^\n\n* x\n", encoding="utf-8") + # PR adds a fresh fragment whose slug collides — different tier, same slug. + (pkg.root / "changelog.d" / "jdoe-fix-bug.minor.rst").write_text("Added\n^^^^^\n\n* y\n", encoding="utf-8") + changed = {"source/isaaclab/code.py", "source/isaaclab/changelog.d/jdoe-fix-bug.minor.rst"} + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + invalid_map = dict(invalid) + assert "source/isaaclab/changelog.d/jdoe-fix-bug.minor.rst" in invalid_map + assert "collides" in invalid_map["source/isaaclab/changelog.d/jdoe-fix-bug.minor.rst"] + + +def test_check_fragments_collision_independent_of_iterdir_order(tmp_path, monkeypatch): + """Regression: an added file must not be allowed to *replace* a colliding + pre-existing fragment in the existing-slug map. The CI checkout contains + both, and depending on filesystem iteration order the added file could + end up as the "existing" entry, hiding the collision.""" + pkg = _pkg_under(tmp_path, "isaaclab") + (pkg.root / "changelog.d").mkdir() + (pkg.root / "changelog.d" / "jdoe-foo.rst").write_text("Fixed\n^^^^^\n\n* x\n", encoding="utf-8") + (pkg.root / "changelog.d" / "jdoe-foo.minor.rst").write_text("Added\n^^^^^\n\n* y\n", encoding="utf-8") + changed = {"source/isaaclab/code.py", "source/isaaclab/changelog.d/jdoe-foo.minor.rst"} + added = changed + + # Force iterdir() to return the added file *last* so it would overwrite + # the pre-existing entry in a buggy implementation. Sort with the added + # file ranked highest, so it lands at the tail regardless of natural + # alphabetical order. + real_iterdir = Path.iterdir + added_name = "jdoe-foo.minor.rst" + + def ordered_iterdir(self): + if self == pkg.root / "changelog.d": + return iter(sorted(real_iterdir(self), key=lambda p: (p.name == added_name, p.name))) + return real_iterdir(self) + + monkeypatch.setattr(Path, "iterdir", ordered_iterdir) + + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + invalid_map = dict(invalid) + assert "source/isaaclab/changelog.d/jdoe-foo.minor.rst" in invalid_map + assert "collides" in invalid_map["source/isaaclab/changelog.d/jdoe-foo.minor.rst"] + + +def test_check_fragments_slug_collision_within_pr(tmp_path): + """Two added fragments in the same PR that share a slug (e.g. across tiers) fail.""" + pkg = _pkg_under(tmp_path, "isaaclab") + (pkg.root / "changelog.d").mkdir() + (pkg.root / "changelog.d" / "jdoe-fix.rst").write_text("Fixed\n^^^^^\n\n* x\n", encoding="utf-8") + (pkg.root / "changelog.d" / "jdoe-fix.minor.rst").write_text("Added\n^^^^^\n\n* y\n", encoding="utf-8") + changed = { + "source/isaaclab/code.py", + "source/isaaclab/changelog.d/jdoe-fix.rst", + "source/isaaclab/changelog.d/jdoe-fix.minor.rst", + } + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + # One of the two is the offender; the other is the first-seen "winner". + invalid_paths = [p for p, _ in invalid] + assert any("jdoe-fix" in p for p in invalid_paths) + assert any("collides" in r for _, r in invalid) + + +def test_check_fragments_skip_file_satisfies_requirement(tmp_path): + """A ``.skip`` opt-out is a valid form of "PR owns a fragment for this pkg".""" + pkg = _pkg_under(tmp_path, "isaaclab") + (pkg.root / "changelog.d").mkdir() + (pkg.root / "changelog.d" / "ci-only.skip").write_text("", encoding="utf-8") + changed = {"source/isaaclab/code.py", "source/isaaclab/changelog.d/ci-only.skip"} + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + assert missing == [] + assert invalid == [] + + +def test_check_fragments_no_source_changes_means_no_required_fragment(tmp_path): + """Pure docs / CI / changelog-tooling PRs don't trigger the requirement.""" + pkg = _pkg_under(tmp_path, "isaaclab") + changed = {"docs/something.rst"} # not under source/isaaclab/ + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + assert missing == [] + assert invalid == [] + + +def test_check_fragments_missing_when_source_touched_without_fragment(tmp_path): + """If the PR touches a package's source but adds no fragment, the package is missing.""" + pkg = _pkg_under(tmp_path, "isaaclab") + changed = {"source/isaaclab/code.py"} + added = changed + missing, invalid = cli.PRDiff(changed=changed, added=added).evaluate([pkg]) + assert missing == ["isaaclab"] + assert invalid == [] + + +# --------------------------------------------------------------------------- +# _display_path — handles paths inside *and* outside REPO_ROOT +# --------------------------------------------------------------------------- + + +def test_display_path_strips_repo_root_for_internal_paths(): + """Inside-repo paths are shown relative for terse log lines.""" + p = cli.REPO_ROOT / "tools" / "changelog" / "cli.py" + assert cli._display_path(p) == "tools/changelog/cli.py" + + +def test_display_path_falls_back_to_absolute_for_external(tmp_path): + """External paths (e.g. ``--fragments-dir /tmp/foo`` outside the repo) + used to crash on ``relative_to(REPO_ROOT)``; the helper now returns the + absolute path in that case.""" + external = tmp_path / "external_fragments" / "1234.rst" + external.parent.mkdir(parents=True) + external.write_text("", encoding="utf-8") + assert cli._display_path(external) == str(external) + + +# --------------------------------------------------------------------------- +# Package.compile bails on unmanaged packages instead of silently warning +# --------------------------------------------------------------------------- + + +def test_compile_raises_on_package_missing_changelog(tmp_path): + """Constructing a Package directly at an unmanaged root and calling + ``compile()`` must raise (not silently warn-and-write a stale toml).""" + pkg_root = tmp_path / "pkg" + (pkg_root / "config").mkdir(parents=True) + (pkg_root / "config" / "extension.toml").write_text('version = "1.2.3"\n', encoding="utf-8") + # No docs/CHANGELOG.rst — package is not managed. + pkg = cli.Package(pkg_root) + assert pkg.is_managed is False + + fragments = tmp_path / "fragments" + fragments.mkdir() + (fragments / "1234.rst").write_text("Fixed\n^^^^^\n\n* x\n", encoding="utf-8") + + with pytest.raises(ValueError, match="not managed"): + pkg.compile(fragments_dir=fragments, dry_run=True) + + +# --------------------------------------------------------------------------- +# cmd_compile parser guards — argparse-level errors fire as SystemExit +# --------------------------------------------------------------------------- + + +def _parse_compile(argv: list[str]): + """Build the parser and parse a compile invocation. Returns (parser, args).""" + parser = cli._build_parser() + return parser, parser.parse_args(argv) + + +def test_compile_guard_version_with_all_errors(): + """``--version`` with ``--all`` is meaningless — each package has its own version.""" + parser, args = _parse_compile(["compile", "--all", "--version", "1.2.3"]) + with pytest.raises(SystemExit): + cli.cmd_compile(args, parser) + + +def test_compile_guard_fragments_dir_with_all_errors(): + """``--fragments-dir`` with ``--all`` is meaningless — different dirs per package.""" + parser, args = _parse_compile(["compile", "--all", "--fragments-dir", "/tmp/x"]) + with pytest.raises(SystemExit): + cli.cmd_compile(args, parser) + + +def test_compile_guard_malformed_version_errors(): + """A garbage ``--version`` value fails before any file is touched.""" + parser, args = _parse_compile(["compile", "--package", "isaaclab", "--version", "not-semver"]) + with pytest.raises(SystemExit): + cli.cmd_compile(args, parser) + + +def test_compile_guard_nonexistent_package_errors(): + """A ``--package`` that doesn't exist on disk fails fast.""" + parser, args = _parse_compile(["compile", "--package", "definitely_not_a_real_package_xyz"]) + with pytest.raises(SystemExit): + cli.cmd_compile(args, parser) + + +def test_compile_rejects_fragments_that_check_would_reject(tmp_path): + """``compile`` must enforce the same content rules as ``check``. + + Regression: a fragment with a section heading but no bullet body + used to slip past compile (parsed to ``{"Added": []}``, emitted an + empty Added section), while check correctly rejected it. The two + paths must agree on what a valid fragment looks like. + """ + pkg_root = tmp_path / "pkg" + (pkg_root / "config").mkdir(parents=True) + (pkg_root / "docs").mkdir(parents=True) + (pkg_root / "config" / "extension.toml").write_text('version = "1.2.3"\n', encoding="utf-8") + (pkg_root / "docs" / "CHANGELOG.rst").write_text("Changelog\n---------\n\n", encoding="utf-8") + pkg = cli.Package(pkg_root) + + fragments = tmp_path / "fragments" + fragments.mkdir() + # Header but no bullets — same shape as fixtures/invalid_content/3003.rst. + (fragments / "1234.rst").write_text("Added\n^^^^^\n\n", encoding="utf-8") + + with pytest.raises(ValueError, match="failed content validation"): + pkg.compile(fragments_dir=fragments, dry_run=True) diff --git a/tools/conftest.py b/tools/conftest.py index a61c94f2c474..55b00ce44afa 100644 --- a/tools/conftest.py +++ b/tools/conftest.py @@ -6,6 +6,7 @@ import contextlib import os import select +import signal import subprocess import sys import time @@ -23,22 +24,86 @@ def pytest_ignore_collect(collection_path, config): return True -def capture_test_output_with_timeout(cmd, timeout, env): - """Run a command with timeout and capture all output while streaming in real-time.""" +COLD_CACHE_BUFFER = 700 +"""Extra seconds added to the first camera-enabled test's hard timeout. + +The first test that uses ``enable_cameras=True`` may compile shaders during its +run (~600 s). This buffer prevents that from being misreported as a test +timeout. Only the first such test gets the extension — after it runs, the +on-disk cache is populated. +""" + +STARTUP_DEADLINE = 120 +"""Seconds to wait for AppLauncher init or pytest collection before declaring a +startup hang. + +AppLauncher prints ``[ISAACLAB] AppLauncher initialization complete`` to +``sys.__stderr__`` (never suppressed) when Kit finishes initializing, and pytest +prints ``collected N items`` to stdout after collection. If neither appears +within this deadline the process is treated as hung. Kit startup can exceed +60 s on cold CI workers, so this catches real startup hangs without killing +legitimate slow launches. +""" + +STARTUP_HANG_RETRIES = 2 +"""Number of times to retry a test that hangs during startup before giving up.""" + +TIMEOUT_RETRIES = 2 +"""Number of times to retry a test that reaches its hard timeout before giving up.""" + +SHUTDOWN_GRACE_PERIOD = 30 +"""Seconds to wait for clean exit after the JUnit XML report file appears. + +When a test completes and writes its JUnit report, the subprocess may hang +during ``SimulationApp.close()`` or Kit shutdown. Rather than wasting the +full hard timeout, we give the process a short grace period to exit, then +kill it. The test results are taken from the report file (pass/fail), not +from the kill. +""" + + +def capture_test_output_with_timeout(cmd, timeout, env, startup_deadline=0, report_file=""): + """Run a command with timeout and capture all output while streaming in real-time. + + Args: + cmd: Command to execute. + timeout: Maximum wall-clock seconds before the process is killed. + env: Environment variables for the subprocess. + startup_deadline: If > 0, the process is killed early when neither + ``AppLauncher initialization complete`` (stderr) nor ``collected`` + (stdout) appears within this many seconds. + report_file: Path to the JUnit XML report file. When set, the process + is given only :data:`SHUTDOWN_GRACE_PERIOD` seconds to exit after + the file appears on disk. + + Returns: + Tuple of ``(returncode, stdout_bytes, stderr_bytes, kill_reason, + wall_time, pre_kill_diag)``. *kill_reason* is ``""`` for normal exits, + ``"timeout"`` for hard timeouts, ``"startup_hang"`` when the process + did not reach pytest collection in time, or ``"shutdown_hang"`` when + the test completed but the process hung during shutdown. + """ stdout_data = b"" stderr_data = b"" + process = None try: - # Use Popen to capture output in real-time + # Each test gets its own session so orphaned Kit/Isaac Sim child + # processes cannot send SIGHUP to the next test's process group. process = subprocess.Popen( - cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=False + cmd, + env=env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + bufsize=0, + universal_newlines=False, + start_new_session=True, ) + pgid = os.getpgid(process.pid) - # Set up file descriptors for non-blocking reads stdout_fd = process.stdout.fileno() stderr_fd = process.stderr.fileno() - # Set non-blocking mode (Unix systems only) try: import fcntl @@ -46,27 +111,47 @@ def capture_test_output_with_timeout(cmd, timeout, env): flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) except ImportError: - # fcntl not available on Windows, use a simpler approach pass start_time = time.time() + startup_done = startup_deadline <= 0 + shutdown_deadline = 0.0 while process.poll() is None: - # Check for timeout - if time.time() - start_time > timeout: - process.kill() + elapsed = time.time() - start_time + + if not startup_done: + if b"AppLauncher initialization complete" in stderr_data or b"collected " in stdout_data: + startup_done = True + + if report_file and not shutdown_deadline and os.path.exists(report_file): + shutdown_deadline = time.time() + SHUTDOWN_GRACE_PERIOD + + kill_reason = None + if not startup_done and elapsed > startup_deadline: + kill_reason = "startup_hang" + elif shutdown_deadline and time.time() > shutdown_deadline: + kill_reason = "shutdown_hang" + elif elapsed > timeout: + kill_reason = "timeout" + + if kill_reason: + pre_kill_diag = _capture_system_diagnostics() + + # Kill the entire process group (test + any Kit children). + try: + os.killpg(pgid, signal.SIGKILL) + except (ProcessLookupError, PermissionError, OSError): + process.kill() try: remaining_stdout, remaining_stderr = process.communicate(timeout=5) stdout_data += remaining_stdout stderr_data += remaining_stderr except subprocess.TimeoutExpired: - process.terminate() - remaining_stdout, remaining_stderr = process.communicate(timeout=1) - stdout_data += remaining_stdout - stderr_data += remaining_stderr - return -1, stdout_data, stderr_data, True # -1 indicates timeout + pass + wall_time = time.time() - start_time + return -1, stdout_data, stderr_data, kill_reason, wall_time, pre_kill_diag - # Check for available output try: ready_fds, _, _ = select.select([stdout_fd, stderr_fd], [], [], 0.1) @@ -76,75 +161,182 @@ def capture_test_output_with_timeout(cmd, timeout, env): chunk = process.stdout.read(1024) if chunk: stdout_data += chunk - # Print to stdout in real-time sys.stdout.buffer.write(chunk) sys.stdout.buffer.flush() elif fd == stderr_fd: chunk = process.stderr.read(1024) if chunk: stderr_data += chunk - # Print to stderr in real-time sys.stderr.buffer.write(chunk) sys.stderr.buffer.flush() except OSError: - # select failed, fall back to simple polling time.sleep(0.1) continue - # Get any remaining output - remaining_stdout, remaining_stderr = process.communicate() - stdout_data += remaining_stdout - stderr_data += remaining_stderr + # Drain any output the process wrote before or just after exiting. + try: + remaining_stdout, remaining_stderr = process.communicate(timeout=10) + stdout_data += remaining_stdout + stderr_data += remaining_stderr + except Exception: + pass + + # Kill any orphaned child processes (Kit, Isaac Sim) left by the test. + try: + os.killpg(pgid, signal.SIGKILL) + time.sleep(1) + except (ProcessLookupError, PermissionError, OSError): + pass - return process.returncode, stdout_data, stderr_data, False + wall_time = time.time() - start_time + return process.returncode, stdout_data, stderr_data, "", wall_time, "" except Exception as e: - return -1, str(e).encode(), b"", False + if process is not None and process.poll() is None: + process.kill() + with contextlib.suppress(Exception): + rem_out, rem_err = process.communicate(timeout=5) + stdout_data += rem_out + stderr_data += rem_err + stdout_data += f"\n[capture error: {e}]\n".encode() + return -1, stdout_data, stderr_data, "", 0.0, "" + + +_SIGNAL_DESCRIPTIONS = { + 1: "SIGHUP — session leader exit or orphaned process cleanup", + 6: "SIGABRT", + 9: "SIGKILL — likely OOM killed", + 11: "SIGSEGV — segmentation fault", + 15: "SIGTERM", +} + + +def _signal_description(sig): + """Return a human-readable description for a process killed by a signal.""" + base = f"Process killed by signal {sig}" + desc = _SIGNAL_DESCRIPTIONS.get(sig) + return f"{base} ({desc})" if desc else base + + +def _create_error_report(prefix, file_name, message, details): + """Create a JUnit XML error report for a test that failed to produce its own. + + Returns a :class:`JUnitXml` object ready to be written to disk. + """ + suite_name = os.path.splitext(file_name)[0] + suite = TestSuite(name=f"{prefix}_{suite_name}") + case = TestCase(name="test_execution", classname=suite_name) + error = Error(message=message) + error.text = details + case.result = error + suite.add_testcase(case) + report = JUnitXml() + report.add_testsuite(suite) + return report -def create_timeout_test_case(test_file, timeout, stdout_data, stderr_data): - """Create a test case entry for a timeout test with captured logs.""" - test_suite = TestSuite(name=f"timeout_{os.path.splitext(os.path.basename(test_file))[0]}") - test_case = TestCase(name="test_execution", classname=os.path.splitext(os.path.basename(test_file))[0]) +def _get_diagnostics(pre_kill_diag=""): + """Return system diagnostics, truncated to 10 000 chars.""" + diag = pre_kill_diag or _capture_system_diagnostics() + if len(diag) > 10000: + diag = diag[:10000] + "\n... (truncated)" + return diag - # Create error message with timeout info and captured logs - error_msg = f"Test timed out after {timeout} seconds" - # Add captured output to error details - details = f"Timeout after {timeout} seconds\n\n" +def _capture_system_diagnostics(): + """Capture system diagnostics (GPU, memory, processes) for crash investigation. - if stdout_data: - details += "=== STDOUT ===\n" - details += stdout_data.decode("utf-8", errors="replace") + "\n" + All errors are caught and reported inline so this never raises. + """ + sections = [] - if stderr_data: - details += "=== STDERR ===\n" - details += stderr_data.decode("utf-8", errors="replace") + "\n" + try: + r = subprocess.run(["nvidia-smi"], capture_output=True, text=True, timeout=10) + if r.stdout: + sections.append(f"--- nvidia-smi ---\n{r.stdout.strip()}") + except Exception as e: + sections.append(f"--- nvidia-smi --- FAILED: {e}") - error = Error(message=error_msg) - error.text = details - test_case.result = error + try: + with open("/proc/meminfo") as f: + lines = f.readlines() + keys = ("MemTotal", "MemFree", "MemAvailable", "Committed_AS", "SwapTotal", "SwapFree") + relevant = [line.strip() for line in lines if any(line.startswith(k) for k in keys)] + if relevant: + sections.append("--- /proc/meminfo ---\n" + "\n".join(relevant)) + except Exception as e: + sections.append(f"--- /proc/meminfo --- FAILED: {e}") + + cgroup_lines = [] + for path in ( + "/sys/fs/cgroup/memory.current", + "/sys/fs/cgroup/memory.max", + "/sys/fs/cgroup/memory.events", + "/sys/fs/cgroup/memory/memory.usage_in_bytes", + "/sys/fs/cgroup/memory/memory.limit_in_bytes", + "/sys/fs/cgroup/memory/memory.oom_control", + ): + try: + with open(path) as f: + cgroup_lines.append(f"{path}: {f.read().strip()}") + except FileNotFoundError: + pass + except Exception as e: + cgroup_lines.append(f"{path}: FAILED ({e})") + if cgroup_lines: + sections.append("--- cgroup memory ---\n" + "\n".join(cgroup_lines)) - test_suite.add_testcase(test_case) - return test_suite + try: + r = subprocess.run(["ps", "auxf"], capture_output=True, text=True, timeout=5) + if r.stdout: + sections.append(f"--- process tree (ps auxf) ---\n{r.stdout.strip()}") + except Exception as e: + sections.append(f"--- process tree --- FAILED: {e}") + + try: + r = subprocess.run(["dmesg", "-T"], capture_output=True, text=True, timeout=5) + if r.stdout: + lines = r.stdout.strip().split("\n") + sections.append("--- dmesg (last 30 lines) ---\n" + "\n".join(lines[-30:])) + except Exception: + pass + + return "\n\n".join(sections) def run_individual_tests(test_files, workspace_root, isaacsim_ci): """Run each test file separately, ensuring one finishes before starting the next.""" failed_tests = [] test_status = {} + xml_reports = [] + cold_cache_applied = False for test_file in test_files: print(f"\n\n🚀 Running {test_file} independently...\n") - # get file name from path file_name = os.path.basename(test_file) env = os.environ.copy() + env["PYTHONFAULTHANDLER"] = "1" - # Determine timeout for this test timeout = test_settings.PER_TEST_TIMEOUTS.get(file_name, test_settings.DEFAULT_TIMEOUT) - # Prepare command - # Note: Command options matter as they are used for cleanups inside AppLauncher + # Read the test file once for cold-cache check. + try: + with open(test_file) as fh: + test_content = fh.read() + except OSError: + test_content = "" + + # The first camera-enabled test in a fresh container compiles shaders + # (~600 s). Give it extra time so that doesn't look like a test timeout. + is_cold_cache_test = not cold_cache_applied and "enable_cameras=True" in test_content + if is_cold_cache_test: + timeout += COLD_CACHE_BUFFER + cold_cache_applied = True + print(f"⏱️ Adding {COLD_CACHE_BUFFER}s cold-cache buffer (timeout now {timeout}s)") + + extra = COLD_CACHE_BUFFER if is_cold_cache_test else 0 + startup_deadline = min(timeout, STARTUP_DEADLINE + extra) + cmd = [ sys.executable, "-m", @@ -159,25 +351,111 @@ def run_individual_tests(test_files, workspace_root, isaacsim_ci): cmd.append("-m") cmd.append("isaacsim_ci") - # Add the test file path last cmd.append(str(test_file)) - # Run test with timeout and capture output - returncode, stdout_data, stderr_data, timed_out = capture_test_output_with_timeout(cmd, timeout, env) - - if timed_out: - print(f"Test {test_file} timed out after {timeout} seconds...") - failed_tests.append(test_file) + report_file = f"tests/test-reports-{str(file_name)}.xml" - # Create a special XML report for timeout tests with captured logs - timeout_suite = create_timeout_test_case(test_file, timeout, stdout_data, stderr_data) - timeout_report = JUnitXml() - timeout_report.add_testsuite(timeout_suite) + # -- Run with retry on startup hang or hard timeout ----------------- + returncode, stdout_data, stderr_data, kill_reason = -1, b"", b"", "" + wall_time, pre_kill_diag = 0.0, "" + startup_hang_attempts = 0 + timeout_attempts = 0 + while True: + with contextlib.suppress(FileNotFoundError): + os.remove(report_file) + + returncode, stdout_data, stderr_data, kill_reason, wall_time, pre_kill_diag = ( + capture_test_output_with_timeout( + cmd, timeout, env, startup_deadline=startup_deadline, report_file=report_file + ) + ) + + has_report = os.path.exists(report_file) + + if kill_reason == "startup_hang" and startup_hang_attempts < STARTUP_HANG_RETRIES: + startup_hang_attempts += 1 + print( + f"⚠️ {test_file}: startup hang detected after {startup_deadline}s" + f" (attempt {startup_hang_attempts}/{STARTUP_HANG_RETRIES + 1}), retrying..." + ) + if stderr_data: + print("=== STDERR (last 5000 chars) ===") + print(stderr_data.decode("utf-8", errors="replace")[-5000:]) + diag = pre_kill_diag or _capture_system_diagnostics() + if len(diag) > 10000: + diag = diag[:10000] + "\n... (truncated)" + print(diag) + continue - # Write timeout report - report_file = f"tests/test-reports-{str(file_name)}.xml" - timeout_report.write(report_file) + if kill_reason == "timeout" and not has_report and timeout_attempts < TIMEOUT_RETRIES: + timeout_attempts += 1 + print( + f"⚠️ {test_file}: timeout detected after {timeout}s" + f" (attempt {timeout_attempts}/{TIMEOUT_RETRIES + 1}), retrying..." + ) + if stdout_data: + print("=== STDOUT (last 5000 chars) ===") + print(stdout_data.decode("utf-8", errors="replace")[-5000:]) + if stderr_data: + print("=== STDERR (last 5000 chars) ===") + print(stderr_data.decode("utf-8", errors="replace")[-5000:]) + diag = pre_kill_diag or _capture_system_diagnostics() + if len(diag) > 10000: + diag = diag[:10000] + "\n... (truncated)" + print(diag) + continue + break + + # -- Resolve result from kill_reason and report file ---------------- + has_report = os.path.exists(report_file) + + if kill_reason == "startup_hang": + diag = _get_diagnostics(pre_kill_diag) + print(f"⚠️ {test_file}: startup hang after {STARTUP_HANG_RETRIES + 1} attempt(s)") + print(diag) + + msg = f"Startup hang after {startup_deadline}s (retried {STARTUP_HANG_RETRIES} time(s))" + details = f"{msg}\n\n=== SYSTEM DIAGNOSTICS ===\n{diag}\n\n" + if stderr_data: + details += "=== STDERR (last 5000 chars) ===\n" + details += stderr_data.decode("utf-8", errors="replace")[-5000:] + "\n" + if stdout_data: + details += "=== STDOUT (last 2000 chars) ===\n" + details += stdout_data.decode("utf-8", errors="replace")[-2000:] + "\n" + + error_report = _create_error_report("startup_hang", file_name, msg, details) + error_report.write(report_file) + xml_reports.append(error_report) + failed_tests.append(test_file) + test_status[test_file] = { + "errors": 1, + "failures": 0, + "skipped": 0, + "tests": 1, + "result": "STARTUP_HANG", + "time_elapsed": 0.0, + "wall_time": wall_time, + } + continue + if kill_reason == "timeout" and not has_report: + diag = _get_diagnostics(pre_kill_diag) + print(f"Test {test_file} timed out after {timeout} seconds...") + print(diag) + + msg = f"Timeout after {timeout} seconds (retried {timeout_attempts} time(s))" + details = f"{msg}\n\n=== SYSTEM DIAGNOSTICS ===\n{diag}\n\n" + if stdout_data: + details += "=== STDOUT (last 5000 chars) ===\n" + details += stdout_data.decode("utf-8", errors="replace")[-5000:] + "\n" + if stderr_data: + details += "=== STDERR (last 5000 chars) ===\n" + details += stderr_data.decode("utf-8", errors="replace")[-5000:] + "\n" + + error_report = _create_error_report("timeout", file_name, msg, details) + error_report.write(report_file) + xml_reports.append(error_report) + failed_tests.append(test_file) test_status[test_file] = { "errors": 1, "failures": 0, @@ -185,41 +463,55 @@ def run_individual_tests(test_files, workspace_root, isaacsim_ci): "tests": 1, "result": "TIMEOUT", "time_elapsed": timeout, + "wall_time": wall_time, } continue - if returncode != 0: - failed_tests.append(test_file) - - # check report for any failures - report_file = f"tests/test-reports-{str(file_name)}.xml" - if not os.path.exists(report_file): - print(f"Warning: Test report not found at {report_file}") + if not has_report: + reason = ( + _signal_description(-returncode) + if returncode < 0 + else f"Process exited with code {returncode} but produced no report" + ) + diag = _get_diagnostics() + print(f"⚠️ {test_file}: {reason}") + print(diag) + + details = f"{reason}\n\n=== SYSTEM DIAGNOSTICS ===\n{diag}\n\n" + if stdout_data: + details += "=== STDOUT (last 2000 chars) ===\n" + details += stdout_data.decode("utf-8", errors="replace")[-2000:] + "\n" + if stderr_data: + details += "=== STDERR (last 2000 chars) ===\n" + details += stderr_data.decode("utf-8", errors="replace")[-2000:] + "\n" + + error_report = _create_error_report("crash", file_name, reason, details) + error_report.write(report_file) + xml_reports.append(error_report) failed_tests.append(test_file) test_status[test_file] = { - "errors": 1, # Assume error since we can't read the report + "errors": 1, "failures": 0, "skipped": 0, - "tests": 0, - "result": "FAILED", + "tests": 1, + "result": "CRASHED", "time_elapsed": 0.0, + "wall_time": wall_time, } continue + # -- Report file exists: parse actual test results ----------------- + if kill_reason in ("shutdown_hang", "timeout"): + print(f"⚠️ {test_file}: shutdown hanged (killed after {wall_time:.0f}s, test had completed)") + try: report = JUnitXml.fromfile(report_file) - - # Rename test suites to be more descriptive for suite in report: if suite.name == "pytest": - # Remove .py extension and use the filename as the test suite name - suite_name = os.path.splitext(file_name)[0] - suite.name = suite_name - - # Write the updated report back + suite.name = os.path.splitext(file_name)[0] report.write(report_file) + xml_reports.append(report) - # Parse the integer values with None handling errors = int(report.errors) if report.errors is not None else 0 failures = int(report.failures) if report.failures is not None else 0 skipped = int(report.skipped) if report.skipped is not None else 0 @@ -235,25 +527,109 @@ def run_individual_tests(test_files, workspace_root, isaacsim_ci): "tests": 0, "result": "FAILED", "time_elapsed": 0.0, + "wall_time": wall_time, } continue - # Check if there were any failures - if errors > 0 or failures > 0: + has_test_failures = errors > 0 or failures > 0 + shutdown_hanged = kill_reason in ("shutdown_hang", "timeout") and not has_test_failures + + if has_test_failures or (returncode != 0 and not shutdown_hanged): failed_tests.append(test_file) + if shutdown_hanged: + result = "passed (shutdown hanged)" + elif has_test_failures: + result = "FAILED" + else: + result = "passed" + test_status[test_file] = { "errors": errors, "failures": failures, "skipped": skipped, "tests": tests, - "result": "FAILED" if errors > 0 or failures > 0 else "passed", + "result": result, "time_elapsed": time_elapsed, + "wall_time": wall_time, } print("~~~~~~~~~~~~ Finished running all tests") - return failed_tests, test_status + return failed_tests, test_status, xml_reports + + +def _collect_test_files( + source_dirs, + filter_pattern, + exclude_pattern, + include_files, + quarantined_only, + curobo_only, +): + """Collect test files from source directories, applying all active filters.""" + test_files = [] + for source_dir in source_dirs: + if not os.path.exists(source_dir): + print(f"Error: source directory not found at {source_dir}") + pytest.exit("Source directory not found", returncode=1) + + for root, _, files in os.walk(source_dir): + for file in files: + if not (file.startswith("test_") and file.endswith(".py")): + continue + + # Mode-exclusive filters (each bypasses TESTS_TO_SKIP) + if quarantined_only: + if file not in test_settings.QUARANTINED_TESTS: + continue + elif curobo_only: + if file not in test_settings.CUROBO_TESTS: + continue + else: + # An explicit include_files entry overrides TESTS_TO_SKIP, allowing + # dedicated jobs (e.g. test-environments-training) to run tests that + # are otherwise excluded from general CI runs. + if file in test_settings.TESTS_TO_SKIP and file not in include_files: + print(f"Skipping {file} as it's in the skip list") + continue + + full_path = os.path.join(root, file) + + if filter_pattern and filter_pattern not in full_path: + print(f"Skipping {full_path} (does not match include pattern: {filter_pattern})") + continue + if exclude_pattern and any(p.strip() in full_path for p in exclude_pattern.split(",")): + print(f"Skipping {full_path} (matches exclude pattern: {exclude_pattern})") + continue + if include_files and file not in include_files: + print(f"Skipping {full_path} (not in include files list)") + continue + + test_files.append(full_path) + + # Apply file-level sharding: sort deterministically, then select every Nth file. + # Skip when include_files is set — in that case the test's own conftest handles + # sharding at the test-item level (e.g. parametrized test cases). + shard_index = os.environ.get("TEST_SHARD_INDEX", "") + shard_count = os.environ.get("TEST_SHARD_COUNT", "") + if shard_index and shard_count and not include_files: + shard_index = int(shard_index) + shard_count = int(shard_count) + test_files.sort() + test_files = [f for i, f in enumerate(test_files) if i % shard_count == shard_index] + print(f"Shard {shard_index}/{shard_count}: selected {len(test_files)} test files") + + return test_files + + +def _write_empty_report(): + """Write an empty JUnit XML report so downstream CI steps find a valid file.""" + os.makedirs("tests", exist_ok=True) + result_file = os.environ.get("TEST_RESULT_FILE", "full_report.xml") + report = JUnitXml() + report.write(f"tests/{result_file}") + print(f"Wrote empty report to tests/{result_file}") def pytest_sessionstart(session): @@ -268,9 +644,20 @@ def pytest_sessionstart(session): # Get filter pattern from environment variable or command line filter_pattern = os.environ.get("TEST_FILTER_PATTERN", "") exclude_pattern = os.environ.get("TEST_EXCLUDE_PATTERN", "") + include_files_str = os.environ.get("TEST_INCLUDE_FILES", "") + quarantined_only = os.environ.get("TEST_QUARANTINED_ONLY", "false") == "true" + curobo_only = os.environ.get("TEST_CUROBO_ONLY", "false") == "true" isaacsim_ci = os.environ.get("ISAACSIM_CI_SHORT", "false") == "true" + # Parse include files list (comma-separated paths) + include_files = set() + if include_files_str: + for f in include_files_str.split(","): + f = f.strip() + if f: + include_files.add(os.path.basename(f)) + # Also try to get from pytest config if hasattr(session.config, "option") and hasattr(session.config.option, "filter_pattern"): filter_pattern = filter_pattern or getattr(session.config.option, "filter_pattern", "") @@ -282,39 +669,25 @@ def pytest_sessionstart(session): print("=" * 50) print(f"Filter pattern: '{filter_pattern}'") print(f"Exclude pattern: '{exclude_pattern}'") + print(f"Include files: {include_files if include_files else 'none'}") + print(f"Quarantined-only mode: {quarantined_only}") + print(f"Curobo-only mode: {curobo_only}") print(f"TEST_FILTER_PATTERN env var: '{os.environ.get('TEST_FILTER_PATTERN', 'NOT_SET')}'") print(f"TEST_EXCLUDE_PATTERN env var: '{os.environ.get('TEST_EXCLUDE_PATTERN', 'NOT_SET')}'") + print(f"TEST_INCLUDE_FILES env var: '{os.environ.get('TEST_INCLUDE_FILES', 'NOT_SET')}'") + print(f"TEST_QUARANTINED_ONLY env var: '{os.environ.get('TEST_QUARANTINED_ONLY', 'NOT_SET')}'") + print(f"TEST_CUROBO_ONLY env var: '{os.environ.get('TEST_CUROBO_ONLY', 'NOT_SET')}'") print("=" * 50) # Get all test files in the source directories - test_files = [] - - for source_dir in source_dirs: - if not os.path.exists(source_dir): - print(f"Error: source directory not found at {source_dir}") - pytest.exit("Source directory not found", returncode=1) - - for root, _, files in os.walk(source_dir): - for file in files: - if file.startswith("test_") and file.endswith(".py"): - # Skip if the file is in TESTS_TO_SKIP - if file in test_settings.TESTS_TO_SKIP: - print(f"Skipping {file} as it's in the skip list") - continue - - full_path = os.path.join(root, file) - - # Apply include filter - if filter_pattern and filter_pattern not in full_path: - print(f"Skipping {full_path} (does not match include pattern: {filter_pattern})") - continue - - # Apply exclude filter - if exclude_pattern and exclude_pattern in full_path: - print(f"Skipping {full_path} (matches exclude pattern: {exclude_pattern})") - continue - - test_files.append(full_path) + test_files = _collect_test_files( + source_dirs, + filter_pattern, + exclude_pattern, + include_files, + quarantined_only, + curobo_only, + ) if isaacsim_ci: new_test_files = [] @@ -325,6 +698,14 @@ def pytest_sessionstart(session): test_files = new_test_files if not test_files: + if quarantined_only: + print("No quarantined tests configured — nothing to run.") + _write_empty_report() + pytest.exit("No quarantined tests configured", returncode=0) + if filter_pattern: + print(f"No test files found matching filter pattern '{filter_pattern}' — nothing to run.") + _write_empty_report() + pytest.exit("No test files found for filter", returncode=0) print("No test files found in source directory") pytest.exit("No test files found", returncode=1) @@ -333,21 +714,20 @@ def pytest_sessionstart(session): print(f" - {test_file}") # Run all tests individually - failed_tests, test_status = run_individual_tests(test_files, workspace_root, isaacsim_ci) + failed_tests, test_status, xml_reports = run_individual_tests(test_files, workspace_root, isaacsim_ci) print("failed tests:", failed_tests) # Collect reports print("~~~~~~~~~ Collecting final report...") - # create new full report + # Merge in-memory report objects collected during the test run. Reading the + # on-disk files again risks losing elements if the junitparser + # read/write round-trip does not preserve them faithfully. full_report = JUnitXml() - # read all reports and merge them - for report in os.listdir("tests"): - if report.endswith(".xml"): - print(report) - report_file = JUnitXml.fromfile(f"tests/{report}") - full_report += report_file + for xml_report in xml_reports: + print(xml_report) + full_report += xml_report print("~~~~~~~~~~~~ Writing final report...") # write content to full report result_file = os.environ.get("TEST_RESULT_FILE", "full_report.xml") @@ -359,9 +739,11 @@ def pytest_sessionstart(session): # print test status in a nice table # Calculate the number and percentage of passing tests num_tests = len(test_status) - num_passing = len([test_path for test_path in test_files if test_status[test_path]["result"] == "passed"]) - num_failing = len([test_path for test_path in test_files if test_status[test_path]["result"] == "FAILED"]) - num_timeout = len([test_path for test_path in test_files if test_status[test_path]["result"] == "TIMEOUT"]) + num_passing = len([p for p in test_files if test_status[p]["result"].startswith("passed")]) + num_failing = len([p for p in test_files if test_status[p]["result"] == "FAILED"]) + num_timeout = len([p for p in test_files if test_status[p]["result"] == "TIMEOUT"]) + num_crashed = len([p for p in test_files if test_status[p]["result"] == "CRASHED"]) + num_startup_hang = len([p for p in test_files if test_status[p]["result"] == "STARTUP_HANG"]) if num_tests == 0: passing_percentage = 100 @@ -377,24 +759,25 @@ def pytest_sessionstart(session): summary_str += f"Total: {num_tests}\n" summary_str += f"Passing: {num_passing}\n" summary_str += f"Failing: {num_failing}\n" + summary_str += f"Crashed: {num_crashed}\n" + summary_str += f"Startup Hang: {num_startup_hang}\n" summary_str += f"Timeout: {num_timeout}\n" summary_str += f"Passing Percentage: {passing_percentage:.2f}%\n" - # Print time elapsed in hours, minutes, seconds - total_time = sum([test_status[test_path]["time_elapsed"] for test_path in test_files]) + total_wall = sum(test_status[test_path]["wall_time"] for test_path in test_files) + total_test = sum(test_status[test_path]["time_elapsed"] for test_path in test_files) - summary_str += f"Total Time Elapsed: {total_time // 3600}h" - summary_str += f"{total_time // 60 % 60}m" - summary_str += f"{total_time % 60:.2f}s" + summary_str += f"Total Wall Time: {total_wall // 3600:.0f}h{total_wall // 60 % 60:.0f}m{total_wall % 60:.2f}s\n" + summary_str += f"Total Test Time: {total_test // 3600:.0f}h{total_test // 60 % 60:.0f}m{total_test % 60:.2f}s" summary_str += "\n\n=======================\n" summary_str += "Per Test Result Summary\n" summary_str += "=======================\n" - # Construct table of results per test - per_test_result_table = PrettyTable(field_names=["Test Path", "Result", "Time (s)", "# Tests"]) + per_test_result_table = PrettyTable(field_names=["Test Path", "Result", "Test (s)", "Wall (s)", "# Tests"]) per_test_result_table.align["Test Path"] = "l" - per_test_result_table.align["Time (s)"] = "r" + per_test_result_table.align["Test (s)"] = "r" + per_test_result_table.align["Wall (s)"] = "r" for test_path in test_files: num_tests_passed = ( test_status[test_path]["tests"] @@ -407,6 +790,7 @@ def pytest_sessionstart(session): test_path, test_status[test_path]["result"], f"{test_status[test_path]['time_elapsed']:0.2f}", + f"{test_status[test_path]['wall_time']:0.2f}", f"{num_tests_passed}/{test_status[test_path]['tests']}", ] ) @@ -417,4 +801,7 @@ def pytest_sessionstart(session): print(summary_str) # Exit pytest after custom execution to prevent normal pytest from overwriting our report - pytest.exit("Custom test execution completed", returncode=0 if num_failing == 0 else 1) + pytest.exit( + "Custom test execution completed", + returncode=0 if (num_failing == 0 and num_timeout == 0 and num_crashed == 0 and num_startup_hang == 0) else 1, + ) diff --git a/tools/run_install_ci.py b/tools/run_install_ci.py new file mode 100755 index 000000000000..ba2dedd1fe60 --- /dev/null +++ b/tools/run_install_ci.py @@ -0,0 +1,364 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unified runner for Isaac Lab installation CI tests. + +Modes + +``docker`` + Build a clean-room Docker container and execute pytest inside it. +``native`` + Run pytest directly on the host (no Docker). + +Examples + +.. code-block:: bash + + # Run all tests inside Docker (Ubuntu 24.04) + tools/run_install_ci.py docker + + # Run only pip tests with a custom base image + tools/run_install_ci.py docker --base-image ubuntu:22.04 -- -vs -k "testname" + + # Run with GPU support (passes --gpus all to Docker) + tools/run_install_ci.py docker --gpu + + # Filter by marker (e.g. only uv tests, only slow tests) + tools/run_install_ci.py docker -- -m uv + tools/run_install_ci.py docker --gpu -- -m "slow and gpu" + + # Filter by bug ID (dashes become underscores) + tools/run_install_ci.py docker --gpu -- -m nvbugs_5968136 + + # Drop into a shell for debugging + tools/run_install_ci.py docker --shell + + # Run natively (no Docker) + tools/run_install_ci.py native -- -vs + + # Pass a pre-built wheel + tools/run_install_ci.py docker --wheel /tmp/isaaclab-3.0.0-py3-none-any.whl +""" + +from __future__ import annotations + +import argparse +import os +import shutil +import subprocess +import sys +import time +from pathlib import Path + +_DIM = "\033[2m" +_MAGENTA = "\033[95m" +_RESET = "\033[0m" + +# Controls whether run_cmd() streams output by default. +stream_output: bool = False + + +def run_cmd( + args: list[str], + *, + cwd: str | Path | None = None, + env: dict[str, str] | None = None, + timeout: int = 600, + check: bool = True, + stream: bool | None = None, +) -> subprocess.CompletedProcess: + """Run a command, merging *env* into the current environment. + + Args: + args: Command and arguments to run. + cwd: Working directory for the subprocess. + env: Extra environment variables merged into the current environment. + timeout: Timeout in seconds. + check: Raise CalledProcessError on non-zero exit. + stream: When True, stream stdout/stderr to the console in + real time instead of capturing them. + + Returns: + The CompletedProcess; raises CalledProcessError when *check* is + True and return code != 0. + """ + if stream is None: + stream = stream_output + merged_env = {**os.environ, **(env or {})} + cmd_str = " ".join(str(a) for a in args) + if stream: + sys.stdout.write(f"{_MAGENTA}[COMMAND] {cmd_str}{_RESET}\n") + sys.stdout.flush() + t0 = time.monotonic() + proc = subprocess.Popen( + [str(a) for a in args], + cwd=str(cwd) if cwd else None, + env=merged_env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + lines: list[str] = [] + try: + for line in proc.stdout: + lines.append(line) + sys.stdout.write(f"{_DIM}{line}{_RESET}") + sys.stdout.flush() + except Exception: + proc.kill() + raise + try: + proc.wait(timeout=timeout) + except subprocess.TimeoutExpired: + proc.kill() + proc.wait() + raise + elapsed = time.monotonic() - t0 + sys.stdout.write(f"{_MAGENTA}[{elapsed:.1f}s]{_RESET}\n") + sys.stdout.flush() + result = subprocess.CompletedProcess( + args=proc.args, + returncode=proc.returncode, + stdout="".join(lines), + stderr="", + ) + if check and result.returncode != 0: + raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) + return result + return subprocess.run( + [str(a) for a in args], + cwd=str(cwd) if cwd else None, + env=merged_env, + capture_output=True, + text=True, + timeout=timeout, + check=check, + ) + + +def _find_repo_root() -> Path: + """Walk up from CWD or this file to find the repo root.""" + for anchor in [Path.cwd(), Path(__file__).resolve().parent]: + for parent in [anchor] + list(anchor.parents): + if (parent / "isaaclab.sh").exists(): + return parent + raise FileNotFoundError("Could not locate IsaacLab repository root") + + +# Docker mode + + +def _cmd_docker(args: argparse.Namespace) -> int: + """Build the Docker image and run tests inside the container based on *args*.""" + + repo_root = _find_repo_root() + dockerfile = repo_root / "docker" / "Dockerfile.installci" + image_tag = f"isaaclab-installci:{args.base_image.replace(':', '-').replace('/', '-')}" + + # Build the Docker image + build_cmd = [ + "docker", + "build", + "--build-arg", + f"BASE_IMAGE={args.base_image}", + "-f", + str(dockerfile), + "-t", + image_tag, + "--progress=plain", + ] + + if args.no_cache: + build_cmd.append("--no-cache") + + build_cmd.append(str(repo_root)) + + result = run_cmd(build_cmd, check=False, stream=True) + if result.returncode != 0: + print(f"Docker build failed (exit {result.returncode})") + return result.returncode + + print(f"Docker image built successfully: {image_tag}") + + # Run + docker_run_cmd: list[str] = [ + "docker", + "run", + "--rm", + "--network=host", + ] + + if args.gpu: + docker_run_cmd.extend(["--gpus", "all"]) + + # Persist pip and uv caches across runs via named Docker volumes + if not args.no_pip_cache: + docker_run_cmd.extend(["-v", "isaaclab-installci-pip-cache:/root/.cache/pip"]) + if not args.no_uv_cache: + docker_run_cmd.extend(["-v", "isaaclab-installci-uv-cache:/root/.cache/uv"]) + + # Pass environment variables + docker_run_cmd.extend(["-e", "OMNI_KIT_ACCEPT_EULA=Y"]) + docker_run_cmd.extend(["-e", "ACCEPT_EULA=Y"]) + + if args.results_dir: + results_abs = Path(args.results_dir).resolve() + results_abs.mkdir(parents=True, exist_ok=True) + docker_run_cmd.extend(["-v", f"{results_abs}:/tmp/results"]) + + if args.wheel: + wheel_abs = Path(args.wheel).resolve() + container_wheel = f"/tmp/wheels/{wheel_abs.name}" + docker_run_cmd.extend(["-v", f"{wheel_abs}:{container_wheel}:ro"]) + docker_run_cmd.extend(["-e", f"ISAACLAB_WHEEL={container_wheel}"]) + + if args.shell: + # Interactive debugging mode + docker_run_cmd.extend(["-it", "--entrypoint", "bash", image_tag]) + else: + # Test execution mode + pytest_args = args.pytest_args or ["--tb=short"] + if args.results_dir: + pytest_args = ["--junitxml=/tmp/results/results.xml"] + pytest_args + docker_run_cmd.extend([image_tag] + pytest_args) + + print(f"{_MAGENTA}[COMMAND] {' '.join(docker_run_cmd)}{_RESET}") + + t0 = time.monotonic() + try: + ret = subprocess.call(docker_run_cmd, timeout=5400) + except subprocess.TimeoutExpired: + print("Docker run timed out after 90 minutes", file=sys.stderr) + ret = 124 + elapsed = time.monotonic() - t0 + print(f"{_MAGENTA}[{elapsed:.1f}s]{_RESET}") + return ret + + +# Native mode + + +def _cmd_native(args: argparse.Namespace) -> int: + """Run tests directly on the host OS.""" + + repo_root = _find_repo_root() + test_dir = repo_root / "source" / "isaaclab" / "test" / "install_ci" + + env = os.environ.copy() + if args.wheel: + env["ISAACLAB_WHEEL"] = str(Path(args.wheel).resolve()) + + pytest_args = args.pytest_args or ["--tb=short"] + cmd = [sys.executable, "-m", "pytest"] + pytest_args + + print(f"{_MAGENTA}[COMMAND] {' '.join(cmd)}{_RESET}") + + t0 = time.monotonic() + ret = subprocess.call(cmd, cwd=str(test_dir), env=env) + elapsed = time.monotonic() - t0 + print(f"{_MAGENTA}[{elapsed:.1f}s]{_RESET}") + return ret + + +# CLI + + +def main() -> int: + """Parse CLI arguments and dispatch to the docker or native test runner. + + Returns: + Exit code: 0 on success, non-zero on failure. + """ + parser = argparse.ArgumentParser( + description="Isaac Lab Installation CI test runner", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog="""\ +docker options: + --base-image IMAGE Docker base image (default: ubuntu:24.04) + --gpu Pass --gpus all to docker run + --shell Drop into interactive bash instead of running tests + --no-cache Build Docker image without layer cache + --no-pip-cache Disable persistent pip cache volume + --no-uv-cache Disable persistent uv cache volume + --results-dir DIR Host directory for test results (auto-adds --junitxml) + --wheel PATH Path to pre-built isaaclab wheel file + +native options: + --wheel PATH Path to pre-built isaaclab wheel file + +pytest arguments: + Pass pytest options after '--'. Without '--', defaults to '-sv --tb=short'. + + examples: + %(prog)s docker # run all tests in Docker + %(prog)s docker --base-image ubuntu:22.04 -- -vs -k "testname" # custom base image + %(prog)s docker --gpu # GPU support (--gpus all) + %(prog)s docker -- -m uv # filter by marker + %(prog)s docker --gpu -- -m "slow and gpu" # combine markers with GPU + %(prog)s docker --gpu -- -m nvbugs_5968136 # filter by bug ID + %(prog)s docker --shell # drop into shell for debugging + %(prog)s native -- -vs # run natively (no Docker) + %(prog)s docker --wheel /tmp/isaaclab.whl # pass a pre-built wheel + %(prog)s docker -- --collect-only # list tests without running +""", + ) + sub = parser.add_subparsers(dest="mode") + + # docker subcommand + docker_p = sub.add_parser("docker", help="Build container and run tests inside Docker") + docker_p.add_argument( + "--base-image", + default="ubuntu:24.04", + help="Docker base image (default: ubuntu:24.04)", + ) + docker_p.add_argument("--gpu", action="store_true", help="Pass --gpus all to docker run") + docker_p.add_argument( + "--shell", action="store_true", help="Drop into an interactive bash shell instead of running tests" + ) + docker_p.add_argument("--no-cache", action="store_true", help="Build Docker image without cache") + docker_p.add_argument("--no-pip-cache", action="store_true", help="Disable persistent pip cache volume") + docker_p.add_argument("--no-uv-cache", action="store_true", help="Disable persistent uv cache volume") + docker_p.add_argument( + "--results-dir", type=str, default=None, help="Host directory for test results (auto-adds --junitxml)" + ) + docker_p.add_argument("--wheel", type=str, default=None, help="Path to pre-built isaaclab wheel file") + docker_p.add_argument("pytest_args", nargs="*", help="Arguments forwarded to pytest (use -- to separate)") + + # native subcommand + native_p = sub.add_parser("native", help="Run tests directly on the host OS") + native_p.add_argument("--wheel", type=str, default=None, help="Path to pre-built isaaclab wheel file") + native_p.add_argument("pytest_args", nargs="*", help="Arguments forwarded to pytest (use -- to separate)") + + # If '--' is in sys.argv, split there so pytest args are captured correctly + argv = sys.argv[1:] + if "--" in argv: + split_idx = argv.index("--") + our_args = argv[:split_idx] + pytest_extra = argv[split_idx + 1 :] + else: + our_args = argv + pytest_extra = [] + + args = parser.parse_args(our_args) + + # Merge any args after '--' into pytest_args + if pytest_extra: + args.pytest_args = (args.pytest_args or []) + pytest_extra + + if args.mode == "docker": + if not shutil.which("docker"): + print("Error: docker is not available on PATH", file=sys.stderr) + return 1 + return _cmd_docker(args) + elif args.mode == "native": + return _cmd_native(args) + else: + parser.print_help() + return 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tools/template/templates/extension/setup.py b/tools/template/templates/extension/setup.py index e991708e34d8..4c7dcc237e70 100644 --- a/tools/template/templates/extension/setup.py +++ b/tools/template/templates/extension/setup.py @@ -34,14 +34,11 @@ install_requires=INSTALL_REQUIRES, license="Apache-2.0", include_package_data=True, - python_requires=">=3.10", + python_requires=">=3.12", classifiers=[ "Natural Language :: English", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Isaac Sim :: 4.5.0", - "Isaac Sim :: 5.0.0", - "Isaac Sim :: 5.1.0", + "Programming Language :: Python :: 3.12", + "Isaac Sim :: 6.0.0", ], zip_safe=False, ) diff --git a/tools/template/templates/external/.vscode/tools/setup_vscode.py b/tools/template/templates/external/.vscode/tools/setup_vscode.py index f8c61b76c5b7..8d29dafee080 100644 --- a/tools/template/templates/external/.vscode/tools/setup_vscode.py +++ b/tools/template/templates/external/.vscode/tools/setup_vscode.py @@ -12,49 +12,38 @@ when the "setup_python_env.sh" is run as part of the vs-code launch configuration. """ -import argparse -import os -import pathlib -import platform import re +import subprocess import sys +import os +import pathlib -PROJECT_DIR = pathlib.Path(__file__).parents[2] -"""Path to the the project directory.""" - -try: - import isaacsim # noqa: F401 - isaacsim_dir = os.environ.get("ISAAC_PATH", "") -except ModuleNotFoundError or ImportError: - # Create a parser to get the isaac-sim path - parser = argparse.ArgumentParser(description="Setup the VSCode settings for the project.") - parser.add_argument("--isaac_path", type=str, help="The absolute path to the Isaac Sim installation.") - args = parser.parse_args() +ISAACLAB_DIR = pathlib.Path(__file__).parents[2] +"""Path to the Isaac Lab directory.""" - # parse the isaac-sim directory - isaacsim_dir = args.isaac_path - # check if the isaac-sim directory is provided - if not os.path.exists(isaacsim_dir): - raise FileNotFoundError( - f"Could not find the isaac-sim directory: {isaacsim_dir}. Please provide the correct path to the Isaac Sim" - " installation." - ) -except EOFError: - print("Unable to trigger EULA acceptance. This is likely due to the script being run in a non-interactive shell.") - print("Please run the script in an interactive shell to accept the EULA.") - print("Skipping the setup of the VSCode settings...") - sys.exit(0) +# Try to find IsaacSim dir +_isaacsim_probe = subprocess.run( + [sys.executable, "-c", "import isaacsim; import os; print(os.environ.get('ISAAC_PATH', ''))"], + capture_output=True, + text=True, + check=False, + # avoid EULA prompt + stdin=subprocess.DEVNULL, +) +if _isaacsim_probe.returncode == 0 and _isaacsim_probe.stdout.strip(): + isaacsim_dir = _isaacsim_probe.stdout.strip() +else: + isaacsim_dir = os.path.join(ISAACLAB_DIR, "_isaac_sim") # check if the isaac-sim directory exists if not os.path.exists(isaacsim_dir): - raise FileNotFoundError( - f"Could not find the isaac-sim directory: {isaacsim_dir}. There are two possible reasons for this:\n\t1. The" - " Isaac Sim directory does not exist as provided CLI path.\n\t2. The script couldn't import the 'isaacsim'" - " package. This could be due to the 'isaacsim' package not being installed in the Python" - " environment.\n\nPlease make sure that the Isaac Sim directory exists or that the 'isaacsim' package is" - " installed." + print( + f"[WARN] Could not find the isaac-sim directory: {isaacsim_dir}." + "\n\tIsaac Sim does not appear to be installed. VS Code settings will be generated" + "\n\twithout Isaac Sim extra paths." ) + isaacsim_dir = "" ISAACSIM_DIR = isaacsim_dir """Path to the isaac-sim directory.""" @@ -79,7 +68,7 @@ def overwrite_python_analysis_extra_paths(isaaclab_settings: str) -> str: # we use the isaac-sim settings file to get the python.analysis.extraPaths for kit extensions # if this file does not exist, we will not add any extra paths - if os.path.exists(isaacsim_vscode_filename): + if ISAACSIM_DIR and os.path.exists(isaacsim_vscode_filename): # read the path names from the isaac-sim settings file with open(isaacsim_vscode_filename) as f: vscode_settings = f.read() @@ -98,20 +87,13 @@ def overwrite_python_analysis_extra_paths(isaaclab_settings: str) -> str: path_names = [path_name for path_name in path_names if len(path_name) > 0] # change the path names to be relative to the Isaac Lab directory - rel_path = os.path.relpath(ISAACSIM_DIR, PROJECT_DIR) + rel_path = os.path.relpath(ISAACSIM_DIR, ISAACLAB_DIR) path_names = ['"${workspaceFolder}/' + rel_path + "/" + path_name + '"' for path_name in path_names] else: path_names = [] - print( - f"[WARN] Could not find Isaac Sim VSCode settings: {isaacsim_vscode_filename}." - "\n\tThis will result in missing 'python.analysis.extraPaths' in the VSCode" - "\n\tsettings, which limits the functionality of the Python language server." - "\n\tHowever, it does not affect the functionality of the Isaac Lab project." - "\n\tWe are working on a fix for this issue with the Isaac Sim team." - ) # add the path names that are in the Isaac Lab extensions directory - isaaclab_extensions = os.listdir(os.path.join(PROJECT_DIR, "source")) + isaaclab_extensions = os.listdir(os.path.join(ISAACLAB_DIR, "source")) path_names.extend(['"${workspaceFolder}/source/' + ext + '"' for ext in isaaclab_extensions]) # combine them into a single string @@ -144,17 +126,15 @@ def overwrite_default_python_interpreter(isaaclab_settings: str) -> str: The settings string with overwritten default python interpreter. """ # read executable name - python_exe = os.path.normpath(sys.executable) - - # replace with Isaac Sim's python.sh or python.bat scripts to make sure python with correct - # source paths is set as default - if f"kit{os.sep}python{os.sep}bin{os.sep}python" in python_exe: - # Check if the OS is Windows or Linux to use appropriate shell file - if platform.system() == "Windows": - python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.bat") - else: - python_exe = python_exe.replace(f"kit{os.sep}python{os.sep}bin{os.sep}python3", "python.sh") - + python_exe = sys.executable.replace("\\", "/") + + # We make an exception for replacing the default interpreter if the + # path (/kit/python/bin/python3) indicates that we are using a local/container + # installation of IsaacSim. We will preserve the calling script as the default, python.sh. + # We want to use python.sh because it modifies LD_LIBRARY_PATH and PYTHONPATH + # (among other envars) that we need for all of our dependencies to be accessible. + if "kit/python/bin/python3" in python_exe: + return isaaclab_settings # replace the default python interpreter in the Isaac Lab settings file with the path to the # python interpreter in the Isaac Lab directory isaaclab_settings = re.sub( @@ -169,7 +149,7 @@ def overwrite_default_python_interpreter(isaaclab_settings: str) -> str: def main(): # Isaac Lab template settings - isaaclab_vscode_template_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "settings.template.json") + isaaclab_vscode_template_filename = os.path.join(ISAACLAB_DIR, ".vscode", "tools", "settings.template.json") # make sure the Isaac Lab template settings file exists if not os.path.exists(isaaclab_vscode_template_filename): raise FileNotFoundError( @@ -195,13 +175,13 @@ def main(): isaaclab_settings = header_message + isaaclab_settings # write the Isaac Lab settings file - isaaclab_vscode_filename = os.path.join(PROJECT_DIR, ".vscode", "settings.json") + isaaclab_vscode_filename = os.path.join(ISAACLAB_DIR, ".vscode", "settings.json") with open(isaaclab_vscode_filename, "w") as f: f.write(isaaclab_settings) # copy the launch.json file if it doesn't exist - isaaclab_vscode_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "launch.json") - isaaclab_vscode_template_launch_filename = os.path.join(PROJECT_DIR, ".vscode", "tools", "launch.template.json") + isaaclab_vscode_launch_filename = os.path.join(ISAACLAB_DIR, ".vscode", "launch.json") + isaaclab_vscode_template_launch_filename = os.path.join(ISAACLAB_DIR, ".vscode", "tools", "launch.template.json") if not os.path.exists(isaaclab_vscode_launch_filename): # read template launch settings with open(isaaclab_vscode_template_launch_filename) as f: diff --git a/tools/test_settings.py b/tools/test_settings.py index 16438f7f3c59..66832541e5cc 100644 --- a/tools/test_settings.py +++ b/tools/test_settings.py @@ -12,37 +12,105 @@ ISAACLAB_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) """Path to the root directory of the Isaac Lab repository.""" -DEFAULT_TIMEOUT = 300 +DEFAULT_TIMEOUT = 1000 """The default timeout for each test in seconds.""" + PER_TEST_TIMEOUTS = { - "test_articulation.py": 500, - "test_stage_in_memory.py": 500, - "test_environments.py": 2500, # This test runs through all the environments for 100 steps each + "test_articulation.py": 1500, + "test_stage_in_memory.py": 1000, + "test_imu.py": 1000, + "test_environments.py": 10000, # This test runs through all the environments for 100 steps each "test_environments_with_stage_in_memory.py": ( - 2500 + 10000 ), # Like the above, with stage in memory and with and without fabric cloning "test_environment_determinism.py": 1000, # This test runs through many the environments for 100 steps each + "test_pickplace_stack_environments.py": 10000, # This test runs through PickPlace and Stack environments "test_factory_environments.py": 1000, # This test runs through Factory environments for 100 steps each "test_multi_agent_environments.py": 800, # This test runs through multi-agent environments for 100 steps each - "test_generate_dataset.py": 500, # This test runs annotation for 10 demos and generation until one succeeds + "test_generate_dataset_franka_state.py": 10000, # This test runs annotation for 10 demos and generation for 1 demo + "test_generate_dataset_franka_visuomotor.py": 10000, # This test runs generation until one succeeds + "test_generate_dataset_gr1t2_nutpour.py": 10000, # This test runs generation until one succeeds + "test_generate_dataset_gr1t2_pickplace.py": 10000, # This test runs generation until one succeeds + "test_generate_dataset_skillgen.py": 10000, # This test runs generation for skillgen "test_pink_ik.py": 1000, # This test runs through all the pink IK environments through various motions "test_environments_training.py": ( - 6000 + 10000 ), # This test runs through training for several environments and compares thresholds - "test_simulation_render_config.py": 500, - "test_operational_space.py": 500, + "test_environments_skillgen.py": 1000, + "test_environments_automate.py": 2500, + "test_teleop_environments.py": 5000, + "test_teleop_environments_with_stage_in_memory.py": 5000, + "test_cartpole_showcase_environments.py": 5000, + "test_cartpole_showcase_environments_with_stage_in_memory.py": 5000, + "test_simulation_render_config.py": 1000, + "test_operational_space.py": 1000, "test_non_headless_launch.py": 1000, # This test launches the app in non-headless mode and starts simulation - "test_rl_games_wrapper.py": 500, - "test_skrl_wrapper.py": 500, - "test_rsl_rl_wrapper.py": 500, - "test_sb3_wrapper.py": 500, + "test_rl_games_wrapper.py": 1000, + "test_rsl_rl_export_flow.py": 4000, + "test_rsl_rl_wrapper.py": 1000, + "test_sb3_wrapper.py": 1000, + "test_skrl_wrapper.py": 1000, + "test_action_state_recorder_term.py": 1000, + "test_manager_based_rl_env_obs_spaces.py": 1000, + "test_visuotactile_sensor.py": 1000, + "test_visuotactile_render.py": 1000, + "test_rigid_object_collection.py": 1500, + "test_outdated_sensor.py": 1000, + "test_multi_tiled_camera.py": 1000, + "test_multirotor.py": 1000, + "test_shadow_hand_vision_presets.py": 5000, + "test_environments_newton.py": 5000, + "test_surface_gripper.py": 3000, } """A dictionary of tests and their timeouts in seconds. Note: Any tests not listed here will use the default timeout. """ +CUROBO_PLANNER_TESTS = [ + "test_curobo_planner_franka.py", + "test_curobo_planner_cube_stack.py", + "test_pink_ik.py", +] +"""Tests for the cuRobo motion planner and Pink IK controller. + +These tests are skipped in the base image CI jobs and run in the dedicated +``test-curobo`` CI job which uses the cuRobo Docker image. +""" + +SKILLGEN_TESTS = [ + "test_generate_dataset_skillgen.py", + "test_environments_skillgen.py", + "test_environments_automate.py", +] +"""SkillGen and AutoMate environment tests. + +These tests are skipped in the base image CI jobs and run in the dedicated +``test-skillgen`` CI job which uses the cuRobo Docker image. +""" + +CUROBO_TESTS = [ + *CUROBO_PLANNER_TESTS, + *SKILLGEN_TESTS, +] +"""A list of tests that require cuRobo installation. + +These tests are skipped in the base image CI jobs and run separately in the +dedicated ``test-curobo`` and ``test-skillgen`` CI jobs which use the cuRobo +Docker image. +""" + +QUARANTINED_TESTS: list[str] = [] +"""A list of tests that are quarantined due to known instability. + +These tests are skipped in normal CI runs. When the ``test-quarantined`` +CI job is enabled (gated by the ``RUN_QUARANTINED_TESTS`` repository +variable), they run in a dedicated job where failures do not block PR +merges. The job is currently disabled. Add test filenames here to +quarantine them from regular CI. +""" + TESTS_TO_SKIP = [ # lab "test_argparser_launch.py", # app.close issue @@ -53,8 +121,13 @@ # lab_tasks "test_record_video.py", # Failing "test_tiled_camera_env.py", # Need to improve the logic + # curobo / skillgen - require cuRobo installation; run via test-curobo and test-skillgen CI jobs + *CUROBO_TESTS, + # quarantined tests - run in dedicated CI job that does not block PR merges + *QUARANTINED_TESTS, + "test_environments_training.py", # Long-running RL training test; runs in dedicated CI job ] -"""A list of tests to skip by run_tests.py""" +"""A list of tests to skip in CI (see conftest.py).""" TEST_RL_ENVS = [ # classic control diff --git a/tools/wheel_builder/.gitignore b/tools/wheel_builder/.gitignore new file mode 100644 index 000000000000..a007feab071f --- /dev/null +++ b/tools/wheel_builder/.gitignore @@ -0,0 +1 @@ +build/* diff --git a/tools/wheel_builder/build.sh b/tools/wheel_builder/build.sh new file mode 100755 index 000000000000..12e6a00e3abc --- /dev/null +++ b/tools/wheel_builder/build.sh @@ -0,0 +1,105 @@ +#!/bin/bash +set -e + +SELF_DIR="$(dirname "$(realpath "$0")")" +cd "$SELF_DIR/../.." + +VERSION=$(cat VERSION) +BUILD_DIR=$SELF_DIR/build/stage +DIST_DIR=$SELF_DIR/build/dist + +# Compose a PEP 440 local version when CI metadata is provided so the wheel is +# traceable to a specific build and commit. With both env vars set the version +# becomes e.g. "3.0.0+build123.abc1234" (build number is monotonic, sha slug +# pins the source). If either is missing, fall back to the plain VERSION so +# local dev builds stay simple. +WHEEL_BUILD_NUMBER="${WHEEL_BUILD_NUMBER:-}" +WHEEL_SHA="${WHEEL_SHA:-}" +if [ -n "$WHEEL_BUILD_NUMBER" ] && [ -n "$WHEEL_SHA" ]; then + SHA_SLUG="${WHEEL_SHA:0:7}" + WHEEL_VERSION="${VERSION}+build${WHEEL_BUILD_NUMBER}.${SHA_SLUG}" +else + WHEEL_VERSION="${VERSION}" +fi +echo "[WHEEL VERSION] $WHEEL_VERSION" + +# Platform tags matching the official IsaacLab wheel +PYTHON_TAG="${PYTHON_TAG:-cp312}" +ABI_TAG="${ABI_TAG:-cp312}" +# Auto-detect platform +ARCH=$(uname -m) +case "$ARCH" in + x86_64|AMD64) PLATFORM_TAG="${PLATFORM_TAG:-manylinux_2_35_x86_64}" ;; + aarch64|arm64) PLATFORM_TAG="${PLATFORM_TAG:-manylinux_2_35_aarch64}" ;; + *) PLATFORM_TAG="${PLATFORM_TAG:-linux_$ARCH}" ;; +esac + +rm -rf "$BUILD_DIR" "$DIST_DIR" +mkdir -p "$BUILD_DIR/src/isaaclab" + +# 1. Copy inventory (same as python_packages.toml inventory.includes.all) +cp -r apps "$BUILD_DIR/src/isaaclab/" +cp -r source "$BUILD_DIR/src/isaaclab/" + +# Ensure apps/ is discovered as a Python sub-package (it has no __init__.py) +find "$BUILD_DIR/src/isaaclab/apps" -type d -exec touch {}/__init__.py \; + +# Promote sub-packages (isaaclab_assets, isaaclab_rl, etc.) to top-level +# so they are importable as e.g. `import isaaclab_assets`. +# Each extension has the structure: source/isaaclab_FOO/isaaclab_FOO/ (Python pkg) +# plus sibling dirs like config/, data/. The __init__.py references ../config etc. +# We copy the inner Python package to src/ and also copy sibling resource dirs +# (config, data) into it so the relative-path lookups in __init__.py work. +for ext_dir in "$BUILD_DIR"/src/isaaclab/source/isaaclab_*; do + pkg=$(basename "$ext_dir") + inner="$ext_dir/$pkg" + if [ -d "$inner" ] && [ -f "$inner/__init__.py" ]; then + cp -r "$inner" "$BUILD_DIR/src/$pkg" + # Copy resource dirs (config/, data/) into the Python package + for res_dir in config data; do + if [ -d "$ext_dir/$res_dir" ]; then + cp -r "$ext_dir/$res_dir" "$BUILD_DIR/src/$pkg/$res_dir" + fi + done + # Patch EXT_DIR: change '../' to '.' so __init__.py finds config/ inside + # the package dir rather than one level up. + sed -i 's|os\.path\.join(os\.path\.dirname(__file__), "\.\./"|os.path.join(os.path.dirname(__file__), ""|g' \ + "$BUILD_DIR/src/$pkg/__init__.py" + # Remove the original from inside the isaaclab bundle to avoid duplication + rm -rf "$ext_dir" + fi +done + +# Clean build artifacts that shouldn't be in the wheel +find "$BUILD_DIR/src" -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true +find "$BUILD_DIR/src" -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true +find "$BUILD_DIR/src" -name "*.pyc" -delete 2>/dev/null || true + +# 2. Copy the custom res __init__.py and __main__.py +cp "$SELF_DIR/res/__init__.py" "$BUILD_DIR/src/isaaclab/" +cp "$SELF_DIR/res/__main__.py" "$BUILD_DIR/src/isaaclab/" + +# 3. Generate pyproject.toml with dependencies from python_packages.toml +python3 "$SELF_DIR/gen_pyproject.py" "$SELF_DIR/res/python_packages.toml" "$BUILD_DIR/pyproject.toml" "$WHEEL_VERSION" + +# 4. Build the wheel +cd "$BUILD_DIR" +# Prefer --user to avoid polluting system Python; fall back to --break-system-packages +# for environments where --user is unsupported (e.g. Docker, ephemeral CI runners). +python3 -m pip install --user build wheel 2>/dev/null || python3 -m pip install --break-system-packages build wheel +python3 -m build --wheel --outdir "$DIST_DIR/" + +# 5. Retag the wheel to match official platform tags +# cd "$DIST_DIR" +# GENERIC_WHL=$(ls isaaclab-*.whl) +# echo "Retagging $GENERIC_WHL -> $PYTHON_TAG-$ABI_TAG-$PLATFORM_TAG" +# python3 -m wheel tags --python-tag "$PYTHON_TAG" --abi-tag "$ABI_TAG" --platform-tag "$PLATFORM_TAG" "$GENERIC_WHL" +# # Remove the generic wheel (wheel tags creates a new file) +# TAGGED_WHL=$(ls isaaclab-*"$PLATFORM_TAG"*.whl 2>/dev/null) +# if [ "$GENERIC_WHL" != "$TAGGED_WHL" ] && [ -n "$TAGGED_WHL" ]; then +# rm -f "$GENERIC_WHL" +# fi + +echo "" +echo "[WHEEL BUILT]" +ls -lh $DIST_DIR/isaaclab-*.whl diff --git a/tools/wheel_builder/gen_pyproject.py b/tools/wheel_builder/gen_pyproject.py new file mode 100644 index 000000000000..a8cf3fdd8d5d --- /dev/null +++ b/tools/wheel_builder/gen_pyproject.py @@ -0,0 +1,82 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Generate pyproject.toml for the isaaclab wheel from python_packages.toml.""" + +import sys + +import tomllib + +if len(sys.argv) != 4: + print(f"Usage: {sys.argv[0]} ", file=sys.stderr) + sys.exit(1) + +packages_toml_path = sys.argv[1] +output_path = sys.argv[2] +version = sys.argv[3] + +with open(packages_toml_path, "rb") as f: + data = tomllib.load(f) +pkg = data["isaaclab"] + +# Collect dependencies (deduplicated, preserving order) +raw_deps = pkg["pyproject"]["dependencies"]["all"] +seen = set() +deps = [] +for d in raw_deps: + # Normalize for dedup: lowercase package name (before any version specifier) + key = d.split(">")[0].split("<")[0].split("=")[0].split("!")[0].split("[")[0].strip().lower() + if key not in seen: + seen.add(key) + deps.append(d) + +# Collect optional dependencies +opt_deps = {} +for entry in pkg["pyproject"]["optional-dependencies"]["all"]: + # Each entry is a dict like {"name": ["dep1", "dep2"]} + for name, dep_list in entry.items(): + opt_deps[name] = dep_list + +# Write pyproject.toml +lines = [] +lines.append("[build-system]") +lines.append('requires = ["setuptools >= 70.0, < 82.0.0"]') +lines.append('build-backend = "setuptools.build_meta"') +lines.append("") +lines.append("[tool.setuptools]") +lines.append("include-package-data = true") +lines.append('package-dir = {"" = "src"}') +lines.append("") +lines.append("[tool.setuptools.packages.find]") +lines.append('where = ["src"]') +lines.append("") +lines.append("# Include all non-.py files (kit apps, toml configs, usd, yaml, etc.)") +lines.append("[tool.setuptools.package-data]") +lines.append('"*" = ["**/*"]') +lines.append("") +lines.append("[project]") +lines.append('name = "isaaclab"') +lines.append(f'version = "{version}"') +lines.append('requires-python = ">=3.12"') +lines.append('description = "Isaac Lab"') +lines.append('license = {text = "BSD-3-Clause"}') +lines.append("dependencies = [") +for d in deps: + lines.append(f' "{d}",') +lines.append("]") +lines.append("") +lines.append("[project.scripts]") +lines.append('isaaclab = "isaaclab:main"') +lines.append("") +lines.append("[project.optional-dependencies]") +for name, dep_list in opt_deps.items(): + formatted = ", ".join(f'"{d}"' for d in dep_list) + lines.append(f"{name} = [{formatted}]") +lines.append("") + +with open(output_path, "w") as f: + f.write("\n".join(lines) + "\n") + +print(f"Generated {output_path} with {len(deps)} dependencies and {len(opt_deps)} optional groups") diff --git a/tools/wheel_builder/res/__init__.py b/tools/wheel_builder/res/__init__.py new file mode 100644 index 000000000000..bd162c1edd6c --- /dev/null +++ b/tools/wheel_builder/res/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import os +import sys +from importlib.metadata import version +from importlib.util import find_spec + +__version__ = version("isaaclab") + +# Extend the package search path so subpackages (app/, envs/, etc.) in the +# nested source tree are importable as isaaclab.app, isaaclab.envs, etc. +__path__.append(os.path.join(os.path.dirname(__file__), "source", "isaaclab", "isaaclab")) + +# TODO(myurasov-nv): bootstrap_kernel() is ported from the internal GitLab wheel builder +# for backwards compatibility. It is not called currently, but may be needed if Isaac Sim +# requires explicit kernel bootstrapping before use. Remove once confirmed unnecessary. +def bootstrap_kernel(): + # Isaac Lab path + isaaclab_path = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) + + # bootstrap kernel via Isaac Sim + if find_spec("isaacsim") is not None: + import isaacsim + + # log info + if find_spec("carb") is not None: + import carb + carb.log_info(f"Isaac Lab path: {isaaclab_path}") + +def main(): + """Entry point for the ``isaaclab`` console script (python -m isaaclab).""" + from isaaclab.__main__ import main as _main + + sys.exit(_main()) + + +if __name__ == "__main__": + bootstrap_kernel() + main() diff --git a/tools/wheel_builder/res/__main__.py b/tools/wheel_builder/res/__main__.py new file mode 100644 index 000000000000..1bf1917d3012 --- /dev/null +++ b/tools/wheel_builder/res/__main__.py @@ -0,0 +1,155 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import argparse +import os +import re +import sys +import textwrap + +import tomllib + +VSCODE_SETTINGS_TEMPLATE = """ +{ + "editor.rulers": [120], + + // Enables python language server (seems to work slightly better than jedi) + "python.languageServer": "Pylance", + "python.jediEnabled": false, + + // Those paths are automatically filled by isaaclab (see: 'python -m isaaclab --help') + "python.defaultInterpreterPath": "PYTHON.DEFAULTINTERPRETERPATH", + "python.analysis.extraPaths": [ + PYTHON.ANALYSIS.EXTRAPATHS + ], + + // Use "black" as a formatter + "python.formatting.provider": "black", + "python.formatting.blackArgs": ["--line-length", "120"], + + // Use "flake8" for linting + "python.linting.pylintEnabled": false, + "python.linting.flake8Enabled": true, +} +""" + + +def generate_vscode_settings(): + def _mock_python_modules(ext_path: str, ext_name: str) -> None: + # parse config/extension.toml + cprint(f" |-- Parsing extension config ({ext_path})") + config_path = os.path.join(ext_path, "config", "extension.toml") + try: + with open(config_path, "rb") as f: + config = tomllib.load(f) + except Exception as e: + cprint(f" | |-- [Warning] {e}") + return + # get python modules + for item in config.get("python", {}).get("module", []): + if list(item.keys()) == ["name"]: + # skip tests + if item.get("name", "").endswith(".tests"): + continue + # mock __init__.py for each submodule (if not exists) + submodule_path = ext_path + for submodule in item.get("name", "").split("."): + init_path = os.path.join(submodule_path, "__init__.py") + if not os.path.isfile(init_path): + try: + cprint(f" |-- Mocking {init_path}") + with open(init_path, "w") as f: + f.write("# Generated by 'isaaclab' package") + except Exception as e: + cprint(f" | |-- [Warning] {e}") + continue + submodule_path = os.path.join(submodule_path, submodule) + + def _get_paths(base_path: str, mock_python_modules: bool = False) -> list[str]: + paths = [] + if os.path.isdir(base_path): + for folder in os.listdir(base_path): + folder_path = os.path.join(base_path, folder) + if os.path.isdir(folder_path): + paths.append(folder_path) + cprint(f"Registering extension: {folder_path}") + if mock_python_modules: + _mock_python_modules(folder_path, re.split(r"-\d+", folder)[0]) + return paths + + try: + import omni.kit_app # importing 'omni.kit_app' will bootstrap kernel + + kit_path = os.path.dirname(os.path.abspath(os.path.realpath(omni.kit_app.__file__))) + except ModuleNotFoundError: + print("Unable to find 'omniverse-kit' package") + # exit() + try: + import isaacsim + + isaacsim_path = os.path.dirname(os.path.abspath(os.path.realpath(isaacsim.__file__))) + except ModuleNotFoundError: + print("Unable to find 'isaacsim' package") + # exit() + + cwd = os.getcwd() + vscode_settings_path = os.path.join(cwd, ".vscode", "settings.json") + # check if .vscode/settings.json exists + if os.path.exists(vscode_settings_path): + print(f"VS Code settings already exists: {vscode_settings_path}") + if input("Overwrite? (y/N): ").lower() not in ["y", "yes"]: + print("Cancelled: VS Code settings not overwritten") + return + + # get extensions paths + extensions_paths = [] + # - omniverse-kit + folder_path = os.path.join(kit_path, "kernel", "py") + if os.path.isdir(folder_path): + extensions_paths.append(folder_path) + for folder in ["exts", "extscore"]: + extensions_paths.extend(_get_paths(os.path.join(kit_path, folder), mock_python_modules=True)) + # - isaacsim + for folder in ["exts", "extscache", "extsDeprecated", "extsUser"]: + extensions_paths.extend(_get_paths(os.path.join(isaacsim_path, folder), mock_python_modules=True)) + # - isaaclab + isaaclab_path = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) + for folder in ["source"]: + extensions_paths.extend(_get_paths(os.path.join(isaaclab_path, folder), mock_python_modules=True)) + + # update 'python.defaultInterpreterPath' + template = VSCODE_SETTINGS_TEMPLATE[:] + template = template.replace("PYTHON.DEFAULTINTERPRETERPATH", sys.executable) + + # update 'python.analysis.extraPaths' + content = "\n".join([f'"{path}",' for path in extensions_paths]) + content = textwrap.indent(content, prefix=" " * 8)[8:] + template = template.replace("PYTHON.ANALYSIS.EXTRAPATHS", content) + + # create .vscode/settings.json + os.makedirs(os.path.join(cwd, ".vscode"), exist_ok=True) + with open(vscode_settings_path, "w") as f: + f.write(template) + print("VS Code settings generated at", vscode_settings_path) + + +def main(): + """Entry point for ``isaaclab`` console script and ``python -m isaaclab``.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--generate-vscode-settings", default=False, action="store_true", help="Generate VS Code settings." + ) + parser.add_argument("--verbose", default=False, action="store_true", help="Verbose output.") + args, _ = parser.parse_known_args() + + global cprint + cprint = print if args.verbose else lambda *args, **kwargs: None + + if args.generate_vscode_settings: + generate_vscode_settings() + + +if __name__ == "__main__": + main() diff --git a/tools/wheel_builder/res/python_packages.toml b/tools/wheel_builder/res/python_packages.toml new file mode 100644 index 000000000000..1676fdc8f905 --- /dev/null +++ b/tools/wheel_builder/res/python_packages.toml @@ -0,0 +1,120 @@ +[isaaclab] +# pyproject.toml +pyproject.description = "Isaac Lab" +pyproject.keywords = ["nvidia", "omniverse", "isaacsim", "isaaclab"] +pyproject.dependencies.all = [ + # ================================================================================ + # https://github.com/isaac-sim/IsaacLab/blob/main/source/isaaclab/setup.py + # ================================================================================ + # generic + "numpy>=2", + "torch>=2.10", + "onnx>=1.18.0", # 1.16.2 throws access violation on Windows + "prettytable==3.3.0", + "toml", + # devices + "hidapi==0.14.0.post2", + # reinforcement learning + "gymnasium==1.2.0", + # procedural-generation + "trimesh", + "pyglet>=2.1.6", + # image processing + "transformers==4.57.6", + "einops", # needed for transformers, doesn't always auto-install + "warp-lang==1.12.0", + "matplotlib>=3.10.3", + # make sure this is consistent with isaac sim version + "pillow==12.1.1", + "botocore", + # livestream + "starlette==0.49.1", # TODO: update starlette once Isaac Lab be released with Isaac Sim 6.0.0 + "omniverseclient==2.71.1.7015", + # testing + "pytest", + "pytest-mock", + "junitparser", + "flatdict==4.0.0", + "flaky", + "packaging", + # visualizers + "imgui-bundle==1.92.4", + "rerun-sdk>=0.29.0", + "viser>=1.0.16", + "typing_extensions==4.12.2", + "lazy_loader>=0.4", + "pin ; platform_system == 'Linux'", + "pin-pink>=2.3.6 ; platform_system == 'Linux'", + # ================================================================================ + # https://github.com/isaac-sim/IsaacLab/blob/main/source/isaaclab_rl/setup.py + # ================================================================================ + # generic + "numpy>=2", + "torch>=2.10", + "torchvision>=0.25.0", # ensure compatibility with torch 2.10 + "protobuf>=4.25.8,!=5.26.0", + # configuration management + "hydra-core", + # data collection + "h5py", + # basic logger + "tensorboard", + # video recording + "moviepy", + "packaging<24", + "tqdm==4.67.1", + # ================================================================================ + # https://github.com/isaac-sim/IsaacLab/blob/main/source/isaaclab_tasks/setup.py + # ================================================================================ + # generic + "numpy>=2", + "torch>=2.10", + "torchvision>=0.25.0", # ensure compatibility with torch 2.10 + "protobuf>=4.25.8,!=5.26.0", + # basic logger + "tensorboard", + "numba>=0.63.1", +] +pyproject.optional-dependencies.all = [ + # Isaac Sim + { "isaacsim" = ["isaacsim[all,extscache]==6.0.0.*"] }, + # ================================================================================ + # https://github.com/isaac-sim/IsaacLab/blob/main/source/isaaclab_newton/setup.py + # ================================================================================ + { "newton" = [ + "warp-lang==1.12.0", + "mujoco==3.6.0", + "mujoco-warp==3.6.0", + "newton @ git+https://github.com/newton-physics/newton.git@a27277ed49d6f307b8a1e4c394be7e1d14965a62", + "PyOpenGL-accelerate==3.1.10" + ] }, + # ================================================================================ + # https://github.com/isaac-sim/IsaacLab/blob/main/source/isaaclab_rl/setup.py + # ================================================================================ + # RL libraries + { "sb3" = ["stable-baselines3>=2.6", "tqdm", "rich"] }, + { "skrl" = ["skrl>=1.4.3"] }, + # { "rl-games" = ["rl-games==1.6.1"] }, # TODO: re-enable when rl-games Python package supports Python 3.11 + # { "rl_games" = ["rl-games==1.6.1"] }, # TODO: re-enable when rl-games Python package supports Python 3.11 + { "rsl-rl" = ["rsl-rl-lib==3.1.2", "onnxscript>=0.5"] }, + { "rsl_rl" = ["rsl-rl-lib==3.1.2", "onnxscript>=0.5"] }, + # RL libraries (all) + { "all" = [ + "stable-baselines3>=2.6", + "tqdm", + "rich", + "skrl>=1.4.3", + # "rl-games==1.6.1", # TODO: re-enable when rl-games Python package supports Python 3.11 + "rsl-rl-lib==3.1.2", + "onnxscript>=0.5", + ] }, +] +# inventory (relative to the `source_folder` config) +inventory.includes.all = [ + # Isaac Lab + "apps", + "source", + # python package/module + [ "../source/python_packages/isaaclab/__init__.py", "." ], + [ "../source/python_packages/isaaclab/__main__.py", "." ], +]